diff --git a/go.mod b/go.mod
index c45e36236b..710e854818 100644
--- a/go.mod
+++ b/go.mod
@@ -4,7 +4,7 @@ go 1.17
 
 require (
 	github.com/client9/misspell v0.3.4
-	github.com/golangci/golangci-lint v1.41.1
+	github.com/golangci/golangci-lint v1.56.1
 	github.com/google/go-github/v57 v57.0.0
 	github.com/google/uuid v1.6.0
 	github.com/hashicorp/terraform-plugin-sdk v1.17.2
@@ -16,53 +16,74 @@ require (
 )
 
 require (
-	4d63.com/gochecknoglobals v0.1.0 // indirect
-	cloud.google.com/go v0.110.2 // indirect
-	cloud.google.com/go/compute v1.20.1 // indirect
+	4d63.com/gocheckcompilerdirectives v1.2.1 // indirect
+	4d63.com/gochecknoglobals v0.2.1 // indirect
+	cloud.google.com/go v0.110.10 // indirect
+	cloud.google.com/go/compute v1.23.3 // indirect
 	cloud.google.com/go/compute/metadata v0.2.3 // indirect
-	cloud.google.com/go/iam v0.13.0 // indirect
-	cloud.google.com/go/storage v1.29.0 // indirect
-	github.com/BurntSushi/toml v1.2.1 // indirect
+	cloud.google.com/go/iam v1.1.5 // indirect
+	cloud.google.com/go/storage v1.35.1 // indirect
+	github.com/4meepo/tagalign v1.3.3 // indirect
+	github.com/Abirdcfly/dupword v0.0.13 // indirect
+	github.com/Antonboom/errname v0.1.12 // indirect
+	github.com/Antonboom/nilnil v0.1.7 // indirect
+	github.com/Antonboom/testifylint v1.1.1 // indirect
+	github.com/BurntSushi/toml v1.3.2 // indirect
 	github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 // indirect
+	github.com/GaijinEntertainment/go-exhaustruct/v3 v3.2.0 // indirect
 	github.com/Masterminds/goutils v1.1.1 // indirect
 	github.com/Masterminds/semver v1.5.0 // indirect
 	github.com/Masterminds/sprig v2.22.0+incompatible // indirect
-	github.com/OpenPeeDeeP/depguard v1.1.1 // indirect
+	github.com/OpenPeeDeeP/depguard/v2 v2.2.0 // indirect
 	github.com/agext/levenshtein v1.2.2 // indirect
+	github.com/alecthomas/go-check-sumtype v0.1.4 // indirect
+	github.com/alexkohler/nakedret/v2 v2.0.2 // indirect
 	github.com/alexkohler/prealloc v1.0.0 // indirect
+	github.com/alingse/asasalint v0.0.11 // indirect
 	github.com/apparentlymart/go-cidr v1.1.0 // indirect
 	github.com/apparentlymart/go-textseg/v12 v12.0.0 // indirect
 	github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect
 	github.com/armon/go-radix v1.0.0 // indirect
-	github.com/ashanbrown/forbidigo v1.3.0 // indirect
+	github.com/ashanbrown/forbidigo v1.6.0 // indirect
 	github.com/ashanbrown/makezero v1.1.1 // indirect
 	github.com/aws/aws-sdk-go v1.44.122 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d // indirect
 	github.com/bgentry/speakeasy v0.1.0 // indirect
-	github.com/bkielbasa/cyclop v1.2.0 // indirect
-	github.com/bombsimon/wsl/v3 v3.3.0 // indirect
+	github.com/bkielbasa/cyclop v1.2.1 // indirect
+	github.com/blizzy78/varnamelen v0.8.0 // indirect
+	github.com/bombsimon/wsl/v4 v4.2.0 // indirect
+	github.com/breml/bidichk v0.2.7 // indirect
+	github.com/breml/errchkjson v0.3.6 // indirect
+	github.com/butuzov/ireturn v0.3.0 // indirect
+	github.com/butuzov/mirror v1.1.0 // indirect
+	github.com/catenacyber/perfsprint v0.6.0 // indirect
+	github.com/ccojocar/zxcvbn-go v1.0.1 // indirect
 	github.com/cespare/xxhash/v2 v2.2.0 // indirect
-	github.com/charithe/durationcheck v0.0.9 // indirect
-	github.com/chavacava/garif v0.0.0-20220630083739-93517212f375 // indirect
-	github.com/daixiang0/gci v0.2.9 // indirect
+	github.com/charithe/durationcheck v0.0.10 // indirect
+	github.com/chavacava/garif v0.1.0 // indirect
+	github.com/curioswitch/go-reassign v0.2.0 // indirect
+	github.com/daixiang0/gci v0.12.1 // indirect
 	github.com/davecgh/go-spew v1.1.1 // indirect
-	github.com/denis-tingajkin/go-header v0.4.2 // indirect
+	github.com/denis-tingaikin/go-header v0.4.3 // indirect
 	github.com/esimonov/ifshort v1.0.4 // indirect
-	github.com/ettle/strcase v0.1.1 // indirect
-	github.com/fatih/color v1.13.0 // indirect
+	github.com/ettle/strcase v0.2.0 // indirect
+	github.com/fatih/color v1.16.0 // indirect
 	github.com/fatih/structtag v1.2.0 // indirect
+	github.com/firefart/nonamedreturns v1.0.4 // indirect
 	github.com/fsnotify/fsnotify v1.5.4 // indirect
 	github.com/fzipp/gocyclo v0.6.0 // indirect
-	github.com/go-critic/go-critic v0.6.5 // indirect
-	github.com/go-toolsmith/astcast v1.0.0 // indirect
-	github.com/go-toolsmith/astcopy v1.0.2 // indirect
-	github.com/go-toolsmith/astequal v1.0.3 // indirect
-	github.com/go-toolsmith/astfmt v1.0.0 // indirect
-	github.com/go-toolsmith/astp v1.0.0 // indirect
-	github.com/go-toolsmith/strparse v1.0.0 // indirect
-	github.com/go-toolsmith/typep v1.0.2 // indirect
-	github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b // indirect
+	github.com/ghostiam/protogetter v0.3.4 // indirect
+	github.com/go-critic/go-critic v0.11.0 // indirect
+	github.com/go-toolsmith/astcast v1.1.0 // indirect
+	github.com/go-toolsmith/astcopy v1.1.0 // indirect
+	github.com/go-toolsmith/astequal v1.1.0 // indirect
+	github.com/go-toolsmith/astfmt v1.1.0 // indirect
+	github.com/go-toolsmith/astp v1.1.0 // indirect
+	github.com/go-toolsmith/strparse v1.1.0 // indirect
+	github.com/go-toolsmith/typep v1.1.0 // indirect
+	github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 // indirect
+	github.com/go-xmlfmt/xmlfmt v1.1.2 // indirect
 	github.com/gobwas/glob v0.2.3 // indirect
 	github.com/gofrs/flock v0.8.1 // indirect
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
@@ -70,23 +91,22 @@ require (
 	github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2 // indirect
 	github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a // indirect
 	github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe // indirect
-	github.com/golangci/gofmt v0.0.0-20220901101216-f2edd75033f2 // indirect
+	github.com/golangci/gofmt v0.0.0-20231018234816-f50ced29576e // indirect
 	github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0 // indirect
 	github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca // indirect
-	github.com/golangci/misspell v0.3.5 // indirect
-	github.com/golangci/revgrep v0.0.0-20220804021717-745bb2f7c2e6 // indirect
+	github.com/golangci/misspell v0.4.1 // indirect
+	github.com/golangci/revgrep v0.5.2 // indirect
 	github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4 // indirect
 	github.com/google/go-cmp v0.6.0 // indirect
 	github.com/google/go-querystring v1.1.0 // indirect
-	github.com/google/s2a-go v0.1.4 // indirect
-	github.com/googleapis/enterprise-certificate-proxy v0.2.3 // indirect
-	github.com/googleapis/gax-go/v2 v2.11.0 // indirect
-	github.com/gordonklaus/ineffassign v0.0.0-20210914165742-4cc7213b9bc8 // indirect
+	github.com/google/s2a-go v0.1.7 // indirect
+	github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect
+	github.com/googleapis/gax-go/v2 v2.12.0 // indirect
+	github.com/gordonklaus/ineffassign v0.1.0 // indirect
 	github.com/gostaticanalysis/analysisutil v0.7.1 // indirect
 	github.com/gostaticanalysis/comment v1.4.2 // indirect
 	github.com/gostaticanalysis/forcetypeassert v0.1.0 // indirect
 	github.com/gostaticanalysis/nilerr v0.1.1 // indirect
-	github.com/gostaticanalysis/testutil v0.4.0 // indirect
 	github.com/hashicorp/errwrap v1.0.0 // indirect
 	github.com/hashicorp/go-checkpoint v0.5.0 // indirect
 	github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
@@ -106,32 +126,38 @@ require (
 	github.com/hashicorp/terraform-plugin-test/v2 v2.2.1 // indirect
 	github.com/hashicorp/terraform-svchost v0.0.0-20200729002733-f050f53b9734 // indirect
 	github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d // indirect
+	github.com/hexops/gotextdiff v1.0.3 // indirect
 	github.com/huandu/xstrings v1.3.2 // indirect
 	github.com/imdario/mergo v0.3.12 // indirect
-	github.com/inconshreveable/mousetrap v1.0.1 // indirect
-	github.com/jgautheron/goconst v1.5.1 // indirect
+	github.com/inconshreveable/mousetrap v1.1.0 // indirect
+	github.com/jgautheron/goconst v1.7.0 // indirect
 	github.com/jingyugao/rowserrcheck v1.1.1 // indirect
 	github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af // indirect
+	github.com/jjti/go-spancheck v0.5.2 // indirect
 	github.com/jmespath/go-jmespath v0.4.0 // indirect
 	github.com/julz/importas v0.1.0 // indirect
-	github.com/kisielk/errcheck v1.6.2 // indirect
+	github.com/kisielk/errcheck v1.7.0 // indirect
 	github.com/kisielk/gotool v1.0.0 // indirect
+	github.com/kkHAIKE/contextcheck v1.1.4 // indirect
 	github.com/klauspost/compress v1.15.11 // indirect
 	github.com/kulti/thelper v0.6.3 // indirect
-	github.com/kunwardeep/paralleltest v1.0.6 // indirect
-	github.com/kyoh86/exportloopref v0.1.8 // indirect
+	github.com/kunwardeep/paralleltest v1.0.9 // indirect
+	github.com/kyoh86/exportloopref v0.1.11 // indirect
 	github.com/ldez/gomoddirectives v0.2.3 // indirect
-	github.com/ldez/tagliatelle v0.3.1 // indirect
+	github.com/ldez/tagliatelle v0.5.0 // indirect
+	github.com/leonklingele/grouper v1.1.1 // indirect
+	github.com/lufeee/execinquery v1.2.1 // indirect
+	github.com/macabu/inamedparam v0.1.3 // indirect
 	github.com/magiconair/properties v1.8.6 // indirect
-	github.com/maratori/testpackage v1.1.0 // indirect
-	github.com/matoous/godox v0.0.0-20210227103229-6504466cf951 // indirect
+	github.com/maratori/testableexamples v1.0.0 // indirect
+	github.com/maratori/testpackage v1.1.1 // indirect
+	github.com/matoous/godox v0.0.0-20230222163458-006bad1f9d26 // indirect
 	github.com/mattn/go-colorable v0.1.13 // indirect
-	github.com/mattn/go-isatty v0.0.16 // indirect
+	github.com/mattn/go-isatty v0.0.20 // indirect
 	github.com/mattn/go-runewidth v0.0.9 // indirect
 	github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
 	github.com/mbilski/exhaustivestruct v1.2.0 // indirect
-	github.com/mgechev/dots v0.0.0-20210922191527-e955255bf517 // indirect
-	github.com/mgechev/revive v1.2.4 // indirect
+	github.com/mgechev/revive v1.3.7 // indirect
 	github.com/mitchellh/cli v1.1.2 // indirect
 	github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db // indirect
 	github.com/mitchellh/copystructure v1.0.0 // indirect
@@ -140,81 +166,99 @@ require (
 	github.com/mitchellh/go-wordwrap v1.0.0 // indirect
 	github.com/mitchellh/mapstructure v1.5.0 // indirect
 	github.com/mitchellh/reflectwalk v1.0.1 // indirect
-	github.com/moricho/tparallel v0.2.1 // indirect
+	github.com/moricho/tparallel v0.3.1 // indirect
 	github.com/nakabonne/nestif v0.3.1 // indirect
-	github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354 // indirect
-	github.com/nishanths/exhaustive v0.8.3 // indirect
+	github.com/nishanths/exhaustive v0.12.0 // indirect
 	github.com/nishanths/predeclared v0.2.2 // indirect
+	github.com/nunnatsa/ginkgolinter v0.15.2 // indirect
 	github.com/oklog/run v1.0.0 // indirect
 	github.com/olekukonko/tablewriter v0.0.5 // indirect
 	github.com/pelletier/go-toml v1.9.5 // indirect
-	github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d // indirect
-	github.com/pkg/errors v0.9.1 // indirect
+	github.com/pelletier/go-toml/v2 v2.0.5 // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
-	github.com/polyfloyd/go-errorlint v1.0.5 // indirect
+	github.com/polyfloyd/go-errorlint v1.4.8 // indirect
 	github.com/posener/complete v1.2.3 // indirect
 	github.com/prometheus/client_golang v1.12.1 // indirect
 	github.com/prometheus/client_model v0.2.0 // indirect
 	github.com/prometheus/common v0.32.1 // indirect
 	github.com/prometheus/procfs v0.7.3 // indirect
-	github.com/quasilyte/go-ruleguard v0.3.18 // indirect
-	github.com/quasilyte/gogrep v0.0.0-20220828223005-86e4605de09f // indirect
-	github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95 // indirect
+	github.com/quasilyte/go-ruleguard v0.4.0 // indirect
+	github.com/quasilyte/gogrep v0.5.0 // indirect
+	github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727 // indirect
 	github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 // indirect
-	github.com/ryancurrah/gomodguard v1.2.4 // indirect
-	github.com/ryanrolds/sqlclosecheck v0.3.0 // indirect
-	github.com/sanposhiho/wastedassign/v2 v2.0.6 // indirect
-	github.com/securego/gosec/v2 v2.13.1 // indirect
+	github.com/ryancurrah/gomodguard v1.3.0 // indirect
+	github.com/ryanrolds/sqlclosecheck v0.5.1 // indirect
+	github.com/sanposhiho/wastedassign/v2 v2.0.7 // indirect
+	github.com/sashamelentyev/interfacebloat v1.1.0 // indirect
+	github.com/sashamelentyev/usestdlibvars v1.24.0 // indirect
+	github.com/securego/gosec/v2 v2.18.2 // indirect
 	github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c // indirect
 	github.com/shurcooL/graphql v0.0.0-20220606043923-3cf50f8a0a29 // indirect
-	github.com/sirupsen/logrus v1.9.0 // indirect
-	github.com/sonatard/noctx v0.0.1 // indirect
-	github.com/sourcegraph/go-diff v0.6.1 // indirect
-	github.com/spf13/afero v1.4.1 // indirect
+	github.com/sirupsen/logrus v1.9.3 // indirect
+	github.com/sivchari/containedctx v1.0.3 // indirect
+	github.com/sivchari/nosnakecase v1.7.0 // indirect
+	github.com/sivchari/tenv v1.7.1 // indirect
+	github.com/sonatard/noctx v0.0.2 // indirect
+	github.com/sourcegraph/go-diff v0.7.0 // indirect
+	github.com/spf13/afero v1.11.0 // indirect
 	github.com/spf13/cast v1.5.0 // indirect
-	github.com/spf13/cobra v1.6.0 // indirect
+	github.com/spf13/cobra v1.7.0 // indirect
 	github.com/spf13/jwalterweatherman v1.1.0 // indirect
 	github.com/spf13/pflag v1.0.5 // indirect
-	github.com/spf13/viper v1.7.1 // indirect
+	github.com/spf13/viper v1.12.0 // indirect
 	github.com/ssgreg/nlreturn/v2 v2.2.1 // indirect
+	github.com/stbenjam/no-sprintf-host-port v0.1.1 // indirect
 	github.com/stretchr/objx v0.5.0 // indirect
 	github.com/subosito/gotenv v1.4.1 // indirect
-	github.com/tdakkota/asciicheck v0.1.1 // indirect
-	github.com/tetafro/godot v1.4.11 // indirect
-	github.com/timakin/bodyclose v0.0.0-20210704033933-f49887972144 // indirect
-	github.com/tomarrell/wrapcheck/v2 v2.3.0 // indirect
+	github.com/t-yuki/gocover-cobertura v0.0.0-20180217150009-aaee18c8195c // indirect
+	github.com/tdakkota/asciicheck v0.2.0 // indirect
+	github.com/tetafro/godot v1.4.16 // indirect
+	github.com/timakin/bodyclose v0.0.0-20230421092635-574207250966 // indirect
+	github.com/timonwong/loggercheck v0.9.4 // indirect
+	github.com/tomarrell/wrapcheck/v2 v2.8.1 // indirect
 	github.com/tommy-muehle/go-mnd/v2 v2.5.1 // indirect
 	github.com/ulikunitz/xz v0.5.10 // indirect
-	github.com/ultraware/funlen v0.0.3 // indirect
-	github.com/ultraware/whitespace v0.0.5 // indirect
-	github.com/uudashr/gocognit v1.0.6 // indirect
+	github.com/ultraware/funlen v0.1.0 // indirect
+	github.com/ultraware/whitespace v0.1.0 // indirect
+	github.com/uudashr/gocognit v1.1.2 // indirect
 	github.com/vmihailenco/msgpack/v4 v4.3.12 // indirect
 	github.com/vmihailenco/tagparser v0.1.1 // indirect
+	github.com/xen0n/gosmopolitan v1.2.2 // indirect
+	github.com/yagipy/maintidx v1.0.0 // indirect
 	github.com/yeya24/promlinter v0.2.0 // indirect
+	github.com/ykadowak/zerologlint v0.1.5 // indirect
 	github.com/zclconf/go-cty v1.8.2 // indirect
 	github.com/zclconf/go-cty-yaml v1.0.2 // indirect
+	gitlab.com/bosi/decorder v0.4.1 // indirect
+	go-simpler.org/musttag v0.8.0 // indirect
+	go-simpler.org/sloglint v0.4.0 // indirect
 	go.opencensus.io v0.24.0 // indirect
-	golang.org/x/exp/typeparams v0.0.0-20220827204233-334a2380cb91 // indirect
-	golang.org/x/mod v0.8.0 // indirect
+	go.uber.org/atomic v1.7.0 // indirect
+	go.uber.org/multierr v1.6.0 // indirect
+	go.uber.org/zap v1.24.0 // indirect
+	golang.org/x/exp v0.0.0-20240103183307-be819d1f06fc // indirect
+	golang.org/x/exp/typeparams v0.0.0-20231219180239-dc181d75b848 // indirect
+	golang.org/x/mod v0.15.0 // indirect
 	golang.org/x/net v0.20.0 // indirect
-	golang.org/x/sync v0.2.0 // indirect
+	golang.org/x/sync v0.6.0 // indirect
 	golang.org/x/sys v0.16.0 // indirect
 	golang.org/x/text v0.14.0 // indirect
-	golang.org/x/tools v0.6.0 // indirect
+	golang.org/x/time v0.5.0 // indirect
+	golang.org/x/tools v0.17.0 // indirect
 	golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
-	google.golang.org/api v0.126.0 // indirect
+	google.golang.org/api v0.152.0 // indirect
 	google.golang.org/appengine v1.6.7 // indirect
-	google.golang.org/genproto v0.0.0-20230530153820-e85fd2cbaebc // indirect
-	google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc // indirect
-	google.golang.org/grpc v1.56.3 // indirect
+	google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17 // indirect
+	google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17 // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f // indirect
+	google.golang.org/grpc v1.59.0 // indirect
 	google.golang.org/protobuf v1.31.0 // indirect
 	gopkg.in/ini.v1 v1.67.0 // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
-	honnef.co/go/tools v0.3.3 // indirect
-	mvdan.cc/gofumpt v0.4.0 // indirect
+	honnef.co/go/tools v0.4.6 // indirect
+	mvdan.cc/gofumpt v0.6.0 // indirect
 	mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed // indirect
 	mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b // indirect
-	mvdan.cc/unparam v0.0.0-20220706161116-678bad134442 // indirect
+	mvdan.cc/unparam v0.0.0-20240104100049-c549a3470d14 // indirect
 )
diff --git a/go.sum b/go.sum
index 405481ef09..81de7a5ad4 100644
--- a/go.sum
+++ b/go.sum
@@ -1,6 +1,7 @@
-4d63.com/gochecknoglobals v0.0.0-20201008074935-acfc0b28355a/go.mod h1:wfdC5ZjKSPr7CybKEcgJhUOgeAQW1+7WcyK8OvUilfo=
-4d63.com/gochecknoglobals v0.1.0 h1:zeZSRqj5yCg28tCkIV/z/lWbwvNm5qnKVS15PI8nhD0=
-4d63.com/gochecknoglobals v0.1.0/go.mod h1:wfdC5ZjKSPr7CybKEcgJhUOgeAQW1+7WcyK8OvUilfo=
+4d63.com/gocheckcompilerdirectives v1.2.1 h1:AHcMYuw56NPjq/2y615IGg2kYkBdTvOaojYCBcRE7MA= +4d63.com/gocheckcompilerdirectives v1.2.1/go.mod h1:yjDJSxmDTtIHHCqX0ufRYZDL6vQtMG7tJdKVeWwsqvs= +4d63.com/gochecknoglobals v0.2.1 h1:1eiorGsgHOFOuoOiJDy2psSrQbRdIHrlge0IJIkUgDc= +4d63.com/gochecknoglobals v0.2.1/go.mod h1:KRE8wtJB3CXCsb1xy421JfTHIIbmT3U5ruxw2Qu8fSU= bitbucket.org/creachadair/shell v0.0.6/go.mod h1:8Qqi/cYk7vPnsOePHroKXDJYmb5x7ENhtiFtfZq8K+M= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= @@ -41,35 +42,74 @@ cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRY cloud.google.com/go v0.105.0/go.mod h1:PrLgOJNe5nfE9UMxKxgXj4mD3voiP+YQ6gdt6KMFOKM= cloud.google.com/go v0.107.0/go.mod h1:wpc2eNrD7hXUTy8EKS10jkxpZBjASrORK7goS+3YX2I= cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY= -cloud.google.com/go v0.110.2 h1:sdFPBr6xG9/wkBbfhmUz/JmZC7X6LavQgcrVINrKiVA= cloud.google.com/go v0.110.2/go.mod h1:k04UEeEtb6ZBRTv3dZz4CeJC3jKGxyhl0sAiVVquxiw= +cloud.google.com/go v0.110.4/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI= +cloud.google.com/go v0.110.6/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI= +cloud.google.com/go v0.110.7/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI= +cloud.google.com/go v0.110.8/go.mod h1:Iz8AkXJf1qmxC3Oxoep8R1T36w8B92yU29PcBhHO5fk= +cloud.google.com/go v0.110.9/go.mod h1:rpxevX/0Lqvlbc88b7Sc1SPNdyK1riNBTUU6JXhYNpM= +cloud.google.com/go v0.110.10 h1:LXy9GEO+timppncPIAZoOj3l58LIU9k+kn48AN7IO3Y= +cloud.google.com/go v0.110.10/go.mod h1:v1OoFqYxiBkUrruItNM3eT4lLByNjxmJSV/xDKJNnic= cloud.google.com/go/accessapproval v1.4.0/go.mod h1:zybIuC3KpDOvotz59lFe5qxRZx6C75OtwbisN56xYB4= cloud.google.com/go/accessapproval v1.5.0/go.mod h1:HFy3tuiGvMdcd/u+Cu5b9NkO1pEICJ46IR82PoUdplw= cloud.google.com/go/accessapproval v1.6.0/go.mod h1:R0EiYnwV5fsRFiKZkPHr6mwyk2wxUJ30nL4j2pcFY2E= +cloud.google.com/go/accessapproval v1.7.1/go.mod h1:JYczztsHRMK7NTXb6Xw+dwbs/WnOJxbo/2mTI+Kgg68= +cloud.google.com/go/accessapproval v1.7.2/go.mod h1:/gShiq9/kK/h8T/eEn1BTzalDvk0mZxJlhfw0p+Xuc0= +cloud.google.com/go/accessapproval v1.7.3/go.mod h1:4l8+pwIxGTNqSf4T3ds8nLO94NQf0W/KnMNuQ9PbnP8= +cloud.google.com/go/accessapproval v1.7.4/go.mod h1:/aTEh45LzplQgFYdQdwPMR9YdX0UlhBmvB84uAmQKUc= cloud.google.com/go/accesscontextmanager v1.3.0/go.mod h1:TgCBehyr5gNMz7ZaH9xubp+CE8dkrszb4oK9CWyvD4o= cloud.google.com/go/accesscontextmanager v1.4.0/go.mod h1:/Kjh7BBu/Gh83sv+K60vN9QE5NJcd80sU33vIe2IFPE= cloud.google.com/go/accesscontextmanager v1.6.0/go.mod h1:8XCvZWfYw3K/ji0iVnp+6pu7huxoQTLmxAbVjbloTtM= cloud.google.com/go/accesscontextmanager v1.7.0/go.mod h1:CEGLewx8dwa33aDAZQujl7Dx+uYhS0eay198wB/VumQ= +cloud.google.com/go/accesscontextmanager v1.8.0/go.mod h1:uI+AI/r1oyWK99NN8cQ3UK76AMelMzgZCvJfsi2c+ps= +cloud.google.com/go/accesscontextmanager v1.8.1/go.mod h1:JFJHfvuaTC+++1iL1coPiG1eu5D24db2wXCDWDjIrxo= +cloud.google.com/go/accesscontextmanager v1.8.2/go.mod h1:E6/SCRM30elQJ2PKtFMs2YhfJpZSNcJyejhuzoId4Zk= +cloud.google.com/go/accesscontextmanager v1.8.3/go.mod h1:4i/JkF2JiFbhLnnpnfoTX5vRXfhf9ukhU1ANOTALTOQ= +cloud.google.com/go/accesscontextmanager v1.8.4/go.mod h1:ParU+WbMpD34s5JFEnGAnPBYAgUHozaTmDJU7aCU9+M= cloud.google.com/go/aiplatform v1.22.0/go.mod h1:ig5Nct50bZlzV6NvKaTwmplLLddFx0YReh9WfTO5jKw= cloud.google.com/go/aiplatform v1.24.0/go.mod h1:67UUvRBKG6GTayHKV8DBv2RtR1t93YRu5B1P3x99mYY= 
cloud.google.com/go/aiplatform v1.27.0/go.mod h1:Bvxqtl40l0WImSb04d0hXFU7gDOiq9jQmorivIiWcKg= cloud.google.com/go/aiplatform v1.35.0/go.mod h1:7MFT/vCaOyZT/4IIFfxH4ErVg/4ku6lKv3w0+tFTgXQ= cloud.google.com/go/aiplatform v1.36.1/go.mod h1:WTm12vJRPARNvJ+v6P52RDHCNe4AhvjcIZ/9/RRHy/k= cloud.google.com/go/aiplatform v1.37.0/go.mod h1:IU2Cv29Lv9oCn/9LkFiiuKfwrRTq+QQMbW+hPCxJGZw= +cloud.google.com/go/aiplatform v1.45.0/go.mod h1:Iu2Q7sC7QGhXUeOhAj/oCK9a+ULz1O4AotZiqjQ8MYA= +cloud.google.com/go/aiplatform v1.48.0/go.mod h1:Iu2Q7sC7QGhXUeOhAj/oCK9a+ULz1O4AotZiqjQ8MYA= +cloud.google.com/go/aiplatform v1.50.0/go.mod h1:IRc2b8XAMTa9ZmfJV1BCCQbieWWvDnP1A8znyz5N7y4= +cloud.google.com/go/aiplatform v1.51.0/go.mod h1:IRc2b8XAMTa9ZmfJV1BCCQbieWWvDnP1A8znyz5N7y4= +cloud.google.com/go/aiplatform v1.51.1/go.mod h1:kY3nIMAVQOK2XDqDPHaOuD9e+FdMA6OOpfBjsvaFSOo= +cloud.google.com/go/aiplatform v1.51.2/go.mod h1:hCqVYB3mY45w99TmetEoe8eCQEwZEp9WHxeZdcv9phw= +cloud.google.com/go/aiplatform v1.52.0/go.mod h1:pwZMGvqe0JRkI1GWSZCtnAfrR4K1bv65IHILGA//VEU= cloud.google.com/go/analytics v0.11.0/go.mod h1:DjEWCu41bVbYcKyvlws9Er60YE4a//bK6mnhWvQeFNI= cloud.google.com/go/analytics v0.12.0/go.mod h1:gkfj9h6XRf9+TS4bmuhPEShsh3hH8PAZzm/41OOhQd4= cloud.google.com/go/analytics v0.17.0/go.mod h1:WXFa3WSym4IZ+JiKmavYdJwGG/CvpqiqczmL59bTD9M= cloud.google.com/go/analytics v0.18.0/go.mod h1:ZkeHGQlcIPkw0R/GW+boWHhCOR43xz9RN/jn7WcqfIE= cloud.google.com/go/analytics v0.19.0/go.mod h1:k8liqf5/HCnOUkbawNtrWWc+UAzyDlW89doe8TtoDsE= +cloud.google.com/go/analytics v0.21.2/go.mod h1:U8dcUtmDmjrmUTnnnRnI4m6zKn/yaA5N9RlEkYFHpQo= +cloud.google.com/go/analytics v0.21.3/go.mod h1:U8dcUtmDmjrmUTnnnRnI4m6zKn/yaA5N9RlEkYFHpQo= +cloud.google.com/go/analytics v0.21.4/go.mod h1:zZgNCxLCy8b2rKKVfC1YkC2vTrpfZmeRCySM3aUbskA= +cloud.google.com/go/analytics v0.21.5/go.mod h1:BQtOBHWTlJ96axpPPnw5CvGJ6i3Ve/qX2fTxR8qWyr8= +cloud.google.com/go/analytics v0.21.6/go.mod h1:eiROFQKosh4hMaNhF85Oc9WO97Cpa7RggD40e/RBy8w= cloud.google.com/go/apigateway v1.3.0/go.mod h1:89Z8Bhpmxu6AmUxuVRg/ECRGReEdiP3vQtk4Z1J9rJk= cloud.google.com/go/apigateway v1.4.0/go.mod h1:pHVY9MKGaH9PQ3pJ4YLzoj6U5FUDeDFBllIz7WmzJoc= cloud.google.com/go/apigateway v1.5.0/go.mod h1:GpnZR3Q4rR7LVu5951qfXPJCHquZt02jf7xQx7kpqN8= +cloud.google.com/go/apigateway v1.6.1/go.mod h1:ufAS3wpbRjqfZrzpvLC2oh0MFlpRJm2E/ts25yyqmXA= +cloud.google.com/go/apigateway v1.6.2/go.mod h1:CwMC90nnZElorCW63P2pAYm25AtQrHfuOkbRSHj0bT8= +cloud.google.com/go/apigateway v1.6.3/go.mod h1:k68PXWpEs6BVDTtnLQAyG606Q3mz8pshItwPXjgv44Y= +cloud.google.com/go/apigateway v1.6.4/go.mod h1:0EpJlVGH5HwAN4VF4Iec8TAzGN1aQgbxAWGJsnPCGGY= cloud.google.com/go/apigeeconnect v1.3.0/go.mod h1:G/AwXFAKo0gIXkPTVfZDd2qA1TxBXJ3MgMRBQkIi9jc= cloud.google.com/go/apigeeconnect v1.4.0/go.mod h1:kV4NwOKqjvt2JYR0AoIWo2QGfoRtn/pkS3QlHp0Ni04= cloud.google.com/go/apigeeconnect v1.5.0/go.mod h1:KFaCqvBRU6idyhSNyn3vlHXc8VMDJdRmwDF6JyFRqZ8= +cloud.google.com/go/apigeeconnect v1.6.1/go.mod h1:C4awq7x0JpLtrlQCr8AzVIzAaYgngRqWf9S5Uhg+wWs= +cloud.google.com/go/apigeeconnect v1.6.2/go.mod h1:s6O0CgXT9RgAxlq3DLXvG8riw8PYYbU/v25jqP3Dy18= +cloud.google.com/go/apigeeconnect v1.6.3/go.mod h1:peG0HFQ0si2bN15M6QSjEW/W7Gy3NYkWGz7pFz13cbo= +cloud.google.com/go/apigeeconnect v1.6.4/go.mod h1:CapQCWZ8TCjnU0d7PobxhpOdVz/OVJ2Hr/Zcuu1xFx0= cloud.google.com/go/apigeeregistry v0.4.0/go.mod h1:EUG4PGcsZvxOXAdyEghIdXwAEi/4MEaoqLMLDMIwKXY= cloud.google.com/go/apigeeregistry v0.5.0/go.mod h1:YR5+s0BVNZfVOUkMa5pAR2xGd0A473vA5M7j247o1wM= cloud.google.com/go/apigeeregistry v0.6.0/go.mod 
h1:BFNzW7yQVLZ3yj0TKcwzb8n25CFBri51GVGOEUcgQsc= +cloud.google.com/go/apigeeregistry v0.7.1/go.mod h1:1XgyjZye4Mqtw7T9TsY4NW10U7BojBvG4RMD+vRDrIw= +cloud.google.com/go/apigeeregistry v0.7.2/go.mod h1:9CA2B2+TGsPKtfi3F7/1ncCCsL62NXBRfM6iPoGSM+8= +cloud.google.com/go/apigeeregistry v0.8.1/go.mod h1:MW4ig1N4JZQsXmBSwH4rwpgDonocz7FPBSw6XPGHmYw= +cloud.google.com/go/apigeeregistry v0.8.2/go.mod h1:h4v11TDGdeXJDJvImtgK2AFVvMIgGWjSb0HRnBSjcX8= cloud.google.com/go/apikeys v0.4.0/go.mod h1:XATS/yqZbaBK0HOssf+ALHp8jAlNHUgyfprvNcBIszU= cloud.google.com/go/apikeys v0.5.0/go.mod h1:5aQfwY4D+ewMMWScd3hm2en3hCj+BROlyrt3ytS7KLI= cloud.google.com/go/apikeys v0.6.0/go.mod h1:kbpXu5upyiAlGkKrJgQl8A0rKNNJ7dQ377pdroRSSi8= @@ -78,10 +118,18 @@ cloud.google.com/go/appengine v1.5.0/go.mod h1:TfasSozdkFI0zeoxW3PTBLiNqRmzraodC cloud.google.com/go/appengine v1.6.0/go.mod h1:hg6i0J/BD2cKmDJbaFSYHFyZkgBEfQrDg/X0V5fJn84= cloud.google.com/go/appengine v1.7.0/go.mod h1:eZqpbHFCqRGa2aCdope7eC0SWLV1j0neb/QnMJVWx6A= cloud.google.com/go/appengine v1.7.1/go.mod h1:IHLToyb/3fKutRysUlFO0BPt5j7RiQ45nrzEJmKTo6E= +cloud.google.com/go/appengine v1.8.1/go.mod h1:6NJXGLVhZCN9aQ/AEDvmfzKEfoYBlfB80/BHiKVputY= +cloud.google.com/go/appengine v1.8.2/go.mod h1:WMeJV9oZ51pvclqFN2PqHoGnys7rK0rz6s3Mp6yMvDo= +cloud.google.com/go/appengine v1.8.3/go.mod h1:2oUPZ1LVZ5EXi+AF1ihNAF+S8JrzQ3till5m9VQkrsk= +cloud.google.com/go/appengine v1.8.4/go.mod h1:TZ24v+wXBujtkK77CXCpjZbnuTvsFNT41MUaZ28D6vg= cloud.google.com/go/area120 v0.5.0/go.mod h1:DE/n4mp+iqVyvxHN41Vf1CR602GiHQjFPusMFW6bGR4= cloud.google.com/go/area120 v0.6.0/go.mod h1:39yFJqWVgm0UZqWTOdqkLhjoC7uFfgXRC8g/ZegeAh0= cloud.google.com/go/area120 v0.7.0/go.mod h1:a3+8EUD1SX5RUcCs3MY5YasiO1z6yLiNLRiFrykbynY= cloud.google.com/go/area120 v0.7.1/go.mod h1:j84i4E1RboTWjKtZVWXPqvK5VHQFJRF2c1Nm69pWm9k= +cloud.google.com/go/area120 v0.8.1/go.mod h1:BVfZpGpB7KFVNxPiQBuHkX6Ed0rS51xIgmGyjrAfzsg= +cloud.google.com/go/area120 v0.8.2/go.mod h1:a5qfo+x77SRLXnCynFWPUZhnZGeSgvQ+Y0v1kSItkh4= +cloud.google.com/go/area120 v0.8.3/go.mod h1:5zj6pMzVTH+SVHljdSKC35sriR/CVvQZzG/Icdyriw0= +cloud.google.com/go/area120 v0.8.4/go.mod h1:jfawXjxf29wyBXr48+W+GyX/f8fflxp642D/bb9v68M= cloud.google.com/go/artifactregistry v1.6.0/go.mod h1:IYt0oBPSAGYj/kprzsBjZ/4LnG/zOcHyFHjWPCi6SAQ= cloud.google.com/go/artifactregistry v1.7.0/go.mod h1:mqTOFOnGZx8EtSqK/ZWcsm/4U8B77rbcLP6ruDU2Ixk= cloud.google.com/go/artifactregistry v1.8.0/go.mod h1:w3GQXkJX8hiKN0v+at4b0qotwijQbYUqF2GWkZzAhC0= @@ -90,6 +138,11 @@ cloud.google.com/go/artifactregistry v1.11.1/go.mod h1:lLYghw+Itq9SONbCa1YWBoWs1 cloud.google.com/go/artifactregistry v1.11.2/go.mod h1:nLZns771ZGAwVLzTX/7Al6R9ehma4WUEhZGWV6CeQNQ= cloud.google.com/go/artifactregistry v1.12.0/go.mod h1:o6P3MIvtzTOnmvGagO9v/rOjjA0HmhJ+/6KAXrmYDCI= cloud.google.com/go/artifactregistry v1.13.0/go.mod h1:uy/LNfoOIivepGhooAUpL1i30Hgee3Cu0l4VTWHUC08= +cloud.google.com/go/artifactregistry v1.14.1/go.mod h1:nxVdG19jTaSTu7yA7+VbWL346r3rIdkZ142BSQqhn5E= +cloud.google.com/go/artifactregistry v1.14.2/go.mod h1:Xk+QbsKEb0ElmyeMfdHAey41B+qBq3q5R5f5xD4XT3U= +cloud.google.com/go/artifactregistry v1.14.3/go.mod h1:A2/E9GXnsyXl7GUvQ/2CjHA+mVRoWAXC0brg2os+kNI= +cloud.google.com/go/artifactregistry v1.14.4/go.mod h1:SJJcZTMv6ce0LDMUnihCN7WSrI+kBSFV0KIKo8S8aYU= +cloud.google.com/go/artifactregistry v1.14.6/go.mod h1:np9LSFotNWHcjnOgh8UVK0RFPCTUGbO0ve3384xyHfE= cloud.google.com/go/asset v1.5.0/go.mod h1:5mfs8UvcM5wHhqtSv8J1CtxxaQq3AdBxxQi2jGW/K4o= cloud.google.com/go/asset v1.7.0/go.mod 
h1:YbENsRK4+xTiL+Ofoj5Ckf+O17kJtgp3Y3nn4uzZz5s= cloud.google.com/go/asset v1.8.0/go.mod h1:mUNGKhiqIdbr8X7KNayoYvyc4HbbFO9URsjbytpUaW0= @@ -98,27 +151,56 @@ cloud.google.com/go/asset v1.10.0/go.mod h1:pLz7uokL80qKhzKr4xXGvBQXnzHn5evJAEAt cloud.google.com/go/asset v1.11.1/go.mod h1:fSwLhbRvC9p9CXQHJ3BgFeQNM4c9x10lqlrdEUYXlJo= cloud.google.com/go/asset v1.12.0/go.mod h1:h9/sFOa4eDIyKmH6QMpm4eUK3pDojWnUhTgJlk762Hg= cloud.google.com/go/asset v1.13.0/go.mod h1:WQAMyYek/b7NBpYq/K4KJWcRqzoalEsxz/t/dTk4THw= +cloud.google.com/go/asset v1.14.1/go.mod h1:4bEJ3dnHCqWCDbWJ/6Vn7GVI9LerSi7Rfdi03hd+WTQ= +cloud.google.com/go/asset v1.15.0/go.mod h1:tpKafV6mEut3+vN9ScGvCHXHj7FALFVta+okxFECHcg= +cloud.google.com/go/asset v1.15.1/go.mod h1:yX/amTvFWRpp5rcFq6XbCxzKT8RJUam1UoboE179jU4= +cloud.google.com/go/asset v1.15.2/go.mod h1:B6H5tclkXvXz7PD22qCA2TDxSVQfasa3iDlM89O2NXs= +cloud.google.com/go/asset v1.15.3/go.mod h1:yYLfUD4wL4X589A9tYrv4rFrba0QlDeag0CMcM5ggXU= cloud.google.com/go/assuredworkloads v1.5.0/go.mod h1:n8HOZ6pff6re5KYfBXcFvSViQjDwxFkAkmUFffJRbbY= cloud.google.com/go/assuredworkloads v1.6.0/go.mod h1:yo2YOk37Yc89Rsd5QMVECvjaMKymF9OP+QXWlKXUkXw= cloud.google.com/go/assuredworkloads v1.7.0/go.mod h1:z/736/oNmtGAyU47reJgGN+KVoYoxeLBoj4XkKYscNI= cloud.google.com/go/assuredworkloads v1.8.0/go.mod h1:AsX2cqyNCOvEQC8RMPnoc0yEarXQk6WEKkxYfL6kGIo= cloud.google.com/go/assuredworkloads v1.9.0/go.mod h1:kFuI1P78bplYtT77Tb1hi0FMxM0vVpRC7VVoJC3ZoT0= cloud.google.com/go/assuredworkloads v1.10.0/go.mod h1:kwdUQuXcedVdsIaKgKTp9t0UJkE5+PAVNhdQm4ZVq2E= +cloud.google.com/go/assuredworkloads v1.11.1/go.mod h1:+F04I52Pgn5nmPG36CWFtxmav6+7Q+c5QyJoL18Lry0= +cloud.google.com/go/assuredworkloads v1.11.2/go.mod h1:O1dfr+oZJMlE6mw0Bp0P1KZSlj5SghMBvTpZqIcUAW4= +cloud.google.com/go/assuredworkloads v1.11.3/go.mod h1:vEjfTKYyRUaIeA0bsGJceFV2JKpVRgyG2op3jfa59Zs= +cloud.google.com/go/assuredworkloads v1.11.4/go.mod h1:4pwwGNwy1RP0m+y12ef3Q/8PaiWrIDQ6nD2E8kvWI9U= cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0= cloud.google.com/go/automl v1.6.0/go.mod h1:ugf8a6Fx+zP0D59WLhqgTDsQI9w07o64uf/Is3Nh5p8= cloud.google.com/go/automl v1.7.0/go.mod h1:RL9MYCCsJEOmt0Wf3z9uzG0a7adTT1fe+aObgSpkCt8= cloud.google.com/go/automl v1.8.0/go.mod h1:xWx7G/aPEe/NP+qzYXktoBSDfjO+vnKMGgsApGJJquM= cloud.google.com/go/automl v1.12.0/go.mod h1:tWDcHDp86aMIuHmyvjuKeeHEGq76lD7ZqfGLN6B0NuU= +cloud.google.com/go/automl v1.13.1/go.mod h1:1aowgAHWYZU27MybSCFiukPO7xnyawv7pt3zK4bheQE= +cloud.google.com/go/automl v1.13.2/go.mod h1:gNY/fUmDEN40sP8amAX3MaXkxcqPIn7F1UIIPZpy4Mg= +cloud.google.com/go/automl v1.13.3/go.mod h1:Y8KwvyAZFOsMAPqUCfNu1AyclbC6ivCUF/MTwORymyY= +cloud.google.com/go/automl v1.13.4/go.mod h1:ULqwX/OLZ4hBVfKQaMtxMSTlPx0GqGbWN8uA/1EqCP8= cloud.google.com/go/baremetalsolution v0.3.0/go.mod h1:XOrocE+pvK1xFfleEnShBlNAXf+j5blPPxrhjKgnIFc= cloud.google.com/go/baremetalsolution v0.4.0/go.mod h1:BymplhAadOO/eBa7KewQ0Ppg4A4Wplbn+PsFKRLo0uI= cloud.google.com/go/baremetalsolution v0.5.0/go.mod h1:dXGxEkmR9BMwxhzBhV0AioD0ULBmuLZI8CdwalUxuss= +cloud.google.com/go/baremetalsolution v1.1.1/go.mod h1:D1AV6xwOksJMV4OSlWHtWuFNZZYujJknMAP4Qa27QIA= +cloud.google.com/go/baremetalsolution v1.2.0/go.mod h1:68wi9AwPYkEWIUT4SvSGS9UJwKzNpshjHsH4lzk8iOw= +cloud.google.com/go/baremetalsolution v1.2.1/go.mod h1:3qKpKIw12RPXStwQXcbhfxVj1dqQGEvcmA+SX/mUR88= +cloud.google.com/go/baremetalsolution v1.2.2/go.mod h1:O5V6Uu1vzVelYahKfwEWRMaS3AbCkeYHy3145s1FkhM= +cloud.google.com/go/baremetalsolution v1.2.3/go.mod 
h1:/UAQ5xG3faDdy180rCUv47e0jvpp3BFxT+Cl0PFjw5g= cloud.google.com/go/batch v0.3.0/go.mod h1:TR18ZoAekj1GuirsUsR1ZTKN3FC/4UDnScjT8NXImFE= cloud.google.com/go/batch v0.4.0/go.mod h1:WZkHnP43R/QCGQsZ+0JyG4i79ranE2u8xvjq/9+STPE= cloud.google.com/go/batch v0.7.0/go.mod h1:vLZN95s6teRUqRQ4s3RLDsH8PvboqBK+rn1oevL159g= +cloud.google.com/go/batch v1.3.1/go.mod h1:VguXeQKXIYaeeIYbuozUmBR13AfL4SJP7IltNPS+A4A= +cloud.google.com/go/batch v1.4.1/go.mod h1:KdBmDD61K0ovcxoRHGrN6GmOBWeAOyCgKD0Mugx4Fkk= +cloud.google.com/go/batch v1.5.0/go.mod h1:KdBmDD61K0ovcxoRHGrN6GmOBWeAOyCgKD0Mugx4Fkk= +cloud.google.com/go/batch v1.5.1/go.mod h1:RpBuIYLkQu8+CWDk3dFD/t/jOCGuUpkpX+Y0n1Xccs8= +cloud.google.com/go/batch v1.6.1/go.mod h1:urdpD13zPe6YOK+6iZs/8/x2VBRofvblLpx0t57vM98= +cloud.google.com/go/batch v1.6.3/go.mod h1:J64gD4vsNSA2O5TtDB5AAux3nJ9iV8U3ilg3JDBYejU= cloud.google.com/go/beyondcorp v0.2.0/go.mod h1:TB7Bd+EEtcw9PCPQhCJtJGjk/7TC6ckmnSFS+xwTfm4= cloud.google.com/go/beyondcorp v0.3.0/go.mod h1:E5U5lcrcXMsCuoDNyGrpyTm/hn7ne941Jz2vmksAxW8= cloud.google.com/go/beyondcorp v0.4.0/go.mod h1:3ApA0mbhHx6YImmuubf5pyW8srKnCEPON32/5hj+RmM= cloud.google.com/go/beyondcorp v0.5.0/go.mod h1:uFqj9X+dSfrheVp7ssLTaRHd2EHqSL4QZmH4e8WXGGU= +cloud.google.com/go/beyondcorp v0.6.1/go.mod h1:YhxDWw946SCbmcWo3fAhw3V4XZMSpQ/VYfcKGAEU8/4= +cloud.google.com/go/beyondcorp v1.0.0/go.mod h1:YhxDWw946SCbmcWo3fAhw3V4XZMSpQ/VYfcKGAEU8/4= +cloud.google.com/go/beyondcorp v1.0.1/go.mod h1:zl/rWWAFVeV+kx+X2Javly7o1EIQThU4WlkynffL/lk= +cloud.google.com/go/beyondcorp v1.0.2/go.mod h1:m8cpG7caD+5su+1eZr+TSvF6r21NdLJk4f9u4SP2Ntc= +cloud.google.com/go/beyondcorp v1.0.3/go.mod h1:HcBvnEd7eYr+HGDd5ZbuVmBYX019C6CEXBonXbCVwJo= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= @@ -132,38 +214,79 @@ cloud.google.com/go/bigquery v1.47.0/go.mod h1:sA9XOgy0A8vQK9+MWhEQTY6Tix87M/Zur cloud.google.com/go/bigquery v1.48.0/go.mod h1:QAwSz+ipNgfL5jxiaK7weyOhzdoAy1zFm0Nf1fysJac= cloud.google.com/go/bigquery v1.49.0/go.mod h1:Sv8hMmTFFYBlt/ftw2uN6dFdQPzBlREY9yBh7Oy7/4Q= cloud.google.com/go/bigquery v1.50.0/go.mod h1:YrleYEh2pSEbgTBZYMJ5SuSr0ML3ypjRB1zgf7pvQLU= +cloud.google.com/go/bigquery v1.52.0/go.mod h1:3b/iXjRQGU4nKa87cXeg6/gogLjO8C6PmuM8i5Bi/u4= +cloud.google.com/go/bigquery v1.53.0/go.mod h1:3b/iXjRQGU4nKa87cXeg6/gogLjO8C6PmuM8i5Bi/u4= +cloud.google.com/go/bigquery v1.55.0/go.mod h1:9Y5I3PN9kQWuid6183JFhOGOW3GcirA5LpsKCUn+2ec= +cloud.google.com/go/bigquery v1.56.0/go.mod h1:KDcsploXTEY7XT3fDQzMUZlpQLHzE4itubHrnmhUrZA= +cloud.google.com/go/bigquery v1.57.1/go.mod h1:iYzC0tGVWt1jqSzBHqCr3lrRn0u13E8e+AqowBsDgug= cloud.google.com/go/billing v1.4.0/go.mod h1:g9IdKBEFlItS8bTtlrZdVLWSSdSyFUZKXNS02zKMOZY= cloud.google.com/go/billing v1.5.0/go.mod h1:mztb1tBc3QekhjSgmpf/CV4LzWXLzCArwpLmP2Gm88s= cloud.google.com/go/billing v1.6.0/go.mod h1:WoXzguj+BeHXPbKfNWkqVtDdzORazmCjraY+vrxcyvI= cloud.google.com/go/billing v1.7.0/go.mod h1:q457N3Hbj9lYwwRbnlD7vUpyjq6u5U1RAOArInEiD5Y= cloud.google.com/go/billing v1.12.0/go.mod h1:yKrZio/eu+okO/2McZEbch17O5CB5NpZhhXG6Z766ss= cloud.google.com/go/billing v1.13.0/go.mod h1:7kB2W9Xf98hP9Sr12KfECgfGclsH3CQR0R08tnRlRbc= +cloud.google.com/go/billing v1.16.0/go.mod h1:y8vx09JSSJG02k5QxbycNRrN7FGZB6F3CAcgum7jvGA= +cloud.google.com/go/billing v1.17.0/go.mod h1:Z9+vZXEq+HwH7bhJkyI4OQcR6TSbeMrjlpEjO2vzY64= 
+cloud.google.com/go/billing v1.17.1/go.mod h1:Z9+vZXEq+HwH7bhJkyI4OQcR6TSbeMrjlpEjO2vzY64= +cloud.google.com/go/billing v1.17.2/go.mod h1:u/AdV/3wr3xoRBk5xvUzYMS1IawOAPwQMuHgHMdljDg= +cloud.google.com/go/billing v1.17.3/go.mod h1:z83AkoZ7mZwBGT3yTnt6rSGI1OOsHSIi6a5M3mJ8NaU= +cloud.google.com/go/billing v1.17.4/go.mod h1:5DOYQStCxquGprqfuid/7haD7th74kyMBHkjO/OvDtk= cloud.google.com/go/binaryauthorization v1.1.0/go.mod h1:xwnoWu3Y84jbuHa0zd526MJYmtnVXn0syOjaJgy4+dM= cloud.google.com/go/binaryauthorization v1.2.0/go.mod h1:86WKkJHtRcv5ViNABtYMhhNWRrD1Vpi//uKEy7aYEfI= cloud.google.com/go/binaryauthorization v1.3.0/go.mod h1:lRZbKgjDIIQvzYQS1p99A7/U1JqvqeZg0wiI5tp6tg0= cloud.google.com/go/binaryauthorization v1.4.0/go.mod h1:tsSPQrBd77VLplV70GUhBf/Zm3FsKmgSqgm4UmiDItk= cloud.google.com/go/binaryauthorization v1.5.0/go.mod h1:OSe4OU1nN/VswXKRBmciKpo9LulY41gch5c68htf3/Q= +cloud.google.com/go/binaryauthorization v1.6.1/go.mod h1:TKt4pa8xhowwffiBmbrbcxijJRZED4zrqnwZ1lKH51U= +cloud.google.com/go/binaryauthorization v1.7.0/go.mod h1:Zn+S6QqTMn6odcMU1zDZCJxPjU2tZPV1oDl45lWY154= +cloud.google.com/go/binaryauthorization v1.7.1/go.mod h1:GTAyfRWYgcbsP3NJogpV3yeunbUIjx2T9xVeYovtURE= +cloud.google.com/go/binaryauthorization v1.7.2/go.mod h1:kFK5fQtxEp97m92ziy+hbu+uKocka1qRRL8MVJIgjv0= +cloud.google.com/go/binaryauthorization v1.7.3/go.mod h1:VQ/nUGRKhrStlGr+8GMS8f6/vznYLkdK5vaKfdCIpvU= cloud.google.com/go/certificatemanager v1.3.0/go.mod h1:n6twGDvcUBFu9uBgt4eYvvf3sQ6My8jADcOVwHmzadg= cloud.google.com/go/certificatemanager v1.4.0/go.mod h1:vowpercVFyqs8ABSmrdV+GiFf2H/ch3KyudYQEMM590= cloud.google.com/go/certificatemanager v1.6.0/go.mod h1:3Hh64rCKjRAX8dXgRAyOcY5vQ/fE1sh8o+Mdd6KPgY8= +cloud.google.com/go/certificatemanager v1.7.1/go.mod h1:iW8J3nG6SaRYImIa+wXQ0g8IgoofDFRp5UMzaNk1UqI= +cloud.google.com/go/certificatemanager v1.7.2/go.mod h1:15SYTDQMd00kdoW0+XY5d9e+JbOPjp24AvF48D8BbcQ= +cloud.google.com/go/certificatemanager v1.7.3/go.mod h1:T/sZYuC30PTag0TLo28VedIRIj1KPGcOQzjWAptHa00= +cloud.google.com/go/certificatemanager v1.7.4/go.mod h1:FHAylPe/6IIKuaRmHbjbdLhGhVQ+CWHSD5Jq0k4+cCE= cloud.google.com/go/channel v1.8.0/go.mod h1:W5SwCXDJsq/rg3tn3oG0LOxpAo6IMxNa09ngphpSlnk= cloud.google.com/go/channel v1.9.0/go.mod h1:jcu05W0my9Vx4mt3/rEHpfxc9eKi9XwsdDL8yBMbKUk= cloud.google.com/go/channel v1.11.0/go.mod h1:IdtI0uWGqhEeatSB62VOoJ8FSUhJ9/+iGkJVqp74CGE= cloud.google.com/go/channel v1.12.0/go.mod h1:VkxCGKASi4Cq7TbXxlaBezonAYpp1GCnKMY6tnMQnLU= +cloud.google.com/go/channel v1.16.0/go.mod h1:eN/q1PFSl5gyu0dYdmxNXscY/4Fi7ABmeHCJNf/oHmc= +cloud.google.com/go/channel v1.17.0/go.mod h1:RpbhJsGi/lXWAUM1eF4IbQGbsfVlg2o8Iiy2/YLfVT0= +cloud.google.com/go/channel v1.17.1/go.mod h1:xqfzcOZAcP4b/hUDH0GkGg1Sd5to6di1HOJn/pi5uBQ= +cloud.google.com/go/channel v1.17.2/go.mod h1:aT2LhnftnyfQceFql5I/mP8mIbiiJS4lWqgXA815zMk= +cloud.google.com/go/channel v1.17.3/go.mod h1:QcEBuZLGGrUMm7kNj9IbU1ZfmJq2apotsV83hbxX7eE= cloud.google.com/go/cloudbuild v1.3.0/go.mod h1:WequR4ULxlqvMsjDEEEFnOG5ZSRSgWOywXYDb1vPE6U= cloud.google.com/go/cloudbuild v1.4.0/go.mod h1:5Qwa40LHiOXmz3386FrjrYM93rM/hdRr7b53sySrTqA= cloud.google.com/go/cloudbuild v1.6.0/go.mod h1:UIbc/w9QCbH12xX+ezUsgblrWv+Cv4Tw83GiSMHOn9M= cloud.google.com/go/cloudbuild v1.7.0/go.mod h1:zb5tWh2XI6lR9zQmsm1VRA+7OCuve5d8S+zJUul8KTg= cloud.google.com/go/cloudbuild v1.9.0/go.mod h1:qK1d7s4QlO0VwfYn5YuClDGg2hfmLZEb4wQGAbIgL1s= +cloud.google.com/go/cloudbuild v1.10.1/go.mod h1:lyJg7v97SUIPq4RC2sGsz/9tNczhyv2AjML/ci4ulzU= +cloud.google.com/go/cloudbuild v1.13.0/go.mod 
h1:lyJg7v97SUIPq4RC2sGsz/9tNczhyv2AjML/ci4ulzU= +cloud.google.com/go/cloudbuild v1.14.0/go.mod h1:lyJg7v97SUIPq4RC2sGsz/9tNczhyv2AjML/ci4ulzU= +cloud.google.com/go/cloudbuild v1.14.1/go.mod h1:K7wGc/3zfvmYWOWwYTgF/d/UVJhS4pu+HAy7PL7mCsU= +cloud.google.com/go/cloudbuild v1.14.2/go.mod h1:Bn6RO0mBYk8Vlrt+8NLrru7WXlQ9/RDWz2uo5KG1/sg= +cloud.google.com/go/cloudbuild v1.14.3/go.mod h1:eIXYWmRt3UtggLnFGx4JvXcMj4kShhVzGndL1LwleEM= cloud.google.com/go/clouddms v1.3.0/go.mod h1:oK6XsCDdW4Ib3jCCBugx+gVjevp2TMXFtgxvPSee3OM= cloud.google.com/go/clouddms v1.4.0/go.mod h1:Eh7sUGCC+aKry14O1NRljhjyrr0NFC0G2cjwX0cByRk= cloud.google.com/go/clouddms v1.5.0/go.mod h1:QSxQnhikCLUw13iAbffF2CZxAER3xDGNHjsTAkQJcQA= +cloud.google.com/go/clouddms v1.6.1/go.mod h1:Ygo1vL52Ov4TBZQquhz5fiw2CQ58gvu+PlS6PVXCpZI= +cloud.google.com/go/clouddms v1.7.0/go.mod h1:MW1dC6SOtI/tPNCciTsXtsGNEM0i0OccykPvv3hiYeM= +cloud.google.com/go/clouddms v1.7.1/go.mod h1:o4SR8U95+P7gZ/TX+YbJxehOCsM+fe6/brlrFquiszk= +cloud.google.com/go/clouddms v1.7.2/go.mod h1:Rk32TmWmHo64XqDvW7jgkFQet1tUKNVzs7oajtJT3jU= +cloud.google.com/go/clouddms v1.7.3/go.mod h1:fkN2HQQNUYInAU3NQ3vRLkV2iWs8lIdmBKOx4nrL6Hc= cloud.google.com/go/cloudtasks v1.5.0/go.mod h1:fD92REy1x5woxkKEkLdvavGnPJGEn8Uic9nWuLzqCpY= cloud.google.com/go/cloudtasks v1.6.0/go.mod h1:C6Io+sxuke9/KNRkbQpihnW93SWDU3uXt92nu85HkYI= cloud.google.com/go/cloudtasks v1.7.0/go.mod h1:ImsfdYWwlWNJbdgPIIGJWC+gemEGTBK/SunNQQNCAb4= cloud.google.com/go/cloudtasks v1.8.0/go.mod h1:gQXUIwCSOI4yPVK7DgTVFiiP0ZW/eQkydWzwVMdHxrI= cloud.google.com/go/cloudtasks v1.9.0/go.mod h1:w+EyLsVkLWHcOaqNEyvcKAsWp9p29dL6uL9Nst1cI7Y= cloud.google.com/go/cloudtasks v1.10.0/go.mod h1:NDSoTLkZ3+vExFEWu2UJV1arUyzVDAiZtdWcsUyNwBs= +cloud.google.com/go/cloudtasks v1.11.1/go.mod h1:a9udmnou9KO2iulGscKR0qBYjreuX8oHwpmFsKspEvM= +cloud.google.com/go/cloudtasks v1.12.1/go.mod h1:a9udmnou9KO2iulGscKR0qBYjreuX8oHwpmFsKspEvM= +cloud.google.com/go/cloudtasks v1.12.2/go.mod h1:A7nYkjNlW2gUoROg1kvJrQGhJP/38UaWwsnuBDOBVUk= +cloud.google.com/go/cloudtasks v1.12.3/go.mod h1:GPVXhIOSGEaR+3xT4Fp72ScI+HjHffSS4B8+BaBB5Ys= +cloud.google.com/go/cloudtasks v1.12.4/go.mod h1:BEPu0Gtt2dU6FxZHNqqNdGqIG86qyWKBPGnsb7udGY0= cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= @@ -180,8 +303,13 @@ cloud.google.com/go/compute v1.18.0/go.mod h1:1X7yHxec2Ga+Ss6jPyjxRxpu2uu7PLgsOV cloud.google.com/go/compute v1.19.0/go.mod h1:rikpw2y+UMidAe9tISo04EHNOIf42RLYF/q8Bs93scU= cloud.google.com/go/compute v1.19.1/go.mod h1:6ylj3a05WF8leseCdIf77NK0g1ey+nj5IKd5/kvShxE= cloud.google.com/go/compute v1.19.3/go.mod h1:qxvISKp/gYnXkSAD1ppcSOveRAmzxicEv/JlizULFrI= -cloud.google.com/go/compute v1.20.1 h1:6aKEtlUiwEpJzM001l0yFkpXmUVXaN8W+fbkb2AZNbg= cloud.google.com/go/compute v1.20.1/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= +cloud.google.com/go/compute v1.21.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= +cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= +cloud.google.com/go/compute v1.23.1/go.mod h1:CqB3xpmPKKt3OJpW2ndFIXnA9A4xAy/F3Xp1ixncW78= +cloud.google.com/go/compute v1.23.2/go.mod h1:JJ0atRC0J/oWYiiVBmsSsrRnh92DhZPG4hFDcR04Rns= +cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk= +cloud.google.com/go/compute v1.23.3/go.mod 
h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI= cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZEXYonfTBHHFPO/4UU= cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= @@ -190,15 +318,32 @@ cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2Aawl cloud.google.com/go/contactcenterinsights v1.3.0/go.mod h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY= cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck= cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iWxMSTZhLxSCofVV5W6YFM/w= +cloud.google.com/go/contactcenterinsights v1.9.1/go.mod h1:bsg/R7zGLYMVxFFzfh9ooLTruLRCG9fnzhH9KznHhbM= +cloud.google.com/go/contactcenterinsights v1.10.0/go.mod h1:bsg/R7zGLYMVxFFzfh9ooLTruLRCG9fnzhH9KznHhbM= +cloud.google.com/go/contactcenterinsights v1.11.0/go.mod h1:hutBdImE4XNZ1NV4vbPJKSFOnQruhC5Lj9bZqWMTKiU= +cloud.google.com/go/contactcenterinsights v1.11.1/go.mod h1:FeNP3Kg8iteKM80lMwSk3zZZKVxr+PGnAId6soKuXwE= +cloud.google.com/go/contactcenterinsights v1.11.2/go.mod h1:A9PIR5ov5cRcd28KlDbmmXE8Aay+Gccer2h4wzkYFso= +cloud.google.com/go/contactcenterinsights v1.11.3/go.mod h1:HHX5wrz5LHVAwfI2smIotQG9x8Qd6gYilaHcLLLmNis= cloud.google.com/go/container v1.6.0/go.mod h1:Xazp7GjJSeUYo688S+6J5V+n/t+G5sKBTFkKNudGRxg= cloud.google.com/go/container v1.7.0/go.mod h1:Dp5AHtmothHGX3DwwIHPgq45Y8KmNsgN3amoYfxVkLo= cloud.google.com/go/container v1.13.1/go.mod h1:6wgbMPeQRw9rSnKBCAJXnds3Pzj03C4JHamr8asWKy4= cloud.google.com/go/container v1.14.0/go.mod h1:3AoJMPhHfLDxLvrlVWaK57IXzaPnLaZq63WX59aQBfM= cloud.google.com/go/container v1.15.0/go.mod h1:ft+9S0WGjAyjDggg5S06DXj+fHJICWg8L7isCQe9pQA= +cloud.google.com/go/container v1.22.1/go.mod h1:lTNExE2R7f+DLbAN+rJiKTisauFCaoDq6NURZ83eVH4= +cloud.google.com/go/container v1.24.0/go.mod h1:lTNExE2R7f+DLbAN+rJiKTisauFCaoDq6NURZ83eVH4= +cloud.google.com/go/container v1.26.0/go.mod h1:YJCmRet6+6jnYYRS000T6k0D0xUXQgBSaJ7VwI8FBj4= +cloud.google.com/go/container v1.26.1/go.mod h1:5smONjPRUxeEpDG7bMKWfDL4sauswqEtnBK1/KKpR04= +cloud.google.com/go/container v1.26.2/go.mod h1:YlO84xCt5xupVbLaMY4s3XNE79MUJ+49VmkInr6HvF4= +cloud.google.com/go/container v1.27.1/go.mod h1:b1A1gJeTBXVLQ6GGw9/9M4FG94BEGsqJ5+t4d/3N7O4= cloud.google.com/go/containeranalysis v0.5.1/go.mod h1:1D92jd8gRR/c0fGMlymRgxWD3Qw9C1ff6/T7mLgVL8I= cloud.google.com/go/containeranalysis v0.6.0/go.mod h1:HEJoiEIu+lEXM+k7+qLCci0h33lX3ZqoYFdmPcoO7s4= cloud.google.com/go/containeranalysis v0.7.0/go.mod h1:9aUL+/vZ55P2CXfuZjS4UjQ9AgXoSw8Ts6lemfmxBxI= cloud.google.com/go/containeranalysis v0.9.0/go.mod h1:orbOANbwk5Ejoom+s+DUCTTJ7IBdBQJDcSylAx/on9s= +cloud.google.com/go/containeranalysis v0.10.1/go.mod h1:Ya2jiILITMY68ZLPaogjmOMNkwsDrWBSTyBubGXO7j0= +cloud.google.com/go/containeranalysis v0.11.0/go.mod h1:4n2e99ZwpGxpNcz+YsFT1dfOHPQFGcAC8FN2M2/ne/U= +cloud.google.com/go/containeranalysis v0.11.1/go.mod h1:rYlUOM7nem1OJMKwE1SadufX0JP3wnXj844EtZAwWLY= +cloud.google.com/go/containeranalysis v0.11.2/go.mod h1:xibioGBC1MD2j4reTyV1xY1/MvKaz+fyM9ENWhmIeP8= +cloud.google.com/go/containeranalysis v0.11.3/go.mod h1:kMeST7yWFQMGjiG9K7Eov+fPNQcGhb8mXj/UcTiWw9U= cloud.google.com/go/datacatalog v1.3.0/go.mod h1:g9svFY6tuR+j+hrTw3J2dNcmI0dzmSiyOzm8kpLq0a0= cloud.google.com/go/datacatalog v1.5.0/go.mod h1:M7GPLNQeLfWqeIm3iuiruhPzkt65+Bx8dAKvScX8jvs= cloud.google.com/go/datacatalog 
v1.6.0/go.mod h1:+aEyF8JKg+uXcIdAmmaMUmZ3q1b/lKLtXCmXdnc0lbc= @@ -207,44 +352,98 @@ cloud.google.com/go/datacatalog v1.8.0/go.mod h1:KYuoVOv9BM8EYz/4eMFxrr4DUKhGIOX cloud.google.com/go/datacatalog v1.8.1/go.mod h1:RJ58z4rMp3gvETA465Vg+ag8BGgBdnRPEMMSTr5Uv+M= cloud.google.com/go/datacatalog v1.12.0/go.mod h1:CWae8rFkfp6LzLumKOnmVh4+Zle4A3NXLzVJ1d1mRm0= cloud.google.com/go/datacatalog v1.13.0/go.mod h1:E4Rj9a5ZtAxcQJlEBTLgMTphfP11/lNaAshpoBgemX8= +cloud.google.com/go/datacatalog v1.14.0/go.mod h1:h0PrGtlihoutNMp/uvwhawLQ9+c63Kz65UFqh49Yo+E= +cloud.google.com/go/datacatalog v1.14.1/go.mod h1:d2CevwTG4yedZilwe+v3E3ZBDRMobQfSG/a6cCCN5R4= +cloud.google.com/go/datacatalog v1.16.0/go.mod h1:d2CevwTG4yedZilwe+v3E3ZBDRMobQfSG/a6cCCN5R4= +cloud.google.com/go/datacatalog v1.17.1/go.mod h1:nCSYFHgtxh2MiEktWIz71s/X+7ds/UT9kp0PC7waCzE= +cloud.google.com/go/datacatalog v1.18.0/go.mod h1:nCSYFHgtxh2MiEktWIz71s/X+7ds/UT9kp0PC7waCzE= +cloud.google.com/go/datacatalog v1.18.1/go.mod h1:TzAWaz+ON1tkNr4MOcak8EBHX7wIRX/gZKM+yTVsv+A= +cloud.google.com/go/datacatalog v1.18.2/go.mod h1:SPVgWW2WEMuWHA+fHodYjmxPiMqcOiWfhc9OD5msigk= +cloud.google.com/go/datacatalog v1.18.3/go.mod h1:5FR6ZIF8RZrtml0VUao22FxhdjkoG+a0866rEnObryM= cloud.google.com/go/dataflow v0.6.0/go.mod h1:9QwV89cGoxjjSR9/r7eFDqqjtvbKxAK2BaYU6PVk9UM= cloud.google.com/go/dataflow v0.7.0/go.mod h1:PX526vb4ijFMesO1o202EaUmouZKBpjHsTlCtB4parQ= cloud.google.com/go/dataflow v0.8.0/go.mod h1:Rcf5YgTKPtQyYz8bLYhFoIV/vP39eL7fWNcSOyFfLJE= +cloud.google.com/go/dataflow v0.9.1/go.mod h1:Wp7s32QjYuQDWqJPFFlnBKhkAtiFpMTdg00qGbnIHVw= +cloud.google.com/go/dataflow v0.9.2/go.mod h1:vBfdBZ/ejlTaYIGB3zB4T08UshH70vbtZeMD+urnUSo= +cloud.google.com/go/dataflow v0.9.3/go.mod h1:HI4kMVjcHGTs3jTHW/kv3501YW+eloiJSLxkJa/vqFE= +cloud.google.com/go/dataflow v0.9.4/go.mod h1:4G8vAkHYCSzU8b/kmsoR2lWyHJD85oMJPHMtan40K8w= cloud.google.com/go/dataform v0.3.0/go.mod h1:cj8uNliRlHpa6L3yVhDOBrUXH+BPAO1+KFMQQNSThKo= cloud.google.com/go/dataform v0.4.0/go.mod h1:fwV6Y4Ty2yIFL89huYlEkwUPtS7YZinZbzzj5S9FzCE= cloud.google.com/go/dataform v0.5.0/go.mod h1:GFUYRe8IBa2hcomWplodVmUx/iTL0FrsauObOM3Ipr0= cloud.google.com/go/dataform v0.6.0/go.mod h1:QPflImQy33e29VuapFdf19oPbE4aYTJxr31OAPV+ulA= cloud.google.com/go/dataform v0.7.0/go.mod h1:7NulqnVozfHvWUBpMDfKMUESr+85aJsC/2O0o3jWPDE= +cloud.google.com/go/dataform v0.8.1/go.mod h1:3BhPSiw8xmppbgzeBbmDvmSWlwouuJkXsXsb8UBih9M= +cloud.google.com/go/dataform v0.8.2/go.mod h1:X9RIqDs6NbGPLR80tnYoPNiO1w0wenKTb8PxxlhTMKM= +cloud.google.com/go/dataform v0.8.3/go.mod h1:8nI/tvv5Fso0drO3pEjtowz58lodx8MVkdV2q0aPlqg= +cloud.google.com/go/dataform v0.9.1/go.mod h1:pWTg+zGQ7i16pyn0bS1ruqIE91SdL2FDMvEYu/8oQxs= cloud.google.com/go/datafusion v1.4.0/go.mod h1:1Zb6VN+W6ALo85cXnM1IKiPw+yQMKMhB9TsTSRDo/38= cloud.google.com/go/datafusion v1.5.0/go.mod h1:Kz+l1FGHB0J+4XF2fud96WMmRiq/wj8N9u007vyXZ2w= cloud.google.com/go/datafusion v1.6.0/go.mod h1:WBsMF8F1RhSXvVM8rCV3AeyWVxcC2xY6vith3iw3S+8= +cloud.google.com/go/datafusion v1.7.1/go.mod h1:KpoTBbFmoToDExJUso/fcCiguGDk7MEzOWXUsJo0wsI= +cloud.google.com/go/datafusion v1.7.2/go.mod h1:62K2NEC6DRlpNmI43WHMWf9Vg/YvN6QVi8EVwifElI0= +cloud.google.com/go/datafusion v1.7.3/go.mod h1:eoLt1uFXKGBq48jy9LZ+Is8EAVLnmn50lNncLzwYokE= +cloud.google.com/go/datafusion v1.7.4/go.mod h1:BBs78WTOLYkT4GVZIXQCZT3GFpkpDN4aBY4NDX/jVlM= cloud.google.com/go/datalabeling v0.5.0/go.mod h1:TGcJ0G2NzcsXSE/97yWjIZO0bXj0KbVlINXMG9ud42I= cloud.google.com/go/datalabeling v0.6.0/go.mod h1:WqdISuk/+WIGeMkpw/1q7bK/tFEZxsrFJOJdY2bXvTQ= 
cloud.google.com/go/datalabeling v0.7.0/go.mod h1:WPQb1y08RJbmpM3ww0CSUAGweL0SxByuW2E+FU+wXcM= +cloud.google.com/go/datalabeling v0.8.1/go.mod h1:XS62LBSVPbYR54GfYQsPXZjTW8UxCK2fkDciSrpRFdY= +cloud.google.com/go/datalabeling v0.8.2/go.mod h1:cyDvGHuJWu9U/cLDA7d8sb9a0tWLEletStu2sTmg3BE= +cloud.google.com/go/datalabeling v0.8.3/go.mod h1:tvPhpGyS/V7lqjmb3V0TaDdGvhzgR1JoW7G2bpi2UTI= +cloud.google.com/go/datalabeling v0.8.4/go.mod h1:Z1z3E6LHtffBGrNUkKwbwbDxTiXEApLzIgmymj8A3S8= cloud.google.com/go/dataplex v1.3.0/go.mod h1:hQuRtDg+fCiFgC8j0zV222HvzFQdRd+SVX8gdmFcZzA= cloud.google.com/go/dataplex v1.4.0/go.mod h1:X51GfLXEMVJ6UN47ESVqvlsRplbLhcsAt0kZCCKsU0A= cloud.google.com/go/dataplex v1.5.2/go.mod h1:cVMgQHsmfRoI5KFYq4JtIBEUbYwc3c7tXmIDhRmNNVQ= cloud.google.com/go/dataplex v1.6.0/go.mod h1:bMsomC/aEJOSpHXdFKFGQ1b0TDPIeL28nJObeO1ppRs= +cloud.google.com/go/dataplex v1.8.1/go.mod h1:7TyrDT6BCdI8/38Uvp0/ZxBslOslP2X2MPDucliyvSE= +cloud.google.com/go/dataplex v1.9.0/go.mod h1:7TyrDT6BCdI8/38Uvp0/ZxBslOslP2X2MPDucliyvSE= +cloud.google.com/go/dataplex v1.9.1/go.mod h1:7TyrDT6BCdI8/38Uvp0/ZxBslOslP2X2MPDucliyvSE= +cloud.google.com/go/dataplex v1.10.1/go.mod h1:1MzmBv8FvjYfc7vDdxhnLFNskikkB+3vl475/XdCDhs= +cloud.google.com/go/dataplex v1.10.2/go.mod h1:xdC8URdTrCrZMW6keY779ZT1cTOfV8KEPNsw+LTRT1Y= +cloud.google.com/go/dataplex v1.11.1/go.mod h1:mHJYQQ2VEJHsyoC0OdNyy988DvEbPhqFs5OOLffLX0c= cloud.google.com/go/dataproc v1.7.0/go.mod h1:CKAlMjII9H90RXaMpSxQ8EU6dQx6iAYNPcYPOkSbi8s= cloud.google.com/go/dataproc v1.8.0/go.mod h1:5OW+zNAH0pMpw14JVrPONsxMQYMBqJuzORhIBfBn9uI= cloud.google.com/go/dataproc v1.12.0/go.mod h1:zrF3aX0uV3ikkMz6z4uBbIKyhRITnxvr4i3IjKsKrw4= +cloud.google.com/go/dataproc/v2 v2.0.1/go.mod h1:7Ez3KRHdFGcfY7GcevBbvozX+zyWGcwLJvvAMwCaoZ4= +cloud.google.com/go/dataproc/v2 v2.2.0/go.mod h1:lZR7AQtwZPvmINx5J87DSOOpTfof9LVZju6/Qo4lmcY= +cloud.google.com/go/dataproc/v2 v2.2.1/go.mod h1:QdAJLaBjh+l4PVlVZcmrmhGccosY/omC1qwfQ61Zv/o= +cloud.google.com/go/dataproc/v2 v2.2.2/go.mod h1:aocQywVmQVF4i8CL740rNI/ZRpsaaC1Wh2++BJ7HEJ4= +cloud.google.com/go/dataproc/v2 v2.2.3/go.mod h1:G5R6GBc9r36SXv/RtZIVfB8SipI+xVn0bX5SxUzVYbY= cloud.google.com/go/dataqna v0.5.0/go.mod h1:90Hyk596ft3zUQ8NkFfvICSIfHFh1Bc7C4cK3vbhkeo= cloud.google.com/go/dataqna v0.6.0/go.mod h1:1lqNpM7rqNLVgWBJyk5NF6Uen2PHym0jtVJonplVsDA= cloud.google.com/go/dataqna v0.7.0/go.mod h1:Lx9OcIIeqCrw1a6KdO3/5KMP1wAmTc0slZWwP12Qq3c= +cloud.google.com/go/dataqna v0.8.1/go.mod h1:zxZM0Bl6liMePWsHA8RMGAfmTG34vJMapbHAxQ5+WA8= +cloud.google.com/go/dataqna v0.8.2/go.mod h1:KNEqgx8TTmUipnQsScOoDpq/VlXVptUqVMZnt30WAPs= +cloud.google.com/go/dataqna v0.8.3/go.mod h1:wXNBW2uvc9e7Gl5k8adyAMnLush1KVV6lZUhB+rqNu4= +cloud.google.com/go/dataqna v0.8.4/go.mod h1:mySRKjKg5Lz784P6sCov3p1QD+RZQONRMRjzGNcFd0c= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/datastore v1.10.0/go.mod h1:PC5UzAmDEkAmkfaknstTYbNpgE49HAgW2J1gcgUfmdM= cloud.google.com/go/datastore v1.11.0/go.mod h1:TvGxBIHCS50u8jzG+AW/ppf87v1of8nwzFNgEZU1D3c= +cloud.google.com/go/datastore v1.12.0/go.mod h1:KjdB88W897MRITkvWWJrg2OUtrR5XVj1EoLgSp6/N70= +cloud.google.com/go/datastore v1.12.1/go.mod h1:KjdB88W897MRITkvWWJrg2OUtrR5XVj1EoLgSp6/N70= +cloud.google.com/go/datastore v1.13.0/go.mod h1:KjdB88W897MRITkvWWJrg2OUtrR5XVj1EoLgSp6/N70= +cloud.google.com/go/datastore v1.14.0/go.mod h1:GAeStMBIt9bPS7jMJA85kgkpsMkvseWWXiaHya9Jes8= 
+cloud.google.com/go/datastore v1.15.0/go.mod h1:GAeStMBIt9bPS7jMJA85kgkpsMkvseWWXiaHya9Jes8= cloud.google.com/go/datastream v1.2.0/go.mod h1:i/uTP8/fZwgATHS/XFu0TcNUhuA0twZxxQ3EyCUQMwo= cloud.google.com/go/datastream v1.3.0/go.mod h1:cqlOX8xlyYF/uxhiKn6Hbv6WjwPPuI9W2M9SAXwaLLQ= cloud.google.com/go/datastream v1.4.0/go.mod h1:h9dpzScPhDTs5noEMQVWP8Wx8AFBRyS0s8KWPx/9r0g= cloud.google.com/go/datastream v1.5.0/go.mod h1:6TZMMNPwjUqZHBKPQ1wwXpb0d5VDVPl2/XoS5yi88q4= cloud.google.com/go/datastream v1.6.0/go.mod h1:6LQSuswqLa7S4rPAOZFVjHIG3wJIjZcZrw8JDEDJuIs= cloud.google.com/go/datastream v1.7.0/go.mod h1:uxVRMm2elUSPuh65IbZpzJNMbuzkcvu5CjMqVIUHrww= +cloud.google.com/go/datastream v1.9.1/go.mod h1:hqnmr8kdUBmrnk65k5wNRoHSCYksvpdZIcZIEl8h43Q= +cloud.google.com/go/datastream v1.10.0/go.mod h1:hqnmr8kdUBmrnk65k5wNRoHSCYksvpdZIcZIEl8h43Q= +cloud.google.com/go/datastream v1.10.1/go.mod h1:7ngSYwnw95YFyTd5tOGBxHlOZiL+OtpjheqU7t2/s/c= +cloud.google.com/go/datastream v1.10.2/go.mod h1:W42TFgKAs/om6x/CdXX5E4oiAsKlH+e8MTGy81zdYt0= +cloud.google.com/go/datastream v1.10.3/go.mod h1:YR0USzgjhqA/Id0Ycu1VvZe8hEWwrkjuXrGbzeDOSEA= cloud.google.com/go/deploy v1.4.0/go.mod h1:5Xghikd4VrmMLNaF6FiRFDlHb59VM59YoDQnOUdsH/c= cloud.google.com/go/deploy v1.5.0/go.mod h1:ffgdD0B89tToyW/U/D2eL0jN2+IEV/3EMuXHA0l4r+s= cloud.google.com/go/deploy v1.6.0/go.mod h1:f9PTHehG/DjCom3QH0cntOVRm93uGBDt2vKzAPwpXQI= cloud.google.com/go/deploy v1.8.0/go.mod h1:z3myEJnA/2wnB4sgjqdMfgxCA0EqC3RBTNcVPs93mtQ= +cloud.google.com/go/deploy v1.11.0/go.mod h1:tKuSUV5pXbn67KiubiUNUejqLs4f5cxxiCNCeyl0F2g= +cloud.google.com/go/deploy v1.13.0/go.mod h1:tKuSUV5pXbn67KiubiUNUejqLs4f5cxxiCNCeyl0F2g= +cloud.google.com/go/deploy v1.13.1/go.mod h1:8jeadyLkH9qu9xgO3hVWw8jVr29N1mnW42gRJT8GY6g= +cloud.google.com/go/deploy v1.14.1/go.mod h1:N8S0b+aIHSEeSr5ORVoC0+/mOPUysVt8ae4QkZYolAw= +cloud.google.com/go/deploy v1.14.2/go.mod h1:e5XOUI5D+YGldyLNZ21wbp9S8otJbBE4i88PtO9x/2g= cloud.google.com/go/dialogflow v1.15.0/go.mod h1:HbHDWs33WOGJgn6rfzBW1Kv807BE3O1+xGbn59zZWI4= cloud.google.com/go/dialogflow v1.16.1/go.mod h1:po6LlzGfK+smoSmTBnbkIZY2w8ffjz/RcGSS+sh1el0= cloud.google.com/go/dialogflow v1.17.0/go.mod h1:YNP09C/kXA1aZdBgC/VtXX74G/TKn7XVCcVumTflA+8= @@ -253,36 +452,79 @@ cloud.google.com/go/dialogflow v1.19.0/go.mod h1:JVmlG1TwykZDtxtTXujec4tQ+D8SBFM cloud.google.com/go/dialogflow v1.29.0/go.mod h1:b+2bzMe+k1s9V+F2jbJwpHPzrnIyHihAdRFMtn2WXuM= cloud.google.com/go/dialogflow v1.31.0/go.mod h1:cuoUccuL1Z+HADhyIA7dci3N5zUssgpBJmCzI6fNRB4= cloud.google.com/go/dialogflow v1.32.0/go.mod h1:jG9TRJl8CKrDhMEcvfcfFkkpp8ZhgPz3sBGmAUYJ2qE= +cloud.google.com/go/dialogflow v1.38.0/go.mod h1:L7jnH+JL2mtmdChzAIcXQHXMvQkE3U4hTaNltEuxXn4= +cloud.google.com/go/dialogflow v1.40.0/go.mod h1:L7jnH+JL2mtmdChzAIcXQHXMvQkE3U4hTaNltEuxXn4= +cloud.google.com/go/dialogflow v1.43.0/go.mod h1:pDUJdi4elL0MFmt1REMvFkdsUTYSHq+rTCS8wg0S3+M= +cloud.google.com/go/dialogflow v1.44.0/go.mod h1:pDUJdi4elL0MFmt1REMvFkdsUTYSHq+rTCS8wg0S3+M= +cloud.google.com/go/dialogflow v1.44.1/go.mod h1:n/h+/N2ouKOO+rbe/ZnI186xImpqvCVj2DdsWS/0EAk= +cloud.google.com/go/dialogflow v1.44.2/go.mod h1:QzFYndeJhpVPElnFkUXxdlptx0wPnBWLCBT9BvtC3/c= +cloud.google.com/go/dialogflow v1.44.3/go.mod h1:mHly4vU7cPXVweuB5R0zsYKPMzy240aQdAu06SqBbAQ= cloud.google.com/go/dlp v1.6.0/go.mod h1:9eyB2xIhpU0sVwUixfBubDoRwP+GjeUoxxeueZmqvmM= cloud.google.com/go/dlp v1.7.0/go.mod h1:68ak9vCiMBjbasxeVD17hVPxDEck+ExiHavX8kiHG+Q= cloud.google.com/go/dlp v1.9.0/go.mod h1:qdgmqgTyReTz5/YNSSuueR8pl7hO0o9bQ39ZhtgkWp4= +cloud.google.com/go/dlp 
v1.10.1/go.mod h1:IM8BWz1iJd8njcNcG0+Kyd9OPnqnRNkDV8j42VT5KOI= +cloud.google.com/go/dlp v1.10.2/go.mod h1:ZbdKIhcnyhILgccwVDzkwqybthh7+MplGC3kZVZsIOQ= +cloud.google.com/go/dlp v1.10.3/go.mod h1:iUaTc/ln8I+QT6Ai5vmuwfw8fqTk2kaz0FvCwhLCom0= +cloud.google.com/go/dlp v1.11.1/go.mod h1:/PA2EnioBeXTL/0hInwgj0rfsQb3lpE3R8XUJxqUNKI= cloud.google.com/go/documentai v1.7.0/go.mod h1:lJvftZB5NRiFSX4moiye1SMxHx0Bc3x1+p9e/RfXYiU= cloud.google.com/go/documentai v1.8.0/go.mod h1:xGHNEB7CtsnySCNrCFdCyyMz44RhFEEX2Q7UD0c5IhU= cloud.google.com/go/documentai v1.9.0/go.mod h1:FS5485S8R00U10GhgBC0aNGrJxBP8ZVpEeJ7PQDZd6k= cloud.google.com/go/documentai v1.10.0/go.mod h1:vod47hKQIPeCfN2QS/jULIvQTugbmdc0ZvxxfQY1bg4= cloud.google.com/go/documentai v1.16.0/go.mod h1:o0o0DLTEZ+YnJZ+J4wNfTxmDVyrkzFvttBXXtYRMHkM= cloud.google.com/go/documentai v1.18.0/go.mod h1:F6CK6iUH8J81FehpskRmhLq/3VlwQvb7TvwOceQ2tbs= +cloud.google.com/go/documentai v1.20.0/go.mod h1:yJkInoMcK0qNAEdRnqY/D5asy73tnPe88I1YTZT+a8E= +cloud.google.com/go/documentai v1.22.0/go.mod h1:yJkInoMcK0qNAEdRnqY/D5asy73tnPe88I1YTZT+a8E= +cloud.google.com/go/documentai v1.22.1/go.mod h1:LKs22aDHbJv7ufXuPypzRO7rG3ALLJxzdCXDPutw4Qc= +cloud.google.com/go/documentai v1.23.0/go.mod h1:LKs22aDHbJv7ufXuPypzRO7rG3ALLJxzdCXDPutw4Qc= +cloud.google.com/go/documentai v1.23.2/go.mod h1:Q/wcRT+qnuXOpjAkvOV4A+IeQl04q2/ReT7SSbytLSo= +cloud.google.com/go/documentai v1.23.4/go.mod h1:4MYAaEMnADPN1LPN5xboDR5QVB6AgsaxgFdJhitlE2Y= +cloud.google.com/go/documentai v1.23.5/go.mod h1:ghzBsyVTiVdkfKaUCum/9bGBEyBjDO4GfooEcYKhN+g= cloud.google.com/go/domains v0.6.0/go.mod h1:T9Rz3GasrpYk6mEGHh4rymIhjlnIuB4ofT1wTxDeT4Y= cloud.google.com/go/domains v0.7.0/go.mod h1:PtZeqS1xjnXuRPKE/88Iru/LdfoRyEHYA9nFQf4UKpg= cloud.google.com/go/domains v0.8.0/go.mod h1:M9i3MMDzGFXsydri9/vW+EWz9sWb4I6WyHqdlAk0idE= +cloud.google.com/go/domains v0.9.1/go.mod h1:aOp1c0MbejQQ2Pjf1iJvnVyT+z6R6s8pX66KaCSDYfE= +cloud.google.com/go/domains v0.9.2/go.mod h1:3YvXGYzZG1Temjbk7EyGCuGGiXHJwVNmwIf+E/cUp5I= +cloud.google.com/go/domains v0.9.3/go.mod h1:29k66YNDLDY9LCFKpGFeh6Nj9r62ZKm5EsUJxAl84KU= +cloud.google.com/go/domains v0.9.4/go.mod h1:27jmJGShuXYdUNjyDG0SodTfT5RwLi7xmH334Gvi3fY= cloud.google.com/go/edgecontainer v0.1.0/go.mod h1:WgkZ9tp10bFxqO8BLPqv2LlfmQF1X8lZqwW4r1BTajk= cloud.google.com/go/edgecontainer v0.2.0/go.mod h1:RTmLijy+lGpQ7BXuTDa4C4ssxyXT34NIuHIgKuP4s5w= cloud.google.com/go/edgecontainer v0.3.0/go.mod h1:FLDpP4nykgwwIfcLt6zInhprzw0lEi2P1fjO6Ie0qbc= cloud.google.com/go/edgecontainer v1.0.0/go.mod h1:cttArqZpBB2q58W/upSG++ooo6EsblxDIolxa3jSjbY= +cloud.google.com/go/edgecontainer v1.1.1/go.mod h1:O5bYcS//7MELQZs3+7mabRqoWQhXCzenBu0R8bz2rwk= +cloud.google.com/go/edgecontainer v1.1.2/go.mod h1:wQRjIzqxEs9e9wrtle4hQPSR1Y51kqN75dgF7UllZZ4= +cloud.google.com/go/edgecontainer v1.1.3/go.mod h1:Ll2DtIABzEfaxaVSbwj3QHFaOOovlDFiWVDu349jSsA= +cloud.google.com/go/edgecontainer v1.1.4/go.mod h1:AvFdVuZuVGdgaE5YvlL1faAoa1ndRR/5XhXZvPBHbsE= cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU= cloud.google.com/go/essentialcontacts v1.3.0/go.mod h1:r+OnHa5jfj90qIfZDO/VztSFqbQan7HV75p8sA+mdGI= cloud.google.com/go/essentialcontacts v1.4.0/go.mod h1:8tRldvHYsmnBCHdFpvU+GL75oWiBKl80BiqlFh9tp+8= cloud.google.com/go/essentialcontacts v1.5.0/go.mod h1:ay29Z4zODTuwliK7SnX8E86aUF2CTzdNtvv42niCX0M= +cloud.google.com/go/essentialcontacts v1.6.2/go.mod h1:T2tB6tX+TRak7i88Fb2N9Ok3PvY3UNbUsMag9/BARh4= +cloud.google.com/go/essentialcontacts v1.6.3/go.mod 
h1:yiPCD7f2TkP82oJEFXFTou8Jl8L6LBRPeBEkTaO0Ggo= +cloud.google.com/go/essentialcontacts v1.6.4/go.mod h1:iju5Vy3d9tJUg0PYMd1nHhjV7xoCXaOAVabrwLaPBEM= +cloud.google.com/go/essentialcontacts v1.6.5/go.mod h1:jjYbPzw0x+yglXC890l6ECJWdYeZ5dlYACTFL0U/VuM= cloud.google.com/go/eventarc v1.7.0/go.mod h1:6ctpF3zTnaQCxUjHUdcfgcA1A2T309+omHZth7gDfmc= cloud.google.com/go/eventarc v1.8.0/go.mod h1:imbzxkyAU4ubfsaKYdQg04WS1NvncblHEup4kvF+4gw= cloud.google.com/go/eventarc v1.10.0/go.mod h1:u3R35tmZ9HvswGRBnF48IlYgYeBcPUCjkr4BTdem2Kw= cloud.google.com/go/eventarc v1.11.0/go.mod h1:PyUjsUKPWoRBCHeOxZd/lbOOjahV41icXyUY5kSTvVY= +cloud.google.com/go/eventarc v1.12.1/go.mod h1:mAFCW6lukH5+IZjkvrEss+jmt2kOdYlN8aMx3sRJiAI= +cloud.google.com/go/eventarc v1.13.0/go.mod h1:mAFCW6lukH5+IZjkvrEss+jmt2kOdYlN8aMx3sRJiAI= +cloud.google.com/go/eventarc v1.13.1/go.mod h1:EqBxmGHFrruIara4FUQ3RHlgfCn7yo1HYsu2Hpt/C3Y= +cloud.google.com/go/eventarc v1.13.2/go.mod h1:X9A80ShVu19fb4e5sc/OLV7mpFUKZMwfJFeeWhcIObM= +cloud.google.com/go/eventarc v1.13.3/go.mod h1:RWH10IAZIRcj1s/vClXkBgMHwh59ts7hSWcqD3kaclg= cloud.google.com/go/filestore v1.3.0/go.mod h1:+qbvHGvXU1HaKX2nD0WEPo92TP/8AQuCVEBXNY9z0+w= cloud.google.com/go/filestore v1.4.0/go.mod h1:PaG5oDfo9r224f8OYXURtAsY+Fbyq/bLYoINEK8XQAI= cloud.google.com/go/filestore v1.5.0/go.mod h1:FqBXDWBp4YLHqRnVGveOkHDf8svj9r5+mUDLupOWEDs= cloud.google.com/go/filestore v1.6.0/go.mod h1:di5unNuss/qfZTw2U9nhFqo8/ZDSc466dre85Kydllg= -cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= +cloud.google.com/go/filestore v1.7.1/go.mod h1:y10jsorq40JJnjR/lQ8AfFbbcGlw3g+Dp8oN7i7FjV4= +cloud.google.com/go/filestore v1.7.2/go.mod h1:TYOlyJs25f/omgj+vY7/tIG/E7BX369triSPzE4LdgE= +cloud.google.com/go/filestore v1.7.3/go.mod h1:Qp8WaEERR3cSkxToxFPHh/b8AACkSut+4qlCjAmKTV0= +cloud.google.com/go/filestore v1.7.4/go.mod h1:S5JCxIbFjeBhWMTfIYH2Jx24J6BqjwpkkPl+nBA5DlI= +cloud.google.com/go/firestore v1.6.1/go.mod h1:asNXNOzBdyVQmEU+ggO8UPodTkEVFW5Qx+rwHnAz+EY= cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE= +cloud.google.com/go/firestore v1.11.0/go.mod h1:b38dKhgzlmNNGTNZZwe7ZRFEuRab1Hay3/DBsIGKKy4= +cloud.google.com/go/firestore v1.12.0/go.mod h1:b38dKhgzlmNNGTNZZwe7ZRFEuRab1Hay3/DBsIGKKy4= +cloud.google.com/go/firestore v1.13.0/go.mod h1:QojqqOh8IntInDUSTAh0c8ZsPYAr68Ma8c5DWOy8xb8= +cloud.google.com/go/firestore v1.14.0/go.mod h1:96MVaHLsEhbvkBEdZgfN+AS/GIkco1LRpH9Xp9YZfzQ= cloud.google.com/go/functions v1.6.0/go.mod h1:3H1UA3qiIPRWD7PeZKLvHZ9SaQhR26XIJcC0A5GbvAk= cloud.google.com/go/functions v1.7.0/go.mod h1:+d+QBcWM+RsrgZfV9xo6KfA1GlzJfxcfZcRPEhDDfzg= cloud.google.com/go/functions v1.8.0/go.mod h1:RTZ4/HsQjIqIYP9a9YPbU+QFoQsAlYgrwOXJWHn1POY= @@ -290,28 +532,56 @@ cloud.google.com/go/functions v1.9.0/go.mod h1:Y+Dz8yGguzO3PpIjhLTbnqV1CWmgQ5Uwt cloud.google.com/go/functions v1.10.0/go.mod h1:0D3hEOe3DbEvCXtYOZHQZmD+SzYsi1YbI7dGvHfldXw= cloud.google.com/go/functions v1.12.0/go.mod h1:AXWGrF3e2C/5ehvwYo/GH6O5s09tOPksiKhz+hH8WkA= cloud.google.com/go/functions v1.13.0/go.mod h1:EU4O007sQm6Ef/PwRsI8N2umygGqPBS/IZQKBQBcJ3c= +cloud.google.com/go/functions v1.15.1/go.mod h1:P5yNWUTkyU+LvW/S9O6V+V423VZooALQlqoXdoPz5AE= +cloud.google.com/go/functions v1.15.2/go.mod h1:CHAjtcR6OU4XF2HuiVeriEdELNcnvRZSk1Q8RMqy4lE= +cloud.google.com/go/functions v1.15.3/go.mod h1:r/AMHwBheapkkySEhiZYLDBwVJCdlRwsm4ieJu35/Ug= +cloud.google.com/go/functions v1.15.4/go.mod h1:CAsTc3VlRMVvx+XqXxKqVevguqJpnVip4DdonFsX28I= cloud.google.com/go/gaming v1.5.0/go.mod 
h1:ol7rGcxP/qHTRQE/RO4bxkXq+Fix0j6D4LFPzYTIrDM= cloud.google.com/go/gaming v1.6.0/go.mod h1:YMU1GEvA39Qt3zWGyAVA9bpYz/yAhTvaQ1t2sK4KPUA= cloud.google.com/go/gaming v1.7.0/go.mod h1:LrB8U7MHdGgFG851iHAfqUdLcKBdQ55hzXy9xBJz0+w= cloud.google.com/go/gaming v1.8.0/go.mod h1:xAqjS8b7jAVW0KFYeRUxngo9My3f33kFmua++Pi+ggM= cloud.google.com/go/gaming v1.9.0/go.mod h1:Fc7kEmCObylSWLO334NcO+O9QMDyz+TKC4v1D7X+Bc0= +cloud.google.com/go/gaming v1.10.1/go.mod h1:XQQvtfP8Rb9Rxnxm5wFVpAp9zCQkJi2bLIb7iHGwB3s= cloud.google.com/go/gkebackup v0.2.0/go.mod h1:XKvv/4LfG829/B8B7xRkk8zRrOEbKtEam6yNfuQNH60= cloud.google.com/go/gkebackup v0.3.0/go.mod h1:n/E671i1aOQvUxT541aTkCwExO/bTer2HDlj4TsBRAo= cloud.google.com/go/gkebackup v0.4.0/go.mod h1:byAyBGUwYGEEww7xsbnUTBHIYcOPy/PgUWUtOeRm9Vg= +cloud.google.com/go/gkebackup v1.3.0/go.mod h1:vUDOu++N0U5qs4IhG1pcOnD1Mac79xWy6GoBFlWCWBU= +cloud.google.com/go/gkebackup v1.3.1/go.mod h1:vUDOu++N0U5qs4IhG1pcOnD1Mac79xWy6GoBFlWCWBU= +cloud.google.com/go/gkebackup v1.3.2/go.mod h1:OMZbXzEJloyXMC7gqdSB+EOEQ1AKcpGYvO3s1ec5ixk= +cloud.google.com/go/gkebackup v1.3.3/go.mod h1:eMk7/wVV5P22KBakhQnJxWSVftL1p4VBFLpv0kIft7I= +cloud.google.com/go/gkebackup v1.3.4/go.mod h1:gLVlbM8h/nHIs09ns1qx3q3eaXcGSELgNu1DWXYz1HI= cloud.google.com/go/gkeconnect v0.5.0/go.mod h1:c5lsNAg5EwAy7fkqX/+goqFsU1Da/jQFqArp+wGNr/o= cloud.google.com/go/gkeconnect v0.6.0/go.mod h1:Mln67KyU/sHJEBY8kFZ0xTeyPtzbq9StAVvEULYK16A= cloud.google.com/go/gkeconnect v0.7.0/go.mod h1:SNfmVqPkaEi3bF/B3CNZOAYPYdg7sU+obZ+QTky2Myw= +cloud.google.com/go/gkeconnect v0.8.1/go.mod h1:KWiK1g9sDLZqhxB2xEuPV8V9NYzrqTUmQR9shJHpOZw= +cloud.google.com/go/gkeconnect v0.8.2/go.mod h1:6nAVhwchBJYgQCXD2pHBFQNiJNyAd/wyxljpaa6ZPrY= +cloud.google.com/go/gkeconnect v0.8.3/go.mod h1:i9GDTrfzBSUZGCe98qSu1B8YB8qfapT57PenIb820Jo= +cloud.google.com/go/gkeconnect v0.8.4/go.mod h1:84hZz4UMlDCKl8ifVW8layK4WHlMAFeq8vbzjU0yJkw= cloud.google.com/go/gkehub v0.9.0/go.mod h1:WYHN6WG8w9bXU0hqNxt8rm5uxnk8IH+lPY9J2TV7BK0= cloud.google.com/go/gkehub v0.10.0/go.mod h1:UIPwxI0DsrpsVoWpLB0stwKCP+WFVG9+y977wO+hBH0= cloud.google.com/go/gkehub v0.11.0/go.mod h1:JOWHlmN+GHyIbuWQPl47/C2RFhnFKH38jH9Ascu3n0E= cloud.google.com/go/gkehub v0.12.0/go.mod h1:djiIwwzTTBrF5NaXCGv3mf7klpEMcST17VBTVVDcuaw= +cloud.google.com/go/gkehub v0.14.1/go.mod h1:VEXKIJZ2avzrbd7u+zeMtW00Y8ddk/4V9511C9CQGTY= +cloud.google.com/go/gkehub v0.14.2/go.mod h1:iyjYH23XzAxSdhrbmfoQdePnlMj2EWcvnR+tHdBQsCY= +cloud.google.com/go/gkehub v0.14.3/go.mod h1:jAl6WafkHHW18qgq7kqcrXYzN08hXeK/Va3utN8VKg8= +cloud.google.com/go/gkehub v0.14.4/go.mod h1:Xispfu2MqnnFt8rV/2/3o73SK1snL8s9dYJ9G2oQMfc= cloud.google.com/go/gkemulticloud v0.3.0/go.mod h1:7orzy7O0S+5kq95e4Hpn7RysVA7dPs8W/GgfUtsPbrA= cloud.google.com/go/gkemulticloud v0.4.0/go.mod h1:E9gxVBnseLWCk24ch+P9+B2CoDFJZTyIgLKSalC7tuI= cloud.google.com/go/gkemulticloud v0.5.0/go.mod h1:W0JDkiyi3Tqh0TJr//y19wyb1yf8llHVto2Htf2Ja3Y= +cloud.google.com/go/gkemulticloud v0.6.1/go.mod h1:kbZ3HKyTsiwqKX7Yw56+wUGwwNZViRnxWK2DVknXWfw= +cloud.google.com/go/gkemulticloud v1.0.0/go.mod h1:kbZ3HKyTsiwqKX7Yw56+wUGwwNZViRnxWK2DVknXWfw= +cloud.google.com/go/gkemulticloud v1.0.1/go.mod h1:AcrGoin6VLKT/fwZEYuqvVominLriQBCKmbjtnbMjG8= +cloud.google.com/go/gkemulticloud v1.0.2/go.mod h1:+ee5VXxKb3H1l4LZAcgWB/rvI16VTNTrInWxDjAGsGo= +cloud.google.com/go/gkemulticloud v1.0.3/go.mod h1:7NpJBN94U6DY1xHIbsDqB2+TFZUfjLUKLjUX8NGLor0= cloud.google.com/go/grafeas v0.2.0/go.mod h1:KhxgtF2hb0P191HlY5besjYm6MqTSTj3LSI+M+ByZHc= +cloud.google.com/go/grafeas v0.3.0/go.mod 
h1:P7hgN24EyONOTMyeJH6DxG4zD7fwiYa5Q6GUgyFSOU8= cloud.google.com/go/gsuiteaddons v1.3.0/go.mod h1:EUNK/J1lZEZO8yPtykKxLXI6JSVN2rg9bN8SXOa0bgM= cloud.google.com/go/gsuiteaddons v1.4.0/go.mod h1:rZK5I8hht7u7HxFQcFei0+AtfS9uSushomRlg+3ua1o= cloud.google.com/go/gsuiteaddons v1.5.0/go.mod h1:TFCClYLd64Eaa12sFVmUyG62tk4mdIsI7pAnSXRkcFo= +cloud.google.com/go/gsuiteaddons v1.6.1/go.mod h1:CodrdOqRZcLp5WOwejHWYBjZvfY0kOphkAKpF/3qdZY= +cloud.google.com/go/gsuiteaddons v1.6.2/go.mod h1:K65m9XSgs8hTF3X9nNTPi8IQueljSdYo9F+Mi+s4MyU= +cloud.google.com/go/gsuiteaddons v1.6.3/go.mod h1:sCFJkZoMrLZT3JTb8uJqgKPNshH2tfXeCwTFRebTq48= +cloud.google.com/go/gsuiteaddons v1.6.4/go.mod h1:rxtstw7Fx22uLOXBpsvb9DUbC+fiXs7rF4U29KHM/pE= cloud.google.com/go/iam v0.1.0/go.mod h1:vcUNEa0pEm0qRVpmWepWaFMIAI8/hjB9mO8rNCJtF6c= cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= cloud.google.com/go/iam v0.5.0/go.mod h1:wPU9Vt0P4UmCux7mqtRu6jcpPAb74cP1fh50J3QpkUc= @@ -320,20 +590,40 @@ cloud.google.com/go/iam v0.7.0/go.mod h1:H5Br8wRaDGNc8XP3keLc4unfUUZeyH3Sfl9XpQE cloud.google.com/go/iam v0.8.0/go.mod h1:lga0/y3iH6CX7sYqypWJ33hf7kkfXJag67naqGESjkE= cloud.google.com/go/iam v0.11.0/go.mod h1:9PiLDanza5D+oWFZiH1uG+RnRCfEGKoyl6yo4cgWZGY= cloud.google.com/go/iam v0.12.0/go.mod h1:knyHGviacl11zrtZUoDuYpDgLjvr28sLQaG0YB2GYAY= -cloud.google.com/go/iam v0.13.0 h1:+CmB+K0J/33d0zSQ9SlFWUeCCEn5XJA0ZMZ3pHE9u8k= cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0= +cloud.google.com/go/iam v1.0.1/go.mod h1:yR3tmSL8BcZB4bxByRv2jkSIahVmCtfKZwLYGBalRE8= +cloud.google.com/go/iam v1.1.0/go.mod h1:nxdHjaKfCr7fNYx/HJMM8LgiMugmveWlkatear5gVyk= +cloud.google.com/go/iam v1.1.1/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+KUZOU= +cloud.google.com/go/iam v1.1.2/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+KUZOU= +cloud.google.com/go/iam v1.1.3/go.mod h1:3khUlaBXfPKKe7huYgEpDn6FtgRyMEqbkvBxrQyY5SE= +cloud.google.com/go/iam v1.1.4/go.mod h1:l/rg8l1AaA+VFMho/HYx2Vv6xinPSLMF8qfhRPIZ0L8= +cloud.google.com/go/iam v1.1.5 h1:1jTsCu4bcsNsE4iiqNT5SHwrDRCfRmIaaaVFhRveTJI= +cloud.google.com/go/iam v1.1.5/go.mod h1:rB6P/Ic3mykPbFio+vo7403drjlgvoWfYpJhMXEbzv8= cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc= cloud.google.com/go/iap v1.5.0/go.mod h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A= cloud.google.com/go/iap v1.6.0/go.mod h1:NSuvI9C/j7UdjGjIde7t7HBz+QTwBcapPE07+sSRcLk= cloud.google.com/go/iap v1.7.0/go.mod h1:beqQx56T9O1G1yNPph+spKpNibDlYIiIixiqsQXxLIo= cloud.google.com/go/iap v1.7.1/go.mod h1:WapEwPc7ZxGt2jFGB/C/bm+hP0Y6NXzOYGjpPnmMS74= +cloud.google.com/go/iap v1.8.1/go.mod h1:sJCbeqg3mvWLqjZNsI6dfAtbbV1DL2Rl7e1mTyXYREQ= +cloud.google.com/go/iap v1.9.0/go.mod h1:01OFxd1R+NFrg78S+hoPV5PxEzv22HXaNqUUlmNHFuY= +cloud.google.com/go/iap v1.9.1/go.mod h1:SIAkY7cGMLohLSdBR25BuIxO+I4fXJiL06IBL7cy/5Q= +cloud.google.com/go/iap v1.9.2/go.mod h1:GwDTOs047PPSnwRD0Us5FKf4WDRcVvHg1q9WVkKBhdI= +cloud.google.com/go/iap v1.9.3/go.mod h1:DTdutSZBqkkOm2HEOTBzhZxh2mwwxshfD/h3yofAiCw= cloud.google.com/go/ids v1.1.0/go.mod h1:WIuwCaYVOzHIj2OhN9HAwvW+DBdmUAdcWlFxRl+KubM= cloud.google.com/go/ids v1.2.0/go.mod h1:5WXvp4n25S0rA/mQWAg1YEEBBq6/s+7ml1RDCW1IrcY= cloud.google.com/go/ids v1.3.0/go.mod h1:JBdTYwANikFKaDP6LtW5JAi4gubs57SVNQjemdt6xV4= +cloud.google.com/go/ids v1.4.1/go.mod h1:np41ed8YMU8zOgv53MMMoCntLTn2lF+SUzlM+O3u/jw= +cloud.google.com/go/ids v1.4.2/go.mod h1:3vw8DX6YddRu9BncxuzMyWn0g8+ooUjI2gslJ7FH3vk= +cloud.google.com/go/ids v1.4.3/go.mod 
h1:9CXPqI3GedjmkjbMWCUhMZ2P2N7TUMzAkVXYEH2orYU= +cloud.google.com/go/ids v1.4.4/go.mod h1:z+WUc2eEl6S/1aZWzwtVNWoSZslgzPxAboS0lZX0HjI= cloud.google.com/go/iot v1.3.0/go.mod h1:r7RGh2B61+B8oz0AGE+J72AhA0G7tdXItODWsaA2oLs= cloud.google.com/go/iot v1.4.0/go.mod h1:dIDxPOn0UvNDUMD8Ger7FIaTuvMkj+aGk94RPP0iV+g= cloud.google.com/go/iot v1.5.0/go.mod h1:mpz5259PDl3XJthEmh9+ap0affn/MqNSP4My77Qql9o= cloud.google.com/go/iot v1.6.0/go.mod h1:IqdAsmE2cTYYNO1Fvjfzo9po179rAtJeVGUvkLN3rLE= +cloud.google.com/go/iot v1.7.1/go.mod h1:46Mgw7ev1k9KqK1ao0ayW9h0lI+3hxeanz+L1zmbbbk= +cloud.google.com/go/iot v1.7.2/go.mod h1:q+0P5zr1wRFpw7/MOgDXrG/HVA+l+cSwdObffkrpnSg= +cloud.google.com/go/iot v1.7.3/go.mod h1:t8itFchkol4VgNbHnIq9lXoOOtHNR3uAACQMYbN9N4I= +cloud.google.com/go/iot v1.7.4/go.mod h1:3TWqDVvsddYBG++nHSZmluoCAVGr1hAcabbWZNKEZLk= cloud.google.com/go/kms v1.4.0/go.mod h1:fajBHndQ+6ubNw6Ss2sSd+SWvjL26RNo/dr7uxsnnOA= cloud.google.com/go/kms v1.5.0/go.mod h1:QJS2YY0eJGBg3mnDfuaCyLauWwBJiHRboYxJ++1xJNg= cloud.google.com/go/kms v1.6.0/go.mod h1:Jjy850yySiasBUDi6KFUwUv2n1+o7QZFyuUJg6OgjA0= @@ -341,91 +631,200 @@ cloud.google.com/go/kms v1.8.0/go.mod h1:4xFEhYFqvW+4VMELtZyxomGSYtSQKzM178ylFW4 cloud.google.com/go/kms v1.9.0/go.mod h1:qb1tPTgfF9RQP8e1wq4cLFErVuTJv7UsSC915J8dh3w= cloud.google.com/go/kms v1.10.0/go.mod h1:ng3KTUtQQU9bPX3+QGLsflZIHlkbn8amFAMY63m8d24= cloud.google.com/go/kms v1.10.1/go.mod h1:rIWk/TryCkR59GMC3YtHtXeLzd634lBbKenvyySAyYI= +cloud.google.com/go/kms v1.11.0/go.mod h1:hwdiYC0xjnWsKQQCQQmIQnS9asjYVSK6jtXm+zFqXLM= +cloud.google.com/go/kms v1.12.1/go.mod h1:c9J991h5DTl+kg7gi3MYomh12YEENGrf48ee/N/2CDM= +cloud.google.com/go/kms v1.15.0/go.mod h1:c9J991h5DTl+kg7gi3MYomh12YEENGrf48ee/N/2CDM= +cloud.google.com/go/kms v1.15.2/go.mod h1:3hopT4+7ooWRCjc2DxgnpESFxhIraaI2IpAVUEhbT/w= +cloud.google.com/go/kms v1.15.3/go.mod h1:AJdXqHxS2GlPyduM99s9iGqi2nwbviBbhV/hdmt4iOQ= +cloud.google.com/go/kms v1.15.4/go.mod h1:L3Sdj6QTHK8dfwK5D1JLsAyELsNMnd3tAIwGS4ltKpc= +cloud.google.com/go/kms v1.15.5/go.mod h1:cU2H5jnp6G2TDpUGZyqTCoy1n16fbubHZjmVXSMtwDI= cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic= cloud.google.com/go/language v1.6.0/go.mod h1:6dJ8t3B+lUYfStgls25GusK04NLh3eDLQnWM3mdEbhI= cloud.google.com/go/language v1.7.0/go.mod h1:DJ6dYN/W+SQOjF8e1hLQXMF21AkH2w9wiPzPCJa2MIE= cloud.google.com/go/language v1.8.0/go.mod h1:qYPVHf7SPoNNiCL2Dr0FfEFNil1qi3pQEyygwpgVKB8= cloud.google.com/go/language v1.9.0/go.mod h1:Ns15WooPM5Ad/5no/0n81yUetis74g3zrbeJBE+ptUY= +cloud.google.com/go/language v1.10.1/go.mod h1:CPp94nsdVNiQEt1CNjF5WkTcisLiHPyIbMhvR8H2AW0= +cloud.google.com/go/language v1.11.0/go.mod h1:uDx+pFDdAKTY8ehpWbiXyQdz8tDSYLJbQcXsCkjYyvQ= +cloud.google.com/go/language v1.11.1/go.mod h1:Xyid9MG9WOX3utvDbpX7j3tXDmmDooMyMDqgUVpH17U= +cloud.google.com/go/language v1.12.1/go.mod h1:zQhalE2QlQIxbKIZt54IASBzmZpN/aDASea5zl1l+J4= +cloud.google.com/go/language v1.12.2/go.mod h1:9idWapzr/JKXBBQ4lWqVX/hcadxB194ry20m/bTrhWc= cloud.google.com/go/lifesciences v0.5.0/go.mod h1:3oIKy8ycWGPUyZDR/8RNnTOYevhaMLqh5vLUXs9zvT8= cloud.google.com/go/lifesciences v0.6.0/go.mod h1:ddj6tSX/7BOnhxCSd3ZcETvtNr8NZ6t/iPhY2Tyfu08= cloud.google.com/go/lifesciences v0.8.0/go.mod h1:lFxiEOMqII6XggGbOnKiyZ7IBwoIqA84ClvoezaA/bo= +cloud.google.com/go/lifesciences v0.9.1/go.mod h1:hACAOd1fFbCGLr/+weUKRAJas82Y4vrL3O5326N//Wc= +cloud.google.com/go/lifesciences v0.9.2/go.mod h1:QHEOO4tDzcSAzeJg7s2qwnLM2ji8IRpQl4p6m5Z9yTA= +cloud.google.com/go/lifesciences v0.9.3/go.mod 
h1:gNGBOJV80IWZdkd+xz4GQj4mbqaz737SCLHn2aRhQKM= +cloud.google.com/go/lifesciences v0.9.4/go.mod h1:bhm64duKhMi7s9jR9WYJYvjAFJwRqNj+Nia7hF0Z7JA= cloud.google.com/go/logging v1.6.1/go.mod h1:5ZO0mHHbvm8gEmeEUHrmDlTDSu5imF6MUP9OfilNXBw= cloud.google.com/go/logging v1.7.0/go.mod h1:3xjP2CjkM3ZkO73aj4ASA5wRPGGCRrPIAeNqVNkzY8M= +cloud.google.com/go/logging v1.8.1/go.mod h1:TJjR+SimHwuC8MZ9cjByQulAMgni+RkXeI3wwctHJEI= cloud.google.com/go/longrunning v0.1.1/go.mod h1:UUFxuDWkv22EuY93jjmDMFT5GPQKeFVJBIF6QlTqdsE= cloud.google.com/go/longrunning v0.3.0/go.mod h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc= cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo= +cloud.google.com/go/longrunning v0.4.2/go.mod h1:OHrnaYyLUV6oqwh0xiS7e5sLQhP1m0QU9R+WhGDMgIQ= +cloud.google.com/go/longrunning v0.5.0/go.mod h1:0JNuqRShmscVAhIACGtskSAWtqtOoPkwP0YF1oVEchc= +cloud.google.com/go/longrunning v0.5.1/go.mod h1:spvimkwdz6SPWKEt/XBij79E9fiTkHSQl/fRUUQJYJc= +cloud.google.com/go/longrunning v0.5.2/go.mod h1:nqo6DQbNV2pXhGDbDMoN2bWz68MjZUzqv2YttZiveCs= +cloud.google.com/go/longrunning v0.5.3/go.mod h1:y/0ga59EYu58J6SHmmQOvekvND2qODbu8ywBBW7EK7Y= +cloud.google.com/go/longrunning v0.5.4/go.mod h1:zqNVncI0BOP8ST6XQD1+VcvuShMmq7+xFSzOL++V0dI= cloud.google.com/go/managedidentities v1.3.0/go.mod h1:UzlW3cBOiPrzucO5qWkNkh0w33KFtBJU281hacNvsdE= cloud.google.com/go/managedidentities v1.4.0/go.mod h1:NWSBYbEMgqmbZsLIyKvxrYbtqOsxY1ZrGM+9RgDqInM= cloud.google.com/go/managedidentities v1.5.0/go.mod h1:+dWcZ0JlUmpuxpIDfyP5pP5y0bLdRwOS4Lp7gMni/LA= +cloud.google.com/go/managedidentities v1.6.1/go.mod h1:h/irGhTN2SkZ64F43tfGPMbHnypMbu4RB3yl8YcuEak= +cloud.google.com/go/managedidentities v1.6.2/go.mod h1:5c2VG66eCa0WIq6IylRk3TBW83l161zkFvCj28X7jn8= +cloud.google.com/go/managedidentities v1.6.3/go.mod h1:tewiat9WLyFN0Fi7q1fDD5+0N4VUoL0SCX0OTCthZq4= +cloud.google.com/go/managedidentities v1.6.4/go.mod h1:WgyaECfHmF00t/1Uk8Oun3CQ2PGUtjc3e9Alh79wyiM= cloud.google.com/go/maps v0.1.0/go.mod h1:BQM97WGyfw9FWEmQMpZ5T6cpovXXSd1cGmFma94eubI= cloud.google.com/go/maps v0.6.0/go.mod h1:o6DAMMfb+aINHz/p/jbcY+mYeXBoZoxTfdSQ8VAJaCw= cloud.google.com/go/maps v0.7.0/go.mod h1:3GnvVl3cqeSvgMcpRlQidXsPYuDGQ8naBis7MVzpXsY= +cloud.google.com/go/maps v1.3.0/go.mod h1:6mWTUv+WhnOwAgjVsSW2QPPECmW+s3PcRyOa9vgG/5s= +cloud.google.com/go/maps v1.4.0/go.mod h1:6mWTUv+WhnOwAgjVsSW2QPPECmW+s3PcRyOa9vgG/5s= +cloud.google.com/go/maps v1.4.1/go.mod h1:BxSa0BnW1g2U2gNdbq5zikLlHUuHW0GFWh7sgML2kIY= +cloud.google.com/go/maps v1.5.1/go.mod h1:NPMZw1LJwQZYCfz4y+EIw+SI+24A4bpdFJqdKVr0lt4= +cloud.google.com/go/maps v1.6.1/go.mod h1:4+buOHhYXFBp58Zj/K+Lc1rCmJssxxF4pJ5CJnhdz18= cloud.google.com/go/mediatranslation v0.5.0/go.mod h1:jGPUhGTybqsPQn91pNXw0xVHfuJ3leR1wj37oU3y1f4= cloud.google.com/go/mediatranslation v0.6.0/go.mod h1:hHdBCTYNigsBxshbznuIMFNe5QXEowAuNmmC7h8pu5w= cloud.google.com/go/mediatranslation v0.7.0/go.mod h1:LCnB/gZr90ONOIQLgSXagp8XUW1ODs2UmUMvcgMfI2I= +cloud.google.com/go/mediatranslation v0.8.1/go.mod h1:L/7hBdEYbYHQJhX2sldtTO5SZZ1C1vkapubj0T2aGig= +cloud.google.com/go/mediatranslation v0.8.2/go.mod h1:c9pUaDRLkgHRx3irYE5ZC8tfXGrMYwNZdmDqKMSfFp8= +cloud.google.com/go/mediatranslation v0.8.3/go.mod h1:F9OnXTy336rteOEywtY7FOqCk+J43o2RF638hkOQl4Y= +cloud.google.com/go/mediatranslation v0.8.4/go.mod h1:9WstgtNVAdN53m6TQa5GjIjLqKQPXe74hwSCxUP6nj4= cloud.google.com/go/memcache v1.4.0/go.mod h1:rTOfiGZtJX1AaFUrOgsMHX5kAzaTQ8azHiuDoTPzNsE= cloud.google.com/go/memcache v1.5.0/go.mod h1:dk3fCK7dVo0cUU2c36jKb4VqKPS22BTkf81Xq617aWM= 
cloud.google.com/go/memcache v1.6.0/go.mod h1:XS5xB0eQZdHtTuTF9Hf8eJkKtR3pVRCcvJwtm68T3rA= cloud.google.com/go/memcache v1.7.0/go.mod h1:ywMKfjWhNtkQTxrWxCkCFkoPjLHPW6A7WOTVI8xy3LY= cloud.google.com/go/memcache v1.9.0/go.mod h1:8oEyzXCu+zo9RzlEaEjHl4KkgjlNDaXbCQeQWlzNFJM= +cloud.google.com/go/memcache v1.10.1/go.mod h1:47YRQIarv4I3QS5+hoETgKO40InqzLP6kpNLvyXuyaA= +cloud.google.com/go/memcache v1.10.2/go.mod h1:f9ZzJHLBrmd4BkguIAa/l/Vle6uTHzHokdnzSWOdQ6A= +cloud.google.com/go/memcache v1.10.3/go.mod h1:6z89A41MT2DVAW0P4iIRdu5cmRTsbsFn4cyiIx8gbwo= +cloud.google.com/go/memcache v1.10.4/go.mod h1:v/d8PuC8d1gD6Yn5+I3INzLR01IDn0N4Ym56RgikSI0= cloud.google.com/go/metastore v1.5.0/go.mod h1:2ZNrDcQwghfdtCwJ33nM0+GrBGlVuh8rakL3vdPY3XY= cloud.google.com/go/metastore v1.6.0/go.mod h1:6cyQTls8CWXzk45G55x57DVQ9gWg7RiH65+YgPsNh9s= cloud.google.com/go/metastore v1.7.0/go.mod h1:s45D0B4IlsINu87/AsWiEVYbLaIMeUSoxlKKDqBGFS8= cloud.google.com/go/metastore v1.8.0/go.mod h1:zHiMc4ZUpBiM7twCIFQmJ9JMEkDSyZS9U12uf7wHqSI= cloud.google.com/go/metastore v1.10.0/go.mod h1:fPEnH3g4JJAk+gMRnrAnoqyv2lpUCqJPWOodSaf45Eo= +cloud.google.com/go/metastore v1.11.1/go.mod h1:uZuSo80U3Wd4zi6C22ZZliOUJ3XeM/MlYi/z5OAOWRA= +cloud.google.com/go/metastore v1.12.0/go.mod h1:uZuSo80U3Wd4zi6C22ZZliOUJ3XeM/MlYi/z5OAOWRA= +cloud.google.com/go/metastore v1.13.0/go.mod h1:URDhpG6XLeh5K+Glq0NOt74OfrPKTwS62gEPZzb5SOk= +cloud.google.com/go/metastore v1.13.1/go.mod h1:IbF62JLxuZmhItCppcIfzBBfUFq0DIB9HPDoLgWrVOU= +cloud.google.com/go/metastore v1.13.2/go.mod h1:KS59dD+unBji/kFebVp8XU/quNSyo8b6N6tPGspKszA= +cloud.google.com/go/metastore v1.13.3/go.mod h1:K+wdjXdtkdk7AQg4+sXS8bRrQa9gcOr+foOMF2tqINE= cloud.google.com/go/monitoring v1.7.0/go.mod h1:HpYse6kkGo//7p6sT0wsIC6IBDET0RhIsnmlA53dvEk= cloud.google.com/go/monitoring v1.8.0/go.mod h1:E7PtoMJ1kQXWxPjB6mv2fhC5/15jInuulFdYYtlcvT4= cloud.google.com/go/monitoring v1.12.0/go.mod h1:yx8Jj2fZNEkL/GYZyTLS4ZtZEZN8WtDEiEqG4kLK50w= cloud.google.com/go/monitoring v1.13.0/go.mod h1:k2yMBAB1H9JT/QETjNkgdCGD9bPF712XiLTVr+cBrpw= +cloud.google.com/go/monitoring v1.15.1/go.mod h1:lADlSAlFdbqQuwwpaImhsJXu1QSdd3ojypXrFSMr2rM= +cloud.google.com/go/monitoring v1.16.0/go.mod h1:Ptp15HgAyM1fNICAojDMoNc/wUmn67mLHQfyqbw+poY= +cloud.google.com/go/monitoring v1.16.1/go.mod h1:6HsxddR+3y9j+o/cMJH6q/KJ/CBTvM/38L/1m7bTRJ4= +cloud.google.com/go/monitoring v1.16.2/go.mod h1:B44KGwi4ZCF8Rk/5n+FWeispDXoKSk9oss2QNlXJBgc= +cloud.google.com/go/monitoring v1.16.3/go.mod h1:KwSsX5+8PnXv5NJnICZzW2R8pWTis8ypC4zmdRD63Tw= cloud.google.com/go/networkconnectivity v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA= cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o= cloud.google.com/go/networkconnectivity v1.6.0/go.mod h1:OJOoEXW+0LAxHh89nXd64uGG+FbQoeH8DtxCHVOMlaM= cloud.google.com/go/networkconnectivity v1.7.0/go.mod h1:RMuSbkdbPwNMQjB5HBWD5MpTBnNm39iAVpC3TmsExt8= cloud.google.com/go/networkconnectivity v1.10.0/go.mod h1:UP4O4sWXJG13AqrTdQCD9TnLGEbtNRqjuaaA7bNjF5E= cloud.google.com/go/networkconnectivity v1.11.0/go.mod h1:iWmDD4QF16VCDLXUqvyspJjIEtBR/4zq5hwnY2X3scM= +cloud.google.com/go/networkconnectivity v1.12.1/go.mod h1:PelxSWYM7Sh9/guf8CFhi6vIqf19Ir/sbfZRUwXh92E= +cloud.google.com/go/networkconnectivity v1.13.0/go.mod h1:SAnGPes88pl7QRLUen2HmcBSE9AowVAcdug8c0RSBFk= +cloud.google.com/go/networkconnectivity v1.14.0/go.mod h1:SAnGPes88pl7QRLUen2HmcBSE9AowVAcdug8c0RSBFk= +cloud.google.com/go/networkconnectivity v1.14.1/go.mod h1:LyGPXR742uQcDxZ/wv4EI0Vu5N6NKJ77ZYVnDe69Zug= 
+cloud.google.com/go/networkconnectivity v1.14.2/go.mod h1:5UFlwIisZylSkGG1AdwK/WZUaoz12PKu6wODwIbFzJo= +cloud.google.com/go/networkconnectivity v1.14.3/go.mod h1:4aoeFdrJpYEXNvrnfyD5kIzs8YtHg945Og4koAjHQek= cloud.google.com/go/networkmanagement v1.4.0/go.mod h1:Q9mdLLRn60AsOrPc8rs8iNV6OHXaGcDdsIQe1ohekq8= cloud.google.com/go/networkmanagement v1.5.0/go.mod h1:ZnOeZ/evzUdUsnvRt792H0uYEnHQEMaz+REhhzJRcf4= cloud.google.com/go/networkmanagement v1.6.0/go.mod h1:5pKPqyXjB/sgtvB5xqOemumoQNB7y95Q7S+4rjSOPYY= +cloud.google.com/go/networkmanagement v1.8.0/go.mod h1:Ho/BUGmtyEqrttTgWEe7m+8vDdK74ibQc+Be0q7Fof0= +cloud.google.com/go/networkmanagement v1.9.0/go.mod h1:UTUaEU9YwbCAhhz3jEOHr+2/K/MrBk2XxOLS89LQzFw= +cloud.google.com/go/networkmanagement v1.9.1/go.mod h1:CCSYgrQQvW73EJawO2QamemYcOb57LvrDdDU51F0mcI= +cloud.google.com/go/networkmanagement v1.9.2/go.mod h1:iDGvGzAoYRghhp4j2Cji7sF899GnfGQcQRQwgVOWnDw= +cloud.google.com/go/networkmanagement v1.9.3/go.mod h1:y7WMO1bRLaP5h3Obm4tey+NquUvB93Co1oh4wpL+XcU= cloud.google.com/go/networksecurity v0.5.0/go.mod h1:xS6fOCoqpVC5zx15Z/MqkfDwH4+m/61A3ODiDV1xmiQ= cloud.google.com/go/networksecurity v0.6.0/go.mod h1:Q5fjhTr9WMI5mbpRYEbiexTzROf7ZbDzvzCrNl14nyU= cloud.google.com/go/networksecurity v0.7.0/go.mod h1:mAnzoxx/8TBSyXEeESMy9OOYwo1v+gZ5eMRnsT5bC8k= cloud.google.com/go/networksecurity v0.8.0/go.mod h1:B78DkqsxFG5zRSVuwYFRZ9Xz8IcQ5iECsNrPn74hKHU= +cloud.google.com/go/networksecurity v0.9.1/go.mod h1:MCMdxOKQ30wsBI1eI659f9kEp4wuuAueoC9AJKSPWZQ= +cloud.google.com/go/networksecurity v0.9.2/go.mod h1:jG0SeAttWzPMUILEHDUvFYdQTl8L/E/KC8iZDj85lEI= +cloud.google.com/go/networksecurity v0.9.3/go.mod h1:l+C0ynM6P+KV9YjOnx+kk5IZqMSLccdBqW6GUoF4p/0= +cloud.google.com/go/networksecurity v0.9.4/go.mod h1:E9CeMZ2zDsNBkr8axKSYm8XyTqNhiCHf1JO/Vb8mD1w= cloud.google.com/go/notebooks v1.2.0/go.mod h1:9+wtppMfVPUeJ8fIWPOq1UnATHISkGXGqTkxeieQ6UY= cloud.google.com/go/notebooks v1.3.0/go.mod h1:bFR5lj07DtCPC7YAAJ//vHskFBxA5JzYlH68kXVdk34= cloud.google.com/go/notebooks v1.4.0/go.mod h1:4QPMngcwmgb6uw7Po99B2xv5ufVoIQ7nOGDyL4P8AgA= cloud.google.com/go/notebooks v1.5.0/go.mod h1:q8mwhnP9aR8Hpfnrc5iN5IBhrXUy8S2vuYs+kBJ/gu0= cloud.google.com/go/notebooks v1.7.0/go.mod h1:PVlaDGfJgj1fl1S3dUwhFMXFgfYGhYQt2164xOMONmE= cloud.google.com/go/notebooks v1.8.0/go.mod h1:Lq6dYKOYOWUCTvw5t2q1gp1lAp0zxAxRycayS0iJcqQ= +cloud.google.com/go/notebooks v1.9.1/go.mod h1:zqG9/gk05JrzgBt4ghLzEepPHNwE5jgPcHZRKhlC1A8= +cloud.google.com/go/notebooks v1.10.0/go.mod h1:SOPYMZnttHxqot0SGSFSkRrwE29eqnKPBJFqgWmiK2k= +cloud.google.com/go/notebooks v1.10.1/go.mod h1:5PdJc2SgAybE76kFQCWrTfJolCOUQXF97e+gteUUA6A= +cloud.google.com/go/notebooks v1.11.1/go.mod h1:V2Zkv8wX9kDCGRJqYoI+bQAaoVeE5kSiz4yYHd2yJwQ= +cloud.google.com/go/notebooks v1.11.2/go.mod h1:z0tlHI/lREXC8BS2mIsUeR3agM1AkgLiS+Isov3SS70= cloud.google.com/go/optimization v1.1.0/go.mod h1:5po+wfvX5AQlPznyVEZjGJTMr4+CAkJf2XSTQOOl9l4= cloud.google.com/go/optimization v1.2.0/go.mod h1:Lr7SOHdRDENsh+WXVmQhQTrzdu9ybg0NecjHidBq6xs= cloud.google.com/go/optimization v1.3.1/go.mod h1:IvUSefKiwd1a5p0RgHDbWCIbDFgKuEdB+fPPuP0IDLI= +cloud.google.com/go/optimization v1.4.1/go.mod h1:j64vZQP7h9bO49m2rVaTVoNM0vEBEN5eKPUPbZyXOrk= +cloud.google.com/go/optimization v1.5.0/go.mod h1:evo1OvTxeBRBu6ydPlrIRizKY/LJKo/drDMMRKqGEUU= +cloud.google.com/go/optimization v1.5.1/go.mod h1:NC0gnUD5MWVAF7XLdoYVPmYYVth93Q6BUzqAq3ZwtV8= +cloud.google.com/go/optimization v1.6.1/go.mod h1:hH2RYPTTM9e9zOiTaYPTiGPcGdNZVnBSBxjIAJzUkqo= +cloud.google.com/go/optimization v1.6.2/go.mod 
h1:mWNZ7B9/EyMCcwNl1frUGEuY6CPijSkz88Fz2vwKPOY= cloud.google.com/go/orchestration v1.3.0/go.mod h1:Sj5tq/JpWiB//X/q3Ngwdl5K7B7Y0KZ7bfv0wL6fqVA= cloud.google.com/go/orchestration v1.4.0/go.mod h1:6W5NLFWs2TlniBphAViZEVhrXRSMgUGDfW7vrWKvsBk= cloud.google.com/go/orchestration v1.6.0/go.mod h1:M62Bevp7pkxStDfFfTuCOaXgaaqRAga1yKyoMtEoWPQ= +cloud.google.com/go/orchestration v1.8.1/go.mod h1:4sluRF3wgbYVRqz7zJ1/EUNc90TTprliq9477fGobD8= +cloud.google.com/go/orchestration v1.8.2/go.mod h1:T1cP+6WyTmh6LSZzeUhvGf0uZVmJyTx7t8z7Vg87+A0= +cloud.google.com/go/orchestration v1.8.3/go.mod h1:xhgWAYqlbYjlz2ftbFghdyqENYW+JXuhBx9KsjMoGHs= +cloud.google.com/go/orchestration v1.8.4/go.mod h1:d0lywZSVYtIoSZXb0iFjv9SaL13PGyVOKDxqGxEf/qI= cloud.google.com/go/orgpolicy v1.4.0/go.mod h1:xrSLIV4RePWmP9P3tBl8S93lTmlAxjm06NSm2UTmKvE= cloud.google.com/go/orgpolicy v1.5.0/go.mod h1:hZEc5q3wzwXJaKrsx5+Ewg0u1LxJ51nNFlext7Tanwc= cloud.google.com/go/orgpolicy v1.10.0/go.mod h1:w1fo8b7rRqlXlIJbVhOMPrwVljyuW5mqssvBtU18ONc= +cloud.google.com/go/orgpolicy v1.11.0/go.mod h1:2RK748+FtVvnfuynxBzdnyu7sygtoZa1za/0ZfpOs1M= +cloud.google.com/go/orgpolicy v1.11.1/go.mod h1:8+E3jQcpZJQliP+zaFfayC2Pg5bmhuLK755wKhIIUCE= +cloud.google.com/go/orgpolicy v1.11.2/go.mod h1:biRDpNwfyytYnmCRWZWxrKF22Nkz9eNVj9zyaBdpm1o= +cloud.google.com/go/orgpolicy v1.11.3/go.mod h1:oKAtJ/gkMjum5icv2aujkP4CxROxPXsBbYGCDbPO8MM= +cloud.google.com/go/orgpolicy v1.11.4/go.mod h1:0+aNV/nrfoTQ4Mytv+Aw+stBDBjNf4d8fYRA9herfJI= cloud.google.com/go/osconfig v1.7.0/go.mod h1:oVHeCeZELfJP7XLxcBGTMBvRO+1nQ5tFG9VQTmYS2Fs= cloud.google.com/go/osconfig v1.8.0/go.mod h1:EQqZLu5w5XA7eKizepumcvWx+m8mJUhEwiPqWiZeEdg= cloud.google.com/go/osconfig v1.9.0/go.mod h1:Yx+IeIZJ3bdWmzbQU4fxNl8xsZ4amB+dygAwFPlvnNo= cloud.google.com/go/osconfig v1.10.0/go.mod h1:uMhCzqC5I8zfD9zDEAfvgVhDS8oIjySWh+l4WK6GnWw= cloud.google.com/go/osconfig v1.11.0/go.mod h1:aDICxrur2ogRd9zY5ytBLV89KEgT2MKB2L/n6x1ooPw= +cloud.google.com/go/osconfig v1.12.0/go.mod h1:8f/PaYzoS3JMVfdfTubkowZYGmAhUCjjwnjqWI7NVBc= +cloud.google.com/go/osconfig v1.12.1/go.mod h1:4CjBxND0gswz2gfYRCUoUzCm9zCABp91EeTtWXyz0tE= +cloud.google.com/go/osconfig v1.12.2/go.mod h1:eh9GPaMZpI6mEJEuhEjUJmaxvQ3gav+fFEJon1Y8Iw0= +cloud.google.com/go/osconfig v1.12.3/go.mod h1:L/fPS8LL6bEYUi1au832WtMnPeQNT94Zo3FwwV1/xGM= +cloud.google.com/go/osconfig v1.12.4/go.mod h1:B1qEwJ/jzqSRslvdOCI8Kdnp0gSng0xW4LOnIebQomA= cloud.google.com/go/oslogin v1.4.0/go.mod h1:YdgMXWRaElXz/lDk1Na6Fh5orF7gvmJ0FGLIs9LId4E= cloud.google.com/go/oslogin v1.5.0/go.mod h1:D260Qj11W2qx/HVF29zBg+0fd6YCSjSqLUkY/qEenQU= cloud.google.com/go/oslogin v1.6.0/go.mod h1:zOJ1O3+dTU8WPlGEkFSh7qeHPPSoxrcMbbK1Nm2iX70= cloud.google.com/go/oslogin v1.7.0/go.mod h1:e04SN0xO1UNJ1M5GP0vzVBFicIe4O53FOfcixIqTyXo= cloud.google.com/go/oslogin v1.9.0/go.mod h1:HNavntnH8nzrn8JCTT5fj18FuJLFJc4NaZJtBnQtKFs= +cloud.google.com/go/oslogin v1.10.1/go.mod h1:x692z7yAue5nE7CsSnoG0aaMbNoRJRXO4sn73R+ZqAs= +cloud.google.com/go/oslogin v1.11.0/go.mod h1:8GMTJs4X2nOAUVJiPGqIWVcDaF0eniEto3xlOxaboXE= +cloud.google.com/go/oslogin v1.11.1/go.mod h1:OhD2icArCVNUxKqtK0mcSmKL7lgr0LVlQz+v9s1ujTg= +cloud.google.com/go/oslogin v1.12.1/go.mod h1:VfwTeFJGbnakxAY236eN8fsnglLiVXndlbcNomY4iZU= +cloud.google.com/go/oslogin v1.12.2/go.mod h1:CQ3V8Jvw4Qo4WRhNPF0o+HAM4DiLuE27Ul9CX9g2QdY= cloud.google.com/go/phishingprotection v0.5.0/go.mod h1:Y3HZknsK9bc9dMi+oE8Bim0lczMU6hrX0UpADuMefr0= cloud.google.com/go/phishingprotection v0.6.0/go.mod h1:9Y3LBLgy0kDTcYET8ZH3bq/7qni15yVUoAxiFxnlSUA= cloud.google.com/go/phishingprotection 
v0.7.0/go.mod h1:8qJI4QKHoda/sb/7/YmMQ2omRLSLYSu9bU0EKCNI+Lk= +cloud.google.com/go/phishingprotection v0.8.1/go.mod h1:AxonW7GovcA8qdEk13NfHq9hNx5KPtfxXNeUxTDxB6I= +cloud.google.com/go/phishingprotection v0.8.2/go.mod h1:LhJ91uyVHEYKSKcMGhOa14zMMWfbEdxG032oT6ECbC8= +cloud.google.com/go/phishingprotection v0.8.3/go.mod h1:3B01yO7T2Ra/TMojifn8EoGd4G9jts/6cIO0DgDY9J8= +cloud.google.com/go/phishingprotection v0.8.4/go.mod h1:6b3kNPAc2AQ6jZfFHioZKg9MQNybDg4ixFd4RPZZ2nE= cloud.google.com/go/policytroubleshooter v1.3.0/go.mod h1:qy0+VwANja+kKrjlQuOzmlvscn4RNsAc0e15GGqfMxg= cloud.google.com/go/policytroubleshooter v1.4.0/go.mod h1:DZT4BcRw3QoO8ota9xw/LKtPa8lKeCByYeKTIf/vxdE= cloud.google.com/go/policytroubleshooter v1.5.0/go.mod h1:Rz1WfV+1oIpPdN2VvvuboLVRsB1Hclg3CKQ53j9l8vw= cloud.google.com/go/policytroubleshooter v1.6.0/go.mod h1:zYqaPTsmfvpjm5ULxAyD/lINQxJ0DDsnWOP/GZ7xzBc= +cloud.google.com/go/policytroubleshooter v1.7.1/go.mod h1:0NaT5v3Ag1M7U5r0GfDCpUFkWd9YqpubBWsQlhanRv0= +cloud.google.com/go/policytroubleshooter v1.8.0/go.mod h1:tmn5Ir5EToWe384EuboTcVQT7nTag2+DuH3uHmKd1HU= +cloud.google.com/go/policytroubleshooter v1.9.0/go.mod h1:+E2Lga7TycpeSTj2FsH4oXxTnrbHJGRlKhVZBLGgU64= +cloud.google.com/go/policytroubleshooter v1.9.1/go.mod h1:MYI8i0bCrL8cW+VHN1PoiBTyNZTstCg2WUw2eVC4c4U= +cloud.google.com/go/policytroubleshooter v1.10.1/go.mod h1:5C0rhT3TDZVxAu8813bwmTvd57Phbl8mr9F4ipOsxEs= +cloud.google.com/go/policytroubleshooter v1.10.2/go.mod h1:m4uF3f6LseVEnMV6nknlN2vYGRb+75ylQwJdnOXfnv0= cloud.google.com/go/privatecatalog v0.5.0/go.mod h1:XgosMUvvPyxDjAVNDYxJ7wBW8//hLDDYmnsNcMGq1K0= cloud.google.com/go/privatecatalog v0.6.0/go.mod h1:i/fbkZR0hLN29eEWiiwue8Pb+GforiEIBnV9yrRUOKI= cloud.google.com/go/privatecatalog v0.7.0/go.mod h1:2s5ssIFO69F5csTXcwBP7NPFTZvps26xGzvQ2PQaBYg= cloud.google.com/go/privatecatalog v0.8.0/go.mod h1:nQ6pfaegeDAq/Q5lrfCQzQLhubPiZhSaNhIgfJlnIXs= +cloud.google.com/go/privatecatalog v0.9.1/go.mod h1:0XlDXW2unJXdf9zFz968Hp35gl/bhF4twwpXZAW50JA= +cloud.google.com/go/privatecatalog v0.9.2/go.mod h1:RMA4ATa8IXfzvjrhhK8J6H4wwcztab+oZph3c6WmtFc= +cloud.google.com/go/privatecatalog v0.9.3/go.mod h1:K5pn2GrVmOPjXz3T26mzwXLcKivfIJ9R5N79AFCF9UE= +cloud.google.com/go/privatecatalog v0.9.4/go.mod h1:SOjm93f+5hp/U3PqMZAHTtBtluqLygrDrVO8X8tYtG0= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= @@ -435,9 +834,12 @@ cloud.google.com/go/pubsub v1.26.0/go.mod h1:QgBH3U/jdJy/ftjPhTkyXNj543Tin1pRYcd cloud.google.com/go/pubsub v1.27.1/go.mod h1:hQN39ymbV9geqBnfQq6Xf63yNhUAhv9CZhzp5O6qsW0= cloud.google.com/go/pubsub v1.28.0/go.mod h1:vuXFpwaVoIPQMGXqRyUQigu/AX1S3IWugR9xznmcXX8= cloud.google.com/go/pubsub v1.30.0/go.mod h1:qWi1OPS0B+b5L+Sg6Gmc9zD1Y+HaM0MdUr7LsupY1P4= +cloud.google.com/go/pubsub v1.32.0/go.mod h1:f+w71I33OMyxf9VpMVcZbnG5KSUkCOUHYpFd5U1GdRc= +cloud.google.com/go/pubsub v1.33.0/go.mod h1:f+w71I33OMyxf9VpMVcZbnG5KSUkCOUHYpFd5U1GdRc= cloud.google.com/go/pubsublite v1.5.0/go.mod h1:xapqNQ1CuLfGi23Yda/9l4bBCKz/wC3KIJ5gKcxveZg= cloud.google.com/go/pubsublite v1.6.0/go.mod h1:1eFCS0U11xlOuMFV/0iBqw3zP12kddMeCbj/F3FSj9k= cloud.google.com/go/pubsublite v1.7.0/go.mod h1:8hVMwRXfDfvGm3fahVbtDbiLePT3gpoiJYJY+vxWxVM= +cloud.google.com/go/pubsublite v1.8.1/go.mod h1:fOLdU4f5xldK4RGJrBMm+J7zMWNj/k4PxwEZXy39QS0= cloud.google.com/go/recaptchaenterprise v1.3.1/go.mod 
h1:OdD+q+y4XGeAlxRaMn1Y7/GveP6zmq76byL6tjPE7d4= cloud.google.com/go/recaptchaenterprise/v2 v2.1.0/go.mod h1:w9yVqajwroDNTfGuhmOjPDN//rZGySaf6PtFVcSCa7o= cloud.google.com/go/recaptchaenterprise/v2 v2.2.0/go.mod h1:/Zu5jisWGeERrd5HnlS3EUGb/D335f9k51B/FVil0jk= @@ -446,46 +848,89 @@ cloud.google.com/go/recaptchaenterprise/v2 v2.4.0/go.mod h1:Am3LHfOuBstrLrNCBrlI cloud.google.com/go/recaptchaenterprise/v2 v2.5.0/go.mod h1:O8LzcHXN3rz0j+LBC91jrwI3R+1ZSZEWrfL7XHgNo9U= cloud.google.com/go/recaptchaenterprise/v2 v2.6.0/go.mod h1:RPauz9jeLtB3JVzg6nCbe12qNoaa8pXc4d/YukAmcnA= cloud.google.com/go/recaptchaenterprise/v2 v2.7.0/go.mod h1:19wVj/fs5RtYtynAPJdDTb69oW0vNHYDBTbB4NvMD9c= +cloud.google.com/go/recaptchaenterprise/v2 v2.7.2/go.mod h1:kR0KjsJS7Jt1YSyWFkseQ756D45kaYNTlDPPaRAvDBU= +cloud.google.com/go/recaptchaenterprise/v2 v2.8.0/go.mod h1:QuE8EdU9dEnesG8/kG3XuJyNsjEqMlMzg3v3scCJ46c= +cloud.google.com/go/recaptchaenterprise/v2 v2.8.1/go.mod h1:JZYZJOeZjgSSTGP4uz7NlQ4/d1w5hGmksVgM0lbEij0= +cloud.google.com/go/recaptchaenterprise/v2 v2.8.2/go.mod h1:kpaDBOpkwD4G0GVMzG1W6Doy1tFFC97XAV3xy+Rd/pw= +cloud.google.com/go/recaptchaenterprise/v2 v2.8.3/go.mod h1:Dak54rw6lC2gBY8FBznpOCAR58wKf+R+ZSJRoeJok4w= cloud.google.com/go/recommendationengine v0.5.0/go.mod h1:E5756pJcVFeVgaQv3WNpImkFP8a+RptV6dDLGPILjvg= cloud.google.com/go/recommendationengine v0.6.0/go.mod h1:08mq2umu9oIqc7tDy8sx+MNJdLG0fUi3vaSVbztHgJ4= cloud.google.com/go/recommendationengine v0.7.0/go.mod h1:1reUcE3GIu6MeBz/h5xZJqNLuuVjNg1lmWMPyjatzac= +cloud.google.com/go/recommendationengine v0.8.1/go.mod h1:MrZihWwtFYWDzE6Hz5nKcNz3gLizXVIDI/o3G1DLcrE= +cloud.google.com/go/recommendationengine v0.8.2/go.mod h1:QIybYHPK58qir9CV2ix/re/M//Ty10OxjnnhWdaKS1Y= +cloud.google.com/go/recommendationengine v0.8.3/go.mod h1:m3b0RZV02BnODE9FeSvGv1qibFo8g0OnmB/RMwYy4V8= +cloud.google.com/go/recommendationengine v0.8.4/go.mod h1:GEteCf1PATl5v5ZsQ60sTClUE0phbWmo3rQ1Js8louU= cloud.google.com/go/recommender v1.5.0/go.mod h1:jdoeiBIVrJe9gQjwd759ecLJbxCDED4A6p+mqoqDvTg= cloud.google.com/go/recommender v1.6.0/go.mod h1:+yETpm25mcoiECKh9DEScGzIRyDKpZ0cEhWGo+8bo+c= cloud.google.com/go/recommender v1.7.0/go.mod h1:XLHs/W+T8olwlGOgfQenXBTbIseGclClff6lhFVe9Bs= cloud.google.com/go/recommender v1.8.0/go.mod h1:PkjXrTT05BFKwxaUxQmtIlrtj0kph108r02ZZQ5FE70= cloud.google.com/go/recommender v1.9.0/go.mod h1:PnSsnZY7q+VL1uax2JWkt/UegHssxjUVVCrX52CuEmQ= +cloud.google.com/go/recommender v1.10.1/go.mod h1:XFvrE4Suqn5Cq0Lf+mCP6oBHD/yRMA8XxP5sb7Q7gpA= +cloud.google.com/go/recommender v1.11.0/go.mod h1:kPiRQhPyTJ9kyXPCG6u/dlPLbYfFlkwHNRwdzPVAoII= +cloud.google.com/go/recommender v1.11.1/go.mod h1:sGwFFAyI57v2Hc5LbIj+lTwXipGu9NW015rkaEM5B18= +cloud.google.com/go/recommender v1.11.2/go.mod h1:AeoJuzOvFR/emIcXdVFkspVXVTYpliRCmKNYDnyBv6Y= +cloud.google.com/go/recommender v1.11.3/go.mod h1:+FJosKKJSId1MBFeJ/TTyoGQZiEelQQIZMKYYD8ruK4= cloud.google.com/go/redis v1.7.0/go.mod h1:V3x5Jq1jzUcg+UNsRvdmsfuFnit1cfe3Z/PGyq/lm4Y= cloud.google.com/go/redis v1.8.0/go.mod h1:Fm2szCDavWzBk2cDKxrkmWBqoCiL1+Ctwq7EyqBCA/A= cloud.google.com/go/redis v1.9.0/go.mod h1:HMYQuajvb2D0LvMgZmLDZW8V5aOC/WxstZHiy4g8OiA= cloud.google.com/go/redis v1.10.0/go.mod h1:ThJf3mMBQtW18JzGgh41/Wld6vnDDc/F/F35UolRZPM= cloud.google.com/go/redis v1.11.0/go.mod h1:/X6eicana+BWcUda5PpwZC48o37SiFVTFSs0fWAJ7uQ= +cloud.google.com/go/redis v1.13.1/go.mod h1:VP7DGLpE91M6bcsDdMuyCm2hIpB6Vp2hI090Mfd1tcg= +cloud.google.com/go/redis v1.13.2/go.mod h1:0Hg7pCMXS9uz02q+LoEVl5dNHUkIQv+C/3L76fandSA= +cloud.google.com/go/redis v1.13.3/go.mod 
h1:vbUpCKUAZSYzFcWKmICnYgRAhTFg9r+djWqFxDYXi4U= +cloud.google.com/go/redis v1.14.1/go.mod h1:MbmBxN8bEnQI4doZPC1BzADU4HGocHBk2de3SbgOkqs= cloud.google.com/go/resourcemanager v1.3.0/go.mod h1:bAtrTjZQFJkiWTPDb1WBjzvc6/kifjj4QBYuKCCoqKA= cloud.google.com/go/resourcemanager v1.4.0/go.mod h1:MwxuzkumyTX7/a3n37gmsT3py7LIXwrShilPh3P1tR0= cloud.google.com/go/resourcemanager v1.5.0/go.mod h1:eQoXNAiAvCf5PXxWxXjhKQoTMaUSNrEfg+6qdf/wots= cloud.google.com/go/resourcemanager v1.6.0/go.mod h1:YcpXGRs8fDzcUl1Xw8uOVmI8JEadvhRIkoXXUNVYcVo= cloud.google.com/go/resourcemanager v1.7.0/go.mod h1:HlD3m6+bwhzj9XCouqmeiGuni95NTrExfhoSrkC/3EI= +cloud.google.com/go/resourcemanager v1.9.1/go.mod h1:dVCuosgrh1tINZ/RwBufr8lULmWGOkPS8gL5gqyjdT8= +cloud.google.com/go/resourcemanager v1.9.2/go.mod h1:OujkBg1UZg5lX2yIyMo5Vz9O5hf7XQOSV7WxqxxMtQE= +cloud.google.com/go/resourcemanager v1.9.3/go.mod h1:IqrY+g0ZgLsihcfcmqSe+RKp1hzjXwG904B92AwBz6U= +cloud.google.com/go/resourcemanager v1.9.4/go.mod h1:N1dhP9RFvo3lUfwtfLWVxfUWq8+KUQ+XLlHLH3BoFJ0= cloud.google.com/go/resourcesettings v1.3.0/go.mod h1:lzew8VfESA5DQ8gdlHwMrqZs1S9V87v3oCnKCWoOuQU= cloud.google.com/go/resourcesettings v1.4.0/go.mod h1:ldiH9IJpcrlC3VSuCGvjR5of/ezRrOxFtpJoJo5SmXg= cloud.google.com/go/resourcesettings v1.5.0/go.mod h1:+xJF7QSG6undsQDfsCJyqWXyBwUoJLhetkRMDRnIoXA= +cloud.google.com/go/resourcesettings v1.6.1/go.mod h1:M7mk9PIZrC5Fgsu1kZJci6mpgN8o0IUzVx3eJU3y4Jw= +cloud.google.com/go/resourcesettings v1.6.2/go.mod h1:mJIEDd9MobzunWMeniaMp6tzg4I2GvD3TTmPkc8vBXk= +cloud.google.com/go/resourcesettings v1.6.3/go.mod h1:pno5D+7oDYkMWZ5BpPsb4SO0ewg3IXcmmrUZaMJrFic= +cloud.google.com/go/resourcesettings v1.6.4/go.mod h1:pYTTkWdv2lmQcjsthbZLNBP4QW140cs7wqA3DuqErVI= cloud.google.com/go/retail v1.8.0/go.mod h1:QblKS8waDmNUhghY2TI9O3JLlFk8jybHeV4BF19FrE4= cloud.google.com/go/retail v1.9.0/go.mod h1:g6jb6mKuCS1QKnH/dpu7isX253absFl6iE92nHwlBUY= cloud.google.com/go/retail v1.10.0/go.mod h1:2gDk9HsL4HMS4oZwz6daui2/jmKvqShXKQuB2RZ+cCc= cloud.google.com/go/retail v1.11.0/go.mod h1:MBLk1NaWPmh6iVFSz9MeKG/Psyd7TAgm6y/9L2B4x9Y= cloud.google.com/go/retail v1.12.0/go.mod h1:UMkelN/0Z8XvKymXFbD4EhFJlYKRx1FGhQkVPU5kF14= +cloud.google.com/go/retail v1.14.1/go.mod h1:y3Wv3Vr2k54dLNIrCzenyKG8g8dhvhncT2NcNjb/6gE= +cloud.google.com/go/retail v1.14.2/go.mod h1:W7rrNRChAEChX336QF7bnMxbsjugcOCPU44i5kbLiL8= +cloud.google.com/go/retail v1.14.3/go.mod h1:Omz2akDHeSlfCq8ArPKiBxlnRpKEBjUH386JYFLUvXo= +cloud.google.com/go/retail v1.14.4/go.mod h1:l/N7cMtY78yRnJqp5JW8emy7MB1nz8E4t2yfOmklYfg= cloud.google.com/go/run v0.2.0/go.mod h1:CNtKsTA1sDcnqqIFR3Pb5Tq0usWxJJvsWOCPldRU3Do= cloud.google.com/go/run v0.3.0/go.mod h1:TuyY1+taHxTjrD0ZFk2iAR+xyOXEA0ztb7U3UNA0zBo= cloud.google.com/go/run v0.8.0/go.mod h1:VniEnuBwqjigv0A7ONfQUaEItaiCRVujlMqerPPiktM= cloud.google.com/go/run v0.9.0/go.mod h1:Wwu+/vvg8Y+JUApMwEDfVfhetv30hCG4ZwDR/IXl2Qg= +cloud.google.com/go/run v1.2.0/go.mod h1:36V1IlDzQ0XxbQjUx6IYbw8H3TJnWvhii963WW3B/bo= +cloud.google.com/go/run v1.3.0/go.mod h1:S/osX/4jIPZGg+ssuqh6GNgg7syixKe3YnprwehzHKU= +cloud.google.com/go/run v1.3.1/go.mod h1:cymddtZOzdwLIAsmS6s+Asl4JoXIDm/K1cpZTxV4Q5s= +cloud.google.com/go/run v1.3.2/go.mod h1:SIhmqArbjdU/D9M6JoHaAqnAMKLFtXaVdNeq04NjnVE= +cloud.google.com/go/run v1.3.3/go.mod h1:WSM5pGyJ7cfYyYbONVQBN4buz42zFqwG67Q3ch07iK4= cloud.google.com/go/scheduler v1.4.0/go.mod h1:drcJBmxF3aqZJRhmkHQ9b3uSSpQoltBPGPxGAWROx6s= cloud.google.com/go/scheduler v1.5.0/go.mod h1:ri073ym49NW3AfT6DZi21vLZrG07GXr5p3H1KxN5QlI= cloud.google.com/go/scheduler v1.6.0/go.mod 
h1:SgeKVM7MIwPn3BqtcBntpLyrIJftQISRrYB5ZtT+KOk= cloud.google.com/go/scheduler v1.7.0/go.mod h1:jyCiBqWW956uBjjPMMuX09n3x37mtyPJegEWKxRsn44= cloud.google.com/go/scheduler v1.8.0/go.mod h1:TCET+Y5Gp1YgHT8py4nlg2Sew8nUHMqcpousDgXJVQc= cloud.google.com/go/scheduler v1.9.0/go.mod h1:yexg5t+KSmqu+njTIh3b7oYPheFtBWGcbVUYF1GGMIc= +cloud.google.com/go/scheduler v1.10.1/go.mod h1:R63Ldltd47Bs4gnhQkmNDse5w8gBRrhObZ54PxgR2Oo= +cloud.google.com/go/scheduler v1.10.2/go.mod h1:O3jX6HRH5eKCA3FutMw375XHZJudNIKVonSCHv7ropY= +cloud.google.com/go/scheduler v1.10.3/go.mod h1:8ANskEM33+sIbpJ+R4xRfw/jzOG+ZFE8WVLy7/yGvbc= +cloud.google.com/go/scheduler v1.10.4/go.mod h1:MTuXcrJC9tqOHhixdbHDFSIuh7xZF2IysiINDuiq6NI= cloud.google.com/go/secretmanager v1.6.0/go.mod h1:awVa/OXF6IiyaU1wQ34inzQNc4ISIDIrId8qE5QGgKA= cloud.google.com/go/secretmanager v1.8.0/go.mod h1:hnVgi/bN5MYHd3Gt0SPuTPPp5ENina1/LxM+2W9U9J4= cloud.google.com/go/secretmanager v1.9.0/go.mod h1:b71qH2l1yHmWQHt9LC80akm86mX8AL6X1MA01dW8ht4= cloud.google.com/go/secretmanager v1.10.0/go.mod h1:MfnrdvKMPNra9aZtQFvBcvRU54hbPD8/HayQdlUgJpU= +cloud.google.com/go/secretmanager v1.11.1/go.mod h1:znq9JlXgTNdBeQk9TBW/FnR/W4uChEKGeqQWAJ8SXFw= +cloud.google.com/go/secretmanager v1.11.2/go.mod h1:MQm4t3deoSub7+WNwiC4/tRYgDBHJgJPvswqQVB1Vss= +cloud.google.com/go/secretmanager v1.11.3/go.mod h1:0bA2o6FabmShrEy328i67aV+65XoUFFSmVeLBn/51jI= +cloud.google.com/go/secretmanager v1.11.4/go.mod h1:wreJlbS9Zdq21lMzWmJ0XhWW2ZxgPeahsqeV/vZoJ3w= cloud.google.com/go/security v1.5.0/go.mod h1:lgxGdyOKKjHL4YG3/YwIL2zLqMFCKs0UbQwgyZmfJl4= cloud.google.com/go/security v1.7.0/go.mod h1:mZklORHl6Bg7CNnnjLH//0UlAlaXqiG7Lb9PsPXLfD0= cloud.google.com/go/security v1.8.0/go.mod h1:hAQOwgmaHhztFhiQ41CjDODdWP0+AE1B3sX4OFlq+GU= @@ -493,12 +938,20 @@ cloud.google.com/go/security v1.9.0/go.mod h1:6Ta1bO8LXI89nZnmnsZGp9lVoVWXqsVbIq cloud.google.com/go/security v1.10.0/go.mod h1:QtOMZByJVlibUT2h9afNDWRZ1G96gVywH8T5GUSb9IA= cloud.google.com/go/security v1.12.0/go.mod h1:rV6EhrpbNHrrxqlvW0BWAIawFWq3X90SduMJdFwtLB8= cloud.google.com/go/security v1.13.0/go.mod h1:Q1Nvxl1PAgmeW0y3HTt54JYIvUdtcpYKVfIB8AOMZ+0= +cloud.google.com/go/security v1.15.1/go.mod h1:MvTnnbsWnehoizHi09zoiZob0iCHVcL4AUBj76h9fXA= +cloud.google.com/go/security v1.15.2/go.mod h1:2GVE/v1oixIRHDaClVbHuPcZwAqFM28mXuAKCfMgYIg= +cloud.google.com/go/security v1.15.3/go.mod h1:gQ/7Q2JYUZZgOzqKtw9McShH+MjNvtDpL40J1cT+vBs= +cloud.google.com/go/security v1.15.4/go.mod h1:oN7C2uIZKhxCLiAAijKUCuHLZbIt/ghYEo8MqwD/Ty4= cloud.google.com/go/securitycenter v1.13.0/go.mod h1:cv5qNAqjY84FCN6Y9z28WlkKXyWsgLO832YiWwkCWcU= cloud.google.com/go/securitycenter v1.14.0/go.mod h1:gZLAhtyKv85n52XYWt6RmeBdydyxfPeTrpToDPw4Auc= cloud.google.com/go/securitycenter v1.15.0/go.mod h1:PeKJ0t8MoFmmXLXWm41JidyzI3PJjd8sXWaVqg43WWk= cloud.google.com/go/securitycenter v1.16.0/go.mod h1:Q9GMaLQFUD+5ZTabrbujNWLtSLZIZF7SAR0wWECrjdk= cloud.google.com/go/securitycenter v1.18.1/go.mod h1:0/25gAzCM/9OL9vVx4ChPeM/+DlfGQJDwBy/UC8AKK0= cloud.google.com/go/securitycenter v1.19.0/go.mod h1:LVLmSg8ZkkyaNy4u7HCIshAngSQ8EcIRREP3xBnyfag= +cloud.google.com/go/securitycenter v1.23.0/go.mod h1:8pwQ4n+Y9WCWM278R8W3nF65QtY172h4S8aXyI9/hsQ= +cloud.google.com/go/securitycenter v1.23.1/go.mod h1:w2HV3Mv/yKhbXKwOCu2i8bCuLtNP1IMHuiYQn4HJq5s= +cloud.google.com/go/securitycenter v1.24.1/go.mod h1:3h9IdjjHhVMXdQnmqzVnM7b0wMn/1O/U20eWVpMpZjI= +cloud.google.com/go/securitycenter v1.24.2/go.mod h1:l1XejOngggzqwr4Fa2Cn+iWZGf+aBLTXtB/vXjy5vXM= cloud.google.com/go/servicecontrol v1.4.0/go.mod 
h1:o0hUSJ1TXJAmi/7fLJAedOovnujSEvjKCAFNXPQ1RaU= cloud.google.com/go/servicecontrol v1.5.0/go.mod h1:qM0CnXHhyqKVuiZnGKrIurvVImCs8gmqWsDoqe9sU1s= cloud.google.com/go/servicecontrol v1.10.0/go.mod h1:pQvyvSRh7YzUF2efw7H87V92mxU8FnFDawMClGCNuAA= @@ -510,6 +963,11 @@ cloud.google.com/go/servicedirectory v1.6.0/go.mod h1:pUlbnWsLH9c13yGkxCmfumWEPj cloud.google.com/go/servicedirectory v1.7.0/go.mod h1:5p/U5oyvgYGYejufvxhgwjL8UVXjkuw7q5XcG10wx1U= cloud.google.com/go/servicedirectory v1.8.0/go.mod h1:srXodfhY1GFIPvltunswqXpVxFPpZjf8nkKQT7XcXaY= cloud.google.com/go/servicedirectory v1.9.0/go.mod h1:29je5JjiygNYlmsGz8k6o+OZ8vd4f//bQLtvzkPPT/s= +cloud.google.com/go/servicedirectory v1.10.1/go.mod h1:Xv0YVH8s4pVOwfM/1eMTl0XJ6bzIOSLDt8f8eLaGOxQ= +cloud.google.com/go/servicedirectory v1.11.0/go.mod h1:Xv0YVH8s4pVOwfM/1eMTl0XJ6bzIOSLDt8f8eLaGOxQ= +cloud.google.com/go/servicedirectory v1.11.1/go.mod h1:tJywXimEWzNzw9FvtNjsQxxJ3/41jseeILgwU/QLrGI= +cloud.google.com/go/servicedirectory v1.11.2/go.mod h1:KD9hCLhncWRV5jJphwIpugKwM5bn1x0GyVVD4NO8mGg= +cloud.google.com/go/servicedirectory v1.11.3/go.mod h1:LV+cHkomRLr67YoQy3Xq2tUXBGOs5z5bPofdq7qtiAw= cloud.google.com/go/servicemanagement v1.4.0/go.mod h1:d8t8MDbezI7Z2R1O/wu8oTggo3BI2GKYbdG4y/SJTco= cloud.google.com/go/servicemanagement v1.5.0/go.mod h1:XGaCRe57kfqu4+lRxaFEAuqmjzF0r+gWHjWqKqBvKFo= cloud.google.com/go/servicemanagement v1.6.0/go.mod h1:aWns7EeeCOtGEX4OvZUWCCJONRZeFKiptqKf1D0l/Jc= @@ -521,16 +979,29 @@ cloud.google.com/go/serviceusage v1.6.0/go.mod h1:R5wwQcbOWsyuOfbP9tGdAnCAc6B9DR cloud.google.com/go/shell v1.3.0/go.mod h1:VZ9HmRjZBsjLGXusm7K5Q5lzzByZmJHf1d0IWHEN5X4= cloud.google.com/go/shell v1.4.0/go.mod h1:HDxPzZf3GkDdhExzD/gs8Grqk+dmYcEjGShZgYa9URw= cloud.google.com/go/shell v1.6.0/go.mod h1:oHO8QACS90luWgxP3N9iZVuEiSF84zNyLytb+qE2f9A= +cloud.google.com/go/shell v1.7.1/go.mod h1:u1RaM+huXFaTojTbW4g9P5emOrrmLE69KrxqQahKn4g= +cloud.google.com/go/shell v1.7.2/go.mod h1:KqRPKwBV0UyLickMn0+BY1qIyE98kKyI216sH/TuHmc= +cloud.google.com/go/shell v1.7.3/go.mod h1:cTTEz/JdaBsQAeTQ3B6HHldZudFoYBOqjteev07FbIc= +cloud.google.com/go/shell v1.7.4/go.mod h1:yLeXB8eKLxw0dpEmXQ/FjriYrBijNsONpwnWsdPqlKM= cloud.google.com/go/spanner v1.7.0/go.mod h1:sd3K2gZ9Fd0vMPLXzeCrF6fq4i63Q7aTLW/lBIfBkIk= cloud.google.com/go/spanner v1.41.0/go.mod h1:MLYDBJR/dY4Wt7ZaMIQ7rXOTLjYrmxLE/5ve9vFfWos= cloud.google.com/go/spanner v1.44.0/go.mod h1:G8XIgYdOK+Fbcpbs7p2fiprDw4CaZX63whnSMLVBxjk= cloud.google.com/go/spanner v1.45.0/go.mod h1:FIws5LowYz8YAE1J8fOS7DJup8ff7xJeetWEo5REA2M= +cloud.google.com/go/spanner v1.47.0/go.mod h1:IXsJwVW2j4UKs0eYDqodab6HgGuA1bViSqW4uH9lfUI= +cloud.google.com/go/spanner v1.49.0/go.mod h1:eGj9mQGK8+hkgSVbHNQ06pQ4oS+cyc4tXXd6Dif1KoM= +cloud.google.com/go/spanner v1.50.0/go.mod h1:eGj9mQGK8+hkgSVbHNQ06pQ4oS+cyc4tXXd6Dif1KoM= +cloud.google.com/go/spanner v1.51.0/go.mod h1:c5KNo5LQ1X5tJwma9rSQZsXNBDNvj4/n8BVc3LNahq0= cloud.google.com/go/speech v1.6.0/go.mod h1:79tcr4FHCimOp56lwC01xnt/WPJZc4v3gzyT7FoBkCM= cloud.google.com/go/speech v1.7.0/go.mod h1:KptqL+BAQIhMsj1kOP2la5DSEEerPDuOP/2mmkhHhZQ= cloud.google.com/go/speech v1.8.0/go.mod h1:9bYIl1/tjsAnMgKGHKmBZzXKEkGgtU+MpdDPTE9f7y0= cloud.google.com/go/speech v1.9.0/go.mod h1:xQ0jTcmnRFFM2RfX/U+rk6FQNUF6DQlydUSyoooSpco= cloud.google.com/go/speech v1.14.1/go.mod h1:gEosVRPJ9waG7zqqnsHpYTOoAS4KouMRLDFMekpJ0J0= cloud.google.com/go/speech v1.15.0/go.mod h1:y6oH7GhqCaZANH7+Oe0BhgIogsNInLlz542tg3VqeYI= +cloud.google.com/go/speech v1.17.1/go.mod h1:8rVNzU43tQvxDaGvqOhpDqgkJTFowBpDvCJ14kGlJYo= 
+cloud.google.com/go/speech v1.19.0/go.mod h1:8rVNzU43tQvxDaGvqOhpDqgkJTFowBpDvCJ14kGlJYo= +cloud.google.com/go/speech v1.19.1/go.mod h1:WcuaWz/3hOlzPFOVo9DUsblMIHwxP589y6ZMtaG+iAA= +cloud.google.com/go/speech v1.19.2/go.mod h1:2OYFfj+Ch5LWjsaSINuCZsre/789zlcCI3SY4oAi2oI= +cloud.google.com/go/speech v1.20.1/go.mod h1:wwolycgONvfz2EDU8rKuHRW3+wc9ILPsAWoikBEWavY= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= @@ -541,43 +1012,81 @@ cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc= cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s= cloud.google.com/go/storage v1.28.1/go.mod h1:Qnisd4CqDdo6BGs2AD5LLnEsmSQ80wQ5ogcBBKhU86Y= -cloud.google.com/go/storage v1.29.0 h1:6weCgzRvMg7lzuUurI4697AqIRPU1SvzHhynwpW31jI= cloud.google.com/go/storage v1.29.0/go.mod h1:4puEjyTKnku6gfKoTfNOU/W+a9JyuVNxjpS5GBrB8h4= +cloud.google.com/go/storage v1.30.1/go.mod h1:NfxhC0UJE1aXSx7CIIbCf7y9HKT7BiccwkR7+P7gN8E= +cloud.google.com/go/storage v1.35.1 h1:B59ahL//eDfx2IIKFBeT5Atm9wnNmj3+8xG/W4WB//w= +cloud.google.com/go/storage v1.35.1/go.mod h1:M6M/3V/D3KpzMTJyPOR/HU6n2Si5QdaXYEsng2xgOs8= cloud.google.com/go/storagetransfer v1.5.0/go.mod h1:dxNzUopWy7RQevYFHewchb29POFv3/AaBgnhqzqiK0w= cloud.google.com/go/storagetransfer v1.6.0/go.mod h1:y77xm4CQV/ZhFZH75PLEXY0ROiS7Gh6pSKrM8dJyg6I= cloud.google.com/go/storagetransfer v1.7.0/go.mod h1:8Giuj1QNb1kfLAiWM1bN6dHzfdlDAVC9rv9abHot2W4= cloud.google.com/go/storagetransfer v1.8.0/go.mod h1:JpegsHHU1eXg7lMHkvf+KE5XDJ7EQu0GwNJbbVGanEw= +cloud.google.com/go/storagetransfer v1.10.0/go.mod h1:DM4sTlSmGiNczmV6iZyceIh2dbs+7z2Ayg6YAiQlYfA= +cloud.google.com/go/storagetransfer v1.10.1/go.mod h1:rS7Sy0BtPviWYTTJVWCSV4QrbBitgPeuK4/FKa4IdLs= +cloud.google.com/go/storagetransfer v1.10.2/go.mod h1:meIhYQup5rg9juQJdyppnA/WLQCOguxtk1pr3/vBWzA= +cloud.google.com/go/storagetransfer v1.10.3/go.mod h1:Up8LY2p6X68SZ+WToswpQbQHnJpOty/ACcMafuey8gc= cloud.google.com/go/talent v1.1.0/go.mod h1:Vl4pt9jiHKvOgF9KoZo6Kob9oV4lwd/ZD5Cto54zDRw= cloud.google.com/go/talent v1.2.0/go.mod h1:MoNF9bhFQbiJ6eFD3uSsg0uBALw4n4gaCaEjBw9zo8g= cloud.google.com/go/talent v1.3.0/go.mod h1:CmcxwJ/PKfRgd1pBjQgU6W3YBwiewmUzQYH5HHmSCmM= cloud.google.com/go/talent v1.4.0/go.mod h1:ezFtAgVuRf8jRsvyE6EwmbTK5LKciD4KVnHuDEFmOOA= cloud.google.com/go/talent v1.5.0/go.mod h1:G+ODMj9bsasAEJkQSzO2uHQWXHHXUomArjWQQYkqK6c= +cloud.google.com/go/talent v1.6.2/go.mod h1:CbGvmKCG61mkdjcqTcLOkb2ZN1SrQI8MDyma2l7VD24= +cloud.google.com/go/talent v1.6.3/go.mod h1:xoDO97Qd4AK43rGjJvyBHMskiEf3KulgYzcH6YWOVoo= +cloud.google.com/go/talent v1.6.4/go.mod h1:QsWvi5eKeh6gG2DlBkpMaFYZYrYUnIpo34f6/V5QykY= +cloud.google.com/go/talent v1.6.5/go.mod h1:Mf5cma696HmE+P2BWJ/ZwYqeJXEeU0UqjHFXVLadEDI= cloud.google.com/go/texttospeech v1.4.0/go.mod h1:FX8HQHA6sEpJ7rCMSfXuzBcysDAuWusNNNvN9FELDd8= cloud.google.com/go/texttospeech v1.5.0/go.mod h1:oKPLhR4n4ZdQqWKURdwxMy0uiTS1xU161C8W57Wkea4= cloud.google.com/go/texttospeech v1.6.0/go.mod h1:YmwmFT8pj1aBblQOI3TfKmwibnsfvhIBzPXcW4EBovc= +cloud.google.com/go/texttospeech v1.7.1/go.mod h1:m7QfG5IXxeneGqTapXNxv2ItxP/FS0hCZBwXYqucgSk= +cloud.google.com/go/texttospeech v1.7.2/go.mod h1:VYPT6aTOEl3herQjFHYErTlSZJ4vB00Q2ZTmuVgluD4= 
+cloud.google.com/go/texttospeech v1.7.3/go.mod h1:Av/zpkcgWfXlDLRYob17lqMstGZ3GqlvJXqKMp2u8so= +cloud.google.com/go/texttospeech v1.7.4/go.mod h1:vgv0002WvR4liGuSd5BJbWy4nDn5Ozco0uJymY5+U74= cloud.google.com/go/tpu v1.3.0/go.mod h1:aJIManG0o20tfDQlRIej44FcwGGl/cD0oiRyMKG19IQ= cloud.google.com/go/tpu v1.4.0/go.mod h1:mjZaX8p0VBgllCzF6wcU2ovUXN9TONFLd7iz227X2Xg= cloud.google.com/go/tpu v1.5.0/go.mod h1:8zVo1rYDFuW2l4yZVY0R0fb/v44xLh3llq7RuV61fPM= +cloud.google.com/go/tpu v1.6.1/go.mod h1:sOdcHVIgDEEOKuqUoi6Fq53MKHJAtOwtz0GuKsWSH3E= +cloud.google.com/go/tpu v1.6.2/go.mod h1:NXh3NDwt71TsPZdtGWgAG5ThDfGd32X1mJ2cMaRlVgU= +cloud.google.com/go/tpu v1.6.3/go.mod h1:lxiueqfVMlSToZY1151IaZqp89ELPSrk+3HIQ5HRkbY= +cloud.google.com/go/tpu v1.6.4/go.mod h1:NAm9q3Rq2wIlGnOhpYICNI7+bpBebMJbh0yyp3aNw1Y= cloud.google.com/go/trace v1.3.0/go.mod h1:FFUE83d9Ca57C+K8rDl/Ih8LwOzWIV1krKgxg6N0G28= cloud.google.com/go/trace v1.4.0/go.mod h1:UG0v8UBqzusp+z63o7FK74SdFE+AXpCLdFb1rshXG+Y= cloud.google.com/go/trace v1.8.0/go.mod h1:zH7vcsbAhklH8hWFig58HvxcxyQbaIqMarMg9hn5ECA= cloud.google.com/go/trace v1.9.0/go.mod h1:lOQqpE5IaWY0Ixg7/r2SjixMuc6lfTFeO4QGM4dQWOk= +cloud.google.com/go/trace v1.10.1/go.mod h1:gbtL94KE5AJLH3y+WVpfWILmqgc6dXcqgNXdOPAQTYk= +cloud.google.com/go/trace v1.10.2/go.mod h1:NPXemMi6MToRFcSxRl2uDnu/qAlAQ3oULUphcHGh1vA= +cloud.google.com/go/trace v1.10.3/go.mod h1:Ke1bgfc73RV3wUFml+uQp7EsDw4dGaETLxB7Iq/r4CY= +cloud.google.com/go/trace v1.10.4/go.mod h1:Nso99EDIK8Mj5/zmB+iGr9dosS/bzWCJ8wGmE6TXNWY= cloud.google.com/go/translate v1.3.0/go.mod h1:gzMUwRjvOqj5i69y/LYLd8RrNQk+hOmIXTi9+nb3Djs= cloud.google.com/go/translate v1.4.0/go.mod h1:06Dn/ppvLD6WvA5Rhdp029IX2Mi3Mn7fpMRLPvXT5Wg= cloud.google.com/go/translate v1.5.0/go.mod h1:29YDSYveqqpA1CQFD7NQuP49xymq17RXNaUDdc0mNu0= cloud.google.com/go/translate v1.6.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos= cloud.google.com/go/translate v1.7.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos= +cloud.google.com/go/translate v1.8.1/go.mod h1:d1ZH5aaOA0CNhWeXeC8ujd4tdCFw8XoNWRljklu5RHs= +cloud.google.com/go/translate v1.8.2/go.mod h1:d1ZH5aaOA0CNhWeXeC8ujd4tdCFw8XoNWRljklu5RHs= +cloud.google.com/go/translate v1.9.0/go.mod h1:d1ZH5aaOA0CNhWeXeC8ujd4tdCFw8XoNWRljklu5RHs= +cloud.google.com/go/translate v1.9.1/go.mod h1:TWIgDZknq2+JD4iRcojgeDtqGEp154HN/uL6hMvylS8= +cloud.google.com/go/translate v1.9.2/go.mod h1:E3Tc6rUTsQkVrXW6avbUhKJSr7ZE3j7zNmqzXKHqRrY= +cloud.google.com/go/translate v1.9.3/go.mod h1:Kbq9RggWsbqZ9W5YpM94Q1Xv4dshw/gr/SHfsl5yCZ0= cloud.google.com/go/video v1.8.0/go.mod h1:sTzKFc0bUSByE8Yoh8X0mn8bMymItVGPfTuUBUyRgxk= cloud.google.com/go/video v1.9.0/go.mod h1:0RhNKFRF5v92f8dQt0yhaHrEuH95m068JYOvLZYnJSw= cloud.google.com/go/video v1.12.0/go.mod h1:MLQew95eTuaNDEGriQdcYn0dTwf9oWiA4uYebxM5kdg= cloud.google.com/go/video v1.13.0/go.mod h1:ulzkYlYgCp15N2AokzKjy7MQ9ejuynOJdf1tR5lGthk= cloud.google.com/go/video v1.14.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ= cloud.google.com/go/video v1.15.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ= +cloud.google.com/go/video v1.17.1/go.mod h1:9qmqPqw/Ib2tLqaeHgtakU+l5TcJxCJbhFXM7UJjVzU= +cloud.google.com/go/video v1.19.0/go.mod h1:9qmqPqw/Ib2tLqaeHgtakU+l5TcJxCJbhFXM7UJjVzU= +cloud.google.com/go/video v1.20.0/go.mod h1:U3G3FTnsvAGqglq9LxgqzOiBc/Nt8zis8S+850N2DUM= +cloud.google.com/go/video v1.20.1/go.mod h1:3gJS+iDprnj8SY6pe0SwLeC5BUW80NjhwX7INWEuWGU= +cloud.google.com/go/video v1.20.2/go.mod h1:lrixr5JeKNThsgfM9gqtwb6Okuqzfo4VrY2xynaViTA= +cloud.google.com/go/video v1.20.3/go.mod 
h1:TnH/mNZKVHeNtpamsSPygSR0iHtvrR/cW1/GDjN5+GU= cloud.google.com/go/videointelligence v1.6.0/go.mod h1:w0DIDlVRKtwPCn/C4iwZIJdvC69yInhW0cfi+p546uU= cloud.google.com/go/videointelligence v1.7.0/go.mod h1:k8pI/1wAhjznARtVT9U1llUaFNPh7muw8QyOUpavru4= cloud.google.com/go/videointelligence v1.8.0/go.mod h1:dIcCn4gVDdS7yte/w+koiXn5dWVplOZkE+xwG9FgK+M= cloud.google.com/go/videointelligence v1.9.0/go.mod h1:29lVRMPDYHikk3v8EdPSaL8Ku+eMzDljjuvRs105XoU= cloud.google.com/go/videointelligence v1.10.0/go.mod h1:LHZngX1liVtUhZvi2uNS0VQuOzNi2TkY1OakiuoUOjU= +cloud.google.com/go/videointelligence v1.11.1/go.mod h1:76xn/8InyQHarjTWsBR058SmlPCwQjgcvoW0aZykOvo= +cloud.google.com/go/videointelligence v1.11.2/go.mod h1:ocfIGYtIVmIcWk1DsSGOoDiXca4vaZQII1C85qtoplc= +cloud.google.com/go/videointelligence v1.11.3/go.mod h1:tf0NUaGTjU1iS2KEkGWvO5hRHeCkFK3nPo0/cOZhZAo= +cloud.google.com/go/videointelligence v1.11.4/go.mod h1:kPBMAYsTPFiQxMLmmjpcZUMklJp3nC9+ipJJtprccD8= cloud.google.com/go/vision v1.2.0/go.mod h1:SmNwgObm5DpFBme2xpyOyasvBc1aPdjvMk2bBk0tKD0= cloud.google.com/go/vision/v2 v2.2.0/go.mod h1:uCdV4PpN1S0jyCyq8sIM42v2Y6zOLkZs+4R9LrGYwFo= cloud.google.com/go/vision/v2 v2.3.0/go.mod h1:UO61abBx9QRMFkNBbf1D8B1LXdS2cGiiCRx0vSpZoUo= @@ -585,41 +1094,83 @@ cloud.google.com/go/vision/v2 v2.4.0/go.mod h1:VtI579ll9RpVTrdKdkMzckdnwMyX2JILb cloud.google.com/go/vision/v2 v2.5.0/go.mod h1:MmaezXOOE+IWa+cS7OhRRLK2cNv1ZL98zhqFFZaaH2E= cloud.google.com/go/vision/v2 v2.6.0/go.mod h1:158Hes0MvOS9Z/bDMSFpjwsUrZ5fPrdwuyyvKSGAGMY= cloud.google.com/go/vision/v2 v2.7.0/go.mod h1:H89VysHy21avemp6xcf9b9JvZHVehWbET0uT/bcuY/0= +cloud.google.com/go/vision/v2 v2.7.2/go.mod h1:jKa8oSYBWhYiXarHPvP4USxYANYUEdEsQrloLjrSwJU= +cloud.google.com/go/vision/v2 v2.7.3/go.mod h1:V0IcLCY7W+hpMKXK1JYE0LV5llEqVmj+UJChjvA1WsM= +cloud.google.com/go/vision/v2 v2.7.4/go.mod h1:ynDKnsDN/0RtqkKxQZ2iatv3Dm9O+HfRb5djl7l4Vvw= +cloud.google.com/go/vision/v2 v2.7.5/go.mod h1:GcviprJLFfK9OLf0z8Gm6lQb6ZFUulvpZws+mm6yPLM= cloud.google.com/go/vmmigration v1.2.0/go.mod h1:IRf0o7myyWFSmVR1ItrBSFLFD/rJkfDCUTO4vLlJvsE= cloud.google.com/go/vmmigration v1.3.0/go.mod h1:oGJ6ZgGPQOFdjHuocGcLqX4lc98YQ7Ygq8YQwHh9A7g= cloud.google.com/go/vmmigration v1.5.0/go.mod h1:E4YQ8q7/4W9gobHjQg4JJSgXXSgY21nA5r8swQV+Xxc= cloud.google.com/go/vmmigration v1.6.0/go.mod h1:bopQ/g4z+8qXzichC7GW1w2MjbErL54rk3/C843CjfY= +cloud.google.com/go/vmmigration v1.7.1/go.mod h1:WD+5z7a/IpZ5bKK//YmT9E047AD+rjycCAvyMxGJbro= +cloud.google.com/go/vmmigration v1.7.2/go.mod h1:iA2hVj22sm2LLYXGPT1pB63mXHhrH1m/ruux9TwWLd8= +cloud.google.com/go/vmmigration v1.7.3/go.mod h1:ZCQC7cENwmSWlwyTrZcWivchn78YnFniEQYRWQ65tBo= +cloud.google.com/go/vmmigration v1.7.4/go.mod h1:yBXCmiLaB99hEl/G9ZooNx2GyzgsjKnw5fWcINRgD70= cloud.google.com/go/vmwareengine v0.1.0/go.mod h1:RsdNEf/8UDvKllXhMz5J40XxDrNJNN4sagiox+OI208= cloud.google.com/go/vmwareengine v0.2.2/go.mod h1:sKdctNJxb3KLZkE/6Oui94iw/xs9PRNC2wnNLXsHvH8= cloud.google.com/go/vmwareengine v0.3.0/go.mod h1:wvoyMvNWdIzxMYSpH/R7y2h5h3WFkx6d+1TIsP39WGY= +cloud.google.com/go/vmwareengine v0.4.1/go.mod h1:Px64x+BvjPZwWuc4HdmVhoygcXqEkGHXoa7uyfTgSI0= +cloud.google.com/go/vmwareengine v1.0.0/go.mod h1:Px64x+BvjPZwWuc4HdmVhoygcXqEkGHXoa7uyfTgSI0= +cloud.google.com/go/vmwareengine v1.0.1/go.mod h1:aT3Xsm5sNx0QShk1Jc1B8OddrxAScYLwzVoaiXfdzzk= +cloud.google.com/go/vmwareengine v1.0.2/go.mod h1:xMSNjIk8/itYrz1JA8nV3Ajg4L4n3N+ugP8JKzk3OaA= +cloud.google.com/go/vmwareengine v1.0.3/go.mod h1:QSpdZ1stlbfKtyt6Iu19M6XRxjmXO+vb5a/R6Fvy2y4= cloud.google.com/go/vpcaccess v1.4.0/go.mod 
h1:aQHVbTWDYUR1EbTApSVvMq1EnT57ppDmQzZ3imqIk4w= cloud.google.com/go/vpcaccess v1.5.0/go.mod h1:drmg4HLk9NkZpGfCmZ3Tz0Bwnm2+DKqViEpeEpOq0m8= cloud.google.com/go/vpcaccess v1.6.0/go.mod h1:wX2ILaNhe7TlVa4vC5xce1bCnqE3AeH27RV31lnmZes= +cloud.google.com/go/vpcaccess v1.7.1/go.mod h1:FogoD46/ZU+JUBX9D606X21EnxiszYi2tArQwLY4SXs= +cloud.google.com/go/vpcaccess v1.7.2/go.mod h1:mmg/MnRHv+3e8FJUjeSibVFvQF1cCy2MsFaFqxeY1HU= +cloud.google.com/go/vpcaccess v1.7.3/go.mod h1:YX4skyfW3NC8vI3Fk+EegJnlYFatA+dXK4o236EUCUc= +cloud.google.com/go/vpcaccess v1.7.4/go.mod h1:lA0KTvhtEOb/VOdnH/gwPuOzGgM+CWsmGu6bb4IoMKk= cloud.google.com/go/webrisk v1.4.0/go.mod h1:Hn8X6Zr+ziE2aNd8SliSDWpEnSS1u4R9+xXZmFiHmGE= cloud.google.com/go/webrisk v1.5.0/go.mod h1:iPG6fr52Tv7sGk0H6qUFzmL3HHZev1htXuWDEEsqMTg= cloud.google.com/go/webrisk v1.6.0/go.mod h1:65sW9V9rOosnc9ZY7A7jsy1zoHS5W9IAXv6dGqhMQMc= cloud.google.com/go/webrisk v1.7.0/go.mod h1:mVMHgEYH0r337nmt1JyLthzMr6YxwN1aAIEc2fTcq7A= cloud.google.com/go/webrisk v1.8.0/go.mod h1:oJPDuamzHXgUc+b8SiHRcVInZQuybnvEW72PqTc7sSg= +cloud.google.com/go/webrisk v1.9.1/go.mod h1:4GCmXKcOa2BZcZPn6DCEvE7HypmEJcJkr4mtM+sqYPc= +cloud.google.com/go/webrisk v1.9.2/go.mod h1:pY9kfDgAqxUpDBOrG4w8deLfhvJmejKB0qd/5uQIPBc= +cloud.google.com/go/webrisk v1.9.3/go.mod h1:RUYXe9X/wBDXhVilss7EDLW9ZNa06aowPuinUOPCXH8= +cloud.google.com/go/webrisk v1.9.4/go.mod h1:w7m4Ib4C+OseSr2GL66m0zMBywdrVNTDKsdEsfMl7X0= cloud.google.com/go/websecurityscanner v1.3.0/go.mod h1:uImdKm2wyeXQevQJXeh8Uun/Ym1VqworNDlBXQevGMo= cloud.google.com/go/websecurityscanner v1.4.0/go.mod h1:ebit/Fp0a+FWu5j4JOmJEV8S8CzdTkAS77oDsiSqYWQ= cloud.google.com/go/websecurityscanner v1.5.0/go.mod h1:Y6xdCPy81yi0SQnDY1xdNTNpfY1oAgXUlcfN3B3eSng= +cloud.google.com/go/websecurityscanner v1.6.1/go.mod h1:Njgaw3rttgRHXzwCB8kgCYqv5/rGpFCsBOvPbYgszpg= +cloud.google.com/go/websecurityscanner v1.6.2/go.mod h1:7YgjuU5tun7Eg2kpKgGnDuEOXWIrh8x8lWrJT4zfmas= +cloud.google.com/go/websecurityscanner v1.6.3/go.mod h1:x9XANObUFR+83Cya3g/B9M/yoHVqzxPnFtgF8yYGAXw= +cloud.google.com/go/websecurityscanner v1.6.4/go.mod h1:mUiyMQ+dGpPPRkHgknIZeCzSHJ45+fY4F52nZFDHm2o= cloud.google.com/go/workflows v1.6.0/go.mod h1:6t9F5h/unJz41YqfBmqSASJSXccBLtD1Vwf+KmJENM0= cloud.google.com/go/workflows v1.7.0/go.mod h1:JhSrZuVZWuiDfKEFxU0/F1PQjmpnpcoISEXH2bcHC3M= cloud.google.com/go/workflows v1.8.0/go.mod h1:ysGhmEajwZxGn1OhGOGKsTXc5PyxOc0vfKf5Af+to4M= cloud.google.com/go/workflows v1.9.0/go.mod h1:ZGkj1aFIOd9c8Gerkjjq7OW7I5+l6cSvT3ujaO/WwSA= cloud.google.com/go/workflows v1.10.0/go.mod h1:fZ8LmRmZQWacon9UCX1r/g/DfAXx5VcPALq2CxzdePw= +cloud.google.com/go/workflows v1.11.1/go.mod h1:Z+t10G1wF7h8LgdY/EmRcQY8ptBD/nvofaL6FqlET6g= +cloud.google.com/go/workflows v1.12.0/go.mod h1:PYhSk2b6DhZ508tj8HXKaBh+OFe+xdl0dHF/tJdzPQM= +cloud.google.com/go/workflows v1.12.1/go.mod h1:5A95OhD/edtOhQd/O741NSfIMezNTbCwLM1P1tBRGHM= +cloud.google.com/go/workflows v1.12.2/go.mod h1:+OmBIgNqYJPVggnMo9nqmizW0qEXHhmnAzK/CnBqsHc= +cloud.google.com/go/workflows v1.12.3/go.mod h1:fmOUeeqEwPzIU81foMjTRQIdwQHADi/vEr1cx9R1m5g= contrib.go.opencensus.io/exporter/stackdriver v0.13.4/go.mod h1:aXENhDJ1Y4lIg4EUaVTwzvYETVNZk10Pu26tevFKLUc= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zumjgTw83q2ge/PI+yyw8= git.sr.ht/~sbinet/gg v0.3.1/go.mod h1:KGYtlADtqsqANL9ueOFkWymvzUvLMQllU5Ixo+8v3pc= +github.com/4meepo/tagalign v1.3.3 h1:ZsOxcwGD/jP4U/aw7qeWu58i7dwYemfy5Y+IF1ACoNw= 
+github.com/4meepo/tagalign v1.3.3/go.mod h1:Q9c1rYMZJc9dPRkbQPpcBNCLEmY2njbAsXhQOZFE2dE= +github.com/Abirdcfly/dupword v0.0.13 h1:SMS17YXypwP000fA7Lr+kfyBQyW14tTT+nRv9ASwUUo= +github.com/Abirdcfly/dupword v0.0.13/go.mod h1:Ut6Ue2KgF/kCOawpW4LnExT+xZLQviJPE4klBPMK/5Y= +github.com/Antonboom/errname v0.1.12 h1:oh9ak2zUtsLp5oaEd/erjB4GPu9w19NyoIskZClDcQY= +github.com/Antonboom/errname v0.1.12/go.mod h1:bK7todrzvlaZoQagP1orKzWXv59X/x0W0Io2XT1Ssro= +github.com/Antonboom/nilnil v0.1.7 h1:ofgL+BA7vlA1K2wNQOsHzLJ2Pw5B5DpWRLdDAVvvTow= +github.com/Antonboom/nilnil v0.1.7/go.mod h1:TP+ScQWVEq0eSIxqU8CbdT5DFWoHp0MbP+KMUO1BKYQ= +github.com/Antonboom/testifylint v1.1.1 h1:xCxYDNOBLImTKjBKPGtx1cHkTSywDAn76mYHTwH5lG8= +github.com/Antonboom/testifylint v1.1.1/go.mod h1:9PFi+vWa8zzl4/B/kqmFJcw85ZUv8ReyBzuQCd30+WI= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= -github.com/BurntSushi/toml v1.2.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= -github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak= github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= +github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8= +github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 h1:sHglBQTwgx+rWPdisA5ynNEsoARbiCBOyGcJM4/OzsM= github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs= +github.com/GaijinEntertainment/go-exhaustruct/v3 v3.2.0 h1:sATXp1x6/axKxz2Gjxv8MALP0bXaNRfQinEwyfMcx8c= +github.com/GaijinEntertainment/go-exhaustruct/v3 v3.2.0/go.mod h1:Nl76DrGNJTA1KJ0LePKBw/vznBX1EHbAZX8mwjR82nI= github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c/go.mod h1:X0CRv0ky0k6m906ixxpzmDRLvX58TFUKS2eePweuyxk= github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= @@ -634,10 +1185,8 @@ github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jB github.com/Microsoft/go-winio v0.4.16 h1:FtSW/jqD+l4ba5iPBj9CODVtgfYAD8w2wS923g/cFDk= github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/OpenPeeDeeP/depguard v1.0.1/go.mod h1:xsIw86fROiiwelg+jB2uM9PiKihMMmUx/1V+TNhjQvM= -github.com/OpenPeeDeeP/depguard v1.1.1 h1:TSUznLjvp/4IUP+OQ0t/4jF4QUyxIcVX8YnghZdunyA= -github.com/OpenPeeDeeP/depguard v1.1.1/go.mod h1:JtAMzWkmFEzDPyAd+W0NHl1lvpQKTvT9jnRVsohBKpc= -github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= +github.com/OpenPeeDeeP/depguard/v2 v2.2.0 h1:vDfG60vDtIuf0MEOhmLlLLSzqaRM8EMcgJPdp74zmpA= +github.com/OpenPeeDeeP/depguard/v2 v2.2.0/go.mod h1:CIzddKRvLBC4Au5aYP/i3nyaWQ+ClszLIuVocRiCYFQ= github.com/agext/levenshtein v1.2.1/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= github.com/agext/levenshtein v1.2.2 h1:0S/Yg6LYmFJ5stwQeRp6EeOcCbj7xiqQSdNelsXvaqE= 
github.com/agext/levenshtein v1.2.2/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= @@ -647,14 +1196,25 @@ github.com/ajstarks/deck/generate v0.0.0-20210309230005-c3f852c02e19/go.mod h1:T github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= github.com/ajstarks/svgo v0.0.0-20211024235047-1546f124cd8b/go.mod h1:1KcenG0jGWcpt8ov532z81sp/kMMUG485J2InIOyADM= github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7/go.mod h1:6zEj6s6u/ghQa61ZWa/C2Aw3RkjiTBOix7dkqa1VLIs= +github.com/alecthomas/assert/v2 v2.2.2 h1:Z/iVC0xZfWTaFNE6bA3z07T86hd45Xe2eLt6WVy2bbk= +github.com/alecthomas/assert/v2 v2.2.2/go.mod h1:pXcQ2Asjp247dahGEmsZ6ru0UVwnkhktn7S0bBDLxvQ= +github.com/alecthomas/go-check-sumtype v0.1.4 h1:WCvlB3l5Vq5dZQTFmodqL2g68uHiSwwlWcT5a2FGK0c= +github.com/alecthomas/go-check-sumtype v0.1.4/go.mod h1:WyYPfhfkdhyrdaligV6svFopZV8Lqdzn5pyVBaV6jhQ= +github.com/alecthomas/repr v0.2.0 h1:HAzS41CIzNW5syS8Mf9UwXhNH1J9aix/BvDRf1Ml2Yk= +github.com/alecthomas/repr v0.2.0/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/alexkohler/nakedret/v2 v2.0.2 h1:qnXuZNvv3/AxkAb22q/sEsEpcA99YxLFACDtEw9TPxE= +github.com/alexkohler/nakedret/v2 v2.0.2/go.mod h1:2b8Gkk0GsOrqQv/gPWjNLDSKwG8I5moSXG1K4VIBcTQ= github.com/alexkohler/prealloc v1.0.0 h1:Hbq0/3fJPQhNkN0dR95AVrr6R7tou91y0uHG5pOcUuw= github.com/alexkohler/prealloc v1.0.0/go.mod h1:VetnK3dIgFBBKmg0YnD9F9x6Icjd+9cvfHR56wJVlKE= -github.com/andybalholm/brotli v1.0.0/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y= +github.com/alingse/asasalint v0.0.11 h1:SFwnQXJ49Kx/1GghOFz1XGqHYKp21Kq1nHad/0WQRnw= +github.com/alingse/asasalint v0.0.11/go.mod h1:nCaoMhw7a9kSJObvQyVzNTPBDbNpdocqrSP7t/cW5+I= +github.com/andybalholm/brotli v1.0.2/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y= +github.com/andybalholm/brotli v1.0.3/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= github.com/andybalholm/crlf v0.0.0-20171020200849-670099aa064f/go.mod h1:k8feO4+kXDxro6ErPXBRTJ/ro2mf0SsFG8s7doP9kJE= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= @@ -663,6 +1223,7 @@ github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kd github.com/aokoli/goutils v1.0.1/go.mod h1:SijmP0QR8LtwsmDs8Yii5Z/S4trXFGFC2oO5g9DP+DQ= github.com/apache/arrow/go/v10 v10.0.1/go.mod h1:YvhnlEePVnBS4+0z3fhPfUy7W1Ikj0Ih0vcRo/gZ1M0= github.com/apache/arrow/go/v11 v11.0.0/go.mod h1:Eg5OsL5H+e299f7u5ssuXsuHQVEGC4xei5aX110hRiI= +github.com/apache/arrow/go/v12 v12.0.0/go.mod h1:d+tV/eHZZ7Dz7RPrFKtPK02tpr+c9/PEd/zm8mDS9Vg= github.com/apache/thrift v0.16.0/go.mod h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2FXSqgU= github.com/apparentlymart/go-cidr v1.1.0 h1:2mAhrMoF+nhXqxTzSZMUzDHkLjmIHC+Zzn4tdgBZjnU= github.com/apparentlymart/go-cidr v1.1.0/go.mod 
h1:EBcsNrHc3zQeuaeCeCtQruQm+n9/YjEn/vI25Lg7Gwc= @@ -678,14 +1239,13 @@ github.com/apparentlymart/go-textseg/v13 v13.0.0/go.mod h1:ZK2fH7c4NqDTLtiYLvIkE github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-metrics v0.3.10/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= -github.com/ashanbrown/forbidigo v1.2.0/go.mod h1:vVW7PEdqEFqapJe95xHkTfB1+XvZXBFg8t0sG2FIxmI= -github.com/ashanbrown/forbidigo v1.3.0 h1:VkYIwb/xxdireGAdJNZoo24O4lmnEWkactplBlWTShc= -github.com/ashanbrown/forbidigo v1.3.0/go.mod h1:vVW7PEdqEFqapJe95xHkTfB1+XvZXBFg8t0sG2FIxmI= -github.com/ashanbrown/makezero v0.0.0-20210520155254-b6261585ddde/go.mod h1:oG9Dnez7/ESBqc4EdrdNlryeo7d0KcW1ftXHm7nU/UU= +github.com/ashanbrown/forbidigo v1.6.0 h1:D3aewfM37Yb3pxHujIPSpTf6oQk9sc9WZi8gerOIVIY= +github.com/ashanbrown/forbidigo v1.6.0/go.mod h1:Y8j9jy9ZYAEHXdu723cUlraTqbzjKF1MUyfOKL+AjcU= github.com/ashanbrown/makezero v1.1.1 h1:iCQ87C0V0vSyO+M9E/FZYbu65auqH0lnsOkf5FcB28s= github.com/ashanbrown/makezero v1.1.1/go.mod h1:i1bJLCRSCHOcOa9Y6MyF2FTfMZMFdHvxKHxgO5Z1axI= github.com/aws/aws-sdk-go v1.15.78/go.mod h1:E3/ieXAlvM0XWO57iftYVDLLvQ824smPP3ATZkfNZeM= @@ -695,6 +1255,8 @@ github.com/aws/aws-sdk-go v1.36.30/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2z github.com/aws/aws-sdk-go v1.37.0/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/aws/aws-sdk-go v1.44.122 h1:p6mw01WBaNpbdP2xrisz5tIkcNwzj/HysobNoaAHjgo= github.com/aws/aws-sdk-go v1.44.122/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= +github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= +github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -703,13 +1265,26 @@ github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d h1:xDfNPAt8lFiC1U github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d/go.mod h1:6QX/PXZ00z/TKoufEY6K/a0k6AhaJrQKdFe6OfVXsa4= github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= -github.com/bkielbasa/cyclop v1.2.0 h1:7Jmnh0yL2DjKfw28p86YTd/B4lRGcNuu12sKE35sM7A= -github.com/bkielbasa/cyclop v1.2.0/go.mod h1:qOI0yy6A7dYC4Zgsa72Ppm9kONl0RoIlPbzot9mhmeI= -github.com/bombsimon/wsl/v3 v3.3.0 h1:Mka/+kRLoQJq7g2rggtgQsjuI/K5Efd87WX96EWFxjM= -github.com/bombsimon/wsl/v3 v3.3.0/go.mod h1:st10JtZYLE4D5sC7b8xV4zTKZwAQjCH/Hy2Pm1FNZIc= 
+github.com/bkielbasa/cyclop v1.2.1 h1:AeF71HZDob1P2/pRm1so9cd1alZnrpyc4q2uP2l0gJY= +github.com/bkielbasa/cyclop v1.2.1/go.mod h1:K/dT/M0FPAiYjBgQGau7tz+3TMh4FWAEqlMhzFWCrgM= +github.com/blizzy78/varnamelen v0.8.0 h1:oqSblyuQvFsW1hbBHh1zfwrKe3kcSj0rnXkKzsQ089M= +github.com/blizzy78/varnamelen v0.8.0/go.mod h1:V9TzQZ4fLJ1DSrjVDfl89H7aMnTvKkApdHeyESmyR7k= +github.com/bombsimon/wsl/v4 v4.2.0 h1:dKK3o/Hk2aIt6t72CWg02ham2P5lnH9MBSW6cTU9xxU= +github.com/bombsimon/wsl/v4 v4.2.0/go.mod h1:1zaTbf/7ywOQtMdoUdTF2X1fbbBLiBUkajyuFAanT28= github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= github.com/boombuler/barcode v1.0.1/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= +github.com/breml/bidichk v0.2.7 h1:dAkKQPLl/Qrk7hnP6P+E0xOodrq8Us7+U0o4UBOAlQY= +github.com/breml/bidichk v0.2.7/go.mod h1:YodjipAGI9fGcYM7II6wFvGhdMYsC5pHDlGzqvEW3tQ= +github.com/breml/errchkjson v0.3.6 h1:VLhVkqSBH96AvXEyclMR37rZslRrY2kcyq+31HCsVrA= +github.com/breml/errchkjson v0.3.6/go.mod h1:jhSDoFheAF2RSDOlCfhHO9KqhZgAYLyvHe7bRCX8f/U= +github.com/butuzov/ireturn v0.3.0 h1:hTjMqWw3y5JC3kpnC5vXmFJAWI/m31jaCYQqzkS6PL0= +github.com/butuzov/ireturn v0.3.0/go.mod h1:A09nIiwiqzN/IoVo9ogpa0Hzi9fex1kd9PSD6edP5ZA= +github.com/butuzov/mirror v1.1.0 h1:ZqX54gBVMXu78QLoiqdwpl2mgmoOJTk7s4p4o+0avZI= +github.com/butuzov/mirror v1.1.0/go.mod h1:8Q0BdQU6rC6WILDiBM60DBfvV78OLJmMmixe7GF45AE= +github.com/catenacyber/perfsprint v0.6.0 h1:VSv95RRkk5+BxrU/YTPcnxuMEWar1iMK5Vyh3fWcBfs= +github.com/catenacyber/perfsprint v0.6.0/go.mod h1:/wclWYompEyjUD2FuIIDVKNkqz7IgBIWXIH3V0Zol50= +github.com/ccojocar/zxcvbn-go v1.0.1 h1:+sxrANSCj6CdadkcMnvde/GWU1vZiiXRbqYSCalV4/4= +github.com/ccojocar/zxcvbn-go v1.0.1/go.mod h1:g1qkXtUSvHP8lhHp5GrSmTz6uWALGRMQdw6Qnz/hi60= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= @@ -719,16 +1294,16 @@ github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XL github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/charithe/durationcheck v0.0.8/go.mod h1:SSbRIBVfMjCi/kEB6K65XEA83D6prSM8ap1UCpNKtgg= -github.com/charithe/durationcheck v0.0.9 h1:mPP4ucLrf/rKZiIG/a9IPXHGlh8p4CzgpyTy6EEutYk= -github.com/charithe/durationcheck v0.0.9/go.mod h1:SSbRIBVfMjCi/kEB6K65XEA83D6prSM8ap1UCpNKtgg= -github.com/chavacava/garif v0.0.0-20210405164556-e8a0a408d6af/go.mod h1:Qjyv4H3//PWVzTeCezG2b9IRn6myJxJSr4TD/xo6ojU= -github.com/chavacava/garif v0.0.0-20220630083739-93517212f375 h1:E7LT642ysztPWE0dfz43cWOvMiF42DyTRC+eZIaO4yI= -github.com/chavacava/garif v0.0.0-20220630083739-93517212f375/go.mod h1:4m1Rv7xfuwWPNKXlThldNuJvutYM6J95wNuuVmn55To= +github.com/charithe/durationcheck v0.0.10 h1:wgw73BiocdBDQPik+zcEoBG/ob8uyBHf2iyoHGPf5w4= +github.com/charithe/durationcheck v0.0.10/go.mod h1:bCWXb7gYRysD1CU3C+u4ceO49LoGOY1C1L6uouGNreQ= +github.com/chavacava/garif v0.1.0 h1:2JHa3hbYf5D9dsgseMKAmc/MZ109otzgNFk5s87H9Pc= +github.com/chavacava/garif v0.1.0/go.mod h1:XMyYCkEL58DF0oyW4qDjjnPWONs2HBqYKI+UIPD+Gww= github.com/cheggaaa/pb v1.0.27/go.mod h1:pQciLPpbU0oxA0h+VJYYLxO+XeDQb5pZijXscXHm81s= 
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= @@ -744,17 +1319,16 @@ github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWH github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230428030218-4003588d1b74/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= -github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190620071333-e64a0ec8b42a/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= @@ -763,21 +1337,22 @@ github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsr github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/cristalhq/acmd v0.8.1/go.mod h1:LG5oa43pE/BbxtfMoImHCQN++0Su7dzipdgBjMCBVDQ= -github.com/daixiang0/gci v0.2.8/go.mod h1:+4dZ7TISfSmqfAGv59ePaHfNzgGtIkHAhhdKggP1JAc= -github.com/daixiang0/gci v0.2.9 
h1:iwJvwQpBZmMg31w+QQ6jsyZ54KEATn6/nfARbBNW294= -github.com/daixiang0/gci v0.2.9/go.mod h1:+4dZ7TISfSmqfAGv59ePaHfNzgGtIkHAhhdKggP1JAc= +github.com/cristalhq/acmd v0.11.2/go.mod h1:LG5oa43pE/BbxtfMoImHCQN++0Su7dzipdgBjMCBVDQ= +github.com/curioswitch/go-reassign v0.2.0 h1:G9UZyOcpk/d7Gd6mqYgd8XYWFMw/znxwGDUstnC9DIo= +github.com/curioswitch/go-reassign v0.2.0/go.mod h1:x6OpXuWvgfQaMGks2BZybTngWjT84hqJfKoO8Tt/Roc= +github.com/daixiang0/gci v0.12.1 h1:ugsG+KRYny1VK4oqrX4Vtj70bo4akYKa0tgT1DXMYiY= +github.com/daixiang0/gci v0.12.1/go.mod h1:xtHP9N7AHdNvtRNfcx9gwTDfw7FRJx4bZUsiEfiNNAI= github.com/davecgh/go-spew v0.0.0-20161028175848-04cdfd42973b/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/denis-tingajkin/go-header v0.4.2 h1:jEeSF4sdv8/3cT/WY8AgDHUoItNSoEZ7qg9dX7pc218= -github.com/denis-tingajkin/go-header v0.4.2/go.mod h1:eLRHAVXzE5atsKAnNRDB90WHCFFnBUn4RN0nRcs1LJA= +github.com/denis-tingaikin/go-header v0.4.3 h1:tEaZKAlqql6SKCY++utLmkPLd6K8IBM20Ha7UVm+mtU= +github.com/denis-tingaikin/go-header v0.4.3/go.mod h1:0wOCWuN71D5qIgE2nz9KrKmuYBAC2Mra5RassOIQ2/c= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/emirpasic/gods v1.12.0 h1:QAUIPSaCu4G+POclxeqb3F+WPpdKqFGlw36+yOzGlrg= github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= @@ -792,43 +1367,49 @@ github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go. 
github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34= github.com/envoyproxy/go-control-plane v0.11.0/go.mod h1:VnHyVMpzcLvCFt9yUz1UnCwHLhwx1WguiVDV7pTG/tI= github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f/go.mod h1:sfYdkwUW4BA3PbKjySwjJy+O4Pu0h62rlqCMHNk+K+Q= +github.com/envoyproxy/go-control-plane v0.11.1/go.mod h1:uhMcXKCQMEJHiAb0w+YGefQLaTEw+YhGluxZkrTmD0g= github.com/envoyproxy/protoc-gen-validate v0.0.14/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo= github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= github.com/envoyproxy/protoc-gen-validate v0.10.0/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= -github.com/esimonov/ifshort v1.0.2/go.mod h1:yZqNJUrNn20K8Q9n2CrjTKYyVEmX209Hgu+M1LBpeZE= +github.com/envoyproxy/protoc-gen-validate v1.0.1/go.mod h1:0vj8bNkYbSTNS2PIyH87KZaeN4x9zpL9Qt8fQC7d+vs= +github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= github.com/esimonov/ifshort v1.0.4 h1:6SID4yGWfRae/M7hkVDVVyppy8q/v9OuxNdmjLQStBA= github.com/esimonov/ifshort v1.0.4/go.mod h1:Pe8zjlRrJ80+q2CxHLfEOfTwxCZ4O+MuhcHcfgNWTk0= -github.com/ettle/strcase v0.1.1 h1:htFueZyVeE1XNnMEfbqp5r67qAN/4r6ya1ysq8Q+Zcw= github.com/ettle/strcase v0.1.1/go.mod h1:hzDLsPC7/lwKyBOywSHEP89nt2pDgdy+No1NBA9o9VY= +github.com/ettle/strcase v0.2.0 h1:fGNiVF21fHXpX1niBgk0aROov1LagYsOwV/xqKDKR/Q= +github.com/ettle/strcase v0.2.0/go.mod h1:DajmHElDSaX76ITe3/VHVyMin4LWSJN5Z909Wp+ED1A= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= -github.com/fatih/color v1.12.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= -github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= +github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= github.com/fatih/structtag v1.2.0 h1:/OdNE99OxoI/PqaW/SuSK9uxxT3f/tcSZgon/ssNSx4= github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= +github.com/firefart/nonamedreturns v1.0.4 h1:abzI1p7mAEPYuR4A+VLKn4eNDOycjYo2phmY9sfv40Y= +github.com/firefart/nonamedreturns v1.0.4/go.mod h1:TDhe/tjI1BXo48CmYbUduTV7BdIga8MAO/xbKdcVsGI= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= -github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= github.com/frankban/quicktest v1.14.3/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod 
h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= github.com/fullstorydev/grpcurl v1.6.0/go.mod h1:ZQ+ayqbKMJNhzLmbpCiurTVlaK2M/3nqZCxaQ2Ze/sM= -github.com/fzipp/gocyclo v0.3.1/go.mod h1:DJHO6AUmbdqj2ET4Z9iArSuwWgYDRryYt2wASxc7x3E= github.com/fzipp/gocyclo v0.6.0 h1:lsblElZG7d3ALtGMx9fmxeTKZaLLpU8mET09yN4BBLo= github.com/fzipp/gocyclo v0.6.0/go.mod h1:rXPyn8fnlpa0R2csP/31uerbiVBugk5whMdlyaLkLoA= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/ghostiam/protogetter v0.3.4 h1:5SZ+lZSNmNkSbGVSF9hUHhv/b7ELF9Rwchoq7btYo6c= +github.com/ghostiam/protogetter v0.3.4/go.mod h1:A0JgIhs0fgVnotGinjQiKaFVG3waItLJNwPmcMzDnvk= github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= -github.com/go-critic/go-critic v0.5.6/go.mod h1:cVjj0DfqewQVIlIAGexPCaGaZDAqGE29PYDDADIVNEo= -github.com/go-critic/go-critic v0.6.5 h1:fDaR/5GWURljXwF8Eh31T2GZNz9X4jeboS912mWF8Uo= -github.com/go-critic/go-critic v0.6.5/go.mod h1:ezfP/Lh7MA6dBNn4c6ab5ALv3sKnZVLx37tr00uuaOY= +github.com/go-critic/go-critic v0.11.0 h1:mARtIFX7jPtJ3SzxO9Isa5T2jd2dZxFmQHK3yNf0wrE= +github.com/go-critic/go-critic v0.11.0/go.mod h1:Cz6lr1PlkIu/0Y0U9KqJgcIJJECAF8mEwmzVjKnhbfI= github.com/go-fonts/dejavu v0.1.0/go.mod h1:4Wt4I4OU2Nq9asgDCteaAaWZOV24E+0/Pwo0gppep4g= github.com/go-fonts/latin-modern v0.2.0/go.mod h1:rQVLdDMK+mK1xscDwsqM5J8U2jrRa3T0ecnM9pNujks= github.com/go-fonts/liberation v0.1.1/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY= @@ -853,56 +1434,63 @@ github.com/go-latex/latex v0.0.0-20210823091927-c0d11ff05a81/go.mod h1:SX0U8uGpx github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= -github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM= +github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= +github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-pdf/fpdf v0.5.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M= github.com/go-pdf/fpdf v0.6.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M= github.com/go-redis/redis v6.15.8+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= -github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/go-task/slim-sprig 
v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= github.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68= github.com/go-test/deep v1.0.3/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= -github.com/go-toolsmith/astcast v1.0.0 h1:JojxlmI6STnFVG9yOImLeGREv8W2ocNUM+iOhR6jE7g= -github.com/go-toolsmith/astcast v1.0.0/go.mod h1:mt2OdQTeAQcY4DQgPSArJjHCcOwlX+Wl/kwN+LbLGQ4= -github.com/go-toolsmith/astcopy v1.0.0/go.mod h1:vrgyG+5Bxrnz4MZWPF+pI4R8h3qKRjjyvV/DSez4WVQ= -github.com/go-toolsmith/astcopy v1.0.2 h1:YnWf5Rnh1hUudj11kei53kI57quN/VH6Hp1n+erozn0= +github.com/go-toolsmith/astcast v1.1.0 h1:+JN9xZV1A+Re+95pgnMgDboWNVnIMMQXwfBwLRPgSC8= +github.com/go-toolsmith/astcast v1.1.0/go.mod h1:qdcuFWeGGS2xX5bLM/c3U9lewg7+Zu4mr+xPwZIB4ZU= github.com/go-toolsmith/astcopy v1.0.2/go.mod h1:4TcEdbElGc9twQEYpVo/aieIXfHhiuLh4aLAck6dO7Y= -github.com/go-toolsmith/astequal v1.0.0/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY= +github.com/go-toolsmith/astcopy v1.1.0 h1:YGwBN0WM+ekI/6SS6+52zLDEf8Yvp3n2seZITCUBt5s= +github.com/go-toolsmith/astcopy v1.1.0/go.mod h1:hXM6gan18VA1T/daUEHCFcYiW8Ai1tIwIzHY6srfEAw= github.com/go-toolsmith/astequal v1.0.2/go.mod h1:9Ai4UglvtR+4up+bAD4+hCj7iTo4m/OXVTSLnCyTAx4= -github.com/go-toolsmith/astequal v1.0.3 h1:+LVdyRatFS+XO78SGV4I3TCEA0AC7fKEGma+fH+674o= github.com/go-toolsmith/astequal v1.0.3/go.mod h1:9Ai4UglvtR+4up+bAD4+hCj7iTo4m/OXVTSLnCyTAx4= -github.com/go-toolsmith/astfmt v1.0.0 h1:A0vDDXt+vsvLEdbMFJAUBI/uTbRw1ffOPnxsILnFL6k= -github.com/go-toolsmith/astfmt v1.0.0/go.mod h1:cnWmsOAuq4jJY6Ct5YWlVLmcmLMn1JUPuQIHCY7CJDw= -github.com/go-toolsmith/astinfo v0.0.0-20180906194353-9809ff7efb21/go.mod h1:dDStQCHtmZpYOmjRP/8gHHnCCch3Zz3oEgCdZVdtweU= -github.com/go-toolsmith/astp v1.0.0 h1:alXE75TXgcmupDsMK1fRAy0YUzLzqPVvBKoyWV+KPXg= -github.com/go-toolsmith/astp v1.0.0/go.mod h1:RSyrtpVlfTFGDYRbrjyWP1pYu//tSFcvdYrA8meBmLI= -github.com/go-toolsmith/pkgload v1.0.0/go.mod h1:5eFArkbO80v7Z0kdngIxsRXRMTaX4Ilcwuh3clNrQJc= -github.com/go-toolsmith/pkgload v1.0.2-0.20220101231613-e814995d17c5 h1:eD9POs68PHkwrx7hAB78z1cb6PfGq/jyWn3wJywsH1o= -github.com/go-toolsmith/pkgload v1.0.2-0.20220101231613-e814995d17c5/go.mod h1:3NAwwmD4uY/yggRxoEjk/S00MIV3A+H7rrE3i87eYxM= -github.com/go-toolsmith/strparse v1.0.0 h1:Vcw78DnpCAKlM20kSbAyO4mPfJn/lyYA4BJUDxe2Jb4= +github.com/go-toolsmith/astequal v1.1.0 h1:kHKm1AWqClYn15R0K1KKE4RG614D46n+nqUQ06E1dTw= +github.com/go-toolsmith/astequal v1.1.0/go.mod h1:sedf7VIdCL22LD8qIvv7Nn9MuWJruQA/ysswh64lffQ= +github.com/go-toolsmith/astfmt v1.1.0 h1:iJVPDPp6/7AaeLJEruMsBUlOYCmvg0MoCfJprsOmcco= +github.com/go-toolsmith/astfmt v1.1.0/go.mod h1:OrcLlRwu0CuiIBp/8b5PYF9ktGVZUjlNMV634mhwuQ4= +github.com/go-toolsmith/astp v1.1.0 h1:dXPuCl6u2llURjdPLLDxJeZInAeZ0/eZwFJmqZMnpQA= +github.com/go-toolsmith/astp v1.1.0/go.mod h1:0T1xFGz9hicKs8Z5MfAqSUitoUYS30pDMsRVIDHs8CA= +github.com/go-toolsmith/pkgload v1.2.2 h1:0CtmHq/02QhxcF7E9N5LIFcYFsMR5rdovfqTtRKkgIk= +github.com/go-toolsmith/pkgload v1.2.2/go.mod h1:R2hxLNRKuAsiXCo2i5J6ZQPhnPMOVtU+f0arbFPWCus= github.com/go-toolsmith/strparse v1.0.0/go.mod h1:YI2nUKP9YGZnL/L1/DLFBfixrcjslWct4wyljWhSRy8= -github.com/go-toolsmith/typep v1.0.0/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU= -github.com/go-toolsmith/typep v1.0.2 h1:8xdsa1+FSIH/RhEkgnD1j2CJOy5mNllW1Q9tRiYwvlk= -github.com/go-toolsmith/typep v1.0.2/go.mod 
h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU= -github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b h1:khEcpUM4yFcxg4/FHQWkvVRmgijNXRfzkIDHh23ggEo= -github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM= +github.com/go-toolsmith/strparse v1.1.0 h1:GAioeZUK9TGxnLS+qfdqNbA4z0SSm5zVNtCQiyP2Bvw= +github.com/go-toolsmith/strparse v1.1.0/go.mod h1:7ksGy58fsaQkGQlY8WVoBFNyEPMGuJin1rfoPS4lBSQ= +github.com/go-toolsmith/typep v1.1.0 h1:fIRYDyF+JywLfqzyhdiHzRop/GQDxxNhLGQ6gFUNHus= +github.com/go-toolsmith/typep v1.1.0/go.mod h1:fVIw+7zjdsMxDA3ITWnH1yOiw1rnTQKCsF/sk2H/qig= +github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 h1:TQcrn6Wq+sKGkpyPvppOz99zsMBaUOKXq6HSv655U1c= +github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= +github.com/go-xmlfmt/xmlfmt v1.1.2 h1:Nea7b4icn8s57fTx1M5AI4qQT5HEM3rVUO8MuE6g80U= +github.com/go-xmlfmt/xmlfmt v1.1.2/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= github.com/goccy/go-json v0.9.11/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= -github.com/gofrs/flock v0.8.0/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= +github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -945,23 +1533,20 @@ github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2 h1:23T5iq8rbUYlhpt5 github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4= github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a h1:w8hkcTqaFpzKqonE9uMCefW1WDie15eSP/4MssdenaM= github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod 
h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk= -github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613/go.mod h1:SyvUF2NxV+sN8upjjeVYr5W7tyxaT1JVtvhKhOn2ii8= github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe h1:6RGUuS7EGotKx6J5HIP8ZtyMdiDscjMLfRBSPuzVVeo= github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe/go.mod h1:gjqyPShc/m8pEMpk0a3SeagVb0kaqvhscv+i9jI5ZhQ= -github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a/go.mod h1:9qCChq59u/eW8im404Q2WWTrnBUQKjpNYKMbU4M7EFU= -github.com/golangci/gofmt v0.0.0-20220901101216-f2edd75033f2 h1:amWTbTGqOZ71ruzrdA+Nx5WA3tV1N0goTspwmKCQvBY= -github.com/golangci/gofmt v0.0.0-20220901101216-f2edd75033f2/go.mod h1:9wOXstvyDRshQ9LggQuzBCGysxs3b6Uo/1MvYCR2NMs= -github.com/golangci/golangci-lint v1.41.1 h1:KH28pTSqRu6DTXIAANl1sPXNCmqg4VEH21z6G9Wj4SM= -github.com/golangci/golangci-lint v1.41.1/go.mod h1:LPtcY3aAAU8wydHrKpnanx9Og8K/cblZSyGmI5CJZUk= +github.com/golangci/gofmt v0.0.0-20231018234816-f50ced29576e h1:ULcKCDV1LOZPFxGZaA6TlQbiM3J2GCPnkx/bGF6sX/g= +github.com/golangci/gofmt v0.0.0-20231018234816-f50ced29576e/go.mod h1:Pm5KhLPA8gSnQwrQ6ukebRcapGb/BG9iUkdaiCcGHJM= +github.com/golangci/golangci-lint v1.56.1 h1:vR6rJpjE1w6pRp2EkVeCAbISyUIl6c7OO/hrEtGK1yo= +github.com/golangci/golangci-lint v1.56.1/go.mod h1:sOHqnOxdEZ0u9JYrDuofOaIyO0jRgT8Y6nWfzuoSv0Y= github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0 h1:MfyDlzVjl1hoaPzPD4Gpb/QgoRfSBR0jdhwGyAWwMSA= github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg= github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca h1:kNY3/svz5T29MYHubXix4aDDuE3RWHkPvopM/EDv/MA= github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca/go.mod h1:tvlJhZqDe4LMs4ZHD0oMUlt9G2LWuDGoisJTBzLMV9o= -github.com/golangci/misspell v0.3.5 h1:pLzmVdl3VxTOncgzHcvLOKirdvcx/TydsClUQXTehjo= -github.com/golangci/misspell v0.3.5/go.mod h1:dEbvlSfYbMQDtrpRMQU675gSDLDNa8sCPPChZ7PhiVA= -github.com/golangci/revgrep v0.0.0-20210208091834-cd28932614b5/go.mod h1:LK+zW4MpyytAWQRz0M4xnzEk50lSvqDQKfx304apFkY= -github.com/golangci/revgrep v0.0.0-20220804021717-745bb2f7c2e6 h1:DIPQnGy2Gv2FSA4B/hh8Q7xx3B7AIDk3DAMeHclH1vQ= -github.com/golangci/revgrep v0.0.0-20220804021717-745bb2f7c2e6/go.mod h1:0AKcRCkMoKvUvlf89F6O7H2LYdhr1zBh736mBItOdRs= +github.com/golangci/misspell v0.4.1 h1:+y73iSicVy2PqyX7kmUefHusENlrP9YwuHZHPLGQj/g= +github.com/golangci/misspell v0.4.1/go.mod h1:9mAN1quEo3DlpbaIKKyEvRxK1pwqR9s/Sea1bJCtlNI= +github.com/golangci/revgrep v0.5.2 h1:EndcWoRhcnfj2NHQ+28hyuXpLMF+dQmCN+YaeeIl4FU= +github.com/golangci/revgrep v0.5.2/go.mod h1:bjAMA+Sh/QUfTDcHzxfyHxr4xKvllVr/0sCv2e7jJHA= github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4 h1:zwtduBRr5SSWhqsYNgcuWO2kFlpdOZbP0+yRjmvPGys= github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4/go.mod h1:Izgrg8RkN3rCIMLGE9CyYmU9pY2Jer6DgANEnZ/L/cQ= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= @@ -988,6 +1573,8 @@ github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-github/v57 v57.0.0 h1:L+Y3UPTY8ALM8x+TV0lg+IEBI+upibemtBD8Q9u7zHs= github.com/google/go-github/v57 v57.0.0/go.mod h1:s0omdnye0hvK/ecLvpsGfJMiRt85PimQh4oygmLIxHw= +github.com/google/go-pkcs11 v0.2.0/go.mod h1:6eQoGcuNJpa7jnd5pMGdkSaQpNDYvPlXWMcjXXThLlY= +github.com/google/go-pkcs11 
v0.2.1-0.20230907215043-c6f79328ddf9/go.mod h1:6eQoGcuNJpa7jnd5pMGdkSaQpNDYvPlXWMcjXXThLlY= github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -1014,26 +1601,33 @@ github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/s2a-go v0.1.0/go.mod h1:OJpEgntRZo8ugHpF9hkoLJbS5dSI20XZeXJ9JVywLlM= github.com/google/s2a-go v0.1.3/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A= -github.com/google/s2a-go v0.1.4 h1:1kZ/sQM3srePvKs3tXAvQzo66XfcReoqFpIpIccE7Oc= github.com/google/s2a-go v0.1.4/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A= +github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= +github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= github.com/google/trillian v1.3.11/go.mod h1:0tPraVHrSDkA3BO6vKX67zgLXs6SsOAbHEivX+9mPgw= github.com/google/uuid v0.0.0-20161128191214-064e2069ce9c/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg= github.com/googleapis/enterprise-certificate-proxy v0.2.1/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= -github.com/googleapis/enterprise-certificate-proxy v0.2.3 h1:yk9/cqRKtT9wXZSsRH9aurXEpJX+U6FLtpYTdC3R06k= github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= +github.com/googleapis/enterprise-certificate-proxy v0.2.4/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= +github.com/googleapis/enterprise-certificate-proxy v0.2.5/go.mod h1:RxW0N9901Cko1VOCW3SXCpWP+mlIEkk2tP7jnHy9a3w= +github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= 
+github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= @@ -1047,33 +1641,26 @@ github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57Q github.com/googleapis/gax-go/v2 v2.7.1/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI= github.com/googleapis/gax-go/v2 v2.8.0/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI= github.com/googleapis/gax-go/v2 v2.10.0/go.mod h1:4UOEnMCrxsSqQ940WnTiD6qJ63le2ev3xfyagutxiPw= -github.com/googleapis/gax-go/v2 v2.11.0 h1:9V9PWXEsWnPpQhu/PeQIkS4eGzMlTLGgt80cUUI8Ki4= github.com/googleapis/gax-go/v2 v2.11.0/go.mod h1:DxmR61SGKkGLa2xigwuZIQpkCI2S5iydzRfb3peWZJI= +github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas= +github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU= github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= -github.com/gookit/color v1.4.2/go.mod h1:fqRyamkC1W8uxl+lxCQxOT09l/vYfZ+QeiX3rKQHCoQ= -github.com/gookit/color v1.5.1/go.mod h1:wZFzea4X8qN6vHOSP2apMb4/+w/orMznEzYsIHPaqKM= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/googleapis/google-cloud-go-testing v0.0.0-20210719221736-1c9a4c676720/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= +github.com/gookit/color v1.5.4/go.mod h1:pZJOeOS8DM43rXbp4AZo1n9zCU2qjpcRko0b6/QJi9w= github.com/gordonklaus/ineffassign v0.0.0-20200309095847-7953dde2c7bf/go.mod h1:cuNKsD1zp2v6XfE/orVX2QE1LC+i254ceGcVeDT3pTU= -github.com/gordonklaus/ineffassign v0.0.0-20210225214923-2e10b2664254/go.mod h1:M9mZEtGIsR1oDaZagNPNG9iq9n2HrhZ17dsXk73V3Lw= -github.com/gordonklaus/ineffassign v0.0.0-20210914165742-4cc7213b9bc8 h1:PVRE9d4AQKmbelZ7emNig1+NT27DUmKZn5qXxfio54U= -github.com/gordonklaus/ineffassign v0.0.0-20210914165742-4cc7213b9bc8/go.mod h1:Qcp2HIAYhR7mNUVSIxZww3Guk4it82ghYcEXIAk+QT0= +github.com/gordonklaus/ineffassign v0.1.0 h1:y2Gd/9I7MdY1oEIt+n+rowjBNDcLQq3RsH5hwJd0f9s= +github.com/gordonklaus/ineffassign v0.1.0/go.mod h1:Qcp2HIAYhR7mNUVSIxZww3Guk4it82ghYcEXIAk+QT0= github.com/gorhill/cronexpr v0.0.0-20180427100037-88b0669f7d75/go.mod h1:g2644b03hfBX9Ov0ZBDgXXens4rxSxmqFBbhvKv2yVA= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gostaticanalysis/analysisutil v0.0.0-20190318220348-4088753ea4d3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= -github.com/gostaticanalysis/analysisutil v0.0.3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= -github.com/gostaticanalysis/analysisutil v0.1.0/go.mod h1:dMhHRU9KTiDcuLGdy87/2gTR8WruwYZrKdRq9m1O6uw= github.com/gostaticanalysis/analysisutil v0.4.1/go.mod h1:18U/DLpRgIUd459wGxVHE0fRgmo1UgHDcbw7F5idXu0= 
github.com/gostaticanalysis/analysisutil v0.7.1 h1:ZMCjoue3DtDWQ5WyU16YbjbQEQ3VuzwxALrpYd+HeKk= github.com/gostaticanalysis/analysisutil v0.7.1/go.mod h1:v21E3hY37WKMGSnbsw2S/ojApNWb6C1//mXO48CXbVc= -github.com/gostaticanalysis/comment v1.3.0/go.mod h1:xMicKDx7XRXYdVwY9f9wQpDJVnqWxw9wCauCMKp+IBI= github.com/gostaticanalysis/comment v1.4.1/go.mod h1:ih6ZxzTHLdadaiSnF5WY3dxUoXfXAlTaRzuaNDlSado= github.com/gostaticanalysis/comment v1.4.2 h1:hlnx5+S2fY9Zo9ePo4AhgYsYHbM2+eAv8m/s1JiCd6Q= github.com/gostaticanalysis/comment v1.4.2/go.mod h1:KLUTGDv6HOCotCH8h2erHKmpci2ZoR8VPu34YA2uzdM= -github.com/gostaticanalysis/forcetypeassert v0.0.0-20200621232751-01d4955beaa5/go.mod h1:qZEedyP/sY1lTGV1uJ3VhWZ2mqag3IkWsDHVbplHXak= github.com/gostaticanalysis/forcetypeassert v0.1.0 h1:6eUflI3DiGusXGK6X7cCcIgVCpZ2CiZ1Q7jl6ZxNV70= github.com/gostaticanalysis/forcetypeassert v0.1.0/go.mod h1:qZEedyP/sY1lTGV1uJ3VhWZ2mqag3IkWsDHVbplHXak= github.com/gostaticanalysis/nilerr v0.1.1 h1:ThE+hJP0fEp4zWLkWHWcRyI2Od0p7DlgYG3Uqrmrcpk= @@ -1082,18 +1669,16 @@ github.com/gostaticanalysis/testutil v0.3.1-0.20210208050101-bfb5c8eec0e4/go.mod github.com/gostaticanalysis/testutil v0.4.0 h1:nhdCmubdmDF6VEatUNjgUZBJKWRqugoISdUv3PPQgHY= github.com/gostaticanalysis/testutil v0.4.0/go.mod h1:bLIoPefWXrRi/ssLFWX1dx7Repi5x3CuviD3dgAZaBU= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.2.2/go.mod h1:EaizFBKfUKtMIF5iaDEhniwNedqGo9FuLFzppDr3uwI= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.12.1/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w= -github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= -github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/consul/api v1.12.0/go.mod h1:6pVBMo0ebnYdt2S3H87XhekM/HHrUoTD2XXb/VrZVy0= +github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-checkpoint v0.5.0 h1:MFYpPZCnQqQTE18jFwSII6eUQrD/oxMFp3mlgcqk5mU= @@ -1107,16 +1692,20 @@ github.com/hashicorp/go-getter v1.7.0 h1:bzrYP+qu/gMrL1au7/aDvkoOVGUJpeKBgbqRHAC github.com/hashicorp/go-getter v1.7.0/go.mod h1:W7TalhMmbPmsSMdNjD0ZskARur/9GJ17cfHTRtXV744= github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI= github.com/hashicorp/go-hclog v0.9.2/go.mod 
h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-hclog v1.2.0 h1:La19f8d7WIlm4ogzNHB0JGqs5AUDAZ2UfCY4sJXcJdM= github.com/hashicorp/go-hclog v1.2.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/go-plugin v1.3.0 h1:4d/wJojzvHV1I4i/rrjVaeuyxWrLzDE1mDCyDy8fXS8= github.com/hashicorp/go-plugin v1.3.0/go.mod h1:F9eH4LrE/ZsRdbwhfjs9k9HoDUwAHnYtXdgmf1AVNs0= -github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= +github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= github.com/hashicorp/go-safetemp v1.0.0 h1:2HR189eFNrjHQyENnQMMpCiBAsRxzbTMIgBhEyExpmo= github.com/hashicorp/go-safetemp v1.0.0/go.mod h1:oaerMy3BhqiTbVye6QuFhFtIceqFoDHxNAB65b+Rj1I= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= @@ -1130,7 +1719,6 @@ github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09 github.com/hashicorp/go-version v1.3.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= @@ -1142,9 +1730,10 @@ github.com/hashicorp/hcl/v2 v2.8.2 h1:wmFle3D1vu0okesm8BTLVDyJ6/OL9DCLUwn0b2Opti github.com/hashicorp/hcl/v2 v2.8.2/go.mod h1:bQTN5mpo+jewjJgh8jr0JUguIi7qPHUF6yIfAEN3jqY= github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= -github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= -github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= -github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= +github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc= +github.com/hashicorp/memberlist v0.3.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= +github.com/hashicorp/serf v0.9.6/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= +github.com/hashicorp/serf v0.9.7/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= github.com/hashicorp/terraform-config-inspect 
v0.0.0-20191212124732-c6ae6269b9d7 h1:Pc5TCv9mbxFN6UVX0LH6CpQrdTM5YjbVI2w15237Pjk= github.com/hashicorp/terraform-config-inspect v0.0.0-20191212124732-c6ae6269b9d7/go.mod h1:p+ivJws3dpqbp1iP84+npOyAmTTOLMgCzrXd3GSdn/A= github.com/hashicorp/terraform-exec v0.13.3 h1:R6L2mNpDGSEqtLrSONN8Xth0xYwNrnEVzDz6LF/oJPk= @@ -1160,6 +1749,8 @@ github.com/hashicorp/terraform-svchost v0.0.0-20200729002733-f050f53b9734/go.mod github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d h1:kJCB4vdITiW1eC1vq2e6IsrXKrZit1bv/TDYFGMp4BQ= github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM= +github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huandu/xstrings v1.0.0/go.mod h1:4qWG/gcEcfX4z/mBDHJ++3ReCw9ibxbsNJbcucJdbSo= github.com/huandu/xstrings v1.2.0/go.mod h1:DvyZB1rfVYsBIigL8HwpZgxHwXozlTgGqn63UyNX5k4= @@ -1174,44 +1765,50 @@ github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/inconshreveable/mousetrap v1.0.1 h1:U3uMjPSQEBMNp1lFxmllqCPM6P5u/Xq7Pgzkat/bFNc= github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= +github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= +github.com/jackc/pgx/v5 v5.4.3/go.mod h1:Ig06C2Vu0t5qXC60W8sqIthScaEnFvojjj9dSljmHRA= +github.com/jackc/puddle/v2 v2.2.1/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= -github.com/jgautheron/goconst v1.5.1 h1:HxVbL1MhydKs8R8n/HE5NPvzfaYmQJA3o879lE4+WcM= -github.com/jgautheron/goconst v1.5.1/go.mod h1:aAosetZ5zaeC/2EfMeRswtxUFBpe2Hr7HzkgX4fanO4= +github.com/jgautheron/goconst v1.7.0 h1:cEqH+YBKLsECnRSd4F4TK5ri8t/aXtt/qoL0Ft252B0= +github.com/jgautheron/goconst v1.7.0/go.mod h1:aAosetZ5zaeC/2EfMeRswtxUFBpe2Hr7HzkgX4fanO4= +github.com/jhump/protoreflect v1.6.0 h1:h5jfMVslIg6l29nsMs0D8Wj17RDVdNYti0vDN/PZZoE= github.com/jhump/protoreflect v1.6.0/go.mod h1:eaTn3RZAmMBcV0fifFvlm6VHNz3wSkYyXYWUh7ymB74= github.com/jhump/protoreflect v1.6.1 h1:4/2yi5LyDPP7nN+Hiird1SAJ6YoxUm13/oxHGRnbPd8= github.com/jhump/protoreflect v1.6.1/go.mod h1:RZQ/lnuN+zqeRVpQigTwO6o0AJUkxbnSnpuG7toUTG4= -github.com/jingyugao/rowserrcheck v1.1.0/go.mod h1:TOQpc2SLx6huPfoFGK3UOnEG+u02D3C1GeosjupAKCA= github.com/jingyugao/rowserrcheck v1.1.1 
h1:zibz55j/MJtLsjP1OF4bSdgXxwL1b+Vn7Tjzq7gFzUs= github.com/jingyugao/rowserrcheck v1.1.1/go.mod h1:4yvlZSDb3IyDTUZJUmpZfm2Hwok+Dtp+nu2qOq+er9c= github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af h1:KA9BjwUk7KlCh6S9EAGWBt1oExIUv9WyNCiRz5amv48= github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af/go.mod h1:HEWGJkRDzjJY2sqdDwxccsGicWEf9BQOZsq2tV+xzM0= +github.com/jjti/go-spancheck v0.5.2 h1:WXTZG3efY/ji1Vi8mkH+23O3bLeKR6hp3tI3YB7XwKk= +github.com/jjti/go-spancheck v0.5.2/go.mod h1:ARPNI1JRG1V2Rjnd6/2f2NEfghjSVDZGVmruNKlnXU0= github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= -github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks= +github.com/jmoiron/sqlx v1.3.5/go.mod h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/jonboulle/clockwork v0.2.0/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= github.com/josharian/txtarfs v0.0.0-20210218200122-0702f000015a/go.mod h1:izVPOvVRsHiKkeGCT6tYBNWyDVuzj9wAaBb5R9qamfw= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/juju/ratelimit v1.0.1/go.mod h1:qapgC/Gy+xNh9UxzV13HGGl/6UXNN+ct+vwSgWNm/qk= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= -github.com/julz/importas v0.0.0-20210419104244-841f0c0fe66d/go.mod h1:oSFU2R4XK/P7kNBrnL/FEQlDGN1/6WoxXEjSSXO0DV0= github.com/julz/importas v0.1.0 h1:F78HnrsjY3cR7j0etXy5+TU1Zuy7Xt08X/1aJnH5xXY= github.com/julz/importas v0.1.0/go.mod h1:oSFU2R4XK/P7kNBrnL/FEQlDGN1/6WoxXEjSSXO0DV0= github.com/jung-kurt/gofpdf v1.0.0/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= @@ -1223,15 +1820,17 @@ github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351/go.mod h1:CT github.com/keybase/go-crypto v0.0.0-20161004153544-93f5b35093ba/go.mod h1:ghbZscTyKdM07+Fw3KSi0hcJm+AlEUWj8QLlPtijN/M= github.com/kisielk/errcheck 
v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= -github.com/kisielk/errcheck v1.6.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= -github.com/kisielk/errcheck v1.6.2 h1:uGQ9xI8/pgc9iOoCe7kWQgRE6SBTrCGmTSf0LrEtY7c= -github.com/kisielk/errcheck v1.6.2/go.mod h1:nXw/i/MfnvRHqXa7XXmQMUB0oNFGuBrNI8d8NLy0LPw= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/errcheck v1.7.0 h1:+SbscKmWJ5mOK/bO1zS60F5I9WwZDWOfRsC4RwfwRV0= +github.com/kisielk/errcheck v1.7.0/go.mod h1:1kLL+jV4e+CFfueBmI1dSK2ADDyQnlrnrY/FqKluHJQ= github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kkHAIKE/contextcheck v1.1.4 h1:B6zAaLhOEEcjvUgIYEqystmnFk1Oemn8bvJhbt0GMb8= +github.com/kkHAIKE/contextcheck v1.1.4/go.mod h1:1+i/gWqokIa+dm31mqGLZhZJ7Uh44DJGZVmr6QRBNJg= github.com/klauspost/asmfmt v1.3.2/go.mod h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j0HLHbNSE= -github.com/klauspost/compress v1.10.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/klauspost/compress v1.11.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.2/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= +github.com/klauspost/compress v1.13.5/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= github.com/klauspost/compress v1.15.11 h1:Lcadnb3RKGin4FYM/orgq0qde+nc15E5Cbqg4B9Sx9c= github.com/klauspost/compress v1.15.11/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM= @@ -1242,53 +1841,63 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxv github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/kulti/thelper v0.4.0/go.mod h1:vMu2Cizjy/grP+jmsvOFDx1kYP6+PD1lqg4Yu5exl2U= github.com/kulti/thelper v0.6.3 h1:ElhKf+AlItIu+xGnI990no4cE2+XaSu1ULymV2Yulxs= github.com/kulti/thelper v0.6.3/go.mod h1:DsqKShOvP40epevkFrvIwkCMNYxMeTNjdWL4dqWHZ6I= -github.com/kunwardeep/paralleltest v1.0.2/go.mod h1:ZPqNm1fVHPllh5LPVujzbVz1JN2GhLxSfY+oqUsvG30= -github.com/kunwardeep/paralleltest v1.0.6 h1:FCKYMF1OF2+RveWlABsdnmsvJrei5aoyZoaGS+Ugg8g= -github.com/kunwardeep/paralleltest v1.0.6/go.mod 
h1:Y0Y0XISdZM5IKm3TREQMZ6iteqn1YuwCsJO/0kL9Zes= +github.com/kunwardeep/paralleltest v1.0.9 h1:3Sr2IfFNcsMmlqPk1cjTUbJ4zofKPGyHxenwPebgTug= +github.com/kunwardeep/paralleltest v1.0.9/go.mod h1:2C7s65hONVqY7Q5Efj5aLzRCNLjw2h4eMc9EcypGjcY= github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/kyoh86/exportloopref v0.1.8 h1:5Ry/at+eFdkX9Vsdw3qU4YkvGtzuVfzT4X7S77LoN/M= -github.com/kyoh86/exportloopref v0.1.8/go.mod h1:1tUcJeiioIs7VWe5gcOObrux3lb66+sBqGZrRkMwPgg= -github.com/ldez/gomoddirectives v0.2.1/go.mod h1:sGicqkRgBOg//JfpXwkB9Hj0X5RyJ7mlACM5B9f6Me4= +github.com/kyoh86/exportloopref v0.1.11 h1:1Z0bcmTypkL3Q4k+IDHMWTcnCliEZcaPiIe0/ymEyhQ= +github.com/kyoh86/exportloopref v0.1.11/go.mod h1:qkV4UF1zGl6EkF1ox8L5t9SwyeBAZ3qLMd6up458uqA= github.com/ldez/gomoddirectives v0.2.3 h1:y7MBaisZVDYmKvt9/l1mjNCiSA1BVn34U0ObUcJwlhA= github.com/ldez/gomoddirectives v0.2.3/go.mod h1:cpgBogWITnCfRq2qGoDkKMEVSaarhdBr6g8G04uz6d0= -github.com/ldez/tagliatelle v0.2.0/go.mod h1:8s6WJQwEYHbKZDsp/LjArytKOG8qaMrKQQ3mFukHs88= -github.com/ldez/tagliatelle v0.3.1 h1:3BqVVlReVUZwafJUwQ+oxbx2BEX2vUG4Yu/NOfMiKiM= -github.com/ldez/tagliatelle v0.3.1/go.mod h1:8s6WJQwEYHbKZDsp/LjArytKOG8qaMrKQQ3mFukHs88= +github.com/ldez/tagliatelle v0.5.0 h1:epgfuYt9v0CG3fms0pEgIMNPuFf/LpPIfjk4kyqSioo= +github.com/ldez/tagliatelle v0.5.0/go.mod h1:rj1HmWiL1MiKQuOONhd09iySTEkUuE/8+5jtPYz9xa4= +github.com/leonklingele/grouper v1.1.1 h1:suWXRU57D4/Enn6pXR0QVqqWWrnJ9Osrz+5rjt8ivzU= +github.com/leonklingele/grouper v1.1.1/go.mod h1:uk3I3uDfi9B6PeUjsCKi6ndcf63Uy7snXgR4yDYQVDY= github.com/letsencrypt/pkcs11key/v4 v4.0.0/go.mod h1:EFUvBDay26dErnNb70Nd0/VW3tJiIbETBPTl9ATXQag= -github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lib/pq v1.9.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/lib/pq v1.10.6/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4= +github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/lufeee/execinquery v1.2.1 h1:hf0Ems4SHcUGBxpGN7Jz78z1ppVkP/837ZlETPCEtOM= +github.com/lufeee/execinquery v1.2.1/go.mod h1:EC7DrEKView09ocscGHC+apXMIaorh4xqSxS/dy8SbM= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= github.com/lyft/protoc-gen-star/v2 v2.0.1/go.mod h1:RcCdONR2ScXaYnQC5tUzxzlpA3WVYF7/opLeUgcQs/o= +github.com/lyft/protoc-gen-star/v2 v2.0.3/go.mod h1:amey7yeodaJhXSbf/TlLvWiqQfLOSpEk//mLlc+axEk= +github.com/macabu/inamedparam v0.1.3 h1:2tk/phHkMlEL/1GNe/Yf6kkR/hkcUdAEY3L0hjYV1Mk= +github.com/macabu/inamedparam v0.1.3/go.mod h1:93FLICAIk/quk7eaPPQvbzihUdn/QkGDwIZEoLtpH6I= +github.com/magefile/mage v1.14.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= 
-github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.6 h1:5ibWZ6iY0NctNGWo87LalDlEZ6R41TqbbDamhfG/Qzo= github.com/magiconair/properties v1.8.6/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= -github.com/maratori/testpackage v1.0.1/go.mod h1:ddKdw+XG0Phzhx8BFDTKgpWP4i7MpApTE5fXSKAqwDU= -github.com/maratori/testpackage v1.1.0 h1:GJY4wlzQhuBusMF1oahQCBtUV/AQ/k69IZ68vxaac2Q= -github.com/maratori/testpackage v1.1.0/go.mod h1:PeAhzU8qkCwdGEMTEupsHJNlQu2gZopMC6RjbhmHeDc= -github.com/matoous/godox v0.0.0-20210227103229-6504466cf951 h1:pWxk9e//NbPwfxat7RXkts09K+dEBJWakUWwICVqYbA= -github.com/matoous/godox v0.0.0-20210227103229-6504466cf951/go.mod h1:1BELzlh859Sh1c6+90blK8lbYy0kwQf1bYlBhBysy1s= +github.com/maratori/testableexamples v1.0.0 h1:dU5alXRrD8WKSjOUnmJZuzdxWOEQ57+7s93SLMxb2vI= +github.com/maratori/testableexamples v1.0.0/go.mod h1:4rhjL1n20TUTT4vdh3RDqSizKLyXp7K2u6HgraZCGzE= +github.com/maratori/testpackage v1.1.1 h1:S58XVV5AD7HADMmD0fNnziNHqKvSdDuEKdPD1rNTU04= +github.com/maratori/testpackage v1.1.1/go.mod h1:s4gRK/ym6AMrqpOa/kEbQTV4Q4jb7WeLZzVhVVVOQMc= +github.com/matoous/godox v0.0.0-20230222163458-006bad1f9d26 h1:gWg6ZQ4JhDfJPqlo2srm/LN17lpybq15AryXIRcWYLE= +github.com/matoous/godox v0.0.0-20230222163458-006bad1f9d26/go.mod h1:1BELzlh859Sh1c6+90blK8lbYy0kwQf1bYlBhBysy1s= +github.com/matryer/is v1.4.0 h1:sosSmIWwkYITGrxZ25ULNDeKiMNzFSr4V/eqBQP0PeE= +github.com/matryer/is v1.4.0/go.mod h1:8I/i5uYgLzgsgEloJE1U6xx5HkBQpAZvepWuujKwMRU= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= @@ -1299,33 +1908,33 @@ github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcME github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= -github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.6/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= 
github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= -github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= +github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= -github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw= +github.com/mattn/go-sqlite3 v1.14.15/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/mbilski/exhaustivestruct v1.2.0 h1:wCBmUnSYufAHO6J4AVWY6ff+oxWxsVFrwgOdMUQePUo= github.com/mbilski/exhaustivestruct v1.2.0/go.mod h1:OeTBVxQWoEmB2J2JCHmXWPJ0aksxSUOUy+nvtVEfzXc= -github.com/mgechev/dots v0.0.0-20190921121421-c36f7dcfbb81/go.mod h1:KQ7+USdGKfpPjXk4Ga+5XxQM4Lm4e3gAogrreFAYpOg= -github.com/mgechev/dots v0.0.0-20210922191527-e955255bf517 h1:zpIH83+oKzcpryru8ceC6BxnoG8TBrhgAvRg8obzup0= github.com/mgechev/dots v0.0.0-20210922191527-e955255bf517/go.mod h1:KQ7+USdGKfpPjXk4Ga+5XxQM4Lm4e3gAogrreFAYpOg= -github.com/mgechev/revive v1.0.7/go.mod h1:vuE5ox/4L/HDd63MCcCk3H6wTLQ6XXezRphJ8cJJOxY= -github.com/mgechev/revive v1.2.4 h1:+2Hd/S8oO2H0Ikq2+egtNwQsVhAeELHjxjIUFX5ajLI= -github.com/mgechev/revive v1.2.4/go.mod h1:iAWlQishqCuj4yhV24FTnKSXGpbAA+0SckXB8GQMX/Q= -github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/mgechev/revive v1.3.7 h1:502QY0vQGe9KtYJ9FpxMz9rL+Fc/P13CI5POL4uHCcE= +github.com/mgechev/revive v1.3.7/go.mod h1:RJ16jUbF0OWC3co/+XTxmFNgEpUPwnnA0BRllX2aDNA= +github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/dns v1.1.35/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= +github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= github.com/miekg/pkcs11 v1.0.2/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY= github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3/go.mod h1:RagcQ7I8IeTMnF8JTXieKnO4Z6JCsikNEzj0DwauVzE= -github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= github.com/mitchellh/cli v1.1.2 h1:PvH+lL2B7IQ101xQL63Of8yFS2y+aDlsFcsqNc+u/Kw= github.com/mitchellh/cli v1.1.2/go.mod h1:6iaV0fGdElS6dPBx0EApTxHrcWvmJphyh2n8YBLPPZ4= github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db h1:62I3jR2EmQ4l5rM/4FEfDWcRD+abF5XlKShorW5LRoQ= @@ -1344,10 +1953,9 @@ github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= github.com/mitchellh/go-wordwrap v1.0.0 h1:6GlHJ/LTGMrIJbwgdqdl2eEH8o+Exx/0m8ir9Gns0u4= github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= -github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= -github.com/mitchellh/iochan v1.0.0/go.mod 
h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= @@ -1359,33 +1967,28 @@ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lN github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= -github.com/moricho/tparallel v0.2.1 h1:95FytivzT6rYzdJLdtfn6m1bfFJylOJK41+lgv/EHf4= -github.com/moricho/tparallel v0.2.1/go.mod h1:fXEIZxG2vdfl0ZF8b42f5a78EhjjD5mX8qUplsoSU4k= +github.com/moricho/tparallel v0.3.1 h1:fQKD4U1wRMAYNngDonW5XupoB/ZGJHdpzrWqgyg9krA= +github.com/moricho/tparallel v0.3.1/go.mod h1:leENX2cUv7Sv2qDgdi0D0fCftN8fRC67Bcn8pqzeYNI= github.com/mozilla/scribe v0.0.0-20180711195314-fb71baf557c1/go.mod h1:FIczTrinKo8VaLxe6PWTPEXRXDIHz2QAwiaBaP5/4a8= -github.com/mozilla/tls-observatory v0.0.0-20210209181001-cf43108d6880/go.mod h1:FUqVoUPHSEdDR0MnFM3Dh8AU0pZHLXUD127SAJGER/s= github.com/mozilla/tls-observatory v0.0.0-20210609171429-7bc42856d2e5/go.mod h1:FUqVoUPHSEdDR0MnFM3Dh8AU0pZHLXUD127SAJGER/s= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-proto-validators v0.0.0-20180403085117-0950a7990007/go.mod h1:m2XC9Qq0AlmmVksL6FktJCdTYyLk7V3fKyp0sl1yWQo= github.com/mwitkow/go-proto-validators v0.2.0/go.mod h1:ZfA1hW+UH/2ZHOWvQ3HnQaU0DtnpXu850MZiy+YUgcc= -github.com/nakabonne/nestif v0.3.0/go.mod h1:dI314BppzXjJ4HsCnbo7XzrJHPszZsjnk5wEBSYHI2c= github.com/nakabonne/nestif v0.3.1 h1:wm28nZjhQY5HyYPx+weN3Q65k6ilSBxDb8v5S81B81U= github.com/nakabonne/nestif v0.3.1/go.mod h1:9EtoZochLn5iUprVDmDjqGKPofoUEBL8U4Ngq6aY7OE= -github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354 h1:4kuARK6Y6FxaNu/BnU2OAaLF86eTVhP2hjTB6iMvItA= -github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354/go.mod h1:KSVJerMDfblTH7p5MZaTt+8zaT2iEk3AkVb9PQdZuE8= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/nishanths/exhaustive v0.1.0/go.mod h1:S1j9110vxV1ECdCudXRkeMnFQ/DQk9ajLT0Uf2MYZQQ= -github.com/nishanths/exhaustive v0.8.3 h1:pw5O09vwg8ZaditDp/nQRqVnrMczSJDxRDJMowvhsrM= -github.com/nishanths/exhaustive v0.8.3/go.mod h1:qj+zJJUgJ76tR92+25+03oYUhzF4R7/2Wk7fGTfCHmg= +github.com/nishanths/exhaustive v0.12.0 h1:vIY9sALmw6T/yxiASewa4TQcFsVYZQQRUQJhKRf3Swg= +github.com/nishanths/exhaustive v0.12.0/go.mod h1:mEZ95wPIZW+x8kC4TgC+9YCUgiST7ecevsVDTgc2obs= github.com/nishanths/predeclared v0.0.0-20190419143655-18a43bb90ffc/go.mod h1:62PewwiQTlm/7Rj+cxVYqZvDIUc+JjZq6GHAC1fsObQ= -github.com/nishanths/predeclared v0.2.1/go.mod h1:HvkGJcA3naj4lOwnFXFDkFxVtSqQMB9sbB1usJ+xjQE= 
github.com/nishanths/predeclared v0.2.2 h1:V2EPdZPliZymNAn79T8RkNApBjMmVKh5XRpLm/w98Vk= github.com/nishanths/predeclared v0.2.2/go.mod h1:RROzoN6TnGQupbC+lqggsOlcgysk3LMK/HI84Mp280c= +github.com/nunnatsa/ginkgolinter v0.15.2 h1:N2ORxUxPU56R9gsfLIlVVvCv/V/VVou5qVI1oBKBNHg= +github.com/nunnatsa/ginkgolinter v0.15.2/go.mod h1:oYxE7dt1vZI8cK2rZOs3RgTaBN2vggkqnENmoJ8kVvc= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= -github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/olekukonko/tablewriter v0.0.2/go.mod h1:rSAaSIOAGT9odnlyGlUfAJaoc5w2fSBUmeGDbRWPxyQ= @@ -1394,34 +1997,62 @@ github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6 github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.16.1 h1:foqVmeWDD6yYpK+Yz3fHyNIxFYNxswxqNFjSKe+vI54= -github.com/onsi/ginkgo v1.16.1/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E= github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= -github.com/onsi/ginkgo/v2 v2.1.4 h1:GNapqRSid3zijZ9H77KrgVG4/8KqiyRsxcSxe+7ApXY= github.com/onsi/ginkgo/v2 v2.1.4/go.mod h1:um6tUpWM/cxCK3/FK8BXqEiUMUwRgSM4JXG47RKZmLU= +github.com/onsi/ginkgo/v2 v2.1.6/go.mod h1:MEH45j8TBi6u9BMogfbp0stKC5cdGjumZj5Y7AG4VIk= +github.com/onsi/ginkgo/v2 v2.3.0/go.mod h1:Eew0uilEqZmIEZr8JrvYlvOM7Rr6xzTmMV8AyFNU9d0= +github.com/onsi/ginkgo/v2 v2.4.0/go.mod h1:iHkDK1fKGcBoEHT5W7YBq4RFWaQulw+caOMkAt4OrFo= +github.com/onsi/ginkgo/v2 v2.5.0/go.mod h1:Luc4sArBICYCS8THh8v3i3i5CuSZO+RaQRaJoeNwomw= +github.com/onsi/ginkgo/v2 v2.7.0/go.mod h1:yjiuMwPokqY1XauOgju45q3sJt6VzQ/Fict1LFVcsAo= +github.com/onsi/ginkgo/v2 v2.8.1/go.mod h1:N1/NbDngAFcSLdyZ+/aYTYGSlq9qMCS/cNKGJjy+csc= +github.com/onsi/ginkgo/v2 v2.9.0/go.mod h1:4xkjoL/tZv4SMWeww56BU5kAt19mVB47gTWxmrTcxyk= +github.com/onsi/ginkgo/v2 v2.9.1/go.mod h1:FEcmzVcCHl+4o9bQZVab+4dC9+j+91t2FHSzmGAPfuo= +github.com/onsi/ginkgo/v2 v2.9.2/go.mod h1:WHcJJG2dIlcCqVfBAwUCrJxSPFb6v4azBwgxeMeDuts= +github.com/onsi/ginkgo/v2 v2.9.5/go.mod h1:tvAoo1QUJwNEU2ITftXTpR7R1RbCzoZUOs3RonqW57k= +github.com/onsi/ginkgo/v2 v2.9.7/go.mod h1:cxrmXWykAwTwhQsJOPfdIDiJ+l2RYq7U8hFU+M/1uw0= +github.com/onsi/ginkgo/v2 v2.11.0/go.mod h1:ZhrRA5XmEE3x3rhlzamx/JJvujdZoJ2uvgI7kR0iZvM= +github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4= +github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.11.0/go.mod h1:azGKhqFUon9Vuj0YmTfLSmx0FUwqXYSTl5re8lQLTUg= 
github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= -github.com/onsi/gomega v1.20.0 h1:8W0cWlwFkflGPLltQvLRB7ZVD5HuP6ng320w2IS245Q= -github.com/onsi/gomega v1.20.0/go.mod h1:DtrZpjmvpn2mPm4YWQa0/ALMDj9v4YxLgojwPeREyVo= +github.com/onsi/gomega v1.20.1/go.mod h1:DtrZpjmvpn2mPm4YWQa0/ALMDj9v4YxLgojwPeREyVo= +github.com/onsi/gomega v1.21.1/go.mod h1:iYAIXgPSaDHak0LCMA+AWBpIKBr8WZicMxnE8luStNc= +github.com/onsi/gomega v1.22.1/go.mod h1:x6n7VNe4hw0vkyYUM4mjIXx3JbLiPaBPNgB7PRQ1tuM= +github.com/onsi/gomega v1.24.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg= +github.com/onsi/gomega v1.24.1/go.mod h1:3AOiACssS3/MajrniINInwbfOOtfZvplPzuRSmvt1jM= +github.com/onsi/gomega v1.26.0/go.mod h1:r+zV744Re+DiYCIPRlYOTxn0YkOLcAnW8k1xXdMPGhM= +github.com/onsi/gomega v1.27.1/go.mod h1:aHX5xOykVYzWOV4WqQy0sy8BQptgukenXpCXfadcIAw= +github.com/onsi/gomega v1.27.3/go.mod h1:5vG284IBtfDAmDyrK+eGyZmUgUlmi+Wngqo557cZ6Gw= +github.com/onsi/gomega v1.27.4/go.mod h1:riYq/GJKh8hhoM01HN6Vmuy93AarCXCBGpvFDK3q3fQ= +github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg= +github.com/onsi/gomega v1.27.7/go.mod h1:1p8OOlwo2iUUDsHnOrjE5UKYJ+e3W8eQ3qSlRahPmr4= +github.com/onsi/gomega v1.27.8/go.mod h1:2J8vzI/s+2shY9XHRApDkdgPo1TKT7P2u6fXeJKFnNQ= +github.com/onsi/gomega v1.27.10/go.mod h1:RsS8tutOdbdgzbPtzzATp12yT7kM5I5aElG3evPbQ0M= +github.com/onsi/gomega v1.28.1 h1:MijcGUbfYuznzK/5R4CPNoUP/9Xvuo20sXfEm6XxoTA= +github.com/onsi/gomega v1.28.1/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/otiai10/copy v1.2.0 h1:HvG945u96iNadPoG2/Ja2+AUJeW5YuFQMixq9yirC+k= github.com/otiai10/copy v1.2.0/go.mod h1:rrF5dJ5F0t/EWSYODDu4j9/vEeYHMkc8jt0zJChqQWw= +github.com/otiai10/copy v1.14.0 h1:dCI/t1iTdYGtkvCuBG2BgR6KZa83PTclw4U5n2wAllU= +github.com/otiai10/copy v1.14.0/go.mod h1:ECfuL02W+/FkTWZWgQqXPWZgW9oeKCSQ5qVfSc4qc4w= github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE= github.com/otiai10/curr v1.0.0/go.mod h1:LskTG5wDwr8Rs+nNQ+1LlxRjAtTZZjtJW4rMXl6j4vs= github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo= github.com/otiai10/mint v1.3.1/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc= +github.com/otiai10/mint v1.5.1/go.mod h1:MJm72SBthJjz8qhefc4z1PYEieWmy8Bku7CjcAqyUSM= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/pelletier/go-toml/v2 v2.0.1/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo= +github.com/pelletier/go-toml/v2 v2.0.2/go.mod h1:MovirKjgVRESsAvNZlAjtFwV867yGuwRkXbG66OzopI= +github.com/pelletier/go-toml/v2 v2.0.5 h1:ipoSadvV8oGUjnUbMub59IDPPwfxF694nG/jwbMiyQg= +github.com/pelletier/go-toml/v2 v2.0.5/go.mod h1:OMHamSCAODeSsVrwwvcJOaoN0LIUIaFVNZzmWyNfXas= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod 
h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= -github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d h1:CdDQnGF8Nq9ocOS/xlSptM1N3BbrA6/kmaep5ggwaIA= github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d/go.mod h1:3OzsM7FXDQlpCiw2j81fOmAwQLnZnLGXVKUzeKQXIAw= github.com/phpdave11/gofpdf v1.4.2/go.mod h1:zpO6xFn9yxo3YLyMvW8HcKWVdbNqgIfOOp2dXMnm1mY= github.com/phpdave11/gofpdi v1.0.12/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= @@ -1435,21 +2066,23 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= +github.com/pkg/sftp v1.13.6/go.mod h1:tz1ryNURKu77RL+GuCzmoJYxQczL3wLNNpPWagdg4Qk= github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/polyfloyd/go-errorlint v0.0.0-20210510181950-ab96adb96fea/go.mod h1:wi9BfjxjF/bwiZ701TzmfKu6UKC357IOAtNr0Td0Lvw= -github.com/polyfloyd/go-errorlint v1.0.5 h1:AHB5JRCjlmelh9RrLxT9sgzpalIwwq4hqE8EkwIwKdY= -github.com/polyfloyd/go-errorlint v1.0.5/go.mod h1:APVvOesVSAnne5SClsPxPdfvZTVDojXh1/G3qb5wjGI= +github.com/polyfloyd/go-errorlint v1.4.8 h1:jiEjKDH33ouFktyez7sckv6pHWif9B7SuS8cutDXFHw= +github.com/polyfloyd/go-errorlint v1.4.8/go.mod h1:NNCxFcFjZcw3xNjVdCchERkEM6Oz7wta2XJVxRftwO4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/posener/complete v1.2.1/go.mod h1:6gapUrK/U1TAN7ciCoNRIdVC5sbdBTUh1DKN0g6uH7E= github.com/posener/complete v1.2.3 h1:NP0eAhjcjImqslEwo/1hq7gpajME0fTLTezBKDqfXqo= github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= +github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.12.1 h1:ZiaPsmm9uiBeaSMRznKsCDNtPCS0T3JVDGF+06gjBzk= github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= @@ -1457,43 +2090,39 @@ github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1: github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.2.0/go.mod 
h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= -github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= +github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4= github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU= github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/pseudomuto/protoc-gen-doc v1.3.2/go.mod h1:y5+P6n3iGrbKG+9O04V5ld71in3v/bX88wUwgt+U8EA= github.com/pseudomuto/protokit v0.2.0/go.mod h1:2PdH30hxVHsup8KpBTOXTBeMVhJZVio3Q8ViKSAXT0Q= -github.com/quasilyte/go-consistent v0.0.0-20190521200055-c6f3937de18c/go.mod h1:5STLWrekHfjyYwxBRVRXNOSewLJ3PWfDJd1VyTS21fI= github.com/quasilyte/go-ruleguard v0.3.1-0.20210203134552-1b5a410e1cc8/go.mod h1:KsAh3x0e7Fkpgs+Q9pNLS5XpFSvYCEVl5gP9Pp1xp30= -github.com/quasilyte/go-ruleguard v0.3.4/go.mod h1:57FZgMnoo6jqxkYKmVj5Fc8vOt0rVzoE/UNAmFFIPqA= -github.com/quasilyte/go-ruleguard v0.3.18 h1:sd+abO1PEI9fkYennwzHn9kl3nqP6M5vE7FiOzZ+5CE= -github.com/quasilyte/go-ruleguard v0.3.18/go.mod h1:lOIzcYlgxrQ2sGJ735EHXmf/e9MJ516j16K/Ifcttvs= +github.com/quasilyte/go-ruleguard v0.4.0 h1:DyM6r+TKL+xbKB4Nm7Afd1IQh9kEUKQs2pboWGKtvQo= +github.com/quasilyte/go-ruleguard v0.4.0/go.mod h1:Eu76Z/R8IXtViWUIHkE3p8gdH3/PKk1eh3YGfaEof10= github.com/quasilyte/go-ruleguard/dsl v0.3.0/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU= -github.com/quasilyte/go-ruleguard/dsl v0.3.2/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU= -github.com/quasilyte/go-ruleguard/dsl v0.3.21/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU= +github.com/quasilyte/go-ruleguard/dsl v0.3.22/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU= github.com/quasilyte/go-ruleguard/rules v0.0.0-20201231183845-9e62ed36efe1/go.mod h1:7JTjp89EGyU1d6XfBiXihJNG37wB2VRkd125Q1u7Plc= -github.com/quasilyte/go-ruleguard/rules 
v0.0.0-20210203162857-b223e0831f88/go.mod h1:4cgAphtvu7Ftv7vOT2ZOYhC6CvBxZixcasr8qIOTA50= github.com/quasilyte/go-ruleguard/rules v0.0.0-20211022131956-028d6511ab71/go.mod h1:4cgAphtvu7Ftv7vOT2ZOYhC6CvBxZixcasr8qIOTA50= -github.com/quasilyte/gogrep v0.0.0-20220828223005-86e4605de09f h1:6Gtn2i04RD0gVyYf2/IUMTIs+qYleBt4zxDqkLTcu4U= -github.com/quasilyte/gogrep v0.0.0-20220828223005-86e4605de09f/go.mod h1:Cm9lpz9NZjEoL1tgZ2OgeUKPIxL1meE7eo60Z6Sk+Ng= -github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95 h1:L8QM9bvf68pVdQ3bCFZMDmnt9yqcMBro1pC7F+IPYMY= -github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95/go.mod h1:rlzQ04UMyJXu/aOvhd8qT+hvDrFpiwqp8MRXDY9szc0= +github.com/quasilyte/gogrep v0.5.0 h1:eTKODPXbI8ffJMN+W2aE0+oL0z/nh8/5eNdiO34SOAo= +github.com/quasilyte/gogrep v0.5.0/go.mod h1:Cm9lpz9NZjEoL1tgZ2OgeUKPIxL1meE7eo60Z6Sk+Ng= +github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727 h1:TCg2WBOl980XxGFEZSS6KlBGIV0diGdySzxATTWoqaU= +github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727/go.mod h1:rlzQ04UMyJXu/aOvhd8qT+hvDrFpiwqp8MRXDY9szc0= github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 h1:M8mH9eK4OUR4lu7Gd+PU1fV2/qnDNfzT635KRSObncs= github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567/go.mod h1:DWNGW8A4Y+GyBgPuaQJuWiy0XYftx4Xm/y5Jqk9I6VQ= github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/remyoudompheng/go-dbus v0.0.0-20121104212943-b7232d34b1d5/go.mod h1:+u151txRmLpwxBmpYn9z3d1sdJdjRPQpsXuYeY9jNls= github.com/remyoudompheng/go-liblzma v0.0.0-20190506200333-81bf2d431b96/go.mod h1:90HvCY7+oHHUKkbeMCiHt1WuFR2/hPJ9QrljDG+v6ls= github.com/remyoudompheng/go-misc v0.0.0-20190427085024-2d6ac652a50e/go.mod h1:80FQABjoFzZ2M5uEa6FUaJYEmqU2UOKojlFVak1UAwI= @@ -1501,35 +2130,40 @@ github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6So github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= -github.com/rogpeppe/go-internal v1.6.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= -github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o= -github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w= github.com/ruudk/golang-pdf417 
v0.0.0-20201230142125-a7e3863a1245/go.mod h1:pQAZKsJ8yyVxGRWYNEm9oFB8ieLgKFnamEyDmSA0BRk= -github.com/ryancurrah/gomodguard v1.2.2/go.mod h1:tpI+C/nzvfUR3bF28b5QHpTn/jM/zlGniI++6ZlIWeE= -github.com/ryancurrah/gomodguard v1.2.4 h1:CpMSDKan0LtNGGhPrvupAoLeObRFjND8/tU1rEOtBp4= -github.com/ryancurrah/gomodguard v1.2.4/go.mod h1:+Kem4VjWwvFpUJRJSwa16s1tBJe+vbv02+naTow2f6M= -github.com/ryanrolds/sqlclosecheck v0.3.0 h1:AZx+Bixh8zdUBxUA1NxbxVAS78vTPq4rCb8OUZI9xFw= -github.com/ryanrolds/sqlclosecheck v0.3.0/go.mod h1:1gREqxyTGR3lVtpngyFo3hZAgk0KCtEdgEkHwDbigdA= +github.com/ryancurrah/gomodguard v1.3.0 h1:q15RT/pd6UggBXVBuLps8BXRvl5GPBcwVA7BJHMLuTw= +github.com/ryancurrah/gomodguard v1.3.0/go.mod h1:ggBxb3luypPEzqVtq33ee7YSN35V28XeGnid8dnni50= +github.com/ryanrolds/sqlclosecheck v0.5.1 h1:dibWW826u0P8jNLsLN+En7+RqWWTYrjCB9fJfSfdyCU= +github.com/ryanrolds/sqlclosecheck v0.5.1/go.mod h1:2g3dUjoS6AL4huFdv6wn55WpLIDjY7ZgUR4J8HOO/XQ= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/sanposhiho/wastedassign/v2 v2.0.6 h1:+6/hQIHKNJAUixEj6EmOngGIisyeI+T3335lYTyxRoA= -github.com/sanposhiho/wastedassign/v2 v2.0.6/go.mod h1:KyZ0MWTwxxBmfwn33zh3k1dmsbF2ud9pAAGfoLfjhtI= +github.com/sagikazarmark/crypt v0.6.0/go.mod h1:U8+INwJo3nBv1m6A/8OBXAq7Jnpspk5AxSgDyEQcea8= +github.com/sanposhiho/wastedassign/v2 v2.0.7 h1:J+6nrY4VW+gC9xFzUc+XjPD3g3wF3je/NsJFwFK7Uxc= +github.com/sanposhiho/wastedassign/v2 v2.0.7/go.mod h1:KyZ0MWTwxxBmfwn33zh3k1dmsbF2ud9pAAGfoLfjhtI= +github.com/sashamelentyev/interfacebloat v1.1.0 h1:xdRdJp0irL086OyW1H/RTZTr1h/tMEOsumirXcOJqAw= +github.com/sashamelentyev/interfacebloat v1.1.0/go.mod h1:+Y9yU5YdTkrNvoX0xHc84dxiN1iBi9+G8zZIhPVoNjQ= +github.com/sashamelentyev/usestdlibvars v1.24.0 h1:MKNzmXtGh5N0y74Z/CIaJh4GlB364l0K1RUT08WSWAc= +github.com/sashamelentyev/usestdlibvars v1.24.0/go.mod h1:9cYkq+gYJ+a5W2RPdhfaSCnTVUC1OQP/bSiiBhq3OZE= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= -github.com/securego/gosec/v2 v2.8.0/go.mod h1:hJZ6NT5TqoY+jmOsaxAV4cXoEdrMRLVaNPnSpUCvCZs= -github.com/securego/gosec/v2 v2.13.1 h1:7mU32qn2dyC81MH9L2kefnQyRMUarfDER3iQyMHcjYM= -github.com/securego/gosec/v2 v2.13.1/go.mod h1:EO1sImBMBWFjOTFzMWfTRrZW6M15gm60ljzrmy/wtHo= +github.com/securego/gosec/v2 v2.18.2 h1:DkDt3wCiOtAHf1XkiXZBhQ6m6mK/b9T/wD257R3/c+I= +github.com/securego/gosec/v2 v2.18.2/go.mod h1:xUuqSF6i0So56Y2wwohWAmB07EdBkUN6crbLlHwbyJs= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c h1:W65qqJCIOVP4jpqPQ0YvHYKwcMEMVWIzWC5iNQQfBTU= github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c/go.mod h1:/PevMnwAxekIXwN8qQyfc5gl2NlkB3CQlkizAbOkeBs= -github.com/shirou/gopsutil/v3 v3.21.5/go.mod h1:ghfMypLDrFSWN2c9cDYFLHyynQ+QUht0cv/18ZqVczw= +github.com/shirou/gopsutil/v3 v3.24.1/go.mod h1:UU7a2MSBQa+kW1uuDq8DeEBS8kmrnQwsv2b5O513rwU= +github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= +github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k= github.com/shurcooL/githubv4 v0.0.0-20221126192849-0b5c4c7994eb h1:foJysa74+t41fG7adnt+TkfcNxQUWid8R/HlXe+Mmbw= 
github.com/shurcooL/githubv4 v0.0.0-20221126192849-0b5c4c7994eb/go.mod h1:hAF0iLZy4td2EX+/8Tw+4nodhlMrwN3HupfaXj3zkGo= github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= @@ -1542,33 +2176,36 @@ github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMB github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= -github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/sivchari/containedctx v1.0.3 h1:x+etemjbsh2fB5ewm5FeLNi5bUjK0V8n0RB+Wwfd0XE= +github.com/sivchari/containedctx v1.0.3/go.mod h1:c1RDvCbnJLtH4lLcYD/GqwiBSSf4F5Qk0xld2rBqzJ4= +github.com/sivchari/nosnakecase v1.7.0 h1:7QkpWIRMe8x25gckkFd2A5Pi6Ymo0qgr4JrhGt95do8= +github.com/sivchari/nosnakecase v1.7.0/go.mod h1:CwDzrzPea40/GB6uynrNLiorAlgFRvRbFSgJx2Gs+QY= +github.com/sivchari/tenv v1.7.1 h1:PSpuD4bu6fSmtWMxSGWcvqUUgIn7k3yOJhOIzVWn8Ak= +github.com/sivchari/tenv v1.7.1/go.mod h1:64yStXKSOxDfX47NlhVwND4dHwfZDdbp2Lyl018Icvg= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/sonatard/noctx v0.0.1 h1:VC1Qhl6Oxx9vvWo3UDgrGXYCeKCe3Wbw7qAWL6FrmTY= -github.com/sonatard/noctx v0.0.1/go.mod h1:9D2D/EoULe8Yy2joDHJj7bv3sZoq9AaSb8B4lqBjiZI= -github.com/sourcegraph/go-diff v0.6.1 h1:hmA1LzxW0n1c3Q4YbrFgg4P99GSnebYa3x8gr0HZqLQ= -github.com/sourcegraph/go-diff v0.6.1/go.mod h1:iBszgVvyxdc8SFZ7gm69go2KDdt3ag071iBaWPF6cjs= +github.com/sonatard/noctx v0.0.2 h1:L7Dz4De2zDQhW8S0t+KUjY0MAQJd6SgVwhzNIc4ok00= +github.com/sonatard/noctx v0.0.2/go.mod h1:kzFz+CzWSjQ2OzIm46uJZoXuBpa2+0y3T36U18dWqIo= +github.com/sourcegraph/go-diff v0.7.0 h1:9uLlrd5T46OXs5qpp8L/MTltk0zikUGi0sNNyCpA8G0= +github.com/sourcegraph/go-diff v0.7.0/go.mod h1:iBszgVvyxdc8SFZ7gm69go2KDdt3ag071iBaWPF6cjs= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= -github.com/spf13/afero v1.4.1 h1:asw9sl74539yqavKaglDM5hFpdJVK0Y5Dr/JOgQ89nQ= -github.com/spf13/afero v1.4.1/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= -github.com/spf13/afero v1.9.2 h1:j49Hj62F0n+DaZ1dDCvhABaPNSGNkt32oRFxI33IEMw= +github.com/spf13/afero v1.8.2/go.mod h1:CtAatgMJh6bJEIs48Ay/FOnkljP3WeGUG0MC1RfAqwo= github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= +github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= 
+github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= -github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= -github.com/spf13/cobra v1.6.0 h1:42a0n6jwCot1pUmomAp4T7DeMD+20LFv4Q54pxLf2LI= -github.com/spf13/cobra v1.6.0/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= +github.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= +github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= +github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= @@ -1578,19 +2215,18 @@ github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnIn github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= -github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= -github.com/spf13/viper v1.7.1 h1:pM5oEahlgWv/WnHXpgbKz7iLIxRf65tye2Ci+XFK5sk= -github.com/spf13/viper v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= -github.com/ssgreg/nlreturn/v2 v2.1.0/go.mod h1:E/iiPB78hV7Szg2YfRgyIrk1AD6JVMTRkkxBiELzh2I= +github.com/spf13/viper v1.12.0 h1:CZ7eSOd3kZoaYDLbXnmzgQI5RlciuXBMA+18HwHRfZQ= +github.com/spf13/viper v1.12.0/go.mod h1:b6COn30jlNxbm/V2IqWiNWkJ+vZNiMNksliPCiuKtSI= github.com/ssgreg/nlreturn/v2 v2.2.1 h1:X4XDI7jstt3ySqGU86YGAURbxw3oTDPK9sPEi6YEwQ0= github.com/ssgreg/nlreturn/v2 v2.2.1/go.mod h1:E/iiPB78hV7Szg2YfRgyIrk1AD6JVMTRkkxBiELzh2I= +github.com/stbenjam/no-sprintf-host-port v0.1.1 h1:tYugd/yrm1O0dV+ThCbaKZh195Dfm07ysF0U6JQXczc= +github.com/stbenjam/no-sprintf-host-port v0.1.1/go.mod h1:TLhvtIvONRzdmkFiio4O8LHsN9N74I+PhRquPsxpL0I= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v0.0.0-20170130113145-4d4bfba8f1d1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.1.4/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -1602,55 +2238,55 @@ github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1F 
github.com/stretchr/testify v1.7.5/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/subosito/gotenv v1.3.0/go.mod h1:YzJjq/33h7nrwdY+iHMhEOEEbW0ovIz0tB6t6PwAXzs= +github.com/subosito/gotenv v1.4.0/go.mod h1:mZd6rFysKEcUhUHXJk0C/08wAgyDBFuwEYL7vWWGaGo= github.com/subosito/gotenv v1.4.1 h1:jyEFiXpy21Wm81FBN71l9VoMMV8H8jG+qIK3GCpY6Qs= github.com/subosito/gotenv v1.4.1/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= -github.com/tdakkota/asciicheck v0.0.0-20200416200610-e657995f937b/go.mod h1:yHp0ai0Z9gUljN3o0xMhYJnH/IcvkdTBOX2fmJ93JEM= -github.com/tdakkota/asciicheck v0.1.1 h1:PKzG7JUTUmVspQTDqtkX9eSiLGossXTybutHwTXuO0A= -github.com/tdakkota/asciicheck v0.1.1/go.mod h1:yHp0ai0Z9gUljN3o0xMhYJnH/IcvkdTBOX2fmJ93JEM= +github.com/t-yuki/gocover-cobertura v0.0.0-20180217150009-aaee18c8195c h1:+aPplBwWcHBo6q9xrfWdMrT9o4kltkmmvpemgIjep/8= +github.com/t-yuki/gocover-cobertura v0.0.0-20180217150009-aaee18c8195c/go.mod h1:SbErYREK7xXdsRiigaQiQkI9McGRzYMvlKYaP3Nimdk= +github.com/tdakkota/asciicheck v0.2.0 h1:o8jvnUANo0qXtnslk2d3nMKTFNlOnJjRrNcj0j9qkHM= +github.com/tdakkota/asciicheck v0.2.0/go.mod h1:Qb7Y9EgjCLJGup51gDHFzbI08/gbGhL/UVhYIPWG2rg= github.com/tenntenn/modver v1.0.1 h1:2klLppGhDgzJrScMpkj9Ujy3rXPUspSjAcev9tSEBgA= github.com/tenntenn/modver v1.0.1/go.mod h1:bePIyQPb7UeioSRkw3Q0XeMhYZSMx9B8ePqg6SAMGH0= github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3 h1:f+jULpRQGxTSkNYKJ51yaw6ChIqO+Je8UqsTKN/cDag= github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3/go.mod h1:ON8b8w4BN/kE1EOhwT0o+d62W65a6aPw1nouo9LMgyY= -github.com/tetafro/godot v1.4.7/go.mod h1:LR3CJpxDVGlYOWn3ZZg1PgNZdTUvzsZWu8xaEohUpn8= -github.com/tetafro/godot v1.4.11 h1:BVoBIqAf/2QdbFmSwAWnaIqDivZdOV0ZRwEm6jivLKw= -github.com/tetafro/godot v1.4.11/go.mod h1:LR3CJpxDVGlYOWn3ZZg1PgNZdTUvzsZWu8xaEohUpn8= -github.com/timakin/bodyclose v0.0.0-20200424151742-cb6215831a94/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk= -github.com/timakin/bodyclose v0.0.0-20210704033933-f49887972144 h1:kl4KhGNsJIbDHS9/4U9yQo1UcPQM0kOMJHn29EoH/Ro= -github.com/timakin/bodyclose v0.0.0-20210704033933-f49887972144/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk= -github.com/tklauser/go-sysconf v0.3.4/go.mod h1:Cl2c8ZRWfHD5IrfHo9VN+FX9kCFjIOyVklgXycLB6ek= -github.com/tklauser/numcpus v0.2.1/go.mod h1:9aU+wOc6WjUIZEwWMP62PL/41d65P+iks1gBkr4QyP8= +github.com/tetafro/godot v1.4.16 h1:4ChfhveiNLk4NveAZ9Pu2AN8QZ2nkUGFuadM9lrr5D0= +github.com/tetafro/godot v1.4.16/go.mod h1:2oVxTBSftRTh4+MVfUaUXR6bn2GDXCaMcOG4Dk3rfio= +github.com/timakin/bodyclose v0.0.0-20230421092635-574207250966 h1:quvGphlmUVU+nhpFa4gg4yJyTRJ13reZMDHrKwYw53M= +github.com/timakin/bodyclose v0.0.0-20230421092635-574207250966/go.mod h1:27bSVNWSBOHm+qRp1T9qzaIpsWEP6TbUnei/43HK+PQ= +github.com/timonwong/loggercheck v0.9.4 h1:HKKhqrjcVj8sxL7K77beXh0adEm6DLjV/QOGeMXEVi4= +github.com/timonwong/loggercheck v0.9.4/go.mod 
h1:caz4zlPcgvpEkXgVnAJGowHAMW2NwHaNlpS8xDbVhTg= +github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= +github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20200427203606-3cfed13b9966/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tomarrell/wrapcheck/v2 v2.1.0/go.mod h1:crK5eI4RGSUrb9duDTQ5GqcukbKZvi85vX6nbhsBAeI= -github.com/tomarrell/wrapcheck/v2 v2.3.0 h1:i3DNjtyyL1xwaBQOsPPk8LAcpayWfQv2rxNi9b/eEx4= -github.com/tomarrell/wrapcheck/v2 v2.3.0/go.mod h1:aF5rnkdtqNWP/gC7vPUO5pKsB0Oac2FDTQP4F+dpZMU= +github.com/tomarrell/wrapcheck/v2 v2.8.1 h1:HxSqDSN0sAt0yJYsrcYVoEeyM4aI9yAm3KQpIXDJRhQ= +github.com/tomarrell/wrapcheck/v2 v2.8.1/go.mod h1:/n2Q3NZ4XFT50ho6Hbxg+RV1uyo2Uow/Vdm9NQcl5SE= github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce/go.mod h1:o8v6yHRoik09Xen7gje4m9ERNah1d1PPsVq1VEx9vE4= -github.com/tommy-muehle/go-mnd/v2 v2.4.0/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw= github.com/tommy-muehle/go-mnd/v2 v2.5.1 h1:NowYhSdyE/1zwK9QCLeRb6USWdoif80Ie+v+yU8u1Zw= github.com/tommy-muehle/go-mnd/v2 v2.5.1/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/ulikunitz/xz v0.5.10 h1:t92gobL9l3HE202wg3rlk19F6X+JOxl9BBrCCMYEYd8= github.com/ulikunitz/xz v0.5.10/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= -github.com/ultraware/funlen v0.0.3 h1:5ylVWm8wsNwH5aWo9438pwvsK0QiqVuUrt9bn7S/iLA= -github.com/ultraware/funlen v0.0.3/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA= -github.com/ultraware/whitespace v0.0.4/go.mod h1:aVMh/gQve5Maj9hQ/hg+F75lr/X5A89uZnzAmWSineA= -github.com/ultraware/whitespace v0.0.5 h1:hh+/cpIcopyMYbZNVov9iSxvJU3OYQg78Sfaqzi/CzI= -github.com/ultraware/whitespace v0.0.5/go.mod h1:aVMh/gQve5Maj9hQ/hg+F75lr/X5A89uZnzAmWSineA= +github.com/ultraware/funlen v0.1.0 h1:BuqclbkY6pO+cvxoq7OsktIXZpgBSkYTQtmwhAK81vI= +github.com/ultraware/funlen v0.1.0/go.mod h1:XJqmOQja6DpxarLj6Jj1U7JuoS8PvL4nEqDaQhy22p4= +github.com/ultraware/whitespace v0.1.0 h1:O1HKYoh0kIeqE8sFqZf1o0qbORXUCOQFrlaQyZsczZw= +github.com/ultraware/whitespace v0.1.0/go.mod h1:/se4r3beMFNmewJ4Xmz0nMQ941GJt+qmSHGP9emHYe0= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/uudashr/gocognit v1.0.1/go.mod h1:j44Ayx2KW4+oB6SWMv8KsmHzZrOInQav7D3cQMJ5JUM= -github.com/uudashr/gocognit v1.0.6 h1:2Cgi6MweCsdB6kpcVQp7EW4U23iBFQWfTXiWlyp842Y= -github.com/uudashr/gocognit v1.0.6/go.mod h1:nAIUuVBnYU7pcninia3BHOvQkpQCeO76Uscky5BOwcY= +github.com/uudashr/gocognit v1.1.2 h1:l6BAEKJqQH2UpKAPKdMfZf5kE4W/2xk8pfU1OVLvniI= +github.com/uudashr/gocognit v1.1.2/go.mod h1:aAVdLURqcanke8h3vg35BC++eseDm66Z7KmchI5et4k= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= -github.com/valyala/fasthttp v1.16.0/go.mod 
h1:YOKImeEosDdBPnxc0gy7INqi3m1zK6A+xl6TwOBhHCA= -github.com/valyala/quicktemplate v1.6.3/go.mod h1:fwPzK2fHuYEODzJ9pkw0ipCPNHZ2tD5KW4lOuSdPKzY= -github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= +github.com/valyala/fasthttp v1.30.0/go.mod h1:2rsYD01CKFrjjsvFxx75KlEUNpWNBY9JWD3K/7o2Cus= +github.com/valyala/quicktemplate v1.7.0/go.mod h1:sqKJnoaOF88V07vkO+9FL8fb9uZg/VPSJnLYn+LmLk8= +github.com/valyala/tcplisten v1.0.0/go.mod h1:T0xQ8SeCZGxckz9qRXTfG43PvQ/mcWh7FwZEA7Ioqkc= github.com/viki-org/dnscache v0.0.0-20130720023526-c70c1f23c5d8/go.mod h1:dniwbG03GafCjFohMDmz6Zc6oCuiqgH6tGNyXTkHzXE= github.com/vmihailenco/msgpack v3.3.3+incompatible h1:wapg9xDUZDzGCNFlwc5SqI1rvcciqcxEHac4CYj89xI= github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= @@ -1660,12 +2296,17 @@ github.com/vmihailenco/tagparser v0.1.1 h1:quXMXlA39OCbd2wAdTsGDlK9RkOk6Wuw+x37w github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= github.com/xanzy/ssh-agent v0.3.0 h1:wUMzuKtKilRgBAD1sUb8gOwwRr2FGoBVumcjoOACClI= github.com/xanzy/ssh-agent v0.3.0/go.mod h1:3s9xbODqPuuhK9JV1R321M/FlMZSBvE5aY6eAcqrDh0= +github.com/xen0n/gosmopolitan v1.2.2 h1:/p2KTnMzwRexIW8GlKawsTWOxn7UHA+jCMF/V8HHtvU= +github.com/xen0n/gosmopolitan v1.2.2/go.mod h1:7XX7Mj61uLYrj0qmeN0zi7XDon9JRAEhYQqAPLVNTeg= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xo/terminfo v0.0.0-20210125001918-ca9a967f8778/go.mod h1:2MuV+tbUrU1zIOPMxZ5EncGwgmMJsa+9ucAQZXxsObs= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= -github.com/yeya24/promlinter v0.1.0/go.mod h1:rs5vtZzeBHqqMwXqFScncpCF6u06lezhZepno9AB1Oc= +github.com/yagipy/maintidx v1.0.0 h1:h5NvIsCz+nRDapQ0exNv4aJ0yXSI0420omVANTv3GJM= +github.com/yagipy/maintidx v1.0.0/go.mod h1:0qNf/I/CCZXSMhsRsrEPDZ+DkekpKLXAJfsTACwgXLk= github.com/yeya24/promlinter v0.2.0 h1:xFKDQ82orCU5jQujdaD8stOHiv8UN68BSdn2a8u8Y3o= github.com/yeya24/promlinter v0.2.0/go.mod h1:u54lkmBOZrpEbQQ6gox2zWKKLKu2SGe+2KOiextY+IA= +github.com/ykadowak/zerologlint v0.1.5 h1:Gy/fMz1dFQN9JZTPjv1hxEk+sRWm05row04Yoolgdiw= +github.com/ykadowak/zerologlint v0.1.5/go.mod h1:KaUskqF3e/v59oPmdq1U1DnKcuHokl2/K1U4pmIELKg= github.com/yudai/gojsondiff v1.0.0/go.mod h1:AY32+k2cwILAkW1fbgxQ5mUmMiZFgLIV+FBNExI05xg= github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82/go.mod h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM= github.com/yudai/pp v2.0.1+incompatible/go.mod h1:PuxR/8QJ7cyCkFp/aUDS+JY727OFEZkTdatxwunjIkc= @@ -1676,6 +2317,7 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= github.com/zclconf/go-cty v1.0.0/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLEih+O3s= github.com/zclconf/go-cty v1.1.0/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLEih+O3s= github.com/zclconf/go-cty v1.2.0/go.mod h1:hOPWgoHbaTUnI5k4D2ld+GRpFJSCe6bCM7m1q/N4PQ8= @@ -1687,10 +2329,21 @@ github.com/zclconf/go-cty-yaml v1.0.2 h1:dNyg4QLTrv2IfJpm7Wtxi55ed5gLGOlPrZ6kMd5 github.com/zclconf/go-cty-yaml 
v1.0.2/go.mod h1:IP3Ylp0wQpYm50IHK8OZWKMu6sPJIUgKa8XhiVHura0= github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA= -go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +gitlab.com/bosi/decorder v0.4.1 h1:VdsdfxhstabyhZovHafFw+9eJ6eU0d2CkFNJcZz/NU4= +gitlab.com/bosi/decorder v0.4.1/go.mod h1:jecSqWUew6Yle1pCr2eLWTensJMmsxHsBwt+PVbkAqA= +go-simpler.org/assert v0.7.0 h1:OzWWZqfNxt8cLS+MlUp6Tgk1HjPkmgdKBq9qvy8lZsA= +go-simpler.org/assert v0.7.0/go.mod h1:74Eqh5eI6vCK6Y5l3PI8ZYFXG4Sa+tkr70OIPJAUr28= +go-simpler.org/musttag v0.8.0 h1:DR4UTgetNNhPRNo02rkK1hwDTRzAPotN+ZqYpdtEwWc= +go-simpler.org/musttag v0.8.0/go.mod h1:fiNdCkXt2S6je9Eblma3okjnlva9NT1Eg/WUt19rWu8= +go-simpler.org/sloglint v0.4.0 h1:UVJuUJo63iNQNFEOtZ6o1xAgagVg/giVLLvG9nNLobI= +go-simpler.org/sloglint v0.4.0/go.mod h1:v6zJ++j/thFPhefs2wEXoCKwT10yo5nkBDYRCXyqgNQ= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= go.etcd.io/etcd v0.0.0-20200513171258-e048e166ab9c/go.mod h1:xCI7ZzBfRuGgBXyXO6yfWfDmlWd35khcWpUa4L0xI/k= +go.etcd.io/etcd/api/v3 v3.5.4/go.mod h1:5GB2vv4A4AOn3yk7MftYGHkUfGtDHnEraIjym4dYz5A= +go.etcd.io/etcd/client/pkg/v3 v3.5.4/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= +go.etcd.io/etcd/client/v2 v2.305.4/go.mod h1:Ud+VUwIi9/uQHOMA+4ekToJ12lTxlv0zB/+DHwTGEbU= +go.etcd.io/etcd/client/v3 v3.5.4/go.mod h1:ZaRkVgBZC+L+dLCjTcF1hRXpgZXQPOvnA/Ak/gq3kiY= go.mozilla.org/mozlog v0.0.0-20170222151521-4bb13139d403/go.mod h1:jHoPAGnDrCy6kaI2tAze5Prf0Nr0w/oNkROt2lw3n3o= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= @@ -1707,15 +2360,23 @@ go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/goleak v1.1.11 h1:wy28qYRKZgnJTxGxvye5/wgWr1EKjmUDGYox5mGlRlI= +go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= +go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= +go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= golang.org/x/crypto v0.0.0-20180501155221-613d6eafa307/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= 
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -1723,19 +2384,28 @@ golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0= +golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I= +golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= +golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= +golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= +golang.org/x/crypto v0.15.0/go.mod h1:4ChreQoLWfG3xLDer1WdlH5NdlQ3+mwnQq1YTKY+72g= +golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc= golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -1753,13 +2423,15 @@ 
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw= -golang.org/x/exp v0.0.0-20220827204233-334a2380cb91 h1:tnebWN09GYg9OLPss1KXj8txwZc6X6uMr6VFdcGNbHw= golang.org/x/exp v0.0.0-20220827204233-334a2380cb91/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE= -golang.org/x/exp/typeparams v0.0.0-20220218215828-6cf2b201936e/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= +golang.org/x/exp v0.0.0-20240103183307-be819d1f06fc h1:ao2WRsKSzW6KuUY9IWPwWahcHCgR0s52IfwutMfEbdM= +golang.org/x/exp v0.0.0-20240103183307-be819d1f06fc/go.mod h1:iRJReGqOEeBhDZGkGbynYwcHlctCvnjTYIamk7uXpHI= golang.org/x/exp/typeparams v0.0.0-20220428152302-39d4317da171/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= -golang.org/x/exp/typeparams v0.0.0-20220613132600-b0d781184e0d/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= -golang.org/x/exp/typeparams v0.0.0-20220827204233-334a2380cb91 h1:Ic/qN6TEifvObMGQy72k0n1LlJr7DjWWEi+MOsDOiSk= -golang.org/x/exp/typeparams v0.0.0-20220827204233-334a2380cb91/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= +golang.org/x/exp/typeparams v0.0.0-20221208152030-732eee02a75a/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= +golang.org/x/exp/typeparams v0.0.0-20230203172020-98cc5a0785f9/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= +golang.org/x/exp/typeparams v0.0.0-20230307190834-24139beb5833/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= +golang.org/x/exp/typeparams v0.0.0-20231219180239-dc181d75b848 h1:UhRVJ0i7bF9n/Hd8YjW3eKjlPVBHzbQdxrBgjbSKl64= +golang.org/x/exp/typeparams v0.0.0-20231219180239-dc181d75b848/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= @@ -1800,19 +2472,23 @@ golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI= golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.8.0 h1:LUYupSeNrTNCGzR/hVBk2NHZO4hXcVaW1k4Qx7rjPx8= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.9.0 h1:KENHtAZL2y3NLMYZeHY9DW8HW8V+kQyJsY/V9JlKvCs= golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.11.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.14.0/go.mod 
h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8= +golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180811021610-c39426892332/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1842,14 +2518,12 @@ golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/ golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= @@ -1857,8 +2531,10 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210326060303-6b1517762897/go.mod h1:uSPa2vr4CLtc/ILN5odXGNXS6mhrKVzTaCXzk9m6W3k= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= golang.org/x/net 
v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210510120150-4163338589ed/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= @@ -1868,6 +2544,7 @@ golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220520000938-2e3eb7b945c2/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= @@ -1877,6 +2554,7 @@ golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfS golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= +golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= @@ -1884,6 +2562,14 @@ golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.11.0/go.mod h1:2L/ixqYpgIVXmeoSA/4Lu7BzTG4KIyPIryS4IsOd1oQ= +golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= +golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= +golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= +golang.org/x/net v0.16.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/net v0.18.0/go.mod h1:/czyP5RqHAH4odGYxBJ1qz0+CE5WZ+2j1YgoEo8F2jQ= +golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo= golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -1901,6 +2587,7 @@ golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 
v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= @@ -1917,6 +2604,11 @@ golang.org/x/oauth2 v0.5.0/go.mod h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw= golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE= +golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI= +golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= +golang.org/x/oauth2 v0.13.0/go.mod h1:/JMhi4ZRXAf4HG9LiNmxvk+45+96RUlVThiH8FzNBn0= +golang.org/x/oauth2 v0.14.0/go.mod h1:lAtNWgaWfL4cm7j2OV8TxGi9Qb7ECORx8DktCY74OwM= +golang.org/x/oauth2 v0.15.0/go.mod h1:q48ptWNTY5XWf+JNten23lcvHpLJ0ZSxF5ttTHKVCAM= golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ= golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1931,18 +2623,22 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220513210516-0976fa681c29/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220819030929-7fc1605a5dde/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI= golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= +golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys 
v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1960,6 +2656,7 @@ golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1972,6 +2669,7 @@ golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1983,7 +2681,6 @@ golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1991,19 +2688,21 @@ golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210217105451-b926d437f341/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210304124612-50617c2ba197/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -2017,8 +2716,10 @@ golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211105183446-c75c47738b0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -2036,12 +2737,12 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys 
v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220627191245-f75cf1eec38b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220702020025-31831981b65f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220825204002-c680a09ffe64/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -2051,6 +2752,13 @@ golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= @@ -2064,6 +2772,13 @@ golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.9.0/go.mod h1:M6DEAAIenWoTxdKrOltXcmDY3rSplQUkrvaDU5FcQyo= +golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= +golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= +golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= +golang.org/x/term v0.14.0/go.mod h1:TySc+nGkYR6qt8km8wUhuFRTVSMIX3XPR58y2lC8vww= +golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= golang.org/x/term v0.16.0 h1:m+B6fahuftsE9qjo0VWp2FW0mB3MTJvR0BaMQrq0pmE= golang.org/x/term v0.16.0/go.mod 
h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -2082,6 +2797,10 @@ golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -2092,23 +2811,20 @@ golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= +golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190110163146-51295c7ec13a/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190228203856-589c23e65e65/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190307163923-6a08e3108db3/go.mod h1:25r3+/G6/xytQM8iWZKq3Hn0kr0rgFKPUNVEL/dr3z4= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190311215038-5c2858a9cfe5/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190321232350-e250d351ecad/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190322203728-c1a832b0ad89/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod 
h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= @@ -2116,16 +2832,15 @@ golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190910044552-dd2b5c81c578/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190916130336-e45ffcd953cc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190927191325-030b2cf1153e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191010075000-0337d82405ff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -2135,7 +2850,6 @@ golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117220505-0cba7a3a9ee9/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= @@ -2148,17 +2862,12 @@ golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjs golang.org/x/tools v0.0.0-20200324003944-a576cf524670/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200329025819-fd4102a86c65/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools 
v0.0.0-20200414032229-332987a829c3/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200422022333-3d57cf2e726e/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200426102838-f3a5411a4c3b/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200622203043-20e05c1c8ffa/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200624225443-88f3c62a19ff/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200625211823-6506e20df31f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200626171337-aa94e735be7f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200630154851-b2d8b0336632/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200706234117-b22de6825cf7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= @@ -2168,25 +2877,19 @@ golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc golang.org/x/tools v0.0.0-20200812195022-5ae4c3c160a0/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200820010801-b793a1359eac/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200831203904-5a2aa26beb65/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= golang.org/x/tools v0.0.0-20201001104356-43ebab892c4c/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= -golang.org/x/tools v0.0.0-20201002184944-ecd9fd270d5d/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= -golang.org/x/tools v0.0.0-20201011145850-ed2f50202694/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= golang.org/x/tools v0.0.0-20201023174141-c8cfbd0f21e6/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201028025901-8cd080b735b3/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201028111035-eafbe7b904eb/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201114224030-61ea331ec02b/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201118003311-bd56c0adb394/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201124115921-2c860bdd6e78/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod 
h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201230224404-63754364767c/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210101214203-2dba1e4ea05c/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210104081019-d8d6ddbec6ee/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.1-0.20210205202024-ef80cdb6ec6d/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU= @@ -2196,18 +2899,32 @@ golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.9-0.20211228192929-ee1ca4ffc4da/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= +golang.org/x/tools v0.1.8/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= -golang.org/x/tools v0.1.11-0.20220513221640-090b14e8501f/go.mod h1:SgwaegtQh8clINPpECJMqnxLv9I09HLqnW3RMqW0CA4= golang.org/x/tools v0.1.11/go.mod h1:SgwaegtQh8clINPpECJMqnxLv9I09HLqnW3RMqW0CA4= -golang.org/x/tools v0.1.12-0.20220628192153-7743d1d949f1/go.mod h1:SgwaegtQh8clINPpECJMqnxLv9I09HLqnW3RMqW0CA4= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA= golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= -golang.org/x/tools v0.6.0 h1:BOw41kyTf3PuCW1pVQf8+Cyg8pMlkYB1oo9iJ6D/lKM= +golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ= +golang.org/x/tools v0.5.0/go.mod h1:N+Kgy78s5I24c24dU8OfWNEotWjutIs8SnJvn5IDq+k= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.7.0 h1:W4OVu8VVOaIO0yzWMNdepAulS7YfoS3Zabrm8DOXXU4= golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= +golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4= +golang.org/x/tools v0.9.1/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc= +golang.org/x/tools v0.9.3/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc= +golang.org/x/tools v0.10.0/go.mod h1:UJwyiVBsOA2uwvK/e5OY3GTpDUJriEd+/YlqAwLPmyM= +golang.org/x/tools v0.11.0/go.mod h1:anzJrxPjNtfgiYQYirP2CPGzGLxrH2u2QBhn6Bf3qY8= +golang.org/x/tools v0.11.1/go.mod h1:anzJrxPjNtfgiYQYirP2CPGzGLxrH2u2QBhn6Bf3qY8= +golang.org/x/tools v0.12.0/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM= +golang.org/x/tools v0.12.1-0.20230825192346-2191a27a6dc5/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM= +golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= +golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg= +golang.org/x/tools v0.15.0/go.mod 
h1:hpksKq4dtpQWS1uQ61JkdqWM3LscIS6Slf+VVkm+wQk= +golang.org/x/tools v0.16.0/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0= +golang.org/x/tools v0.16.1/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0= +golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc= +golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -2256,6 +2973,7 @@ google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6 google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= +google.golang.org/api v0.59.0/go.mod h1:sT2boj7M9YJxZzgeZqXogmhfmRWDtPzT31xkieUbuZU= google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= @@ -2266,6 +2984,7 @@ google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69 google.golang.org/api v0.77.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= +google.golang.org/api v0.81.0/go.mod h1:FA6Mb/bZxj706H2j+j2d6mHEEaHBmbbWnkfvmorOCko= google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= google.golang.org/api v0.85.0/go.mod h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6FO2g= google.golang.org/api v0.90.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= @@ -2287,8 +3006,14 @@ google.golang.org/api v0.114.0/go.mod h1:ifYI2ZsFK6/uGddGfAD5BMxlnkBqCmqHSDUVi45 google.golang.org/api v0.118.0/go.mod h1:76TtD3vkgmZ66zZzp72bUUklpmQmKlhh6sYtIjYK+5E= google.golang.org/api v0.122.0/go.mod h1:gcitW0lvnyWjSp9nKxAbdHKIZ6vF4aajGueeslZOyms= google.golang.org/api v0.124.0/go.mod h1:xu2HQurE5gi/3t1aFCvhPD781p0a3p11sdunTJ2BlP4= -google.golang.org/api v0.126.0 h1:q4GJq+cAdMAC7XP7njvQ4tvohGLiSlytuL4BQxbIZ+o= +google.golang.org/api v0.125.0/go.mod h1:mBwVAtz+87bEN6CbA1GtZPDOqY2R5ONPqJeIlvyo4Aw= google.golang.org/api v0.126.0/go.mod h1:mBwVAtz+87bEN6CbA1GtZPDOqY2R5ONPqJeIlvyo4Aw= +google.golang.org/api v0.128.0/go.mod h1:Y611qgqaE92On/7g65MQgxYul3c0rEB894kniWLY750= +google.golang.org/api v0.139.0/go.mod h1:CVagp6Eekz9CjGZ718Z+sloknzkDJE7Vc1Ckj9+viBk= +google.golang.org/api v0.149.0/go.mod h1:Mwn1B7JTXrzXtnvmzQE2BD6bYZQ8DShKZDZbeN9I7qI= +google.golang.org/api v0.150.0/go.mod h1:ccy+MJ6nrYFgE3WgRx/AMXOxOmU8Q4hSa+jjibzhxcg= +google.golang.org/api v0.152.0 h1:t0r1vPnfMc260S2Ci+en7kfCZaLOPs5KI0sVV/6jZrY= +google.golang.org/api v0.152.0/go.mod h1:3qNJX5eOmhiWYc67jRA/3GsDw97UFb5ivv7Y2PrriAY= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -2363,6 +3088,8 @@ google.golang.org/genproto 
v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEc google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211008145708-270636b82663/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211028162531-8db9c33dc351/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= @@ -2383,6 +3110,7 @@ google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220519153652-3a47de7e79bd/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= @@ -2439,19 +3167,64 @@ google.golang.org/genproto v0.0.0-20230331144136-dcfb400f0633/go.mod h1:UUQDJDOl google.golang.org/genproto v0.0.0-20230403163135-c38d8f061ccd/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= google.golang.org/genproto v0.0.0-20230525234025-438c736192d0/go.mod h1:9ExIQyXL5hZrHzQceCwuSYwZZ5QZBazOcprJ5rgs3lY= -google.golang.org/genproto v0.0.0-20230530153820-e85fd2cbaebc h1:8DyZCyvI8mE1IdLy/60bS+52xfymkE72wv1asokgtao= +google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54/go.mod h1:zqTuNwFlFRsw5zIts5VnzLQxSRqh+CGOTVMlYbY0Eyk= +google.golang.org/genproto v0.0.0-20230526203410-71b5a4ffd15e/go.mod h1:zqTuNwFlFRsw5zIts5VnzLQxSRqh+CGOTVMlYbY0Eyk= google.golang.org/genproto v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:xZnkP7mREFX5MORlOPEzLMr+90PPZQ2QWzrVTWfAq64= +google.golang.org/genproto v0.0.0-20230629202037-9506855d4529/go.mod h1:xZnkP7mREFX5MORlOPEzLMr+90PPZQ2QWzrVTWfAq64= +google.golang.org/genproto v0.0.0-20230706204954-ccb25ca9f130/go.mod h1:O9kGHb51iE/nOGvQaDUuadVYqovW56s5emA88lQnj6Y= +google.golang.org/genproto v0.0.0-20230711160842-782d3b101e98/go.mod h1:S7mY02OqCJTD0E1OiQy1F72PWFB4bZJ87cAtLPYgDR0= +google.golang.org/genproto v0.0.0-20230726155614-23370e0ffb3e/go.mod h1:0ggbjUrZYpy1q+ANUS30SEoGZ53cdfwtbuG7Ptgy108= +google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5/go.mod h1:oH/ZOT02u4kWEp7oYBGYFFkCdKS/uYR9Z7+0/xuuFp8= +google.golang.org/genproto v0.0.0-20230821184602-ccc8af3d0e93/go.mod h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4= 
+google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d/go.mod h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4= +google.golang.org/genproto v0.0.0-20230913181813-007df8e322eb/go.mod h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4= +google.golang.org/genproto v0.0.0-20230920204549-e6e6cdab5c13/go.mod h1:CCviP9RmpZ1mxVr8MUjCnSiY09IbAXZxhLE6EhHIdPU= +google.golang.org/genproto v0.0.0-20231002182017-d307bd883b97/go.mod h1:t1VqOqqvce95G3hIDCT5FeO3YUc6Q4Oe24L/+rNMxRk= +google.golang.org/genproto v0.0.0-20231012201019-e917dd12ba7a/go.mod h1:EMfReVxb80Dq1hhioy0sOsY9jCE46YDgHlJ7fWVUWRE= +google.golang.org/genproto v0.0.0-20231016165738-49dd2c1f3d0b/go.mod h1:CgAqfJo+Xmu0GwA0411Ht3OU3OntXwsGmrmjI8ioGXI= +google.golang.org/genproto v0.0.0-20231030173426-d783a09b4405/go.mod h1:3WDQMjmJk36UQhjQ89emUzb1mdaHcPeeAh4SCBKznB4= +google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17 h1:wpZ8pe2x1Q3f2KyT5f8oP/fa9rHAKgFPr/HZdNuS+PQ= +google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:J7XzRzVy1+IPwWHZUzoD0IccYZIrXILAQpc+Qy9CMhY= google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a/go.mod h1:ts19tUU+Z0ZShN1y3aPyq2+O3d5FUNNgT6FtOzmrNn8= google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= google.golang.org/genproto/googleapis/api v0.0.0-20230526203410-71b5a4ffd15e/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= -google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc h1:kVKPf/IiYSBWEWtkIn6wZXwWGCnLKcC8oWfZvXjsGnM= google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= +google.golang.org/genproto/googleapis/api v0.0.0-20230629202037-9506855d4529/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= +google.golang.org/genproto/googleapis/api v0.0.0-20230706204954-ccb25ca9f130/go.mod h1:mPBs5jNgx2GuQGvFwUvVKqtn6HsUw9nP64BedgvqEsQ= +google.golang.org/genproto/googleapis/api v0.0.0-20230711160842-782d3b101e98/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ= +google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ= +google.golang.org/genproto/googleapis/api v0.0.0-20230803162519-f966b187b2e5/go.mod h1:5DZzOUPCLYL3mNkQ0ms0F3EuUNZ7py1Bqeq6sxzI7/Q= +google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk= +google.golang.org/genproto/googleapis/api v0.0.0-20230913181813-007df8e322eb/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk= +google.golang.org/genproto/googleapis/api v0.0.0-20230920204549-e6e6cdab5c13/go.mod h1:RdyHbowztCGQySiCvQPgWQWgWhGnouTdCflKoDBt32U= +google.golang.org/genproto/googleapis/api v0.0.0-20231002182017-d307bd883b97/go.mod h1:iargEX0SFPm3xcfMI0d1domjg0ZF4Aa0p2awqyxhvF0= +google.golang.org/genproto/googleapis/api v0.0.0-20231012201019-e917dd12ba7a/go.mod h1:SUBoKXbI1Efip18FClrQVGjWcyd0QZd8KkvdP34t7ww= +google.golang.org/genproto/googleapis/api v0.0.0-20231016165738-49dd2c1f3d0b/go.mod h1:IBQ646DjkDkvUIsVq/cc03FUFQ9wbZu7yE396YcL870= +google.golang.org/genproto/googleapis/api v0.0.0-20231030173426-d783a09b4405/go.mod h1:oT32Z4o8Zv2xPQTg0pbVaPr0MPOH6f14RgXt7zfIpwg= +google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17 h1:JpwMPBpFN3uKhdaekDpiNlImDdkUAyiJ6ez/uxGaUSo= +google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17/go.mod 
h1:0xJLfVdJqpAPl8tDg1ujOCGzx6LFLttXT5NhllGOXY4= google.golang.org/genproto/googleapis/bytestream v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:ylj+BE99M198VPbBh6A8d9n3w8fChvyLK3wwBOjXBFA= +google.golang.org/genproto/googleapis/bytestream v0.0.0-20230807174057-1744710a1577/go.mod h1:NjCQG/D8JandXxM57PZbAJL1DCNL6EypA0vPPwfsc7c= +google.golang.org/genproto/googleapis/bytestream v0.0.0-20231030173426-d783a09b4405/go.mod h1:GRUCuLdzVqZte8+Dl/D4N25yLzcGqqWaYkeVOwulFqw= +google.golang.org/genproto/googleapis/bytestream v0.0.0-20231120223509-83a465c0220f/go.mod h1:iIgEblxoG4klcXsG0d9cpoxJ4xndv6+1FkDROCHhPRI= google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234015-3fc162c6f38a/go.mod h1:xURIpW9ES5+/GZhnV6beoEtxQrnkRGIfP5VQG2tCBLc= google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= google.golang.org/genproto/googleapis/rpc v0.0.0-20230526203410-71b5a4ffd15e/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc h1:XSJ8Vk1SWuNr8S18z1NZSziL0CPIXLCCMDOEFtHBOFc= google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230629202037-9506855d4529/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230706204954-ccb25ca9f130/go.mod h1:8mL13HKkDa+IuJ8yruA3ci0q+0vsUz4m//+ottjwS5o= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230731190214-cbb8c96f2d6d/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230803162519-f966b187b2e5/go.mod h1:zBEcrKX2ZOcEkHWxBPAIvYUWOKKMIhYcmNiUIu2ji3I= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230920183334-c177e329c48b/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13/go.mod h1:KSqppvjFjtoCI+KGd4PELB0qLNxdJHRGqRI09mB6pQA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97/go.mod h1:v7nGkzlmW8P3n/bKmWBn2WpBjpOEx8Q6gMueudAmKfY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231012201019-e917dd12ba7a/go.mod h1:4cYg8o5yUbm77w8ZX00LhMVNl/YVBFJRYWDc0uYWMs0= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231016165738-49dd2c1f3d0b/go.mod h1:swOH3j0KzcDDgGUWr+SNpyTen5YrXjS3eyPzFYKc6lc= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231030173426-d783a09b4405/go.mod h1:67X1fPuzjcrkymZzZV1vvkFeTn2Rvc6lYF9MYFGCcwE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f h1:ultW7fxlIvee4HYrtnaRPon9HpEgFk5zYpmfMgtKB5I= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f/go.mod h1:L9KNLi232K1/xB6f7AlSX692koaRnKaWSR0stBki0Yc= google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= @@ -2499,8 +3272,13 @@ google.golang.org/grpc v1.52.3/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5v google.golang.org/grpc v1.53.0/go.mod 
h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g= google.golang.org/grpc v1.55.0/go.mod h1:iYEXKGkEBhg1PjZQvoYEVPTDkHo1/bjTnfwTeGONTY8= -google.golang.org/grpc v1.56.3 h1:8I4C0Yq1EjstUzUJzpcRVbuYA2mODtEmpWiQoN/b2nc= -google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= +google.golang.org/grpc v1.56.1/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= +google.golang.org/grpc v1.56.2/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= +google.golang.org/grpc v1.57.0/go.mod h1:Sd+9RMTACXwmub0zcNY2c4arhtrbBYD1AUHI/dt16Mo= +google.golang.org/grpc v1.58.2/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= +google.golang.org/grpc v1.58.3/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= +google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= +google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= @@ -2534,7 +3312,8 @@ gopkg.in/cheggaaa/pb.v1 v1.0.28/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qS gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= -gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.66.4/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.66.6/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= @@ -2556,6 +3335,7 @@ gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -2566,20 +3346,24 @@ honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= -honnef.co/go/tools v0.2.0/go.mod h1:lPVVZ2BS5TfnjLyizF7o7hv7j9/L+8cZY2hLyjP9cGY= -honnef.co/go/tools v0.3.3 h1:oDx7VAwstgpYpb3wv0oxiZlxY+foCpRAwY7Vk6XpAgA= -honnef.co/go/tools v0.3.3/go.mod h1:jzwdWgg7Jdq75wlfblQxO4neNaFFSvgc1tD5Wv8U0Yw= +honnef.co/go/tools v0.4.6 h1:oFEHCKeID7to/3autwsWfnuv69j3NsfcXbvJKuIcep8= 
+honnef.co/go/tools v0.4.6/go.mod h1:+rnGS1THNh8zMwnd2oVOTL9QF6vmfyG6ZXBULae2uc0= lukechampine.com/uint128 v1.1.1/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= lukechampine.com/uint128 v1.2.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= modernc.org/cc/v3 v3.36.0/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= modernc.org/cc/v3 v3.36.2/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= modernc.org/cc/v3 v3.36.3/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= +modernc.org/cc/v3 v3.37.0/go.mod h1:vtL+3mdHx/wcj3iEGz84rQa8vEqR6XM84v5Lcvfph20= +modernc.org/cc/v3 v3.40.0/go.mod h1:/bTg4dnWkSXowUO6ssQKnOV0yMVxDYNIsIrzqTFDGH0= modernc.org/ccgo/v3 v3.0.0-20220428102840-41399a37e894/go.mod h1:eI31LL8EwEBKPpNpA4bU1/i+sKOwOrQy8D87zWUcRZc= modernc.org/ccgo/v3 v3.0.0-20220430103911-bc99d88307be/go.mod h1:bwdAnOoaIt8Ax9YdWGjxWsdkPcZyRPHqrOvJxaKAKGw= +modernc.org/ccgo/v3 v3.0.0-20220904174949-82d86e1b6d56/go.mod h1:YSXjPL62P2AMSxBphRHPn7IkzhVHqkvOnRKAKh+W6ZI= modernc.org/ccgo/v3 v3.16.4/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ= modernc.org/ccgo/v3 v3.16.6/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ= modernc.org/ccgo/v3 v3.16.8/go.mod h1:zNjwkizS+fIFDrDjIAgBSCLkWbJuHF+ar3QRn+Z9aws= modernc.org/ccgo/v3 v3.16.9/go.mod h1:zNMzC9A9xeNUepy6KuZBbugn3c0Mc9TeiJO4lgvkJDo= +modernc.org/ccgo/v3 v3.16.13-0.20221017192402-261537637ce8/go.mod h1:fUB3Vn0nVPReA+7IG7yZDfjv1TMWjhQP8gCxrFAtL5g= +modernc.org/ccgo/v3 v3.16.13/go.mod h1:2Quk+5YgpImhPjv2Qsob1DnZ/4som1lJTodubIcoUkY= modernc.org/ccorpus v1.11.6/go.mod h1:2gEUTrWqdpH2pXsmTM1ZkjeSrUWDpjMu2T6m29L/ErQ= modernc.org/httpfs v1.0.6/go.mod h1:7dosgurJGp0sPaRanU53W4xZYKh14wfzX420oZADeHM= modernc.org/libc v0.0.0-20220428101251-2d5f3daf273b/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA= @@ -2589,32 +3373,43 @@ modernc.org/libc v1.16.17/go.mod h1:hYIV5VZczAmGZAnG15Vdngn5HSF5cSkbvfz2B7GRuVU= modernc.org/libc v1.16.19/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA= modernc.org/libc v1.17.0/go.mod h1:XsgLldpP4aWlPlsjqKRdHPqCxCjISdHfM/yeWC5GyW0= modernc.org/libc v1.17.1/go.mod h1:FZ23b+8LjxZs7XtFMbSzL/EhPxNbfZbErxEHc7cbD9s= +modernc.org/libc v1.17.4/go.mod h1:WNg2ZH56rDEwdropAJeZPQkXmDwh+JCA1s/htl6r2fA= +modernc.org/libc v1.18.0/go.mod h1:vj6zehR5bfc98ipowQOM2nIDUZnVew/wNC/2tOGS+q0= +modernc.org/libc v1.20.3/go.mod h1:ZRfIaEkgrYgZDl6pa4W39HgN5G/yDW+NRmNKZBDFrk0= +modernc.org/libc v1.21.4/go.mod h1:przBsL5RDOZajTVslkugzLBj1evTue36jEomFQOoYuI= +modernc.org/libc v1.22.2/go.mod h1:uvQavJ1pZ0hIoC/jfqNoMLURIMhKzINIWypNM17puug= modernc.org/mathutil v1.2.2/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= modernc.org/mathutil v1.4.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= modernc.org/mathutil v1.5.0/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= modernc.org/memory v1.1.1/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw= modernc.org/memory v1.2.0/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw= modernc.org/memory v1.2.1/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= +modernc.org/memory v1.3.0/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= +modernc.org/memory v1.4.0/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= +modernc.org/memory v1.5.0/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= modernc.org/opt v0.1.1/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= modernc.org/sqlite v1.18.1/go.mod h1:6ho+Gow7oX5V+OiOQ6Tr4xeqbx13UZ6t+Fw9IRUG4d4= +modernc.org/sqlite 
v1.18.2/go.mod h1:kvrTLEWgxUcHa2GfHBQtanR1H9ht3hTJNtKpzH9k1u0= modernc.org/strutil v1.1.1/go.mod h1:DE+MQQ/hjKBZS2zNInV5hhcipt5rLPWkmpbGeW5mmdw= modernc.org/strutil v1.1.3/go.mod h1:MEHNA7PdEnEwLvspRMtWTNnp2nnyvMfkimT1NKNAGbw= modernc.org/tcl v1.13.1/go.mod h1:XOLfOwzhkljL4itZkK6T72ckMgvj0BDsnKNdZVUOecw= +modernc.org/tcl v1.13.2/go.mod h1:7CLiGIPo1M8Rv1Mitpv5akc2+8fxUd2y2UzC/MfMzy0= modernc.org/token v1.0.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= +modernc.org/token v1.0.1/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= +modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= modernc.org/z v1.5.1/go.mod h1:eWFB510QWW5Th9YGZT81s+LwvaAs3Q2yr4sP0rmLkv8= -mvdan.cc/gofumpt v0.1.1/go.mod h1:yXG1r1WqZVKWbVRtBWKWX9+CxGYfA51nSomhM0woR48= -mvdan.cc/gofumpt v0.4.0 h1:JVf4NN1mIpHogBj7ABpgOyZc65/UUOkKQFkoURsz4MM= -mvdan.cc/gofumpt v0.4.0/go.mod h1:PljLOHDeZqgS8opHRKLzp2It2VBuSdteAgqUfzMTxlQ= +mvdan.cc/gofumpt v0.6.0 h1:G3QvahNDmpD+Aek/bNOLrFR2XC6ZAdo62dZu65gmwGo= +mvdan.cc/gofumpt v0.6.0/go.mod h1:4L0wf+kgIPZtcCWXynNS2e6bhmj73umwnuXSZarixzA= mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed h1:WX1yoOaKQfddO/mLzdV4wptyWgoH/6hwLs7QHTixo0I= mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc= mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b h1:DxJ5nJdkhDlLok9K6qO+5290kphDJbHOQO1DFFFTeBo= mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4= -mvdan.cc/unparam v0.0.0-20210104141923-aac4ce9116a7/go.mod h1:hBpJkZE8H/sb+VRFvw2+rBpHNsTBcvSpk61hr8mzXZE= -mvdan.cc/unparam v0.0.0-20220706161116-678bad134442 h1:seuXWbRB1qPrS3NQnHmFKLJLtskWyueeIzmLXghMGgk= -mvdan.cc/unparam v0.0.0-20220706161116-678bad134442/go.mod h1:F/Cxw/6mVrNKqrR2YjFf5CaW0Bw4RL8RfbEf4GRggJk= +mvdan.cc/unparam v0.0.0-20240104100049-c549a3470d14 h1:zCr3iRRgdk5eIikZNDphGcM6KGVTx3Yu+/Uu9Es254w= +mvdan.cc/unparam v0.0.0-20240104100049-c549a3470d14/go.mod h1:ZzZjEpJDOmx8TdVU6umamY3Xy0UAQUI2DHbf05USVbI= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/vendor/4d63.com/gocheckcompilerdirectives/LICENSE b/vendor/4d63.com/gocheckcompilerdirectives/LICENSE new file mode 100644 index 0000000000..3f12625b06 --- /dev/null +++ b/vendor/4d63.com/gocheckcompilerdirectives/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2023 Leigh McCulloch + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/4d63.com/gocheckcompilerdirectives/checkcompilerdirectives/checkcompilerdirectives.go b/vendor/4d63.com/gocheckcompilerdirectives/checkcompilerdirectives/checkcompilerdirectives.go new file mode 100644 index 0000000000..19948c4547 --- /dev/null +++ b/vendor/4d63.com/gocheckcompilerdirectives/checkcompilerdirectives/checkcompilerdirectives.go @@ -0,0 +1,105 @@ +package checkcompilerdirectives + +import ( + "strings" + + "golang.org/x/tools/go/analysis" +) + +func Analyzer() *analysis.Analyzer { + return &analysis.Analyzer{ + Name: "gocheckcompilerdirectives", + Doc: "Checks that go compiler directive comments (//go:) are valid.", + Run: run, + } +} + +func run(pass *analysis.Pass) (interface{}, error) { + for _, file := range pass.Files { + for _, group := range file.Comments { + for _, comment := range group.List { + text := comment.Text + if !strings.HasPrefix(text, "//") { + continue + } + start := 2 + spaces := 0 + for _, c := range text[start:] { + if c == ' ' { + spaces++ + continue + } + break + } + start += spaces + if !strings.HasPrefix(text[start:], "go:") { + continue + } + start += 3 + end := strings.Index(text[start:], " ") + if end == -1 { + continue + } + directive := text[start : start+end] + if len(directive) == 0 { + continue + } + prefix := text[:start+end] + // Leading whitespace will cause the go directive to be ignored + // by the compiler with no error, causing it not to work. This + // is an easy mistake. + if spaces > 0 { + pass.ReportRangef(comment, "compiler directive contains space: %s", prefix) + } + // If the directive is unknown it will be ignored by the + // compiler with no error. This is an easy mistake to make, + // especially if you typo a directive. + if !isKnown(directive) { + pass.ReportRangef(comment, "compiler directive unrecognized: %s", prefix) + } + } + } + } + return nil, nil +} + +func isKnown(directive string) bool { + for _, k := range known { + if directive == k { + return true + } + } + return false +} + +var known = []string{ + // Found by running the following command on the source of go. 
+ // git grep -o -E -h '//go:[a-z_]+' -- ':!**/*_test.go' ':!test/' ':!**/testdata/**' | sort -u + "binary", + "build", + "buildsomethingelse", + "cgo_dynamic_linker", + "cgo_export_dynamic", + "cgo_export_static", + "cgo_import_dynamic", + "cgo_import_static", + "cgo_ldflag", + "cgo_unsafe_args", + "embed", + "generate", + "linkname", + "name", + "nocheckptr", + "noescape", + "noinline", + "nointerface", + "norace", + "nosplit", + "notinheap", + "nowritebarrier", + "nowritebarrierrec", + "systemstack", + "uintptrescapes", + "uintptrkeepalive", + "yeswritebarrierrec", +} diff --git a/vendor/4d63.com/gochecknoglobals/checknoglobals/check_no_globals.go b/vendor/4d63.com/gochecknoglobals/checknoglobals/check_no_globals.go index 9ae889d4ba..edf9193ecb 100644 --- a/vendor/4d63.com/gochecknoglobals/checknoglobals/check_no_globals.go +++ b/vendor/4d63.com/gochecknoglobals/checknoglobals/check_no_globals.go @@ -5,6 +5,7 @@ import ( "fmt" "go/ast" "go/token" + "go/types" "strings" "golang.org/x/tools/go/analysis" @@ -48,12 +49,12 @@ func flags() flag.FlagSet { return *flags } -func isAllowed(cm ast.CommentMap, v ast.Node) bool { +func isAllowed(cm ast.CommentMap, v ast.Node, ti *types.Info) bool { switch i := v.(type) { case *ast.GenDecl: return hasEmbedComment(cm, i) case *ast.Ident: - return i.Name == "_" || i.Name == "version" || looksLikeError(i) || identHasEmbedComment(cm, i) + return i.Name == "_" || i.Name == "version" || isError(i, ti) || identHasEmbedComment(cm, i) case *ast.CallExpr: if expr, ok := i.Fun.(*ast.SelectorExpr); ok { return isAllowedSelectorExpression(expr) @@ -86,10 +87,14 @@ func isAllowedSelectorExpression(v *ast.SelectorExpr) bool { return false } +// isError reports whether the AST identifier looks like +// an error and implements the error interface. +func isError(i *ast.Ident, ti *types.Info) bool { + return looksLikeError(i) && implementsError(i, ti) +} + // looksLikeError returns true if the AST identifier starts // with 'err' or 'Err', or false otherwise. -// -// TODO: https://github.com/leighmcculloch/gochecknoglobals/issues/5 func looksLikeError(i *ast.Ident) bool { prefix := "err" if i.IsExported() { @@ -98,6 +103,14 @@ func looksLikeError(i *ast.Ident) bool { return strings.HasPrefix(i.Name, prefix) } +// implementsError reports whether the AST identifier +// implements the error interface. 
+func implementsError(i *ast.Ident, ti *types.Info) bool { + t := ti.TypeOf(i) + et := types.Universe.Lookup("error").Type().Underlying().(*types.Interface) + return types.Implements(t, et) +} + func identHasEmbedComment(cm ast.CommentMap, i *ast.Ident) bool { if i.Obj == nil { return false @@ -146,7 +159,7 @@ func checkNoGlobals(pass *analysis.Pass) (interface{}, error) { if genDecl.Tok != token.VAR { continue } - if isAllowed(fileCommentMap, genDecl) { + if isAllowed(fileCommentMap, genDecl, pass.TypesInfo) { continue } for _, spec := range genDecl.Specs { @@ -154,7 +167,7 @@ func checkNoGlobals(pass *analysis.Pass) (interface{}, error) { onlyAllowedValues := false for _, vn := range valueSpec.Values { - if isAllowed(fileCommentMap, vn) { + if isAllowed(fileCommentMap, vn, pass.TypesInfo) { onlyAllowedValues = true continue } @@ -168,7 +181,7 @@ func checkNoGlobals(pass *analysis.Pass) (interface{}, error) { } for _, vn := range valueSpec.Names { - if isAllowed(fileCommentMap, vn) { + if isAllowed(fileCommentMap, vn, pass.TypesInfo) { continue } diff --git a/vendor/cloud.google.com/go/compute/internal/version.go b/vendor/cloud.google.com/go/compute/internal/version.go index e939b9f5e0..540ad16ac4 100644 --- a/vendor/cloud.google.com/go/compute/internal/version.go +++ b/vendor/cloud.google.com/go/compute/internal/version.go @@ -15,4 +15,4 @@ package internal // Version is the current tagged release of the library. -const Version = "1.20.1" +const Version = "1.23.3" diff --git a/vendor/cloud.google.com/go/iam/CHANGES.md b/vendor/cloud.google.com/go/iam/CHANGES.md index 40ae15de52..c4cacb03f8 100644 --- a/vendor/cloud.google.com/go/iam/CHANGES.md +++ b/vendor/cloud.google.com/go/iam/CHANGES.md @@ -1,5 +1,62 @@ # Changes + +## [1.1.5](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.4...iam/v1.1.5) (2023-11-01) + + +### Bug Fixes + +* **iam:** Bump google.golang.org/api to v0.149.0 ([8d2ab9f](https://github.com/googleapis/google-cloud-go/commit/8d2ab9f320a86c1c0fab90513fc05861561d0880)) + +## [1.1.4](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.3...iam/v1.1.4) (2023-10-26) + + +### Bug Fixes + +* **iam:** Update grpc-go to v1.59.0 ([81a97b0](https://github.com/googleapis/google-cloud-go/commit/81a97b06cb28b25432e4ece595c55a9857e960b7)) + +## [1.1.3](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.2...iam/v1.1.3) (2023-10-12) + + +### Bug Fixes + +* **iam:** Update golang.org/x/net to v0.17.0 ([174da47](https://github.com/googleapis/google-cloud-go/commit/174da47254fefb12921bbfc65b7829a453af6f5d)) + +## [1.1.2](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.1...iam/v1.1.2) (2023-08-08) + + +### Documentation + +* **iam:** Minor formatting ([b4349cc](https://github.com/googleapis/google-cloud-go/commit/b4349cc507870ff8629bbc07de578b63bb889626)) + +## [1.1.1](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.0...iam/v1.1.1) (2023-06-20) + + +### Bug Fixes + +* **iam:** REST query UpdateMask bug ([df52820](https://github.com/googleapis/google-cloud-go/commit/df52820b0e7721954809a8aa8700b93c5662dc9b)) + +## [1.1.0](https://github.com/googleapis/google-cloud-go/compare/iam/v1.0.1...iam/v1.1.0) (2023-05-30) + + +### Features + +* **iam:** Update all direct dependencies ([b340d03](https://github.com/googleapis/google-cloud-go/commit/b340d030f2b52a4ce48846ce63984b28583abde6)) + +## [1.0.1](https://github.com/googleapis/google-cloud-go/compare/iam/v1.0.0...iam/v1.0.1) (2023-05-08) + + +### Bug Fixes + +* **iam:** Update grpc 
to v1.55.0 ([1147ce0](https://github.com/googleapis/google-cloud-go/commit/1147ce02a990276ca4f8ab7a1ab65c14da4450ef)) + +## [1.0.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.13.0...iam/v1.0.0) (2023-04-04) + + +### Features + +* **iam:** Promote to GA ([#7627](https://github.com/googleapis/google-cloud-go/issues/7627)) ([b351906](https://github.com/googleapis/google-cloud-go/commit/b351906a10e17a02d7f7e2551bc1585fd9dc3742)) + ## [0.13.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.12.0...iam/v0.13.0) (2023-03-15) diff --git a/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go b/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go index 21079f65c3..85346a891d 100644 --- a/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go +++ b/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.26.0 -// protoc v3.21.9 +// protoc-gen-go v1.31.0 +// protoc v4.23.2 // source: google/iam/v1/iam_policy.proto package iampb @@ -342,37 +342,37 @@ var file_google_iam_v1_iam_policy_proto_rawDesc = []byte{ 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x29, 0x82, 0xd3, 0xe4, 0x93, - 0x02, 0x23, 0x22, 0x1e, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x3d, 0x2a, 0x2a, 0x7d, 0x3a, 0x73, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, - 0x63, 0x79, 0x3a, 0x01, 0x2a, 0x12, 0x74, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, + 0x02, 0x23, 0x3a, 0x01, 0x2a, 0x22, 0x1e, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x3d, 0x2a, 0x2a, 0x7d, 0x3a, 0x73, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x74, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, - 0x22, 0x29, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x23, 0x22, 0x1e, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x72, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, 0x2a, 0x2a, 0x7d, 0x3a, 0x67, 0x65, 0x74, 0x49, - 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x3a, 0x01, 0x2a, 0x12, 0x9a, 0x01, 0x0a, 0x12, + 0x22, 0x29, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x23, 0x3a, 0x01, 0x2a, 0x22, 0x1e, 0x2f, 0x76, 0x31, + 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, 0x2a, 0x2a, 0x7d, 0x3a, 0x67, + 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x9a, 0x01, 0x0a, 0x12, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 
0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2f, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x29, 0x22, - 0x24, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, 0x2a, - 0x2a, 0x7d, 0x3a, 0x74, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, - 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x3a, 0x01, 0x2a, 0x1a, 0x1e, 0xca, 0x41, 0x1b, 0x69, 0x61, 0x6d, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2f, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x29, 0x3a, + 0x01, 0x2a, 0x22, 0x24, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x3d, 0x2a, 0x2a, 0x7d, 0x3a, 0x74, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, + 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x1e, 0xca, 0x41, 0x1b, 0x69, 0x61, 0x6d, 0x2d, 0x6d, 0x65, 0x74, 0x61, 0x2d, 0x61, 0x70, 0x69, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x42, 0x86, 0x01, 0x0a, 0x11, 0x63, 0x6f, 0x6d, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x42, 0x0e, - 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, - 0x5a, 0x30, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, - 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x76, 0x31, 0x3b, 0x69, - 0x61, 0x6d, 0xf8, 0x01, 0x01, 0xaa, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, - 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x49, 0x61, 0x6d, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x13, 0x47, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x49, 0x61, 0x6d, 0x5c, 0x56, - 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x42, 0x7f, 0x0a, 0x11, 0x63, 0x6f, 0x6d, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x42, 0x0e, 0x49, + 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, + 0x29, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x31, 0x2f, 0x69, + 0x61, 0x6d, 0x70, 0x62, 0x3b, 0x69, 0x61, 0x6d, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xaa, 0x02, 0x13, + 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x49, 0x61, 0x6d, + 0x2e, 0x56, 0x31, 0xca, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, + 0x75, 0x64, 0x5c, 0x49, 0x61, 0x6d, 0x5c, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, } var ( diff --git a/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go b/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go index e8a2aca9c7..68f8d761f7 100644 --- a/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go +++ b/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.26.0 -// protoc v3.21.9 +// protoc-gen-go v1.31.0 +// protoc v4.23.2 // source: google/iam/v1/options.proto package iampb @@ -111,16 +111,16 @@ var file_google_iam_v1_options_proto_rawDesc = []byte{ 0x12, 0x38, 0x0a, 0x18, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x16, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x50, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x84, 0x01, 0x0a, 0x11, 0x63, - 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, - 0x42, 0x0c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, - 0x5a, 0x30, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, - 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x76, 0x31, 0x3b, 0x69, - 0x61, 0x6d, 0xf8, 0x01, 0x01, 0xaa, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, - 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x49, 0x61, 0x6d, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x13, 0x47, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x49, 0x61, 0x6d, 0x5c, 0x56, - 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x69, 0x63, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x7d, 0x0a, 0x11, 0x63, 0x6f, + 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x42, + 0x0c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, + 0x29, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x31, 0x2f, 0x69, + 0x61, 0x6d, 0x70, 0x62, 0x3b, 0x69, 0x61, 0x6d, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xaa, 0x02, 0x13, + 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x49, 0x61, 0x6d, + 0x2e, 0x56, 0x31, 0xca, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, + 0x75, 0x64, 0x5c, 0x49, 0x61, 0x6d, 0x5c, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, } var ( diff --git a/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go b/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go index e521db60fa..eefd1d0e54 100644 --- a/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go +++ b/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go @@ -1,4 +1,4 @@ -// Copyright 2022 Google LLC +// Copyright 2023 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.26.0 -// protoc v3.21.9 +// protoc-gen-go v1.31.0 +// protoc v4.23.2 // source: google/iam/v1/policy.proto package iampb @@ -214,10 +214,13 @@ func (AuditConfigDelta_Action) EnumDescriptor() ([]byte, []int) { // only if the expression evaluates to `true`. A condition can add constraints // based on attributes of the request, the resource, or both. To learn which // resources support conditions in their IAM policies, see the -// [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies). 
+// [IAM +// documentation](https://cloud.google.com/iam/help/conditions/resource-policies). // // **JSON example:** // +// ``` +// // { // "bindings": [ // { @@ -237,7 +240,8 @@ func (AuditConfigDelta_Action) EnumDescriptor() ([]byte, []int) { // "condition": { // "title": "expirable access", // "description": "Does not grant access after Sep 2020", -// "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", +// "expression": "request.time < +// timestamp('2020-10-01T00:00:00.000Z')", // } // } // ], @@ -245,8 +249,12 @@ func (AuditConfigDelta_Action) EnumDescriptor() ([]byte, []int) { // "version": 3 // } // +// ``` +// // **YAML example:** // +// ``` +// // bindings: // - members: // - user:mike@example.com @@ -264,6 +272,8 @@ func (AuditConfigDelta_Action) EnumDescriptor() ([]byte, []int) { // etag: BwWWja0YfJA= // version: 3 // +// ``` +// // For a description of IAM and its features, see the // [IAM documentation](https://cloud.google.com/iam/docs/). type Policy struct { @@ -279,11 +289,11 @@ type Policy struct { // Any operation that affects conditional role bindings must specify version // `3`. This requirement applies to the following operations: // - // - Getting a policy that includes a conditional role binding - // - Adding a conditional role binding to a policy - // - Changing a conditional role binding in a policy - // - Removing any role binding, with or without a condition, from a policy - // that includes conditions + // * Getting a policy that includes a conditional role binding + // * Adding a conditional role binding to a policy + // * Changing a conditional role binding in a policy + // * Removing any role binding, with or without a condition, from a policy + // that includes conditions // // **Important:** If you use IAM Conditions, you must include the `etag` field // whenever you call `setIamPolicy`. If you omit this field, then IAM allows @@ -294,7 +304,8 @@ type Policy struct { // specify any valid version or leave the field unset. // // To learn which resources support conditions in their IAM policies, see the - // [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies). + // [IAM + // documentation](https://cloud.google.com/iam/help/conditions/resource-policies). Version int32 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` // Associates a list of `members`, or principals, with a `role`. Optionally, // may specify a `condition` that determines how and when the `bindings` are @@ -393,46 +404,50 @@ type Binding struct { // Role that is assigned to the list of `members`, or principals. // For example, `roles/viewer`, `roles/editor`, or `roles/owner`. Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"` - // Specifies the principals requesting access for a Cloud Platform resource. + // Specifies the principals requesting access for a Google Cloud resource. // `members` can have the following values: // - // - `allUsers`: A special identifier that represents anyone who is - // on the internet; with or without a Google account. + // * `allUsers`: A special identifier that represents anyone who is + // on the internet; with or without a Google account. + // + // * `allAuthenticatedUsers`: A special identifier that represents anyone + // who is authenticated with a Google account or a service account. + // + // * `user:{emailid}`: An email address that represents a specific Google + // account. For example, `alice@example.com` . 
+ // + // + // * `serviceAccount:{emailid}`: An email address that represents a service + // account. For example, `my-other-app@appspot.gserviceaccount.com`. // - // - `allAuthenticatedUsers`: A special identifier that represents anyone - // who is authenticated with a Google account or a service account. + // * `group:{emailid}`: An email address that represents a Google group. + // For example, `admins@example.com`. // - // - `user:{emailid}`: An email address that represents a specific Google - // account. For example, `alice@example.com` . + // * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique + // identifier) representing a user that has been recently deleted. For + // example, `alice@example.com?uid=123456789012345678901`. If the user is + // recovered, this value reverts to `user:{emailid}` and the recovered user + // retains the role in the binding. // - // - `serviceAccount:{emailid}`: An email address that represents a service - // account. For example, `my-other-app@appspot.gserviceaccount.com`. + // * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus + // unique identifier) representing a service account that has been recently + // deleted. For example, + // `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. + // If the service account is undeleted, this value reverts to + // `serviceAccount:{emailid}` and the undeleted service account retains the + // role in the binding. // - // - `group:{emailid}`: An email address that represents a Google group. - // For example, `admins@example.com`. + // * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique + // identifier) representing a Google group that has been recently + // deleted. For example, `admins@example.com?uid=123456789012345678901`. If + // the group is recovered, this value reverts to `group:{emailid}` and the + // recovered group retains the role in the binding. // - // - `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique - // identifier) representing a user that has been recently deleted. For - // example, `alice@example.com?uid=123456789012345678901`. If the user is - // recovered, this value reverts to `user:{emailid}` and the recovered user - // retains the role in the binding. // - // - `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus - // unique identifier) representing a service account that has been recently - // deleted. For example, - // `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. - // If the service account is undeleted, this value reverts to - // `serviceAccount:{emailid}` and the undeleted service account retains the - // role in the binding. + // * `domain:{domain}`: The G Suite domain (primary) that represents all the + // users of that domain. For example, `google.com` or `example.com`. // - // - `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique - // identifier) representing a Google group that has been recently - // deleted. For example, `admins@example.com?uid=123456789012345678901`. If - // the group is recovered, this value reverts to `group:{emailid}` and the - // recovered group retains the role in the binding. // - // - `domain:{domain}`: The G Suite domain (primary) that represents all the - // users of that domain. For example, `google.com` or `example.com`. Members []string `protobuf:"bytes,2,rep,name=members,proto3" json:"members,omitempty"` // The condition that is associated with this binding. 
// @@ -551,8 +566,8 @@ func (x *Binding) GetCondition() *expr.Expr { // } // // For sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ -// logging. It also exempts jose@example.com from DATA_READ logging, and -// aliya@example.com from DATA_WRITE logging. +// logging. It also exempts `jose@example.com` from DATA_READ logging, and +// `aliya@example.com` from DATA_WRITE logging. type AuditConfig struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -640,7 +655,8 @@ type AuditLogConfig struct { LogType AuditLogConfig_LogType `protobuf:"varint,1,opt,name=log_type,json=logType,proto3,enum=google.iam.v1.AuditLogConfig_LogType" json:"log_type,omitempty"` // Specifies the identities that do not cause logging for this type of // permission. - // Follows the same format of [Binding.members][google.iam.v1.Binding.members]. + // Follows the same format of + // [Binding.members][google.iam.v1.Binding.members]. ExemptedMembers []string `protobuf:"bytes,2,rep,name=exempted_members,json=exemptedMembers,proto3" json:"exempted_members,omitempty"` } @@ -762,7 +778,7 @@ type BindingDelta struct { // For example, `roles/viewer`, `roles/editor`, or `roles/owner`. // Required Role string `protobuf:"bytes,2,opt,name=role,proto3" json:"role,omitempty"` - // A single identity requesting access for a Cloud Platform resource. + // A single identity requesting access for a Google Cloud resource. // Follows the same format of Binding.members. // Required Member string `protobuf:"bytes,3,opt,name=member,proto3" json:"member,omitempty"` @@ -999,16 +1015,15 @@ var file_google_iam_v1_policy_proto_rawDesc = []byte{ 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x12, 0x41, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x44, 0x44, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x52, 0x45, 0x4d, 0x4f, 0x56, - 0x45, 0x10, 0x02, 0x42, 0x83, 0x01, 0x0a, 0x11, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x42, 0x0b, 0x50, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x30, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, - 0x69, 0x61, 0x6d, 0x2f, 0x76, 0x31, 0x3b, 0x69, 0x61, 0x6d, 0xf8, 0x01, 0x01, 0xaa, 0x02, 0x13, - 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x49, 0x61, 0x6d, - 0x2e, 0x56, 0x31, 0xca, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, - 0x75, 0x64, 0x5c, 0x49, 0x61, 0x6d, 0x5c, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, + 0x45, 0x10, 0x02, 0x42, 0x7c, 0x0a, 0x11, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x42, 0x0b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x29, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x69, 0x61, 0x6d, + 0x2f, 0x61, 0x70, 0x69, 0x76, 0x31, 0x2f, 0x69, 0x61, 0x6d, 0x70, 0x62, 0x3b, 0x69, 0x61, 0x6d, + 0x70, 0x62, 0xf8, 0x01, 0x01, 0xaa, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, + 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x49, 0x61, 0x6d, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x13, 0x47, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 
0x6f, 0x75, 0x64, 0x5c, 0x49, 0x61, 0x6d, 0x5c, 0x56, + 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/vendor/cloud.google.com/go/internal/.repo-metadata-full.json b/vendor/cloud.google.com/go/internal/.repo-metadata-full.json index 9482956b1d..31d1720471 100644 --- a/vendor/cloud.google.com/go/internal/.repo-metadata-full.json +++ b/vendor/cloud.google.com/go/internal/.repo-metadata-full.json @@ -1,2099 +1,2532 @@ { "cloud.google.com/go/accessapproval/apiv1": { + "api_shortname": "accessapproval", "distribution_name": "cloud.google.com/go/accessapproval/apiv1", "description": "Access Approval API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.
google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/accessapproval/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/accessapproval/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/accesscontextmanager/apiv1": { + "api_shortname": "accesscontextmanager", "distribution_name": "cloud.google.com/go/accesscontextmanager/apiv1", "description": "Access Context Manager API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/accesscontextmanager/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/accesscontextmanager/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/advisorynotifications/apiv1": { + "api_shortname": "advisorynotifications", "distribution_name": "cloud.google.com/go/advisorynotifications/apiv1", "description": "Advisory Notifications API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/advisorynotifications/apiv1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/advisorynotifications/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/ai/generativelanguage/apiv1beta2": { + "api_shortname": "generativelanguage", + "distribution_name": "cloud.google.com/go/ai/generativelanguage/apiv1beta2", + "description": "Generative Language API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/ai/latest/generativelanguage/apiv1beta2", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/aiplatform/apiv1": { + "api_shortname": "aiplatform", "distribution_name": "cloud.google.com/go/aiplatform/apiv1", "description": "Vertex AI API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/aiplatform/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/aiplatform/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/aiplatform/apiv1beta1": { + "api_shortname": "aiplatform", "distribution_name": "cloud.google.com/go/aiplatform/apiv1beta1", "description": "Vertex AI API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/aiplatform/apiv1beta1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/aiplatform/latest/apiv1beta1", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/alloydb/apiv1": { + "api_shortname": "alloydb", "distribution_name": "cloud.google.com/go/alloydb/apiv1", "description": "AlloyDB API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\n
cloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/alloydb/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/alloydb/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/alloydb/apiv1alpha": { + "api_shortname": "alloydb", "distribution_name": "cloud.google.com/go/alloydb/apiv1alpha", "description": "AlloyDB API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/alloydb/apiv1alpha", - "release_level": "alpha", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/alloydb/latest/apiv1alpha", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/alloydb/apiv1beta": { + "api_shortname": "alloydb", "distribution_name": "cloud.google.com/go/alloydb/apiv1beta", "description": "AlloyDB API", - "language": "Go", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/alloydb/latest/apiv1beta", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/alloydb/connectors/apiv1": { + "api_shortname": "connectors", + "distribution_name": "cloud.google.com/go/alloydb/connectors/apiv1", + "description": "AlloyDB connectors", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/alloydb/latest/connectors/apiv1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/alloydb/connectors/apiv1alpha": { + "api_shortname": "connectors", + "distribution_name": "cloud.google.com/go/alloydb/connectors/apiv1alpha", + "description": "AlloyDB connectors", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/alloydb/latest/connectors/apiv1alpha", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/alloydb/connectors/apiv1beta": { + "api_shortname": "connectors", + "distribution_name": "cloud.google.com/go/alloydb/connectors/apiv1beta", + "description": "AlloyDB connectors", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/alloydb/apiv1beta", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/alloydb/latest/connectors/apiv1beta", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/analytics/admin/apiv1alpha": { + "api_shortname": "analyticsadmin", "distribution_name": "cloud.google.com/go/analytics/admin/apiv1alpha", "description": "Google Analytics Admin API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/analytics/admin/apiv1alpha", - "release_level": "alpha", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/analytics/latest/admin/apiv1alpha", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/apigateway/apiv1": { + "api_shortname": "apigateway", "distribution_name": "cloud.google.com/go/apigateway/apiv1", "description": "API Gateway API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/apigateway/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/apigateway/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/apigeeconnect/apiv1": { + "api_shortname": "apigeeconnect", "distribution_name": "cloud.google.com/go/apigeeconnect/apiv1", "description": "Apigee Connect API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/apigeeconnect/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/apigeeconnect/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/apigeeregistry/apiv1": { + "api_shortname": "apigeeregistry", "distribution_name": "cloud.google.com/go/apigeeregistry/apiv1", "description": "Apigee Registry API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/apigeeregistry/apiv1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/apigeeregistry/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/apikeys/apiv2": { + "api_shortname": "apikeys", "distribution_name": "cloud.google.com/go/apikeys/apiv2", "description": "API Keys API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncl
oud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/apikeys/apiv2", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/apikeys/latest/apiv2", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/appengine/apiv1": { + "api_shortname": "appengine", "distribution_name": "cloud.google.com/go/appengine/apiv1", "description": "App Engine Admin API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/appengine/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/appengine/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/area120/tables/apiv1alpha1": { + "api_shortname": "area120tables", "distribution_name": "cloud.google.com/go/area120/tables/apiv1alpha1", "description": "Area120 Tables API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/area120/tables/apiv1alpha1",
-    "release_level": "alpha",
+    "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/area120/latest/tables/apiv1alpha1",
+    "release_level": "preview",
     "library_type": "GAPIC_AUTO"
   },
   "cloud.google.com/go/artifactregistry/apiv1": {
+    "api_shortname": "artifactregistry",
     "distribution_name": "cloud.google.com/go/artifactregistry/apiv1",
     "description": "Artifact Registry API",
-    "language": "Go",
+    "language": "go",
     "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\n[…]\ncloud.google.com/go/workstations/latest/cloud.google.com/go/artifactregistry/apiv1",
-    "release_level": "ga",
+    "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/artifactregistry/latest/apiv1",
+    "release_level": "stable",
     "library_type": "GAPIC_AUTO"
   },
   "cloud.google.com/go/artifactregistry/apiv1beta2": {
+    "api_shortname": "artifactregistry",
     "distribution_name": "cloud.google.com/go/artifactregistry/apiv1beta2",
     "description": "Artifact Registry API",
-    "language": "Go",
+    "language": "go",
     "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\n[…]\ncloud.google.com/go/workstations/latest/cloud.google.com/go/artifactregistry/apiv1beta2",
-    "release_level": "beta",
+    "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/artifactregistry/latest/apiv1beta2",
+    "release_level": "preview",
     "library_type": "GAPIC_AUTO"
   },
   "cloud.google.com/go/asset/apiv1": {
+    "api_shortname": "cloudasset",
     "distribution_name": "cloud.google.com/go/asset/apiv1",
     "description": "Cloud Asset API",
-    "language": "Go",
+    "language": "go",
     "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\n[…]\ncloud.google.com/go/workstations/latest/cloud.google.com/go/asset/apiv1",
-    "release_level": "ga",
+    "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/asset/latest/apiv1",
+    "release_level": "stable",
     "library_type": "GAPIC_AUTO"
   },
   "cloud.google.com/go/asset/apiv1p2beta1": {
+    "api_shortname": "cloudasset",
     "distribution_name": "cloud.google.com/go/asset/apiv1p2beta1",
     "description": "Cloud Asset API",
-    "language": "Go",
+    "language": "go",
     "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\n[…]\ncloud.google.com/go/workstations/latest/cloud.google.com/go/asset/apiv1p2beta1",
-    "release_level": "beta",
+    "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/asset/latest/apiv1p2beta1",
+    "release_level": "preview",
     "library_type": "GAPIC_AUTO"
   },
   "cloud.google.com/go/asset/apiv1p5beta1": {
+    "api_shortname": "cloudasset",
     "distribution_name": "cloud.google.com/go/asset/apiv1p5beta1",
     "description": "Cloud Asset API",
-    "language": "Go",
+    "language": "go",
     "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\n[…]\ncloud.google.com/go/workstations/latest/cloud.google.com/go/asset/apiv1p5beta1",
-    "release_level": "beta",
+    "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/asset/latest/apiv1p5beta1",
+    "release_level": "preview",
     "library_type": "GAPIC_AUTO"
   },
   "cloud.google.com/go/assuredworkloads/apiv1": {
+    "api_shortname": "assuredworkloads",
     "distribution_name": "cloud.google.com/go/assuredworkloads/apiv1",
     "description": "Assured Workloads API",
-    "language": "Go",
+    "language": "go",
     "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\n[…]\ncloud.google.com/go/workstations/latest/cloud.google.com/go/assuredworkloads/apiv1",
-    "release_level": "ga",
+    "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/assuredworkloads/latest/apiv1",
+    "release_level": "stable",
     "library_type": "GAPIC_AUTO"
   },
   "cloud.google.com/go/assuredworkloads/apiv1beta1": {
+    "api_shortname": "assuredworkloads",
     "distribution_name": "cloud.google.com/go/assuredworkloads/apiv1beta1",
     "description": "Assured Workloads API",
-    "language": "Go",
+    "language": "go",
     "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\n[…]\ncloud.google.com/go/workstations/latest/cloud.google.com/go/assuredworkloads/apiv1beta1",
-    "release_level": "beta",
+    "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/assuredworkloads/latest/apiv1beta1",
+    "release_level": "preview",
     "library_type": "GAPIC_AUTO"
   },
   "cloud.google.com/go/automl/apiv1": {
+    "api_shortname": "automl",
     "distribution_name": "cloud.google.com/go/automl/apiv1",
     "description": "Cloud AutoML API",
-    "language": "Go",
+    "language": "go",
     "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\n[…]\ncloud.google.com/go/workstations/latest/cloud.google.com/go/automl/apiv1",
-    "release_level": "ga",
+    "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/automl/latest/apiv1",
+    "release_level": "stable",
     "library_type": "GAPIC_AUTO"
   },
   "cloud.google.com/go/automl/apiv1beta1": {
+    "api_shortname": "automl",
     "distribution_name": "cloud.google.com/go/automl/apiv1beta1",
     "description": "Cloud AutoML API",
-    "language": "Go",
+    "language": "go",
     "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\n[…]\ncloud.google.com/go/workstations/latest/cloud.google.com/go/automl/apiv1beta1",
-    "release_level": "beta",
+    "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/automl/latest/apiv1beta1",
+    "release_level": "preview",
     "library_type": "GAPIC_AUTO"
   },
   "cloud.google.com/go/baremetalsolution/apiv2": {
+    "api_shortname": "baremetalsolution",
     "distribution_name": "cloud.google.com/go/baremetalsolution/apiv2",
     "description": "Bare Metal Solution API",
-    "language": "Go",
+    "language": "go",
     "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\n[…]\ncloud.google.com/go/workstations/latest/cloud.google.com/go/baremetalsolution/apiv2",
-    "release_level": "ga",
+    "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/baremetalsolution/latest/apiv2",
+    "release_level": "stable",
     "library_type": "GAPIC_AUTO"
   },
   "cloud.google.com/go/batch/apiv1": {
+    "api_shortname": "batch",
     "distribution_name": "cloud.google.com/go/batch/apiv1",
     "description": "Batch API",
-    "language": "Go",
+    "language": "go",
     "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\n[…]\ncloud.google.com/go/workstations/latest/cloud.google.com/go/batch/apiv1",
-    "release_level": "ga",
+    "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/batch/latest/apiv1",
+    "release_level": "stable",
     "library_type": "GAPIC_AUTO"
   },
   "cloud.google.com/go/beyondcorp/appconnections/apiv1": {
+    "api_shortname": "beyondcorp",
     "distribution_name": "cloud.google.com/go/beyondcorp/appconnections/apiv1",
     "description": "BeyondCorp API",
-    "language": "Go",
+    "language": "go",
     "client_library_type": "generated",
-    "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/beyondcorp/appconnections/apiv1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/beyondcorp/latest/appconnections/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/beyondcorp/appconnectors/apiv1": { + "api_shortname": "beyondcorp", "distribution_name": "cloud.google.com/go/beyondcorp/appconnectors/apiv1", "description": "BeyondCorp API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/beyondcorp/appconnectors/apiv1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/beyondcorp/latest/appconnectors/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/beyondcorp/appgateways/apiv1": { + "api_shortname": "beyondcorp", "distribution_name": "cloud.google.com/go/beyondcorp/appgateways/apiv1", "description": "BeyondCorp API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/beyondcorp/appgateways/apiv1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/beyondcorp/latest/appgateways/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/beyondcorp/clientconnectorservices/apiv1": { + "api_shortname": "beyondcorp", "distribution_name": "cloud.google.com/go/beyondcorp/clientconnectorservices/apiv1", "description": "BeyondCorp API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/beyondcorp/clientconnectorservices/apiv1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/beyondcorp/latest/clientconnectorservices/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/beyondcorp/clientgateways/apiv1": { + "api_shortname": "beyondcorp", "distribution_name": "cloud.google.com/go/beyondcorp/clientgateways/apiv1", "description": "BeyondCorp API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/beyondcorp/clientgateways/apiv1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/beyondcorp/latest/clientgateways/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/bigquery": { + "api_shortname": "bigquery", "distribution_name": "cloud.google.com/go/bigquery", "description": "BigQuery", - "language": "Go", + "language": "go", "client_library_type": "manual", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest", + "release_level": "stable", "library_type": "GAPIC_MANUAL" }, "cloud.google.com/go/bigquery/analyticshub/apiv1": { + "api_shortname": "analyticshub", "distribution_name": "cloud.google.com/go/bigquery/analyticshub/apiv1", "description": "Analytics Hub API", - "language": "Go", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/analyticshub/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/bigquery/biglake/apiv1": { + "api_shortname": "biglake", + "distribution_name": "cloud.google.com/go/bigquery/biglake/apiv1", + "description": "BigLake API", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/bigquery/analyticshub/apiv1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/biglake/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/bigquery/biglake/apiv1alpha1": { + "api_shortname": "biglake", + "distribution_name": "cloud.google.com/go/bigquery/biglake/apiv1alpha1", + "description": "BigLake API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/biglake/apiv1alpha1", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/bigquery/connection/apiv1": { + "api_shortname": "bigqueryconnection", "distribution_name": "cloud.google.com/go/bigquery/connection/apiv1", "description": "BigQuery Connection API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/bigquery/connection/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/connection/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/bigquery/connection/apiv1beta1": { + "api_shortname": "bigqueryconnection", "distribution_name": "cloud.google.com/go/bigquery/connection/apiv1beta1", "description": "BigQuery Connection API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/bigquery/connection/apiv1beta1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/connection/apiv1beta1", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/bigquery/dataexchange/apiv1beta1": { + "api_shortname": "analyticshub", "distribution_name": "cloud.google.com/go/bigquery/dataexchange/apiv1beta1", "description": "Analytics Hub API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/bigquery/dataexchange/apiv1beta1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/dataexchange/apiv1beta1", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/bigquery/datapolicies/apiv1": { + "api_shortname": "bigquerydatapolicy", "distribution_name": "cloud.google.com/go/bigquery/datapolicies/apiv1", "description": "BigQuery Data Policy API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/bigquery/datapolicies/apiv1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/datapolicies/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/bigquery/datapolicies/apiv1beta1": { + "api_shortname": "bigquerydatapolicy", "distribution_name": "cloud.google.com/go/bigquery/datapolicies/apiv1beta1", "description": "BigQuery Data Policy API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/bigquery/datapolicies/apiv1beta1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/datapolicies/apiv1beta1", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/bigquery/datatransfer/apiv1": { + "api_shortname": "bigquerydatatransfer", "distribution_name": "cloud.google.com/go/bigquery/datatransfer/apiv1", "description": "BigQuery Data Transfer API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/bigquery/datatransfer/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/datatransfer/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/bigquery/migration/apiv2": { + "api_shortname": "bigquerymigration", "distribution_name": "cloud.google.com/go/bigquery/migration/apiv2", "description": "BigQuery Migration API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/bigquery/migration/apiv2", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/migration/apiv2", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/bigquery/migration/apiv2alpha": { + "api_shortname": "bigquerymigration", "distribution_name": "cloud.google.com/go/bigquery/migration/apiv2alpha", "description": "BigQuery Migration API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/bigquery/migration/apiv2alpha", - "release_level": "alpha", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/migration/apiv2alpha", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/bigquery/reservation/apiv1": { + "api_shortname": "bigqueryreservation", "distribution_name": "cloud.google.com/go/bigquery/reservation/apiv1", "description": "BigQuery Reservation API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/bigquery/reservation/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/reservation/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/bigquery/storage/apiv1": { + "api_shortname": "bigquerystorage", "distribution_name": "cloud.google.com/go/bigquery/storage/apiv1", "description": "BigQuery Storage API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/bigquery/storage/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/storage/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/bigquery/storage/apiv1beta1": { + "api_shortname": "bigquerystorage", "distribution_name": "cloud.google.com/go/bigquery/storage/apiv1beta1", "description": "BigQuery Storage API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/bigquery/storage/apiv1beta1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/storage/apiv1beta1", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/bigquery/storage/apiv1beta2": { + "api_shortname": "bigquerystorage", "distribution_name": "cloud.google.com/go/bigquery/storage/apiv1beta2", "description": "BigQuery Storage API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/bigquery/storage/apiv1beta2", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/storage/apiv1beta2", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/bigtable": { + "api_shortname": "bigtable", "distribution_name": "cloud.google.com/go/bigtable", "description": "Cloud BigTable", - "language": "Go", + "language": "go", "client_library_type": "manual", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigtable/latest", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigtable/latest", + "release_level": "stable", "library_type": "GAPIC_MANUAL" }, "cloud.google.com/go/billing/apiv1": { + "api_shortname": "cloudbilling", "distribution_name": "cloud.google.com/go/billing/apiv1", "description": "Cloud Billing API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/billing/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/billing/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/billing/budgets/apiv1": { + "api_shortname": "billingbudgets", "distribution_name": "cloud.google.com/go/billing/budgets/apiv1", "description": "Cloud Billing Budget API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/billing/budgets/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/billing/latest/budgets/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/billing/budgets/apiv1beta1": { + "api_shortname": "billingbudgets", "distribution_name": "cloud.google.com/go/billing/budgets/apiv1beta1", "description": "Cloud Billing Budget API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
-    "release_level": "beta",
+    "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/billing/latest/budgets/apiv1beta1",
+    "release_level": "preview",
     "library_type": "GAPIC_AUTO"
   },
   "cloud.google.com/go/binaryauthorization/apiv1": {
+    "api_shortname": "binaryauthorization",
     "distribution_name": "cloud.google.com/go/binaryauthorization/apiv1",
     "description": "Binary Authorization API",
-    "language": "Go",
+    "language": "go",
     "client_library_type": "generated",
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
-    "release_level": "ga",
+    "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/binaryauthorization/latest/apiv1",
+    "release_level": "stable",
     "library_type": "GAPIC_AUTO"
   },
   "cloud.google.com/go/binaryauthorization/apiv1beta1": {
+    "api_shortname": "binaryauthorization",
     "distribution_name": "cloud.google.com/go/binaryauthorization/apiv1beta1",
     "description": "Binary Authorization API",
-    "language": "Go",
+    "language": "go",
     "client_library_type": "generated",
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
-    "release_level": "beta",
+    "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/binaryauthorization/latest/apiv1beta1",
+    "release_level": "preview",
     "library_type": "GAPIC_AUTO"
   },
   "cloud.google.com/go/certificatemanager/apiv1": {
+    "api_shortname": "certificatemanager",
     "distribution_name": "cloud.google.com/go/certificatemanager/apiv1",
     "description": "Certificate Manager API",
-    "language": "Go",
+    "language": "go",
     "client_library_type": "generated",
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
-    "release_level": "ga",
+    "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/certificatemanager/latest/apiv1",
+    "release_level": "stable",
     "library_type": "GAPIC_AUTO"
   },
   "cloud.google.com/go/channel/apiv1": {
+    "api_shortname": "cloudchannel",
     "distribution_name": "cloud.google.com/go/channel/apiv1",
     "description": "Cloud Channel API",
-    "language": "Go",
+    "language": "go",
     "client_library_type": "generated",
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
-    "release_level": "ga",
+    "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/channel/latest/apiv1",
+    "release_level": "stable",
     "library_type": "GAPIC_AUTO"
   },
   "cloud.google.com/go/cloudbuild/apiv1/v2": {
+    "api_shortname": "cloudbuild",
     "distribution_name": "cloud.google.com/go/cloudbuild/apiv1/v2",
     "description": "Cloud Build API",
-    "language": "Go",
+    "language": "go",
     "client_library_type": "generated",
-    "release_level": "ga",
+    "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/cloudbuild/latest/apiv1/v2",
+    "release_level": "stable",
     "library_type": "GAPIC_AUTO"
   },
   "cloud.google.com/go/cloudbuild/apiv2": {
+    "api_shortname": "cloudbuild",
     "distribution_name": "cloud.google.com/go/cloudbuild/apiv2",
     "description": "Cloud Build API",
-    "language": "Go",
+    "language": "go",
     "client_library_type": "generated",
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
-    "release_level": "ga",
+    "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/cloudbuild/latest/apiv2",
+    "release_level": "stable",
     "library_type": "GAPIC_AUTO"
   },
   "cloud.google.com/go/clouddms/apiv1": {
+    "api_shortname": "datamigration",
     "distribution_name": "cloud.google.com/go/clouddms/apiv1",
     "description": "Database Migration API",
-    "language": "Go",
+    "language": "go",
     "client_library_type": "generated",
-    "release_level": "ga",
+    "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/clouddms/latest/apiv1",
+    "release_level": "stable",
     "library_type": "GAPIC_AUTO"
   },
   "cloud.google.com/go/cloudtasks/apiv2": {
+    "api_shortname": "cloudtasks",
     "distribution_name": "cloud.google.com/go/cloudtasks/apiv2",
     "description": "Cloud Tasks API",
-    "language": "Go",
+    "language": "go",
     "client_library_type": "generated",
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
-    "release_level": "ga",
+    "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/cloudtasks/latest/apiv2",
+    "release_level": "stable",
     "library_type": "GAPIC_AUTO"
   },
   "cloud.google.com/go/cloudtasks/apiv2beta2": {
+    "api_shortname": "cloudtasks",
     "distribution_name": "cloud.google.com/go/cloudtasks/apiv2beta2",
     "description": "Cloud Tasks API",
-    "language": "Go",
+    "language": "go",
     "client_library_type": "generated",
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
-    "release_level": "beta",
+    "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/cloudtasks/latest/apiv2beta2",
+    "release_level": "preview",
     "library_type": "GAPIC_AUTO"
   },
   "cloud.google.com/go/cloudtasks/apiv2beta3": {
+    "api_shortname": "cloudtasks",
     "distribution_name": "cloud.google.com/go/cloudtasks/apiv2beta3",
     "description": "Cloud Tasks API",
-    "language": "Go",
+    "language": "go",
     "client_library_type": "generated",
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
-    "release_level": "beta",
+    "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/cloudtasks/latest/apiv2beta3",
+    "release_level": "preview",
+    "library_type": "GAPIC_AUTO"
+  },
+  "cloud.google.com/go/commerce/consumer/procurement/apiv1": {
+    "api_shortname": "cloudcommerceconsumerprocurement",
+    "distribution_name": "cloud.google.com/go/commerce/consumer/procurement/apiv1",
+    "description": "Cloud Commerce Consumer Procurement API",
+    "language": "go",
+    "client_library_type": "generated",
+    "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/commerce/latest/consumer/procurement/apiv1",
+    "release_level": "preview",
     "library_type": "GAPIC_AUTO"
   },
   "cloud.google.com/go/compute/apiv1": {
+    "api_shortname": "compute",
     "distribution_name": "cloud.google.com/go/compute/apiv1",
     "description": "Google Compute Engine API",
-    "language": "Go",
+    "language": "go",
     "client_library_type": "generated",
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/compute/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/compute/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/compute/metadata": { + "api_shortname": "compute-metadata", "distribution_name": "cloud.google.com/go/compute/metadata", "description": "Service Metadata API", - "language": "Go", + "language": "go", "client_library_type": "manual", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/compute/latest/metadata", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/compute/latest/metadata", + "release_level": "stable", "library_type": "CORE" }, "cloud.google.com/go/confidentialcomputing/apiv1": { + "api_shortname": "confidentialcomputing", "distribution_name": "cloud.google.com/go/confidentialcomputing/apiv1", "description": "Confidential Computing API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/confidentialcomputing/apiv1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/confidentialcomputing/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/confidentialcomputing/apiv1alpha1": { + "api_shortname": "confidentialcomputing", "distribution_name": "cloud.google.com/go/confidentialcomputing/apiv1alpha1", "description": "Confidential Computing API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/confidentialcomputing/apiv1alpha1", - "release_level": "alpha", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/confidentialcomputing/latest/apiv1alpha1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/config/apiv1": { + "api_shortname": "config", + "distribution_name": "cloud.google.com/go/config/apiv1", + "description": "Infrastructure Manager API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/config/latest/apiv1", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/contactcenterinsights/apiv1": { + "api_shortname": "contactcenterinsights", "distribution_name": "cloud.google.com/go/contactcenterinsights/apiv1", "description": "Contact Center AI Insights API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/contactcenterinsights/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/contactcenterinsights/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/container/apiv1": { + "api_shortname": "container", "distribution_name": "cloud.google.com/go/container/apiv1", "description": "Kubernetes Engine API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/container/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/container/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/containeranalysis/apiv1beta1": { + "api_shortname": "containeranalysis", "distribution_name": "cloud.google.com/go/containeranalysis/apiv1beta1", "description": "Container Analysis API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/containeranalysis/apiv1beta1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/containeranalysis/latest/apiv1beta1", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/datacatalog/apiv1": { + "api_shortname": "datacatalog", "distribution_name": "cloud.google.com/go/datacatalog/apiv1", "description": "Google Cloud Data Catalog API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/datacatalog/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/datacatalog/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/datacatalog/apiv1beta1": { + "api_shortname": "datacatalog", "distribution_name": "cloud.google.com/go/datacatalog/apiv1beta1", "description": "Google Cloud Data Catalog API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/datacatalog/apiv1beta1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/datacatalog/latest/apiv1beta1", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/datacatalog/lineage/apiv1": { + "api_shortname": "datalineage", "distribution_name": "cloud.google.com/go/datacatalog/lineage/apiv1", "description": "Data Lineage API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/datacatalog/lineage/apiv1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/datacatalog/latest/lineage/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/dataflow/apiv1beta3": { + "api_shortname": "dataflow", "distribution_name": "cloud.google.com/go/dataflow/apiv1beta3", "description": "Dataflow API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/dataflow/apiv1beta3", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dataflow/latest/apiv1beta3", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/dataform/apiv1alpha2": { + "api_shortname": "dataform", "distribution_name": "cloud.google.com/go/dataform/apiv1alpha2", "description": "Dataform API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/dataform/apiv1alpha2", - "release_level": "alpha", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dataform/latest/apiv1alpha2", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/dataform/apiv1beta1": { + "api_shortname": "dataform", "distribution_name": "cloud.google.com/go/dataform/apiv1beta1", "description": "Dataform API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/dataform/apiv1beta1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dataform/latest/apiv1beta1", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/datafusion/apiv1": { + "api_shortname": "datafusion", "distribution_name": "cloud.google.com/go/datafusion/apiv1", "description": "Cloud Data Fusion API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/datafusion/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/datafusion/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/datalabeling/apiv1beta1": { + "api_shortname": "datalabeling", "distribution_name": "cloud.google.com/go/datalabeling/apiv1beta1", "description": "Data Labeling API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/datalabeling/apiv1beta1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/datalabeling/latest/apiv1beta1", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/dataplex/apiv1": { + "api_shortname": "dataplex", "distribution_name": "cloud.google.com/go/dataplex/apiv1", "description": "Cloud Dataplex API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/dataplex/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dataplex/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/dataproc/v2/apiv1": { + "api_shortname": "dataproc", "distribution_name": "cloud.google.com/go/dataproc/v2/apiv1", "description": "Cloud Dataproc API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\nc
loud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/dataproc/v2/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dataproc/v2/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/dataqna/apiv1alpha": { + "api_shortname": "dataqna", "distribution_name": "cloud.google.com/go/dataqna/apiv1alpha", "description": "Data QnA API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/dataqna/apiv1alpha", - "release_level": "alpha", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dataqna/latest/apiv1alpha", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/datastore": { + "api_shortname": "datastore", "distribution_name": "cloud.google.com/go/datastore", "description": "Cloud Datastore", - "language": "Go", + "language": "go", "client_library_type": "manual", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/datastore/latest", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/datastore/latest", + "release_level": "stable", "library_type": "GAPIC_MANUAL" }, "cloud.google.com/go/datastore/admin/apiv1": { + "api_shortname": "datastore", "distribution_name": "cloud.google.com/go/datastore/admin/apiv1", "description": "Cloud Datastore API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/datastore/admin/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/datastore/latest/admin/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/datastream/apiv1": { + "api_shortname": "datastream", "distribution_name": "cloud.google.com/go/datastream/apiv1", "description": "Datastream API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/datastream/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/datastream/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/datastream/apiv1alpha1": { + "api_shortname": "datastream", "distribution_name": "cloud.google.com/go/datastream/apiv1alpha1", "description": "Datastream API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/datastream/apiv1alpha1", - "release_level": "alpha", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/datastream/latest/apiv1alpha1", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/debugger/apiv2": { + "api_shortname": "clouddebugger", "distribution_name": "cloud.google.com/go/debugger/apiv2", "description": "Stackdriver Debugger API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/debugger/apiv2", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/debugger/apiv2", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/deploy/apiv1": { + "api_shortname": "clouddeploy", "distribution_name": "cloud.google.com/go/deploy/apiv1", - "description": "Google Cloud Deploy API", - "language": "Go", + "description": "Cloud Deploy API", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/deploy/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/deploy/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/dialogflow/apiv2": { + "api_shortname": "dialogflow", "distribution_name": "cloud.google.com/go/dialogflow/apiv2", "description": "Dialogflow API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.goo
gle.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/dialogflow/apiv2", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dialogflow/latest/apiv2", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/dialogflow/apiv2beta1": { + "api_shortname": "dialogflow", "distribution_name": "cloud.google.com/go/dialogflow/apiv2beta1", "description": "Dialogflow API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/dialogflow/apiv2beta1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dialogflow/latest/apiv2beta1", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/dialogflow/cx/apiv3": { + "api_shortname": "dialogflow", "distribution_name": "cloud.google.com/go/dialogflow/cx/apiv3", "description": "Dialogflow API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/dialogflow/cx/apiv3", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dialogflow/latest/cx/apiv3", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/dialogflow/cx/apiv3beta1": { + "api_shortname": "dialogflow", "distribution_name": "cloud.google.com/go/dialogflow/cx/apiv3beta1", "description": "Dialogflow API", - "language": "Go", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dialogflow/latest/cx/apiv3beta1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/discoveryengine/apiv1": { + "api_shortname": "discoveryengine", + "distribution_name": "cloud.google.com/go/discoveryengine/apiv1", + "description": "Discovery Engine API", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/dialogflow/cx/apiv3beta1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/discoveryengine/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/discoveryengine/apiv1beta": { + "api_shortname": "discoveryengine", "distribution_name": "cloud.google.com/go/discoveryengine/apiv1beta", "description": "Discovery Engine API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/discoveryengine/apiv1beta", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/discoveryengine/latest/apiv1beta", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/dlp/apiv2": { + "api_shortname": "dlp", "distribution_name": "cloud.google.com/go/dlp/apiv2", - "description": "Cloud Data Loss Prevention (DLP) API", - "language": "Go", + "description": "Cloud Data Loss Prevention (DLP)", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/dlp/apiv2", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dlp/latest/apiv2", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/documentai/apiv1": { + "api_shortname": "documentai", "distribution_name": "cloud.google.com/go/documentai/apiv1", "description": "Cloud Document AI API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.go
ogle.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/documentai/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/documentai/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/documentai/apiv1beta3": { + "api_shortname": "documentai", "distribution_name": "cloud.google.com/go/documentai/apiv1beta3", "description": "Cloud Document AI API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/documentai/apiv1beta3", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/documentai/latest/apiv1beta3", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/domains/apiv1beta1": { + "api_shortname": "domains", "distribution_name": "cloud.google.com/go/domains/apiv1beta1", "description": "Cloud Domains API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/domains/apiv1beta1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/domains/latest/apiv1beta1", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/edgecontainer/apiv1": { + "api_shortname": "edgecontainer", "distribution_name": "cloud.google.com/go/edgecontainer/apiv1", "description": "Distributed Cloud Edge Container API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/edgecontainer/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/edgecontainer/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/errorreporting": { + "api_shortname": "clouderrorreporting", "distribution_name": "cloud.google.com/go/errorreporting", "description": "Cloud Error Reporting API", - "language": "Go", + "language": "go", "client_library_type": "manual", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/errorreporting/latest", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/errorreporting/latest", + "release_level": "preview", "library_type": "GAPIC_MANUAL" }, "cloud.google.com/go/errorreporting/apiv1beta1": { + "api_shortname": "clouderrorreporting", "distribution_name": "cloud.google.com/go/errorreporting/apiv1beta1", "description": "Error Reporting API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/errorreporting/apiv1beta1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/errorreporting/apiv1beta1", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/essentialcontacts/apiv1": { + "api_shortname": "essentialcontacts", "distribution_name": "cloud.google.com/go/essentialcontacts/apiv1", "description": "Essential Contacts API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/essentialcontacts/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/essentialcontacts/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/eventarc/apiv1": { + "api_shortname": "eventarc", "distribution_name": "cloud.google.com/go/eventarc/apiv1", "description": "Eventarc API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreport
-    "release_level": "ga",
+    "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/eventarc/latest/apiv1",
+    "release_level": "stable",
     "library_type": "GAPIC_AUTO"
   },
   "cloud.google.com/go/eventarc/publishing/apiv1": {
+    "api_shortname": "eventarcpublishing",
     "distribution_name": "cloud.google.com/go/eventarc/publishing/apiv1",
     "description": "Eventarc Publishing API",
-    "language": "Go",
+    "language": "go",
     "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\n…\ncloud.google.com/go/workstations/latest/cloud.google.com/go/eventarc/publishing/apiv1",
-    "release_level": "beta",
+    "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/eventarc/latest/publishing/apiv1",
+    "release_level": "stable",
     "library_type": "GAPIC_AUTO"
   },
   "cloud.google.com/go/filestore/apiv1": {
+    "api_shortname": "file",
     "distribution_name": "cloud.google.com/go/filestore/apiv1",
     "description": "Cloud Filestore API",
-    "language": "Go",
+    "language": "go",
     "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\n…\ncloud.google.com/go/workstations/latest/cloud.google.com/go/filestore/apiv1",
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/filestore/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/filestore/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/firestore": { + "api_shortname": "firestore", "distribution_name": "cloud.google.com/go/firestore", "description": "Cloud Firestore API", - "language": "Go", + "language": "go", "client_library_type": "manual", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/firestore/latest", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/firestore/latest", + "release_level": "stable", "library_type": "GAPIC_MANUAL" }, "cloud.google.com/go/firestore/apiv1": { + "api_shortname": "firestore", "distribution_name": "cloud.google.com/go/firestore/apiv1", "description": "Cloud Firestore API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/firestore/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/firestore/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/functions/apiv1": { + "api_shortname": "cloudfunctions", "distribution_name": "cloud.google.com/go/functions/apiv1", "description": "Cloud Functions API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporti
ng\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/functions/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/functions/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/functions/apiv2": { + "api_shortname": "cloudfunctions", "distribution_name": "cloud.google.com/go/functions/apiv2", "description": "Cloud Functions API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/functions/apiv2", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/functions/latest/apiv2", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/functions/apiv2beta": { + "api_shortname": "cloudfunctions", "distribution_name": "cloud.google.com/go/functions/apiv2beta", "description": "Cloud Functions API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/functions/apiv2beta", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/functions/latest/apiv2beta", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/functions/metadata": { + "api_shortname": "firestore-metadata", "distribution_name": "cloud.google.com/go/functions/metadata", "description": "Cloud Functions", - "language": "Go", + "language": "go", "client_library_type": "manual", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/functions/latest/metadata", - "release_level": "alpha", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/functions/latest/metadata", + "release_level": "preview", "library_type": "CORE" }, - "cloud.google.com/go/gaming/apiv1": { - "distribution_name": "cloud.google.com/go/gaming/apiv1", - "description": "Game Services API", - "language": "Go", - "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/gaming/apiv1", - "release_level": "ga", - "library_type": "GAPIC_AUTO" - }, - "cloud.google.com/go/gaming/apiv1beta": { - "distribution_name": "cloud.google.com/go/gaming/apiv1beta", - "description": "Game Services API", - "language": "Go", - "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming
\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/gaming/apiv1beta", - "release_level": "beta", - "library_type": "GAPIC_AUTO" - }, "cloud.google.com/go/gkebackup/apiv1": { + "api_shortname": "gkebackup", "distribution_name": "cloud.google.com/go/gkebackup/apiv1", "description": "Backup for GKE API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/gkebackup/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/gkebackup/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/gkeconnect/gateway/apiv1beta1": { + "api_shortname": "connectgateway", "distribution_name": "cloud.google.com/go/gkeconnect/gateway/apiv1beta1", "description": "Connect Gateway API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/gkeconnect/gateway/apiv1beta1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/gkeconnect/latest/gateway/apiv1beta1", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/gkehub/apiv1beta1": { + "api_shortname": "gkehub", "distribution_name": "cloud.google.com/go/gkehub/apiv1beta1", "description": "GKE Hub API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/gkehub/apiv1beta1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/gkehub/latest/apiv1beta1", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/gkemulticloud/apiv1": { + "api_shortname": "gkemulticloud", "distribution_name": "cloud.google.com/go/gkemulticloud/apiv1", "description": "Anthos Multi-Cloud API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/gkemulticloud/apiv1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/gkemulticloud/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/gsuiteaddons/apiv1": { + "api_shortname": "gsuiteaddons", "distribution_name": "cloud.google.com/go/gsuiteaddons/apiv1", "description": "Google Workspace Add-ons API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/gsuiteaddons/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/gsuiteaddons/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/iam": { + "api_shortname": "iam", "distribution_name": "cloud.google.com/go/iam", "description": "Cloud IAM", - "language": "Go", + "language": "go", "client_library_type": "manual", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/iam/latest", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/iam/latest", + "release_level": "stable", "library_type": "CORE" }, "cloud.google.com/go/iam/apiv1": { + "api_shortname": "iam-meta-api", "distribution_name": "cloud.google.com/go/iam/apiv1", "description": "IAM Meta API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/iam/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/iam/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/iam/apiv2": { + "api_shortname": "iam", "distribution_name": "cloud.google.com/go/iam/apiv2", "description": "Identity and Access Management (IAM) API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.goog
le.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/iam/apiv2", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/iam/latest/apiv2", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/iam/credentials/apiv1": { + "api_shortname": "iamcredentials", "distribution_name": "cloud.google.com/go/iam/credentials/apiv1", "description": "IAM Service Account Credentials API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/iam/credentials/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/iam/latest/credentials/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/iap/apiv1": { + "api_shortname": "iap", "distribution_name": "cloud.google.com/go/iap/apiv1", "description": "Cloud Identity-Aware Proxy API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporti
ng\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/iap/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/iap/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/ids/apiv1": { + "api_shortname": "ids", "distribution_name": "cloud.google.com/go/ids/apiv1", "description": "Cloud IDS API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/ids/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/ids/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/iot/apiv1": { + "api_shortname": "cloudiot", "distribution_name": "cloud.google.com/go/iot/apiv1", "description": "Cloud IoT API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcon
tacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/iot/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/iot/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/kms/apiv1": { + "api_shortname": "cloudkms", "distribution_name": "cloud.google.com/go/kms/apiv1", "description": "Cloud Key Management Service (KMS) API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/kms/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/kms/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/kms/inventory/apiv1": { + "api_shortname": "kmsinventory", "distribution_name": "cloud.google.com/go/kms/inventory/apiv1", "description": "KMS Inventory API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\nclou
d.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/kms/inventory/apiv1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/kms/latest/inventory/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/language/apiv1": { + "api_shortname": "language", "distribution_name": "cloud.google.com/go/language/apiv1", "description": "Cloud Natural Language API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/language/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/language/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/language/apiv1beta2": { + "api_shortname": "language", "distribution_name": "cloud.google.com/go/language/apiv1beta2", "description": "Cloud Natural Language API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/language/apiv1beta2", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/language/latest/apiv1beta2", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/language/apiv2": { + "api_shortname": "language", + "distribution_name": "cloud.google.com/go/language/apiv2", + "description": "Cloud Natural Language API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/language/latest/apiv2", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/lifesciences/apiv2beta": { + "api_shortname": "lifesciences", "distribution_name": "cloud.google.com/go/lifesciences/apiv2beta", "description": "Cloud Life Sciences API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/lifesciences/apiv2beta", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/lifesciences/latest/apiv2beta", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/logging": { + "api_shortname": "logging", "distribution_name": "cloud.google.com/go/logging", "description": "Cloud Logging API", - "language": "Go", + "language": "go", "client_library_type": "manual", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/logging/latest", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/logging/latest", + "release_level": "stable", "library_type": "GAPIC_MANUAL" }, "cloud.google.com/go/logging/apiv2": { + "api_shortname": "logging", "distribution_name": "cloud.google.com/go/logging/apiv2", "description": "Cloud Logging API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/logging/apiv2", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/logging/latest/apiv2", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/longrunning/autogen": { + "api_shortname": "longrunning", "distribution_name": "cloud.google.com/go/longrunning/autogen", "description": "Long Running Operations API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/longrunning/autogen", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/longrunning/latest/autogen", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/managedidentities/apiv1": { + "api_shortname": "managedidentities", "distribution_name": "cloud.google.com/go/managedidentities/apiv1", "description": "Managed Service for Microsoft Active Directory API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/managedidentities/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/managedidentities/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/maps/addressvalidation/apiv1": { + "api_shortname": "addressvalidation", "distribution_name": "cloud.google.com/go/maps/addressvalidation/apiv1", "description": "Address Validation API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/maps/addressvalidation/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/maps/latest/addressvalidation/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/maps/fleetengine/apiv1": { + "api_shortname": "fleetengine", + "distribution_name": "cloud.google.com/go/maps/fleetengine/apiv1", + "description": "Local Rides and Deliveries API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/maps/latest/fleetengine/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/maps/fleetengine/delivery/apiv1": { + "api_shortname": "fleetengine", + "distribution_name": "cloud.google.com/go/maps/fleetengine/delivery/apiv1", + "description": "Last Mile Fleet Solution Delivery API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/maps/latest/fleetengine/delivery/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/maps/mapsplatformdatasets/apiv1alpha": { + "api_shortname": "mapsplatformdatasets", "distribution_name": "cloud.google.com/go/maps/mapsplatformdatasets/apiv1alpha", "description": "Maps Platform Datasets API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/maps/mapsplatformdatasets/apiv1alpha", - "release_level": "alpha", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/maps/latest/mapsplatformdatasets/apiv1alpha", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/maps/places/apiv1": { + "api_shortname": "places", + "distribution_name": "cloud.google.com/go/maps/places/apiv1", + "description": "Places API (New)", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/maps/latest/places/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/maps/routing/apiv2": { + "api_shortname": "routes", "distribution_name": "cloud.google.com/go/maps/routing/apiv2", "description": "Routes API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/maps/routing/apiv2", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/maps/latest/routing/apiv2", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/mediatranslation/apiv1beta1": { + "api_shortname": "mediatranslation", "distribution_name": "cloud.google.com/go/mediatranslation/apiv1beta1", "description": "Media Translation API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/mediatranslation/apiv1beta1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/mediatranslation/latest/apiv1beta1", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/memcache/apiv1": { + "api_shortname": "memcache", "distribution_name": "cloud.google.com/go/memcache/apiv1", "description": "Cloud Memorystore for Memcached API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/memcache/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/memcache/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/memcache/apiv1beta2": { + "api_shortname": "memcache", "distribution_name": "cloud.google.com/go/memcache/apiv1beta2", "description": "Cloud Memorystore for Memcached API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/memcache/apiv1beta2", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/memcache/latest/apiv1beta2", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/metastore/apiv1": { + "api_shortname": "metastore", "distribution_name": "cloud.google.com/go/metastore/apiv1", "description": "Dataproc Metastore API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/metastore/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/metastore/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/metastore/apiv1alpha": { + "api_shortname": "metastore", "distribution_name": "cloud.google.com/go/metastore/apiv1alpha", "description": "Dataproc Metastore API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/metastore/apiv1alpha", - "release_level": "alpha", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/metastore/latest/apiv1alpha", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/metastore/apiv1beta": { + "api_shortname": "metastore", "distribution_name": "cloud.google.com/go/metastore/apiv1beta", "description": "Dataproc Metastore API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/metastore/apiv1beta", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/metastore/latest/apiv1beta", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/migrationcenter/apiv1": { + "api_shortname": "migrationcenter", + "distribution_name": "cloud.google.com/go/migrationcenter/apiv1", + "description": "Migration Center API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/migrationcenter/latest/apiv1", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/monitoring/apiv3/v2": { + "api_shortname": "monitoring", "distribution_name": "cloud.google.com/go/monitoring/apiv3/v2", "description": "Cloud Monitoring API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/monitoring/apiv3/v2", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/monitoring/latest/apiv3/v2", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/monitoring/dashboard/apiv1": { + "api_shortname": "monitoring", "distribution_name": "cloud.google.com/go/monitoring/dashboard/apiv1", "description": "Cloud Monitoring API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/monitoring/dashboard/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/monitoring/latest/dashboard/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/monitoring/metricsscope/apiv1": { + "api_shortname": "monitoring", "distribution_name": "cloud.google.com/go/monitoring/metricsscope/apiv1", "description": "Cloud Monitoring API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/monitoring/metricsscope/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/monitoring/latest/metricsscope/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/netapp/apiv1": { + "api_shortname": "netapp", + "distribution_name": "cloud.google.com/go/netapp/apiv1", + "description": "NetApp API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/netapp/latest/apiv1", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/networkconnectivity/apiv1": { + "api_shortname": "networkconnectivity", "distribution_name": "cloud.google.com/go/networkconnectivity/apiv1", "description": "Network Connectivity API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/networkconnectivity/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/networkconnectivity/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/networkconnectivity/apiv1alpha1": { + "api_shortname": "networkconnectivity", "distribution_name": "cloud.google.com/go/networkconnectivity/apiv1alpha1", "description": "Network Connectivity API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/networkconnectivity/apiv1alpha1", - "release_level": "alpha", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/networkconnectivity/latest/apiv1alpha1", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/networkmanagement/apiv1": { + "api_shortname": "networkmanagement", "distribution_name": "cloud.google.com/go/networkmanagement/apiv1", "description": "Network Management API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/networkmanagement/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/networkmanagement/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/networksecurity/apiv1beta1": { + "api_shortname": "networksecurity", "distribution_name": "cloud.google.com/go/networksecurity/apiv1beta1", "description": "Network Security API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/networksecurity/apiv1beta1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/networksecurity/latest/apiv1beta1", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/notebooks/apiv1": { + "api_shortname": "notebooks", "distribution_name": "cloud.google.com/go/notebooks/apiv1", "description": "Notebooks API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/notebooks/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/notebooks/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/notebooks/apiv1beta1": { + "api_shortname": "notebooks", "distribution_name": "cloud.google.com/go/notebooks/apiv1beta1", "description": "Notebooks API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreportin
g\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/notebooks/apiv1beta1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/notebooks/latest/apiv1beta1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/notebooks/apiv2": { + "api_shortname": "notebooks", + "distribution_name": "cloud.google.com/go/notebooks/apiv2", + "description": "Notebooks API", + "language": "go", + "client_library_type": "generated", + 
"client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/notebooks/latest/apiv2", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/optimization/apiv1": { + "api_shortname": "cloudoptimization", "distribution_name": "cloud.google.com/go/optimization/apiv1", "description": "Cloud Optimization API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.goo
gle.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/optimization/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/optimization/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/orchestration/airflow/service/apiv1": { + "api_shortname": "composer", "distribution_name": "cloud.google.com/go/orchestration/airflow/service/apiv1", "description": "Cloud Composer API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/orchestration/airflow/service/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/orchestration/latest/airflow/service/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/orgpolicy/apiv2": { + "api_shortname": "orgpolicy", "distribution_name": "cloud.google.com/go/orgpolicy/apiv2", "description": "Organization Policy API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/orgpolicy/apiv2", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/orgpolicy/latest/apiv2", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/osconfig/agentendpoint/apiv1": { + "api_shortname": "osconfig", "distribution_name": "cloud.google.com/go/osconfig/agentendpoint/apiv1", "description": "OS Config API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/osconfig/agentendpoint/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/osconfig/latest/agentendpoint/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/osconfig/agentendpoint/apiv1beta": { + "api_shortname": "osconfig", "distribution_name": "cloud.google.com/go/osconfig/agentendpoint/apiv1beta", "description": "OS Config API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/osconfig/agentendpoint/apiv1beta", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/osconfig/latest/agentendpoint/apiv1beta", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/osconfig/apiv1": { + "api_shortname": "osconfig", "distribution_name": "cloud.google.com/go/osconfig/apiv1", "description": "OS Config API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/osconfig/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/osconfig/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/osconfig/apiv1alpha": { + "api_shortname": "osconfig", "distribution_name": "cloud.google.com/go/osconfig/apiv1alpha", "description": "OS Config API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncl
oud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/osconfig/apiv1alpha", - "release_level": "alpha", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/osconfig/latest/apiv1alpha", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/osconfig/apiv1beta": { + "api_shortname": "osconfig", "distribution_name": "cloud.google.com/go/osconfig/apiv1beta", "description": "OS Config API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/osconfig/apiv1beta", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/osconfig/latest/apiv1beta", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/oslogin/apiv1": { + "api_shortname": "oslogin", "distribution_name": "cloud.google.com/go/oslogin/apiv1", "description": "Cloud OS Login API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\
ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/oslogin/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/oslogin/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/oslogin/apiv1beta": { + "api_shortname": "oslogin", "distribution_name": "cloud.google.com/go/oslogin/apiv1beta", "description": "Cloud OS Login API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/oslogin/apiv1beta", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/oslogin/latest/apiv1beta", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/phishingprotection/apiv1beta1": { + "api_shortname": "phishingprotection", "distribution_name": "cloud.google.com/go/phishingprotection/apiv1beta1", "description": "Phishing Protection API", - "language": "Go", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/phishingprotection/latest/apiv1beta1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/policysimulator/apiv1": { + "api_shortname": "policysimulator", + "distribution_name": "cloud.google.com/go/policysimulator/apiv1", + "description": "Policy Simulator API", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/phishingprotection/apiv1beta1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/policysimulator/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/policytroubleshooter/apiv1": { + "api_shortname": "policytroubleshooter", "distribution_name": "cloud.google.com/go/policytroubleshooter/apiv1", "description": "Policy Troubleshooter API", - "language": "Go", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/policytroubleshooter/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/policytroubleshooter/iam/apiv3": { + "api_shortname": "policytroubleshooter", + "distribution_name": "cloud.google.com/go/policytroubleshooter/iam/apiv3", + "description": "Policy Troubleshooter API", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/policytroubleshooter/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/policytroubleshooter/latest/iam/apiv3", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/privatecatalog/apiv1beta1": { + "api_shortname": "cloudprivatecatalog", "distribution_name": "cloud.google.com/go/privatecatalog/apiv1beta1", "description": "Cloud Private Catalog API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/privatecatalog/apiv1beta1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/privatecatalog/latest/apiv1beta1", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/profiler": { + "api_shortname": "cloudprofiler", "distribution_name": "cloud.google.com/go/profiler", "description": "Cloud Profiler", - "language": "Go", + "language": "go", "client_library_type": "manual", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/profiler/latest", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/profiler/latest", + "release_level": "stable", "library_type": "AGENT" }, "cloud.google.com/go/pubsub": { + "api_shortname": "pubsub", "distribution_name": "cloud.google.com/go/pubsub", "description": "Cloud PubSub", - "language": "Go", + "language": "go", "client_library_type": "manual", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/pubsub/latest", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/pubsub/latest", + "release_level": "stable", "library_type": "GAPIC_MANUAL" }, "cloud.google.com/go/pubsub/apiv1": { + "api_shortname": "pubsub", "distribution_name": "cloud.google.com/go/pubsub/apiv1", "description": "Cloud Pub/Sub API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/pubsub/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/pubsub/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/pubsublite": { + "api_shortname": "pubsublite", "distribution_name": "cloud.google.com/go/pubsublite", "description": "Cloud PubSub Lite", - "language": "Go", + "language": "go", "client_library_type": "manual", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/pubsublite/latest", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/pubsublite/latest", + "release_level": "stable", "library_type": "GAPIC_MANUAL" }, "cloud.google.com/go/pubsublite/apiv1": { + "api_shortname": "pubsublite", "distribution_name": "cloud.google.com/go/pubsublite/apiv1", "description": "Pub/Sub Lite API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/pubsublite/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/pubsublite/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/rapidmigrationassessment/apiv1": { + "api_shortname": "rapidmigrationassessment", + "distribution_name": "cloud.google.com/go/rapidmigrationassessment/apiv1", + "description": "Rapid Migration Assessment API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/rapidmigrationassessment/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/recaptchaenterprise/v2/apiv1": { + "api_shortname": "recaptchaenterprise", "distribution_name": "cloud.google.com/go/recaptchaenterprise/v2/apiv1", "description": "reCAPTCHA Enterprise API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/recaptchaenterprise/v2/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/recaptchaenterprise/v2/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/recaptchaenterprise/v2/apiv1beta1": { + "api_shortname": "recaptchaenterprise", "distribution_name": "cloud.google.com/go/recaptchaenterprise/v2/apiv1beta1", "description": "reCAPTCHA Enterprise API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/recaptchaenterprise/v2/apiv1beta1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/recaptchaenterprise/v2/latest/apiv1beta1", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/recommendationengine/apiv1beta1": { + "api_shortname": "recommendationengine", "distribution_name": "cloud.google.com/go/recommendationengine/apiv1beta1", "description": "Recommendations AI", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/recommendationengine/apiv1beta1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/recommendationengine/latest/apiv1beta1", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/recommender/apiv1": { + "api_shortname": "recommender", "distribution_name": "cloud.google.com/go/recommender/apiv1", "description": "Recommender API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/recommender/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/recommender/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/recommender/apiv1beta1": { + "api_shortname": "recommender", "distribution_name": "cloud.google.com/go/recommender/apiv1beta1", "description": "Recommender API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/recommender/apiv1beta1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/recommender/latest/apiv1beta1", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/redis/apiv1": { + "api_shortname": "redis", "distribution_name": "cloud.google.com/go/redis/apiv1", "description": "Google Cloud Memorystore for Redis API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/redis/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/redis/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/redis/apiv1beta1": { + "api_shortname": "redis", "distribution_name": "cloud.google.com/go/redis/apiv1beta1", "description": "Google Cloud Memorystore for Redis API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/redis/apiv1beta1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/redis/latest/apiv1beta1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/redis/cluster/apiv1": { + "api_shortname": "redis", + "distribution_name": "cloud.google.com/go/redis/cluster/apiv1", + "description": "Google Cloud Memorystore for Redis API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/redis/latest/cluster/apiv1", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/resourcemanager/apiv2": { + "api_shortname": "cloudresourcemanager", "distribution_name": "cloud.google.com/go/resourcemanager/apiv2", "description": "Cloud Resource Manager API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/resourcemanager/apiv2", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/resourcemanager/latest/apiv2", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/resourcemanager/apiv3": { + "api_shortname": "cloudresourcemanager", "distribution_name": "cloud.google.com/go/resourcemanager/apiv3", "description": "Cloud Resource Manager API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/resourcemanager/apiv3", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/resourcemanager/latest/apiv3", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/resourcesettings/apiv1": { + "api_shortname": "resourcesettings", "distribution_name": "cloud.google.com/go/resourcesettings/apiv1", "description": "Resource Settings API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/resourcesettings/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/resourcesettings/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/retail/apiv2": { + "api_shortname": "retail", "distribution_name": "cloud.google.com/go/retail/apiv2", "description": "Retail API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud
.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/retail/apiv2", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/retail/latest/apiv2", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/retail/apiv2alpha": { + "api_shortname": "retail", "distribution_name": "cloud.google.com/go/retail/apiv2alpha", "description": "Retail API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/retail/latest/apiv2alpha",
-    "release_level": "alpha",
+    "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/retail/latest/apiv2alpha",
+    "release_level": "preview",
     "library_type": "GAPIC_AUTO"
   },
   "cloud.google.com/go/retail/apiv2beta": {
+    "api_shortname": "retail",
     "distribution_name": "cloud.google.com/go/retail/apiv2beta",
     "description": "Retail API",
-    "language": "Go",
+    "language": "go",
     "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/retail/latest/apiv2beta",
-    "release_level": "beta",
+    "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/retail/latest/apiv2beta",
+    "release_level": "preview",
     "library_type": "GAPIC_AUTO"
   },
   "cloud.google.com/go/rpcreplay": {
+    "api_shortname": "rpcreplay",
     "distribution_name": "cloud.google.com/go/rpcreplay",
     "description": "RPC Replay",
-    "language": "Go",
+    "language": "go",
     "client_library_type": "manual",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/rpcreplay",
-    "release_level": "ga",
+    "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/rpcreplay",
+    "release_level": "stable",
     "library_type": "OTHER"
   },
   "cloud.google.com/go/run/apiv2": {
+    "api_shortname": "run",
     "distribution_name": "cloud.google.com/go/run/apiv2",
     "description": "Cloud Run Admin API",
-    "language": "Go",
+    "language": "go",
     "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/run/latest/apiv2",
-    "release_level": "ga",
+    "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/run/latest/apiv2",
+    "release_level": "stable",
     "library_type": "GAPIC_AUTO"
   },
   "cloud.google.com/go/scheduler/apiv1": {
+    "api_shortname": "cloudscheduler",
     "distribution_name": "cloud.google.com/go/scheduler/apiv1",
     "description": "Cloud Scheduler API",
-    "language": "Go",
+    "language": "go",
     "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/scheduler/latest/apiv1",
-    "release_level": "ga",
+    "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/scheduler/latest/apiv1",
+    "release_level": "stable",
     "library_type": "GAPIC_AUTO"
   },
   "cloud.google.com/go/scheduler/apiv1beta1": {
+    "api_shortname": "cloudscheduler",
     "distribution_name": "cloud.google.com/go/scheduler/apiv1beta1",
     "description": "Cloud Scheduler API",
-    "language": "Go",
+    "language": "go",
     "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/scheduler/latest/apiv1beta1",
-    "release_level": "beta",
+    "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/scheduler/latest/apiv1beta1",
+    "release_level": "preview",
     "library_type": "GAPIC_AUTO"
   },
   "cloud.google.com/go/secretmanager/apiv1": {
+    "api_shortname": "secretmanager",
     "distribution_name": "cloud.google.com/go/secretmanager/apiv1",
     "description": "Secret Manager API",
-    "language": "Go",
+    "language": "go",
     "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/secretmanager/latest/apiv1",
-    "release_level": "ga",
+    "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/secretmanager/latest/apiv1",
+    "release_level": "stable",
+    "library_type": "GAPIC_AUTO"
+  },
+  "cloud.google.com/go/securesourcemanager/apiv1": {
+    "api_shortname": "securesourcemanager",
+    "distribution_name": "cloud.google.com/go/securesourcemanager/apiv1",
+    "description": "Secure Source Manager API",
+    "language": "go",
+    "client_library_type": "generated",
+    "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/securesourcemanager/latest/apiv1",
+    "release_level": "preview",
     "library_type": "GAPIC_AUTO"
   },
   "cloud.google.com/go/security/privateca/apiv1": {
+    "api_shortname": "privateca",
     "distribution_name": "cloud.google.com/go/security/privateca/apiv1",
     "description": "Certificate Authority API",
-    "language": "Go",
+    "language": "go",
     "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/security/latest/privateca/apiv1",
-    "release_level": "ga",
+    "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/security/latest/privateca/apiv1",
+    "release_level": "stable",
     "library_type": "GAPIC_AUTO"
   },
   "cloud.google.com/go/security/publicca/apiv1beta1": {
+    "api_shortname": "publicca",
     "distribution_name": "cloud.google.com/go/security/publicca/apiv1beta1",
     "description": "Public Certificate Authority API",
-    "language": "Go",
+    "language": "go",
     "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/security/latest/publicca/apiv1beta1",
-    "release_level": "beta",
+    "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/security/latest/publicca/apiv1beta1",
+    "release_level": "preview",
     "library_type": "GAPIC_AUTO"
   },
   "cloud.google.com/go/securitycenter/apiv1": {
+    "api_shortname": "securitycenter",
     "distribution_name": "cloud.google.com/go/securitycenter/apiv1",
     "description": "Security Command Center API",
-    "language": "Go",
+    "language": "go",
     "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/securitycenter/latest/apiv1",
-    "release_level": "ga",
+    "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/securitycenter/latest/apiv1",
+    "release_level": "stable",
     "library_type": "GAPIC_AUTO"
   },
   "cloud.google.com/go/securitycenter/apiv1beta1": {
+    "api_shortname": "securitycenter",
     "distribution_name": "cloud.google.com/go/securitycenter/apiv1beta1",
     "description": "Security Command Center API",
-    "language": "Go",
+    "language": "go",
     "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/securitycenter/latest/apiv1beta1",
-    "release_level": "beta",
+    "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/securitycenter/latest/apiv1beta1",
+    "release_level": "preview",
     "library_type": "GAPIC_AUTO"
   },
   "cloud.google.com/go/securitycenter/apiv1p1beta1": {
+    "api_shortname": "securitycenter",
     "distribution_name": "cloud.google.com/go/securitycenter/apiv1p1beta1",
     "description": "Security Command Center API",
-    "language": "Go",
+    "language": "go",
     "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/securitycenter/latest/apiv1p1beta1",
-    "release_level": "beta",
+    "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/securitycenter/latest/apiv1p1beta1",
+    "release_level": "preview",
     "library_type": "GAPIC_AUTO"
   },
   "cloud.google.com/go/securitycenter/settings/apiv1beta1": {
+    "api_shortname": "securitycenter",
     "distribution_name": "cloud.google.com/go/securitycenter/settings/apiv1beta1",
     "description": "Cloud Security Command Center API",
-    "language": "Go",
+    "language": "go",
     "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/securitycenter/latest/settings/apiv1beta1",
-    "release_level": "beta",
+    "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/securitycenter/latest/settings/apiv1beta1",
+    "release_level": "preview",
     "library_type": "GAPIC_AUTO"
   },
   "cloud.google.com/go/servicecontrol/apiv1": {
+    "api_shortname": "servicecontrol",
     "distribution_name": "cloud.google.com/go/servicecontrol/apiv1",
     "description": "Service Control API",
-    "language": "Go",
+    "language": "go",
     "client_library_type": "generated",
-    "docs_url":
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/servicecontrol/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/servicecontrol/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/servicedirectory/apiv1": { + "api_shortname": "servicedirectory", "distribution_name": "cloud.google.com/go/servicedirectory/apiv1", "description": "Service Directory API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/servicedirectory/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/servicedirectory/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/servicedirectory/apiv1beta1": { + "api_shortname": "servicedirectory", "distribution_name": "cloud.google.com/go/servicedirectory/apiv1beta1", "description": "Service Directory API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/servicedirectory/apiv1beta1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/servicedirectory/latest/apiv1beta1", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/servicemanagement/apiv1": { + "api_shortname": "servicemanagement", "distribution_name": "cloud.google.com/go/servicemanagement/apiv1", "description": "Service Management API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/servicemanagement/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/servicemanagement/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/serviceusage/apiv1": { + "api_shortname": "serviceusage", "distribution_name": "cloud.google.com/go/serviceusage/apiv1", "description": "Service Usage API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/serviceusage/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/serviceusage/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/shell/apiv1": { + "api_shortname": "cloudshell", "distribution_name": "cloud.google.com/go/shell/apiv1", "description": "Cloud Shell API", - "language": "Go", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/shell/latest/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/shopping/merchant/inventories/apiv1beta": { + "api_shortname": "merchantapi", + "distribution_name": "cloud.google.com/go/shopping/merchant/inventories/apiv1beta", + "description": "Merchant API", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/shell/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/shopping/latest/merchant/inventories/apiv1beta", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/spanner": { + "api_shortname": "spanner", "distribution_name": "cloud.google.com/go/spanner", "description": "Cloud Spanner", - "language": "Go", + "language": "go", "client_library_type": "manual", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/spanner/latest", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/spanner/latest", + "release_level": "stable", "library_type": "GAPIC_MANUAL" }, "cloud.google.com/go/spanner/admin/database/apiv1": { + "api_shortname": "spanner", "distribution_name": "cloud.google.com/go/spanner/admin/database/apiv1", "description": "Cloud Spanner API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/spanner/admin/database/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/spanner/latest/admin/database/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/spanner/admin/instance/apiv1": { + "api_shortname": "spanner", "distribution_name": "cloud.google.com/go/spanner/admin/instance/apiv1", "description": "Cloud Spanner Instance Admin API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/spanner/admin/instance/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/spanner/latest/admin/instance/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/spanner/apiv1": { + "api_shortname": "spanner", "distribution_name": "cloud.google.com/go/spanner/apiv1", "description": "Cloud Spanner API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/spanner/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/spanner/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/speech/apiv1": { + "api_shortname": "speech", "distribution_name": "cloud.google.com/go/speech/apiv1", "description": "Cloud Speech-to-Text API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.goo
gle.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/speech/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/speech/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/speech/apiv1p1beta1": { + "api_shortname": "speech", "distribution_name": "cloud.google.com/go/speech/apiv1p1beta1", "description": "Cloud Speech-to-Text API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/speech/apiv1p1beta1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/speech/latest/apiv1p1beta1", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/speech/apiv2": { + "api_shortname": "speech", "distribution_name": "cloud.google.com/go/speech/apiv2", "description": "Cloud Speech-to-Text API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorrepor
ting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/speech/apiv2", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/speech/latest/apiv2", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/storage": { + "api_shortname": "storage", "distribution_name": "cloud.google.com/go/storage", "description": "Cloud Storage (GCS)", - "language": "Go", + "language": "go", "client_library_type": "manual", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go/storage/latest", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/storage/latest", + "release_level": "stable", "library_type": "GAPIC_MANUAL" }, "cloud.google.com/go/storage/internal/apiv2": { + "api_shortname": "storage", "distribution_name": "cloud.google.com/go/storage/internal/apiv2", "description": "Cloud Storage API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/g
o/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/storage/internal/apiv2", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/storage/latest/internal/apiv2", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/storageinsights/apiv1": { + "api_shortname": "storageinsights", "distribution_name": "cloud.google.com/go/storageinsights/apiv1", "description": "Storage Insights API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/storageinsights/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/storageinsights/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/storagetransfer/apiv1": { + "api_shortname": "storagetransfer", "distribution_name": "cloud.google.com/go/storagetransfer/apiv1", "description": "Storage Transfer API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/storagetransfer/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/storagetransfer/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/support/apiv2": { + "api_shortname": "cloudsupport", "distribution_name": "cloud.google.com/go/support/apiv2", "description": "Google Cloud Support API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/support/apiv2", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/support/latest/apiv2", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/talent/apiv4": { + "api_shortname": "jobs", "distribution_name": "cloud.google.com/go/talent/apiv4", "description": "Cloud Talent Solution API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.go
ogle.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/talent/apiv4", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/talent/latest/apiv4", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/talent/apiv4beta1": { + "api_shortname": "jobs", "distribution_name": "cloud.google.com/go/talent/apiv4beta1", "description": "Cloud Talent Solution API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/talent/apiv4beta1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/talent/latest/apiv4beta1", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/texttospeech/apiv1": { + "api_shortname": "texttospeech", "distribution_name": "cloud.google.com/go/texttospeech/apiv1", "description": "Cloud Text-to-Speech API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/texttospeech/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/texttospeech/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/tpu/apiv1": { + "api_shortname": "tpu", "distribution_name": "cloud.google.com/go/tpu/apiv1", "description": "Cloud TPU API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go
/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/tpu/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/tpu/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/trace/apiv1": { + "api_shortname": "cloudtrace", "distribution_name": "cloud.google.com/go/trace/apiv1", "description": "Stackdriver Trace API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
[...module list...]/latest/cloud.google.com/go/trace/apiv1",
-    "release_level": "ga",
+    "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/trace/latest/apiv1",
+    "release_level": "stable",
     "library_type": "GAPIC_AUTO"
   },
   "cloud.google.com/go/trace/apiv2": {
+    "api_shortname": "cloudtrace",
     "distribution_name": "cloud.google.com/go/trace/apiv2",
     "description": "Stackdriver Trace API",
-    "language": "Go",
+    "language": "go",
     "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go[...module list...]/latest/cloud.google.com/go/trace/apiv2",
-    "release_level": "beta",
+    "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/trace/latest/apiv2",
+    "release_level": "preview",
     "library_type": "GAPIC_AUTO"
   },
   "cloud.google.com/go/translate/apiv3": {
+    "api_shortname": "translate",
     "distribution_name": "cloud.google.com/go/translate/apiv3",
     "description": "Cloud Translation API",
-    "language": "Go",
+    "language": "go",
     "client_library_type": "generated",
-    "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go[...module list...]/latest/cloud.google.com/go/translate/apiv3",
-    "release_level": "ga",
+    "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/translate/latest/apiv3",
+    "release_level": "stable",
     "library_type": "GAPIC_AUTO"
   },
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/video/livestream/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/video/latest/livestream/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/video/stitcher/apiv1": { + "api_shortname": "videostitcher", "distribution_name": "cloud.google.com/go/video/stitcher/apiv1", "description": "Video Stitcher API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/video/stitcher/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/video/latest/stitcher/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/video/transcoder/apiv1": { + "api_shortname": "transcoder", "distribution_name": "cloud.google.com/go/video/transcoder/apiv1", "description": "Transcoder API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/video/transcoder/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/video/latest/transcoder/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/videointelligence/apiv1": { + "api_shortname": "videointelligence", "distribution_name": "cloud.google.com/go/videointelligence/apiv1", "description": "Cloud Video Intelligence API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/videointelligence/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/videointelligence/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/videointelligence/apiv1beta2": { + "api_shortname": "videointelligence", "distribution_name": "cloud.google.com/go/videointelligence/apiv1beta2", "description": "Google Cloud Video Intelligence API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/videointelligence/apiv1beta2", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/videointelligence/latest/apiv1beta2", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/videointelligence/apiv1p3beta1": { + "api_shortname": "videointelligence", "distribution_name": "cloud.google.com/go/videointelligence/apiv1p3beta1", "description": "Cloud Video Intelligence API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/videointelligence/apiv1p3beta1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/videointelligence/latest/apiv1p3beta1", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/vision/v2/apiv1": { + "api_shortname": "vision", "distribution_name": "cloud.google.com/go/vision/v2/apiv1", "description": "Cloud Vision API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/vision/v2/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/vision/v2/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/vision/v2/apiv1p1beta1": { + "api_shortname": "vision", "distribution_name": "cloud.google.com/go/vision/v2/apiv1p1beta1", "description": "Cloud Vision API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/vision/v2/apiv1p1beta1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/vision/v2/latest/apiv1p1beta1", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/vmmigration/apiv1": { + "api_shortname": "vmmigration", "distribution_name": "cloud.google.com/go/vmmigration/apiv1", "description": "VM Migration API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/vmmigration/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/vmmigration/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/vmwareengine/apiv1": { + "api_shortname": "vmwareengine", "distribution_name": "cloud.google.com/go/vmwareengine/apiv1", "description": "VMware Engine API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/vmwareengine/apiv1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/vmwareengine/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/vpcaccess/apiv1": { + "api_shortname": "vpcaccess", "distribution_name": "cloud.google.com/go/vpcaccess/apiv1", "description": "Serverless VPC Access API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/vpcaccess/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/vpcaccess/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/webrisk/apiv1": { + "api_shortname": "webrisk", "distribution_name": "cloud.google.com/go/webrisk/apiv1", "description": "Web Risk API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.c
om/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/webrisk/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/webrisk/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/webrisk/apiv1beta1": { + "api_shortname": "webrisk", "distribution_name": "cloud.google.com/go/webrisk/apiv1beta1", "description": "Web Risk API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/webrisk/apiv1beta1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/webrisk/latest/apiv1beta1", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/websecurityscanner/apiv1": { + "api_shortname": "websecurityscanner", "distribution_name": "cloud.google.com/go/websecurityscanner/apiv1", "description": "Web Security Scanner API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/websecurityscanner/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/websecurityscanner/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/workflows/apiv1": { + "api_shortname": "workflows", "distribution_name": "cloud.google.com/go/workflows/apiv1", "description": "Workflows API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/workflows/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/workflows/latest/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/workflows/apiv1beta": { + "api_shortname": "workflows", "distribution_name": "cloud.google.com/go/workflows/apiv1beta", "description": "Workflows API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\
ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/workflows/apiv1beta", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/workflows/latest/apiv1beta", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/workflows/executions/apiv1": { + "api_shortname": "workflowexecutions", "distribution_name": "cloud.google.com/go/workflows/executions/apiv1", "description": "Workflow Executions API", - "language": "Go", + "language": "go", 
"client_library_type": "generated", - "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profile
r\ncloud.google.com/go/pubsub\ncloud.google.com/go/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/workflows/executions/apiv1", - "release_level": "ga", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/workflows/latest/executions/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/workflows/executions/apiv1beta": { + "api_shortname": "workflowexecutions", "distribution_name": "cloud.google.com/go/workflows/executions/apiv1beta", "description": "Workflow Executions API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/workflows/executions/apiv1beta", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/workflows/latest/executions/apiv1beta", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/workstations/apiv1": { + "api_shortname": "workstations", "distribution_name": "cloud.google.com/go/workstations/apiv1", "description": "Cloud Workstations API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/workstations/apiv1", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/workstations/latest/apiv1", + "release_level": "preview", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/workstations/apiv1beta": { + "api_shortname": "workstations", "distribution_name": "cloud.google.com/go/workstations/apiv1beta", "description": "Cloud Workstations API", - "language": "Go", + "language": "go", "client_library_type": "generated", - "docs_url": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go\ncloud.google.com/go/accessapproval\ncloud.google.com/go/accesscontextmanager\ncloud.google.com/go/advisorynotifications\ncloud.google.com/go/aiplatform\ncloud.google.com/go/alloydb\ncloud.google.com/go/analytics\ncloud.google.com/go/apigateway\ncloud.google.com/go/apigeeconnect\ncloud.google.com/go/apigeeregistry\ncloud.google.com/go/apikeys\ncloud.google.com/go/appengine\ncloud.google.com/go/area120\ncloud.google.com/go/artifactregistry\ncloud.google.com/go/asset\ncloud.google.com/go/assuredworkloads\ncloud.google.com/go/automl\ncloud.google.com/go/baremetalsolution\ncloud.google.com/go/batch\ncloud.google.com/go/beyondcorp\ncloud.google.com/go/bigquery\ncloud.google.com/go/bigtable\ncloud.google.com/go/billing\ncloud.google.com/go/binaryauthorization\ncloud.google.com/go/certificatemanager\ncloud.google.com/go/channel\ncloud.google.com/go/cloudbuild\ncloud.google.com/go/clouddms\ncloud.google.com/go/cloudtasks\ncloud.google.com/go/compute\ncloud.google.com/go/compute/metadata\ncloud.google.com/go/confidentialcomputing\ncloud.google.com/go/contactcenterinsights\ncloud.google.com/go/container\ncloud.google.com/go/containeranalysis\ncloud.google.com/go/datacatalog\ncloud.google.com/go/dataflow\ncloud.google.com/go/dataform\ncloud.google.com/go/datafusion\ncloud.google.com/go/datalabeling\ncloud.google.com/go/dataplex\ncloud.google.com/go/dataproc/v2\ncloud.google.com/go/dataqna\ncloud.google.com/go/datastore\ncloud.google.com/go/datastream\ncloud.google.com/go/deploy\ncloud.google.com/go/dialogflow\ncloud.google.com/go/discoveryengine\ncloud.google.com/go/dlp\ncloud.google.com/go/documentai\ncloud.google.com/go/domains\ncloud.google.com/go/edgecontainer\ncloud.google.com/go/errorreporting\ncloud.google.com/go/essentialcontacts\ncloud.google.com/go/eventarc\ncloud.google.com/go/filestore\ncloud.google.com/go/firestore\ncloud.google.com/go/functions\ncloud.google.com/go/gaming\ncloud.google.com/go/gkebackup\ncloud.google.com/go/gkeconnect\ncloud.google.com/go/gkehub\ncloud.google.com/go/gkemulticloud\ncloud.google.com/go/grafeas\ncloud.google.com/go/gsuiteaddons\ncloud.google.com/go/iam\ncloud.google.com/go/iap\ncloud.google.com/go/ids\ncloud.google.com/go/internal/actions\ncloud.google.com/go/internal/aliasfix\ncloud.google.com/go/internal/aliasgen\ncloud.google.com/go/internal/carver\ncloud.google.com/go/internal/examples/fake\ncloud.google.com/go/internal/examples/mock\ncloud.google.com/go/internal/gapicgen\ncloud.google.com/go/internal/generated\ncloud.google.com/go/internal/gensnippets\ncloud.google.com/go/internal/godocfx\ncloud.google.com/go/internal/postprocessor\ncloud.google.com/go/iot\ncloud.google.com/go/kms\ncloud.google.com/go/language\ncloud.google.com/go/lifesciences\ncloud.google.com/go/logging\ncloud.google.com/go/longrunning\ncloud.google.com/go/managedidentities\ncloud.google.com/go/maps\ncloud.google.com/go/mediatranslation\ncloud.google.com/go/memcache\ncloud.google.com/go/metastore\ncloud.google.com/go/monitoring\ncloud.google.com/go/networkconnectivity\ncloud.google.com/go/networkmanagement\ncloud.google.com/go/networksecurity\ncloud.google.com/go/notebooks\ncloud.google.com/go/optimization\ncloud.google.com/go/orchestration\ncloud.google.com/go/orgpolicy\ncloud.google.com/go/osconfig\ncloud.google.com/go/oslogin\ncloud.google.com/go/phishingprotection\ncloud.google.com/go/policytroubleshooter\ncloud.google.com/go/privatecatalog\ncloud.google.com/go/profiler\ncloud.google.com/go/pubsub\ncloud.google.com/go
/pubsublite\ncloud.google.com/go/recaptchaenterprise\ncloud.google.com/go/recaptchaenterprise/v2\ncloud.google.com/go/recommendationengine\ncloud.google.com/go/recommender\ncloud.google.com/go/redis\ncloud.google.com/go/resourcemanager\ncloud.google.com/go/resourcesettings\ncloud.google.com/go/retail\ncloud.google.com/go/run\ncloud.google.com/go/scheduler\ncloud.google.com/go/secretmanager\ncloud.google.com/go/security\ncloud.google.com/go/securitycenter\ncloud.google.com/go/servicecontrol\ncloud.google.com/go/servicedirectory\ncloud.google.com/go/servicemanagement\ncloud.google.com/go/serviceusage\ncloud.google.com/go/shell\ncloud.google.com/go/spanner\ncloud.google.com/go/speech\ncloud.google.com/go/storage\nmain\ncloud.google.com/go/storagetransfer\ncloud.google.com/go/talent\ncloud.google.com/go/texttospeech\ncloud.google.com/go/tpu\ncloud.google.com/go/trace\ncloud.google.com/go/translate\ncloud.google.com/go/video\ncloud.google.com/go/videointelligence\ncloud.google.com/go/vision\ncloud.google.com/go/vision/v2\ncloud.google.com/go/vmmigration\ncloud.google.com/go/vmwareengine\ncloud.google.com/go/vpcaccess\ncloud.google.com/go/webrisk\ncloud.google.com/go/websecurityscanner\ncloud.google.com/go/workflows\ncloud.google.com/go/workstations/latest/cloud.google.com/go/workstations/apiv1beta", - "release_level": "beta", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/workstations/latest/apiv1beta", + "release_level": "preview", "library_type": "GAPIC_AUTO" } } diff --git a/vendor/cloud.google.com/go/internal/README.md b/vendor/cloud.google.com/go/internal/README.md index b38a4c1a2d..972857e9d5 100644 --- a/vendor/cloud.google.com/go/internal/README.md +++ b/vendor/cloud.google.com/go/internal/README.md @@ -17,21 +17,6 @@ tools would then talk to pkg.go.dev or some other service to get the overall list of packages and use the `.repo-metadata.json` files to get the additional metadata required. For now, `.repo-metadata-full.json` includes everything. -## cloudbuild.yaml - -The `cloudbuild.yaml` Cloud Build configuration currently supports: - -* Building a docker container from the `internal/postprocessor/Dockerfile`. - -The build can be run locally in the `google-cloud-go` root directory: - -```bash -gcloud builds submit --project=cloud-devrel-kokoro-resources --config=internal/cloudbuild.yaml -``` - -See the [postprocessor/README](postprocessor/README.md) for instructions -regarding updating the post-processor docker container. - ### Updating OwlBot SHA You may want to manually update the which version of the post-processor will be diff --git a/vendor/cloud.google.com/go/internal/cloudbuild.yaml b/vendor/cloud.google.com/go/internal/cloudbuild.yaml deleted file mode 100644 index 71281cec24..0000000000 --- a/vendor/cloud.google.com/go/internal/cloudbuild.yaml +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -# note: /workspace is a special directory in the docker image where all the files in this folder -# get placed on your behalf - -timeout: 7200s # 2 hours -steps: -- name: gcr.io/cloud-builders/docker - args: ['build', '-t', 'gcr.io/cloud-devrel-public-resources/owlbot-go', '-f', 'postprocessor/Dockerfile', '.'] - dir: internal - -images: -- gcr.io/cloud-devrel-public-resources/owlbot-go:latest diff --git a/vendor/cloud.google.com/go/internal/retry.go b/vendor/cloud.google.com/go/internal/retry.go index 2943a6d0b4..4c9220e0dc 100644 --- a/vendor/cloud.google.com/go/internal/retry.go +++ b/vendor/cloud.google.com/go/internal/retry.go @@ -20,7 +20,6 @@ import ( "time" gax "github.com/googleapis/gax-go/v2" - "google.golang.org/grpc/status" ) // Retry calls the supplied function f repeatedly according to the provided @@ -75,11 +74,3 @@ func (e wrappedCallErr) Unwrap() error { func (e wrappedCallErr) Is(err error) bool { return e.ctxErr == err || e.wrappedErr == err } - -// GRPCStatus allows the wrapped error to be used with status.FromError. -func (e wrappedCallErr) GRPCStatus() *status.Status { - if s, ok := status.FromError(e.wrappedErr); ok { - return s - } - return nil -} diff --git a/vendor/cloud.google.com/go/storage/CHANGES.md b/vendor/cloud.google.com/go/storage/CHANGES.md index 3f4097faea..30ee040f76 100644 --- a/vendor/cloud.google.com/go/storage/CHANGES.md +++ b/vendor/cloud.google.com/go/storage/CHANGES.md @@ -1,6 +1,123 @@ # Changes +## [1.35.1](https://github.com/googleapis/google-cloud-go/compare/storage/v1.35.0...storage/v1.35.1) (2023-11-09) + + +### Bug Fixes + +* **storage:** Rename aux.go to auxiliary.go fixing windows build ([ba23673](https://github.com/googleapis/google-cloud-go/commit/ba23673da7707c31292e4aa29d65b7ac1446d4a6)) + +## [1.35.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.34.1...storage/v1.35.0) (2023-11-09) + + +### Features + +* **storage:** Change gRPC writes to use bi-directional streams ([#8930](https://github.com/googleapis/google-cloud-go/issues/8930)) ([3e23a36](https://github.com/googleapis/google-cloud-go/commit/3e23a364b1a20c4fda7aef257e4136586ec769a4)) + +## [1.34.1](https://github.com/googleapis/google-cloud-go/compare/storage/v1.34.0...storage/v1.34.1) (2023-11-01) + + +### Bug Fixes + +* **storage:** Bump google.golang.org/api to v0.149.0 ([8d2ab9f](https://github.com/googleapis/google-cloud-go/commit/8d2ab9f320a86c1c0fab90513fc05861561d0880)) + +## [1.34.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.33.0...storage/v1.34.0) (2023-10-31) + + +### Features + +* **storage/internal:** Add match_glob field to ListObjectsRequest ([#8618](https://github.com/googleapis/google-cloud-go/issues/8618)) ([e9ae601](https://github.com/googleapis/google-cloud-go/commit/e9ae6018983ae09781740e4ff939e6e365863dbb)) +* **storage/internal:** Add terminal_storage_class fields to Autoclass message ([57fc1a6](https://github.com/googleapis/google-cloud-go/commit/57fc1a6de326456eb68ef25f7a305df6636ed386)) +* **storage/internal:** Adds the RestoreObject operation ([56ce871](https://github.com/googleapis/google-cloud-go/commit/56ce87195320634b07ae0b012efcc5f2b3813fb0)) +* **storage:** Support autoclass v2.1 ([#8721](https://github.com/googleapis/google-cloud-go/issues/8721)) ([fe1e195](https://github.com/googleapis/google-cloud-go/commit/fe1e19590a252c6adc6ca6c51a69b6e561e143b8)) +* **storage:** Support MatchGlob for gRPC ([#8670](https://github.com/googleapis/google-cloud-go/issues/8670)) 
([3df0287](https://github.com/googleapis/google-cloud-go/commit/3df0287f88d5e2c4526e9e6b8dc2a4ca54f88918)), refs [#7727](https://github.com/googleapis/google-cloud-go/issues/7727) + + +### Bug Fixes + +* **storage:** Drop stream reference after closing it for gRPC writes ([#8872](https://github.com/googleapis/google-cloud-go/issues/8872)) ([525abde](https://github.com/googleapis/google-cloud-go/commit/525abdee433864d4d456f1f1fff5599017b557ff)) +* **storage:** Update golang.org/x/net to v0.17.0 ([174da47](https://github.com/googleapis/google-cloud-go/commit/174da47254fefb12921bbfc65b7829a453af6f5d)) +* **storage:** Update grpc-go to v1.56.3 ([343cea8](https://github.com/googleapis/google-cloud-go/commit/343cea8c43b1e31ae21ad50ad31d3b0b60143f8c)) +* **storage:** Update grpc-go to v1.59.0 ([81a97b0](https://github.com/googleapis/google-cloud-go/commit/81a97b06cb28b25432e4ece595c55a9857e960b7)) + +## [1.33.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.32.0...storage/v1.33.0) (2023-09-07) + + +### Features + +* **storage:** Export gRPC client constructor ([#8509](https://github.com/googleapis/google-cloud-go/issues/8509)) ([1a928ae](https://github.com/googleapis/google-cloud-go/commit/1a928ae205f2325cb5206304af4d609dc3c1447a)) + +## [1.32.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.31.0...storage/v1.32.0) (2023-08-15) + + +### Features + +* **storage:** Add support for custom headers ([#8294](https://github.com/googleapis/google-cloud-go/issues/8294)) ([313fd4a](https://github.com/googleapis/google-cloud-go/commit/313fd4a60380d36c5ecaead3e968dbc84d044a0b)) +* **storage:** Add trace span to Writer ([#8375](https://github.com/googleapis/google-cloud-go/issues/8375)) ([f7ac85b](https://github.com/googleapis/google-cloud-go/commit/f7ac85bec2806d351529714bd7744a91a9fdefdd)), refs [#6144](https://github.com/googleapis/google-cloud-go/issues/6144) +* **storage:** Support single-shot uploads in gRPC ([#8348](https://github.com/googleapis/google-cloud-go/issues/8348)) ([7de4a7d](https://github.com/googleapis/google-cloud-go/commit/7de4a7da31ab279a343b1592b15a126cda03e5e7)), refs [#7798](https://github.com/googleapis/google-cloud-go/issues/7798) +* **storage:** Trace span covers life of a Reader ([#8390](https://github.com/googleapis/google-cloud-go/issues/8390)) ([8de30d7](https://github.com/googleapis/google-cloud-go/commit/8de30d752eec2fed2ea4c127482d3e213f9050e2)) + + +### Bug Fixes + +* **storage:** Fix AllObjects condition in gRPC ([#8184](https://github.com/googleapis/google-cloud-go/issues/8184)) ([2b99e4f](https://github.com/googleapis/google-cloud-go/commit/2b99e4f39be20fe21e8bc5c1ec1c0e758222c46e)), refs [#6205](https://github.com/googleapis/google-cloud-go/issues/6205) +* **storage:** Fix gRPC generation/condition issues ([#8396](https://github.com/googleapis/google-cloud-go/issues/8396)) ([ca68ff5](https://github.com/googleapis/google-cloud-go/commit/ca68ff54b680732b59b223655070d0f6abccefee)) +* **storage:** Same method name and Trace Span name ([#8150](https://github.com/googleapis/google-cloud-go/issues/8150)) ([e277213](https://github.com/googleapis/google-cloud-go/commit/e2772133896bb94097b5d1f090f1bcafd136f2ed)) +* **storage:** Update gRPC retry codes ([#8202](https://github.com/googleapis/google-cloud-go/issues/8202)) ([afdf772](https://github.com/googleapis/google-cloud-go/commit/afdf772fc6a90b3010eee9d70ab65e22e276f53f)) + +## [1.31.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.30.1...storage/v1.31.0) (2023-06-27) + 
+ +### Features + +* **storage/internal:** Add ctype=CORD for ChecksummedData.content ([ca94e27](https://github.com/googleapis/google-cloud-go/commit/ca94e2724f9e2610b46aefd0a3b5ddc06102e91b)) +* **storage:** Add support for MatchGlob ([#8097](https://github.com/googleapis/google-cloud-go/issues/8097)) ([9426a5a](https://github.com/googleapis/google-cloud-go/commit/9426a5a45d4c2fd07f84261f6d602680e79cdc48)), refs [#7727](https://github.com/googleapis/google-cloud-go/issues/7727) [#7728](https://github.com/googleapis/google-cloud-go/issues/7728) +* **storage:** Respect WithEndpoint for SignedURLs and PostPolicy ([#8113](https://github.com/googleapis/google-cloud-go/issues/8113)) ([f918f23](https://github.com/googleapis/google-cloud-go/commit/f918f23a3cda4fbc8d709e32b914ead8b735d664)) +* **storage:** Update all direct dependencies ([b340d03](https://github.com/googleapis/google-cloud-go/commit/b340d030f2b52a4ce48846ce63984b28583abde6)) + + +### Bug Fixes + +* **storage:** Fix CreateBucket logic for gRPC ([#8165](https://github.com/googleapis/google-cloud-go/issues/8165)) ([8424e7e](https://github.com/googleapis/google-cloud-go/commit/8424e7e145a117c91006318fa924a8b2643c1c7e)), refs [#8162](https://github.com/googleapis/google-cloud-go/issues/8162) +* **storage:** Fix reads with "./" in object names [XML] ([#8017](https://github.com/googleapis/google-cloud-go/issues/8017)) ([6b7b21f](https://github.com/googleapis/google-cloud-go/commit/6b7b21f8a334b6ad3a25e1f66ae1265b4d1f0995)) +* **storage:** Fix routing header for writes ([#8159](https://github.com/googleapis/google-cloud-go/issues/8159)) ([42a59f5](https://github.com/googleapis/google-cloud-go/commit/42a59f5a23ab9b4743ab032ad92304922c801d93)), refs [#8142](https://github.com/googleapis/google-cloud-go/issues/8142) [#8143](https://github.com/googleapis/google-cloud-go/issues/8143) [#8144](https://github.com/googleapis/google-cloud-go/issues/8144) [#8145](https://github.com/googleapis/google-cloud-go/issues/8145) [#8149](https://github.com/googleapis/google-cloud-go/issues/8149) +* **storage:** REST query UpdateMask bug ([df52820](https://github.com/googleapis/google-cloud-go/commit/df52820b0e7721954809a8aa8700b93c5662dc9b)) +* **storage:** Update grpc to v1.55.0 ([1147ce0](https://github.com/googleapis/google-cloud-go/commit/1147ce02a990276ca4f8ab7a1ab65c14da4450ef)) + + +### Documentation + +* **storage/internal:** Clarifications about behavior of DeleteObject RPC ([3f1ed9c](https://github.com/googleapis/google-cloud-go/commit/3f1ed9c63fb115f47607a3ab478842fe5ba0df11)) +* **storage/internal:** Clarified the behavior of supplying bucket.name field in CreateBucket to reflect actual implementation ([ebae64d](https://github.com/googleapis/google-cloud-go/commit/ebae64d53397ec5dfe851f098754eaa1f5df7cb1)) +* **storage/internal:** Revert ChecksummedData message definition not to specify ctype=CORD, because it would be a breaking change. 
([ef61e47](https://github.com/googleapis/google-cloud-go/commit/ef61e4799280a355b960da8ae240ceb2efbe71ac)) +* **storage/internal:** Update routing annotations for CancelResumableWriteRequest and QueryWriteStatusRequest ([4900851](https://github.com/googleapis/google-cloud-go/commit/49008518e168fe6f7891b907d6fc14eecdef758c)) +* **storage/internal:** Updated ChecksummedData message definition to specify ctype=CORD, and removed incorrect earlier attempt that set that annotation in the ReadObjectResponse message definition ([ef61e47](https://github.com/googleapis/google-cloud-go/commit/ef61e4799280a355b960da8ae240ceb2efbe71ac)) +* **storage:** WithXMLReads should mention XML instead of JSON API ([#7881](https://github.com/googleapis/google-cloud-go/issues/7881)) ([36f56c8](https://github.com/googleapis/google-cloud-go/commit/36f56c80c456ca74ffc03df76844ce15980ced82)) + +## [1.30.1](https://github.com/googleapis/google-cloud-go/compare/storage/v1.30.0...storage/v1.30.1) (2023-03-21) + + +### Bug Fixes + +* **storage:** Retract versions with Copier bug ([#7583](https://github.com/googleapis/google-cloud-go/issues/7583)) ([9c10b6f](https://github.com/googleapis/google-cloud-go/commit/9c10b6f8a54cb8447260148b5e4a9b5160281020)) + * Versions v1.25.0-v1.27.0 are retracted due to [#6857](https://github.com/googleapis/google-cloud-go/issues/6857). +* **storage:** SignedURL v4 allows headers with colons in value ([#7603](https://github.com/googleapis/google-cloud-go/issues/7603)) ([6b50f9b](https://github.com/googleapis/google-cloud-go/commit/6b50f9b368f5b271ade1706c342865cef46712e6)) + +## [1.30.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.29.0...storage/v1.30.0) (2023-03-15) + + +### Features + +* **storage/internal:** Update routing annotation for CreateBucketRequest docs: Add support for end-to-end checksumming in the gRPC WriteObject flow feat!: BREAKING CHANGE - renaming Notification to NotificationConfig ([2fef56f](https://github.com/googleapis/google-cloud-go/commit/2fef56f75a63dc4ff6e0eea56c7b26d4831c8e27)) +* **storage:** Json downloads ([#7158](https://github.com/googleapis/google-cloud-go/issues/7158)) ([574a86c](https://github.com/googleapis/google-cloud-go/commit/574a86c614445f8c3f5a54446820df774c31cd47)) +* **storage:** Update iam and longrunning deps ([91a1f78](https://github.com/googleapis/google-cloud-go/commit/91a1f784a109da70f63b96414bba8a9b4254cddd)) + + +### Bug Fixes + +* **storage:** Specify credentials with STORAGE_EMULATOR_HOST ([#7271](https://github.com/googleapis/google-cloud-go/issues/7271)) ([940ae15](https://github.com/googleapis/google-cloud-go/commit/940ae15f725ff384e345e627feb03d22e1fd8db5)) + ## [1.29.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.28.1...storage/v1.29.0) (2023-01-19) diff --git a/vendor/cloud.google.com/go/storage/acl.go b/vendor/cloud.google.com/go/storage/acl.go index e0ab60073c..74799e55e9 100644 --- a/vendor/cloud.google.com/go/storage/acl.go +++ b/vendor/cloud.google.com/go/storage/acl.go @@ -20,7 +20,7 @@ import ( "reflect" "cloud.google.com/go/internal/trace" - storagepb "cloud.google.com/go/storage/internal/apiv2/stubs" + "cloud.google.com/go/storage/internal/apiv2/storagepb" raw "google.golang.org/api/storage/v1" ) diff --git a/vendor/cloud.google.com/go/storage/bucket.go b/vendor/cloud.google.com/go/storage/bucket.go index 19f266ef1e..3818c4498c 100644 --- a/vendor/cloud.google.com/go/storage/bucket.go +++ b/vendor/cloud.google.com/go/storage/bucket.go @@ -27,7 +27,7 @@ import ( 
"cloud.google.com/go/compute/metadata" "cloud.google.com/go/internal/optional" "cloud.google.com/go/internal/trace" - storagepb "cloud.google.com/go/storage/internal/apiv2/stubs" + "cloud.google.com/go/storage/internal/apiv2/storagepb" "google.golang.org/api/googleapi" "google.golang.org/api/iamcredentials/v1" "google.golang.org/api/iterator" @@ -152,7 +152,7 @@ func (b *BucketHandle) Attrs(ctx context.Context) (attrs *BucketAttrs, err error // Update updates a bucket's attributes. func (b *BucketHandle) Update(ctx context.Context, uattrs BucketAttrsToUpdate) (attrs *BucketAttrs, err error) { - ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Create") + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Update") defer func() { trace.EndSpan(ctx, err) }() isIdempotent := b.conds != nil && b.conds.MetagenerationMatch != 0 @@ -173,12 +173,18 @@ func (b *BucketHandle) Update(ctx context.Context, uattrs BucketAttrsToUpdate) ( // [Overview of access control]: https://cloud.google.com/storage/docs/accesscontrol#signed_urls_query_string_authentication // [automatic detection of credentials]: https://pkg.go.dev/cloud.google.com/go/storage#hdr-Credential_requirements_for_signing func (b *BucketHandle) SignedURL(object string, opts *SignedURLOptions) (string, error) { - if opts.GoogleAccessID != "" && (opts.SignBytes != nil || len(opts.PrivateKey) > 0) { - return SignedURL(b.name, object, opts) - } // Make a copy of opts so we don't modify the pointer parameter. newopts := opts.clone() + if newopts.Hostname == "" { + // Extract the correct host from the readhost set on the client + newopts.Hostname = b.c.xmlHost + } + + if opts.GoogleAccessID != "" && (opts.SignBytes != nil || len(opts.PrivateKey) > 0) { + return SignedURL(b.name, object, newopts) + } + if newopts.GoogleAccessID == "" { id, err := b.detectDefaultGoogleAccessID() if err != nil { @@ -215,12 +221,18 @@ func (b *BucketHandle) SignedURL(object string, opts *SignedURLOptions) (string, // // [automatic detection of credentials]: https://pkg.go.dev/cloud.google.com/go/storage#hdr-Credential_requirements_for_signing func (b *BucketHandle) GenerateSignedPostPolicyV4(object string, opts *PostPolicyV4Options) (*PostPolicyV4, error) { - if opts.GoogleAccessID != "" && (opts.SignRawBytes != nil || opts.SignBytes != nil || len(opts.PrivateKey) > 0) { - return GenerateSignedPostPolicyV4(b.name, object, opts) - } // Make a copy of opts so we don't modify the pointer parameter. newopts := opts.clone() + if newopts.Hostname == "" { + // Extract the correct host from the readhost set on the client + newopts.Hostname = b.c.xmlHost + } + + if opts.GoogleAccessID != "" && (opts.SignRawBytes != nil || opts.SignBytes != nil || len(opts.PrivateKey) > 0) { + return GenerateSignedPostPolicyV4(b.name, object, newopts) + } + if newopts.GoogleAccessID == "" { id, err := b.detectDefaultGoogleAccessID() if err != nil { @@ -728,6 +740,13 @@ type Autoclass struct { // If Autoclass is enabled when the bucket is created, the ToggleTime // is set to the bucket creation time. This field is read-only. ToggleTime time.Time + // TerminalStorageClass: The storage class that objects in the bucket + // eventually transition to if they are not read for a certain length of + // time. Valid values are NEARLINE and ARCHIVE. + TerminalStorageClass string + // TerminalStorageClassUpdateTime represents the time of the most recent + // update to "TerminalStorageClass". 
+ TerminalStorageClassUpdateTime time.Time } func newBucket(b *raw.Bucket) (*BucketAttrs, error) { @@ -921,8 +940,6 @@ func (ua *BucketAttrsToUpdate) toProtoBucket() *storagepb.Bucket { return &storagepb.Bucket{} } - // TODO(cathyo): Handle labels. Pending b/230510191. - var v *storagepb.Bucket_Versioning if ua.VersioningEnabled != nil { v = &storagepb.Bucket_Versioning{Enabled: optional.ToBool(ua.VersioningEnabled)} @@ -996,6 +1013,7 @@ func (ua *BucketAttrsToUpdate) toProtoBucket() *storagepb.Bucket { IamConfig: bktIAM, Rpo: ua.RPO.String(), Autoclass: ua.Autoclass.toProtoAutoclass(), + Labels: ua.setLabels, } } @@ -1230,9 +1248,11 @@ func (ua *BucketAttrsToUpdate) toRawBucket() *raw.Bucket { } if ua.Autoclass != nil { rb.Autoclass = &raw.BucketAutoclass{ - Enabled: ua.Autoclass.Enabled, - ForceSendFields: []string{"Enabled"}, + Enabled: ua.Autoclass.Enabled, + TerminalStorageClass: ua.Autoclass.TerminalStorageClass, + ForceSendFields: []string{"Enabled"}, } + rb.ForceSendFields = append(rb.ForceSendFields, "Autoclass") } if ua.PredefinedACL != "" { // Clear ACL or the call will fail. @@ -1264,7 +1284,9 @@ func (ua *BucketAttrsToUpdate) toRawBucket() *raw.Bucket { } // If returns a new BucketHandle that applies a set of preconditions. -// Preconditions already set on the BucketHandle are ignored. +// Preconditions already set on the BucketHandle are ignored. The supplied +// BucketConditions must have exactly one field set to a non-zero value; +// otherwise an error will be returned from any operation on the BucketHandle. // Operations on the new handle will return an error if the preconditions are not // satisfied. The only valid preconditions for buckets are MetagenerationMatch // and MetagenerationNotMatch. @@ -1545,7 +1567,6 @@ func toProtoLifecycle(l Lifecycle) *storagepb.Bucket_Lifecycle { // doc states "format: int32"), so the client types used int64, // but the proto uses int32 so we have a potentially lossy // conversion. - AgeDays: proto.Int32(int32(r.Condition.AgeInDays)), DaysSinceCustomTime: proto.Int32(int32(r.Condition.DaysSinceCustomTime)), DaysSinceNoncurrentTime: proto.Int32(int32(r.Condition.DaysSinceNoncurrentTime)), MatchesPrefix: r.Condition.MatchesPrefix, @@ -1555,7 +1576,11 @@ func toProtoLifecycle(l Lifecycle) *storagepb.Bucket_Lifecycle { }, } - // TODO(#6205): This may not be needed for gRPC + // Only set AgeDays in the proto if it is non-zero, or if the user has set + // Condition.AllObjects. + if r.Condition.AgeInDays != 0 { + rr.Condition.AgeDays = proto.Int32(int32(r.Condition.AgeInDays)) + } if r.Condition.AllObjects { rr.Condition.AgeDays = proto.Int32(0) } @@ -1654,8 +1679,8 @@ func toLifecycleFromProto(rl *storagepb.Bucket_Lifecycle) Lifecycle { }, } - // TODO(#6205): This may not be needed for gRPC - if rr.GetCondition().GetAgeDays() == 0 { + // Only set Condition.AllObjects if AgeDays is zero, not if it is nil. + if rr.GetCondition().AgeDays != nil && rr.GetCondition().GetAgeDays() == 0 { r.Condition.AllObjects = true } @@ -1938,9 +1963,10 @@ func (a *Autoclass) toRawAutoclass() *raw.BucketAutoclass { if a == nil { return nil } - // Excluding read only field ToggleTime. + // Excluding read only fields ToggleTime and TerminalStorageClassUpdateTime. return &raw.BucketAutoclass{ - Enabled: a.Enabled, + Enabled: a.Enabled, + TerminalStorageClass: a.TerminalStorageClass, } } @@ -1948,27 +1974,34 @@ func (a *Autoclass) toProtoAutoclass() *storagepb.Bucket_Autoclass { if a == nil { return nil } - // Excluding read only field ToggleTime. 
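// Editor's note (not part of the patch): the hunks above add
// TerminalStorageClass/TerminalStorageClassUpdateTime to Autoclass and wire
// them through the raw and proto conversions. A minimal usage sketch with a
// placeholder bucket name; per the field comment, valid terminal classes are
// NEARLINE and ARCHIVE.
package main

import (
	"context"
	"log"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Enable Autoclass and let unread objects eventually transition to ARCHIVE.
	attrs, err := client.Bucket("my-bucket").Update(ctx, storage.BucketAttrsToUpdate{
		Autoclass: &storage.Autoclass{Enabled: true, TerminalStorageClass: "ARCHIVE"},
	})
	if err != nil {
		log.Fatal(err)
	}
	if attrs.Autoclass != nil {
		log.Println(attrs.Autoclass.TerminalStorageClass, attrs.Autoclass.TerminalStorageClassUpdateTime)
	}
}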
- return &storagepb.Bucket_Autoclass{ + // Excluding read only fields ToggleTime and TerminalStorageClassUpdateTime. + ba := &storagepb.Bucket_Autoclass{ Enabled: a.Enabled, } + if a.TerminalStorageClass != "" { + ba.TerminalStorageClass = &a.TerminalStorageClass + } + return ba } func toAutoclassFromRaw(a *raw.BucketAutoclass) *Autoclass { if a == nil || a.ToggleTime == "" { return nil } - // Return Autoclass.ToggleTime only if parsed with a valid value. + ac := &Autoclass{ + Enabled: a.Enabled, + TerminalStorageClass: a.TerminalStorageClass, + } + // Return ToggleTime and TSCUpdateTime only if parsed with valid values. t, err := time.Parse(time.RFC3339, a.ToggleTime) - if err != nil { - return &Autoclass{ - Enabled: a.Enabled, - } + if err == nil { + ac.ToggleTime = t } - return &Autoclass{ - Enabled: a.Enabled, - ToggleTime: t, + ut, err := time.Parse(time.RFC3339, a.TerminalStorageClassUpdateTime) + if err == nil { + ac.TerminalStorageClassUpdateTime = ut } + return ac } func toAutoclassFromProto(a *storagepb.Bucket_Autoclass) *Autoclass { @@ -1976,8 +2009,10 @@ func toAutoclassFromProto(a *storagepb.Bucket_Autoclass) *Autoclass { return nil } return &Autoclass{ - Enabled: a.GetEnabled(), - ToggleTime: a.GetToggleTime().AsTime(), + Enabled: a.GetEnabled(), + ToggleTime: a.GetToggleTime().AsTime(), + TerminalStorageClass: a.GetTerminalStorageClass(), + TerminalStorageClassUpdateTime: a.GetTerminalStorageClassUpdateTime().AsTime(), } } diff --git a/vendor/cloud.google.com/go/storage/client.go b/vendor/cloud.google.com/go/storage/client.go index d579a2b1ee..3bed9b64c2 100644 --- a/vendor/cloud.google.com/go/storage/client.go +++ b/vendor/cloud.google.com/go/storage/client.go @@ -19,9 +19,9 @@ import ( "io" "time" + "cloud.google.com/go/iam/apiv1/iampb" gax "github.com/googleapis/gax-go/v2" "google.golang.org/api/option" - iampb "google.golang.org/genproto/googleapis/iam/v1" ) // TODO(noahdietz): Move existing factory methods to this file. diff --git a/vendor/cloud.google.com/go/storage/doc.go b/vendor/cloud.google.com/go/storage/doc.go index 8bf3098431..22adb744fe 100644 --- a/vendor/cloud.google.com/go/storage/doc.go +++ b/vendor/cloud.google.com/go/storage/doc.go @@ -36,6 +36,9 @@ The client will use your default application credentials. Clients should be reused instead of created as needed. The methods of [Client] are safe for concurrent use by multiple goroutines. +You may configure the client by passing in options from the [google.golang.org/api/option] +package. You may also use options defined in this package, such as [WithJSONReads]. + If you only wish to access public data, you can create an unauthenticated client with @@ -317,6 +320,44 @@ client (using [Client.SetRetry]). For example: // Handle err. } +# Sending Custom Headers + +You can add custom headers to any API call made by this package by using +[callctx.SetHeaders] on the context which is passed to the method. For example, +to add a [custom audit logging] header: + + ctx := context.Background() + ctx = callctx.SetHeaders(ctx, "x-goog-custom-audit-", "") + // Use client as usual with the context and the additional headers will be sent. + client.Bucket("my-bucket").Attrs(ctx) + +# Experimental gRPC API + +This package includes support for the Cloud Storage gRPC API, which is currently +in preview. This implementation uses gRPC rather than the current JSON & XML +APIs to make requests to Cloud Storage. If you would like to try the API, +please contact your GCP account rep for more information. 
The gRPC API is not +yet generally available, so it may be subject to breaking changes. + +To create a client which will use gRPC, use the alternate constructor: + + ctx := context.Background() + client, err := storage.NewGRPCClient(ctx) + if err != nil { + // TODO: Handle error. + } + // Use client as usual. + +If the application is running within GCP, users may get better performance by +enabling DirectPath (enabling requests to skip some proxy steps). To enable, +set the environment variable `GOOGLE_CLOUD_ENABLE_DIRECT_PATH_XDS=true` and add +the following side-effect imports to your application: + + import ( + _ "google.golang.org/grpc/balancer/rls" + _ "google.golang.org/grpc/xds/googledirectpath" + ) + [Cloud Storage IAM docs]: https://cloud.google.com/storage/docs/access-control/iam [XML POST Object docs]: https://cloud.google.com/storage/docs/xml-api/post-object [Cloud Storage retry docs]: https://cloud.google.com/storage/docs/retry-strategy @@ -324,5 +365,6 @@ client (using [Client.SetRetry]). For example: [gcloud using application default credentials]: https://cloud.google.com/sdk/gcloud/reference/auth/application-default/login [impersonation enabled]: https://cloud.google.com/sdk/gcloud/reference#--impersonate-service-account [IAM Service Account Credentials API]: https://console.developers.google.com/apis/api/iamcredentials.googleapis.com/overview +[custom audit logging]: https://cloud.google.com/storage/docs/audit-logging#add-custom-metadata */ package storage // import "cloud.google.com/go/storage" diff --git a/vendor/cloud.google.com/go/storage/grpc_client.go b/vendor/cloud.google.com/go/storage/grpc_client.go index 1dfb6f8302..99dfba4677 100644 --- a/vendor/cloud.google.com/go/storage/grpc_client.go +++ b/vendor/cloud.google.com/go/storage/grpc_client.go @@ -17,19 +17,21 @@ package storage import ( "context" "encoding/base64" + "errors" "fmt" "io" "net/url" "os" + "cloud.google.com/go/iam/apiv1/iampb" "cloud.google.com/go/internal/trace" gapic "cloud.google.com/go/storage/internal/apiv2" - storagepb "cloud.google.com/go/storage/internal/apiv2/stubs" + "cloud.google.com/go/storage/internal/apiv2/storagepb" "github.com/googleapis/gax-go/v2" + "google.golang.org/api/googleapi" "google.golang.org/api/iterator" "google.golang.org/api/option" "google.golang.org/api/option/internaloption" - iampb "google.golang.org/genproto/googleapis/iam/v1" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" @@ -38,13 +40,14 @@ import ( ) const ( - // defaultConnPoolSize is the default number of connections + // defaultConnPoolSize is the default number of channels // to initialize in the GAPIC gRPC connection pool. A larger // connection pool may be necessary for jobs that require - // high throughput and/or leverage many concurrent streams. + // high throughput and/or leverage many concurrent streams + // if not running via DirectPath. // // This is only used for the gRPC client. - defaultConnPoolSize = 4 + defaultConnPoolSize = 1 // maxPerMessageWriteSize is the maximum amount of content that can be sent // per WriteObjectRequest message. A buffer reaching this amount will @@ -110,6 +113,11 @@ func newGRPCStorageClient(ctx context.Context, opts ...storageOption) (storageCl s := initSettings(opts...) s.clientOption = append(defaultGRPCOptions(), s.clientOption...) + config := newStorageConfig(s.clientOption...) 
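// Editor's note (not part of the patch): a hedged sketch combining the two
// doc.go additions above, the callctx custom-header helper and the preview
// NewGRPCClient constructor. The bucket name and audit key/value are
// placeholders.
package main

import (
	"context"
	"log"

	"cloud.google.com/go/storage"
	"github.com/googleapis/gax-go/v2/callctx"
)

func main() {
	ctx := context.Background()

	// Preview gRPC transport; storage.NewClient would use the JSON/XML APIs.
	client, err := storage.NewGRPCClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Attach a custom audit header; it is sent with every call that uses ctx.
	ctx = callctx.SetHeaders(ctx, "x-goog-custom-audit-job", "my-job")

	attrs, err := client.Bucket("my-bucket").Attrs(ctx)
	if err != nil {
		log.Fatal(err)
	}
	log.Println(attrs.Name)
}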
+ if config.readAPIWasSet { + return nil, errors.New("storage: GRPC is incompatible with any option that specifies an API for reads") + } + g, err := gapic.NewClient(ctx, s.clientOption...) if err != nil { return nil, err @@ -133,11 +141,11 @@ func (c *grpcStorageClient) GetServiceAccount(ctx context.Context, project strin Project: toProjectResource(project), } var resp *storagepb.ServiceAccount - err := run(ctx, func() error { + err := run(ctx, func(ctx context.Context) error { var err error resp, err = c.raw.GetServiceAccount(ctx, req, s.gax...) return err - }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + }, s.retry, s.idempotent) if err != nil { return "", err } @@ -147,7 +155,7 @@ func (c *grpcStorageClient) GetServiceAccount(ctx context.Context, project strin func (c *grpcStorageClient) CreateBucket(ctx context.Context, project, bucket string, attrs *BucketAttrs, opts ...storageOption) (*BucketAttrs, error) { s := callSettings(c.settings, opts...) b := attrs.toProtoBucket() - b.Name = bucket + b.Project = toProjectResource(project) // If there is lifecycle information but no location, explicitly set // the location. This is a GCS quirk/bug. if b.GetLocation() == "" && b.GetLifecycle() != nil { @@ -155,9 +163,9 @@ func (c *grpcStorageClient) CreateBucket(ctx context.Context, project, bucket st } req := &storagepb.CreateBucketRequest{ - Parent: toProjectResource(project), + Parent: fmt.Sprintf("projects/%s", globalProjectAlias), Bucket: b, - BucketId: b.GetName(), + BucketId: bucket, } if attrs != nil { req.PredefinedAcl = attrs.PredefinedACL @@ -165,13 +173,13 @@ func (c *grpcStorageClient) CreateBucket(ctx context.Context, project, bucket st } var battrs *BucketAttrs - err := run(ctx, func() error { + err := run(ctx, func(ctx context.Context) error { res, err := c.raw.CreateBucket(ctx, req, s.gax...) battrs = newBucketFromProto(res) return err - }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + }, s.retry, s.idempotent) return battrs, err } @@ -185,26 +193,26 @@ func (c *grpcStorageClient) ListBuckets(ctx context.Context, project string, opt var gitr *gapic.BucketIterator fetch := func(pageSize int, pageToken string) (token string, err error) { - // Initialize GAPIC-based iterator when pageToken is empty, which - // indicates that this fetch call is attempting to get the first page. - // - // Note: Initializing the GAPIC-based iterator lazily is necessary to - // capture the BucketIterator.Prefix set by the user *after* the - // BucketIterator is returned to them from the veneer. - if pageToken == "" { - req := &storagepb.ListBucketsRequest{ - Parent: toProjectResource(it.projectID), - Prefix: it.Prefix, - } - gitr = c.raw.ListBuckets(it.ctx, req, s.gax...) - } var buckets []*storagepb.Bucket var next string - err = run(it.ctx, func() error { + err = run(it.ctx, func(ctx context.Context) error { + // Initialize GAPIC-based iterator when pageToken is empty, which + // indicates that this fetch call is attempting to get the first page. + // + // Note: Initializing the GAPIC-based iterator lazily is necessary to + // capture the BucketIterator.Prefix set by the user *after* the + // BucketIterator is returned to them from the veneer. + if pageToken == "" { + req := &storagepb.ListBucketsRequest{ + Parent: toProjectResource(it.projectID), + Prefix: it.Prefix, + } + gitr = c.raw.ListBuckets(ctx, req, s.gax...) 
+ } buckets, next, err = gitr.InternalFetch(pageSize, pageToken) return err - }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + }, s.retry, s.idempotent) if err != nil { return "", err } @@ -238,9 +246,9 @@ func (c *grpcStorageClient) DeleteBucket(ctx context.Context, bucket string, con ctx = setUserProjectMetadata(ctx, s.userProject) } - return run(ctx, func() error { + return run(ctx, func(ctx context.Context) error { return c.raw.DeleteBucket(ctx, req, s.gax...) - }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + }, s.retry, s.idempotent) } func (c *grpcStorageClient) GetBucket(ctx context.Context, bucket string, conds *BucketConditions, opts ...storageOption) (*BucketAttrs, error) { @@ -257,13 +265,13 @@ func (c *grpcStorageClient) GetBucket(ctx context.Context, bucket string, conds } var battrs *BucketAttrs - err := run(ctx, func() error { + err := run(ctx, func(ctx context.Context) error { res, err := c.raw.GetBucket(ctx, req, s.gax...) battrs = newBucketFromProto(res) return err - }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + }, s.retry, s.idempotent) if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound { return nil, ErrBucketNotExist @@ -348,15 +356,24 @@ func (c *grpcStorageClient) UpdateBucket(ctx context.Context, bucket string, uat if uattrs.Autoclass != nil { fieldMask.Paths = append(fieldMask.Paths, "autoclass") } - // TODO(cathyo): Handle labels. Pending b/230510191. + + for label := range uattrs.setLabels { + fieldMask.Paths = append(fieldMask.Paths, fmt.Sprintf("labels.%s", label)) + } + + // Delete a label by not including it in Bucket.Labels but adding the key to the update mask. + for label := range uattrs.deleteLabels { + fieldMask.Paths = append(fieldMask.Paths, fmt.Sprintf("labels.%s", label)) + } + req.UpdateMask = fieldMask var battrs *BucketAttrs - err := run(ctx, func() error { + err := run(ctx, func(ctx context.Context) error { res, err := c.raw.UpdateBucket(ctx, req, s.gax...) battrs = newBucketFromProto(res) return err - }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + }, s.retry, s.idempotent) return battrs, err } @@ -369,10 +386,10 @@ func (c *grpcStorageClient) LockBucketRetentionPolicy(ctx context.Context, bucke return err } - return run(ctx, func() error { + return run(ctx, func(ctx context.Context) error { _, err := c.raw.LockBucketRetentionPolicy(ctx, req, s.gax...) return err - }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + }, s.retry, s.idempotent) } func (c *grpcStorageClient) ListObjects(ctx context.Context, bucket string, q *Query, opts ...storageOption) *ObjectIterator { @@ -391,18 +408,21 @@ func (c *grpcStorageClient) ListObjects(ctx context.Context, bucket string, q *Q LexicographicStart: it.query.StartOffset, LexicographicEnd: it.query.EndOffset, IncludeTrailingDelimiter: it.query.IncludeTrailingDelimiter, + MatchGlob: it.query.MatchGlob, ReadMask: q.toFieldMask(), // a nil Query still results in a "*" FieldMask } if s.userProject != "" { ctx = setUserProjectMetadata(ctx, s.userProject) } - gitr := c.raw.ListObjects(it.ctx, req, s.gax...) fetch := func(pageSize int, pageToken string) (token string, err error) { var objects []*storagepb.Object - err = run(it.ctx, func() error { + var gitr *gapic.ObjectIterator + err = run(it.ctx, func(ctx context.Context) error { + gitr = c.raw.ListObjects(ctx, req, s.gax...) 
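// Editor's note (not part of the patch): the ListObjects hunk above forwards
// Query.MatchGlob to the gRPC ListObjectsRequest (feature #8670 in the
// CHANGES.md entries above). A small usage sketch with placeholder names:
package main

import (
	"context"
	"fmt"
	"log"

	"cloud.google.com/go/storage"
	"google.golang.org/api/iterator"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// List only objects whose names match the glob, e.g. every .txt object.
	it := client.Bucket("my-bucket").Objects(ctx, &storage.Query{MatchGlob: "**/*.txt"})
	for {
		attrs, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(attrs.Name)
	}
}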
+ it.ctx = ctx objects, token, err = gitr.InternalFetch(pageSize, pageToken) return err - }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + }, s.retry, s.idempotent) if err != nil { if st, ok := status.FromError(err); ok && st.Code() == codes.NotFound { err = ErrBucketNotExist @@ -445,9 +465,9 @@ func (c *grpcStorageClient) DeleteObject(ctx context.Context, bucket, object str if s.userProject != "" { ctx = setUserProjectMetadata(ctx, s.userProject) } - err := run(ctx, func() error { + err := run(ctx, func(ctx context.Context) error { return c.raw.DeleteObject(ctx, req, s.gax...) - }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + }, s.retry, s.idempotent) if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound { return ErrObjectNotExist } @@ -473,12 +493,12 @@ func (c *grpcStorageClient) GetObject(ctx context.Context, bucket, object string } var attrs *ObjectAttrs - err := run(ctx, func() error { + err := run(ctx, func(ctx context.Context) error { res, err := c.raw.GetObject(ctx, req, s.gax...) attrs = newObjectFromProto(res) return err - }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + }, s.retry, s.idempotent) if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound { return nil, ErrObjectNotExist @@ -490,11 +510,15 @@ func (c *grpcStorageClient) GetObject(ctx context.Context, bucket, object string func (c *grpcStorageClient) UpdateObject(ctx context.Context, bucket, object string, uattrs *ObjectAttrsToUpdate, gen int64, encryptionKey []byte, conds *Conditions, opts ...storageOption) (*ObjectAttrs, error) { s := callSettings(c.settings, opts...) o := uattrs.toProtoObject(bucketResourceName(globalProjectAlias, bucket), object) + // For Update, generation is passed via the object message rather than a field on the request. + if gen >= 0 { + o.Generation = gen + } req := &storagepb.UpdateObjectRequest{ Object: o, PredefinedAcl: uattrs.PredefinedACL, } - if err := applyCondsProto("grpcStorageClient.UpdateObject", gen, conds, req); err != nil { + if err := applyCondsProto("grpcStorageClient.UpdateObject", defaultGen, conds, req); err != nil { return nil, err } if s.userProject != "" { @@ -534,16 +558,28 @@ func (c *grpcStorageClient) UpdateObject(ctx context.Context, bucket, object str if uattrs.ACL != nil || len(uattrs.PredefinedACL) > 0 { fieldMask.Paths = append(fieldMask.Paths, "acl") } - // TODO(cathyo): Handle metadata. Pending b/230510191. + + if uattrs.Metadata != nil { + // We don't support deleting a specific metadata key; metadata is deleted + // as a whole if provided an empty map, so we do not use dot notation here + if len(uattrs.Metadata) == 0 { + fieldMask.Paths = append(fieldMask.Paths, "metadata") + } else { + // We can, however, use dot notation for adding keys + for key := range uattrs.Metadata { + fieldMask.Paths = append(fieldMask.Paths, fmt.Sprintf("metadata.%s", key)) + } + } + } req.UpdateMask = fieldMask var attrs *ObjectAttrs - err := run(ctx, func() error { + err := run(ctx, func(ctx context.Context) error { res, err := c.raw.UpdateObject(ctx, req, s.gax...) 
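// Editor's note (not part of the patch): per the UpdateObject field-mask logic
// above, individual metadata keys are addressed as "metadata.<key>", while an
// empty (non-nil) map deletes the metadata as a whole. A sketch of the two
// cases from the caller's side, with placeholder names; the behaviour is as
// described in the comments of the hunk above.
package main

import (
	"context"
	"log"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	obj := client.Bucket("my-bucket").Object("my-object")

	// Add or overwrite one metadata key; other keys are left in place.
	if _, err := obj.Update(ctx, storage.ObjectAttrsToUpdate{
		Metadata: map[string]string{"owner": "team-a"},
	}); err != nil {
		log.Fatal(err)
	}

	// An empty, non-nil map clears the custom metadata as a whole.
	if _, err := obj.Update(ctx, storage.ObjectAttrsToUpdate{
		Metadata: map[string]string{},
	}); err != nil {
		log.Fatal(err)
	}
}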
attrs = newObjectFromProto(res) return err - }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + }, s.retry, s.idempotent) if e, ok := status.FromError(err); ok && e.Code() == codes.NotFound { return nil, ErrObjectNotExist } @@ -749,17 +785,18 @@ func (c *grpcStorageClient) ComposeObject(ctx context.Context, req *composeObjec dstObjPb := req.dstObject.attrs.toProtoObject(req.dstBucket) dstObjPb.Name = req.dstObject.name - if err := applyCondsProto("ComposeObject destination", defaultGen, req.dstObject.conds, dstObjPb); err != nil { - return nil, err - } + if req.sendCRC32C { dstObjPb.Checksums.Crc32C = &req.dstObject.attrs.CRC32C } srcs := []*storagepb.ComposeObjectRequest_SourceObject{} for _, src := range req.srcs { - srcObjPb := &storagepb.ComposeObjectRequest_SourceObject{Name: src.name} - if err := applyCondsProto("ComposeObject source", src.gen, src.conds, srcObjPb); err != nil { + srcObjPb := &storagepb.ComposeObjectRequest_SourceObject{Name: src.name, ObjectPreconditions: &storagepb.ComposeObjectRequest_SourceObject_ObjectPreconditions{}} + if src.gen >= 0 { + srcObjPb.Generation = src.gen + } + if err := applyCondsProto("ComposeObject source", defaultGen, src.conds, srcObjPb.ObjectPreconditions); err != nil { return nil, err } srcs = append(srcs, srcObjPb) @@ -769,6 +806,9 @@ func (c *grpcStorageClient) ComposeObject(ctx context.Context, req *composeObjec Destination: dstObjPb, SourceObjects: srcs, } + if err := applyCondsProto("ComposeObject destination", defaultGen, req.dstObject.conds, rawReq); err != nil { + return nil, err + } if req.predefinedACL != "" { rawReq.DestinationPredefinedAcl = req.predefinedACL } @@ -778,10 +818,10 @@ func (c *grpcStorageClient) ComposeObject(ctx context.Context, req *composeObjec var obj *storagepb.Object var err error - if err := run(ctx, func() error { + if err := run(ctx, func(ctx context.Context) error { obj, err = c.raw.ComposeObject(ctx, rawReq, s.gax...) return err - }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)); err != nil { + }, s.retry, s.idempotent); err != nil { return nil, err } @@ -828,9 +868,9 @@ func (c *grpcStorageClient) RewriteObject(ctx context.Context, req *rewriteObjec var res *storagepb.RewriteResponse var err error - retryCall := func() error { res, err = c.raw.RewriteObject(ctx, call, s.gax...); return err } + retryCall := func(ctx context.Context) error { res, err = c.raw.RewriteObject(ctx, call, s.gax...); return err } - if err := run(ctx, retryCall, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)); err != nil { + if err := run(ctx, retryCall, s.retry, s.idempotent); err != nil { return nil, err } @@ -855,13 +895,6 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRange ctx = setUserProjectMetadata(ctx, s.userProject) } - // A negative length means "read to the end of the object", but the - // read_limit field it corresponds to uses zero to mean the same thing. Thus - // we coerce the length to 0 to read to the end of the object. - if params.length < 0 { - params.length = 0 - } - b := bucketResourceName(globalProjectAlias, params.bucket) req := &storagepb.ReadObjectRequest{ Bucket: b, @@ -884,13 +917,13 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRange cc, cancel := context.WithCancel(ctx) - start := params.offset + seen - // Only set a ReadLimit if length is greater than zero, because zero - // means read it all. + req.ReadOffset = params.offset + seen + + // Only set a ReadLimit if length is greater than zero, because <= 0 means + // to read it all. 
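// Editor's note (not part of the patch): the reader changes around this hunk
// distinguish a negative length (read to the end of the object) from a zero
// length (an empty range whose stream is closed immediately). A sketch of
// both cases from the caller's side, with placeholder names:
package main

import (
	"context"
	"io"
	"log"
	"os"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	obj := client.Bucket("my-bucket").Object("my-object")

	// length = -1: read from the offset to the end of the object.
	r, err := obj.NewRangeReader(ctx, 0, -1)
	if err != nil {
		log.Fatal(err)
	}
	defer r.Close()
	if _, err := io.Copy(os.Stdout, r); err != nil {
		log.Fatal(err)
	}

	// length = 0: no data is returned, but the reader's attributes are set.
	zr, err := obj.NewRangeReader(ctx, 0, 0)
	if err != nil {
		log.Fatal(err)
	}
	defer zr.Close()
	log.Println(zr.Attrs.Size)
}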
if params.length > 0 { req.ReadLimit = params.length - seen } - req.ReadOffset = start if err := applyCondsProto("gRPCReader.reopen", params.gen, params.conds, req); err != nil { cancel() @@ -901,7 +934,7 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRange var msg *storagepb.ReadObjectResponse var err error - err = run(cc, func() error { + err = run(cc, func(ctx context.Context) error { stream, err = c.raw.ReadObject(cc, req, s.gax...) if err != nil { return err @@ -915,7 +948,7 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRange } return err - }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + }, s.retry, s.idempotent) if err != nil { // Close the stream context we just created to ensure we don't leak // resources. @@ -957,19 +990,27 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRange // client buffer for reading later. leftovers: msg.GetChecksummedData().GetContent(), settings: s, + zeroRange: params.length == 0, }, } cr := msg.GetContentRange() if cr != nil { r.Attrs.StartOffset = cr.GetStart() - r.remain = cr.GetEnd() - cr.GetStart() + 1 + r.remain = cr.GetEnd() - cr.GetStart() } else { r.remain = size } + // For a zero-length request, explicitly close the stream and set remaining + // bytes to zero. + if params.length == 0 { + r.remain = 0 + r.reader.Close() + } + // Only support checksums when reading an entire object, not a range. - if checksums := msg.GetObjectChecksums(); checksums != nil && checksums.Crc32C != nil && params.offset == 0 && params.length == 0 { + if checksums := msg.GetObjectChecksums(); checksums != nil && checksums.Crc32C != nil && params.offset == 0 && params.length < 0 { r.wantCRC = checksums.GetCrc32C() r.checkCRC = true } @@ -1009,11 +1050,13 @@ func (c *grpcStorageClient) OpenWriter(params *openWriterParams, opts ...storage } // The chunk buffer is full, but there is no end in sight. This - // means that a resumable upload will need to be used to send + // means that either: + // 1. A resumable upload will need to be used to send // multiple chunks, until we are done reading data. Start a // resumable upload if it has not already been started. - // Otherwise, all data will be sent over a single gRPC stream. - if !doneReading && gw.upid == "" { + // 2. ChunkSize of zero may also have a full buffer, but a resumable + // session should not be initiated in this case. + if !doneReading && gw.upid == "" && params.chunkSize != 0 { err = gw.startResumableUpload() if err != nil { err = checkCanceled(err) @@ -1023,22 +1066,28 @@ func (c *grpcStorageClient) OpenWriter(params *openWriterParams, opts ...storage } } - o, off, finalized, err := gw.uploadBuffer(recvd, offset, doneReading) + o, off, err := gw.uploadBuffer(recvd, offset, doneReading) if err != nil { err = checkCanceled(err) errorf(err) pr.CloseWithError(err) return } - // At this point, the current buffer has been uploaded. Capture the - // committed offset here in case the upload was not finalized and - // another chunk is to be uploaded. - offset = off - progress(offset) - - // When we are done reading data and the chunk has been finalized, - // we are done. - if doneReading && finalized { + + // At this point, the current buffer has been uploaded. For resumable + // uploads and chunkSize = 0, capture the committed offset here in case + // the upload was not finalized and another chunk is to be uploaded. Call + // the progress function for resumable uploads only. 
+ if gw.upid != "" || gw.chunkSize == 0 { + offset = off + } + if gw.upid != "" { + progress(offset) + } + + // When we are done reading data without errors, set the object and + // finish. + if doneReading { // Build Object from server's response. setObj(newObjectFromProto(o)) return @@ -1061,11 +1110,11 @@ func (c *grpcStorageClient) GetIamPolicy(ctx context.Context, resource string, v }, } var rp *iampb.Policy - err := run(ctx, func() error { + err := run(ctx, func(ctx context.Context) error { var err error rp, err = c.raw.GetIamPolicy(ctx, req, s.gax...) return err - }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + }, s.retry, s.idempotent) return rp, err } @@ -1079,10 +1128,10 @@ func (c *grpcStorageClient) SetIamPolicy(ctx context.Context, resource string, p Policy: policy, } - return run(ctx, func() error { + return run(ctx, func(ctx context.Context) error { _, err := c.raw.SetIamPolicy(ctx, req, s.gax...) return err - }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + }, s.retry, s.idempotent) } func (c *grpcStorageClient) TestIamPermissions(ctx context.Context, resource string, permissions []string, opts ...storageOption) ([]string, error) { @@ -1093,11 +1142,11 @@ func (c *grpcStorageClient) TestIamPermissions(ctx context.Context, resource str Permissions: permissions, } var res *iampb.TestIamPermissionsResponse - err := run(ctx, func() error { + err := run(ctx, func(ctx context.Context) error { var err error res, err = c.raw.TestIamPermissions(ctx, req, s.gax...) return err - }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + }, s.retry, s.idempotent) if err != nil { return nil, err } @@ -1116,11 +1165,11 @@ func (c *grpcStorageClient) GetHMACKey(ctx context.Context, project, accessID st ctx = setUserProjectMetadata(ctx, s.userProject) } var metadata *storagepb.HmacKeyMetadata - err := run(ctx, func() error { + err := run(ctx, func(ctx context.Context) error { var err error metadata, err = c.raw.GetHmacKey(ctx, req, s.gax...) return err - }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + }, s.retry, s.idempotent) if err != nil { return nil, err } @@ -1142,13 +1191,13 @@ func (c *grpcStorageClient) ListHMACKeys(ctx context.Context, project, serviceAc projectID: project, retry: s.retry, } - gitr := c.raw.ListHmacKeys(it.ctx, req, s.gax...) fetch := func(pageSize int, pageToken string) (token string, err error) { var hmacKeys []*storagepb.HmacKeyMetadata - err = run(it.ctx, func() error { + err = run(it.ctx, func(ctx context.Context) error { + gitr := c.raw.ListHmacKeys(ctx, req, s.gax...) hmacKeys, token, err = gitr.InternalFetch(pageSize, pageToken) return err - }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + }, s.retry, s.idempotent) if err != nil { return "", err } @@ -1195,11 +1244,11 @@ func (c *grpcStorageClient) UpdateHMACKey(ctx context.Context, project, serviceA ctx = setUserProjectMetadata(ctx, s.userProject) } var metadata *storagepb.HmacKeyMetadata - err := run(ctx, func() error { + err := run(ctx, func(ctx context.Context) error { var err error metadata, err = c.raw.UpdateHmacKey(ctx, req, s.gax...) 
return err - }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + }, s.retry, s.idempotent) if err != nil { return nil, err } @@ -1216,11 +1265,11 @@ func (c *grpcStorageClient) CreateHMACKey(ctx context.Context, project, serviceA ctx = setUserProjectMetadata(ctx, s.userProject) } var res *storagepb.CreateHmacKeyResponse - err := run(ctx, func() error { + err := run(ctx, func(ctx context.Context) error { var err error res, err = c.raw.CreateHmacKey(ctx, req, s.gax...) return err - }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + }, s.retry, s.idempotent) if err != nil { return nil, err } @@ -1239,9 +1288,9 @@ func (c *grpcStorageClient) DeleteHMACKey(ctx context.Context, project string, a if s.userProject != "" { ctx = setUserProjectMetadata(ctx, s.userProject) } - return run(ctx, func() error { + return run(ctx, func(ctx context.Context) error { return c.raw.DeleteHmacKey(ctx, req, s.gax...) - }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + }, s.retry, s.idempotent) } // Notification methods. @@ -1254,12 +1303,12 @@ func (c *grpcStorageClient) ListNotifications(ctx context.Context, bucket string if s.userProject != "" { ctx = setUserProjectMetadata(ctx, s.userProject) } - req := &storagepb.ListNotificationsRequest{ + req := &storagepb.ListNotificationConfigsRequest{ Parent: bucketResourceName(globalProjectAlias, bucket), } - var notifications []*storagepb.Notification - err = run(ctx, func() error { - gitr := c.raw.ListNotifications(ctx, req, s.gax...) + var notifications []*storagepb.NotificationConfig + err = run(ctx, func(ctx context.Context) error { + gitr := c.raw.ListNotificationConfigs(ctx, req, s.gax...) for { // PageSize is not set and fallbacks to the API default pageSize of 100. items, nextPageToken, err := gitr.InternalFetch(int(req.GetPageSize()), req.GetPageToken()) @@ -1273,7 +1322,7 @@ func (c *grpcStorageClient) ListNotifications(ctx context.Context, bucket string } req.PageToken = nextPageToken } - }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + }, s.retry, s.idempotent) if err != nil { return nil, err } @@ -1286,16 +1335,16 @@ func (c *grpcStorageClient) CreateNotification(ctx context.Context, bucket strin defer func() { trace.EndSpan(ctx, err) }() s := callSettings(c.settings, opts...) - req := &storagepb.CreateNotificationRequest{ - Parent: bucketResourceName(globalProjectAlias, bucket), - Notification: toProtoNotification(n), + req := &storagepb.CreateNotificationConfigRequest{ + Parent: bucketResourceName(globalProjectAlias, bucket), + NotificationConfig: toProtoNotification(n), } - var pbn *storagepb.Notification - err = run(ctx, func() error { + var pbn *storagepb.NotificationConfig + err = run(ctx, func(ctx context.Context) error { var err error - pbn, err = c.raw.CreateNotification(ctx, req, s.gax...) + pbn, err = c.raw.CreateNotificationConfig(ctx, req, s.gax...) return err - }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + }, s.retry, s.idempotent) if err != nil { return nil, err } @@ -1307,10 +1356,10 @@ func (c *grpcStorageClient) DeleteNotification(ctx context.Context, bucket strin defer func() { trace.EndSpan(ctx, err) }() s := callSettings(c.settings, opts...) - req := &storagepb.DeleteNotificationRequest{Name: id} - return run(ctx, func() error { - return c.raw.DeleteNotification(ctx, req, s.gax...) - }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + req := &storagepb.DeleteNotificationConfigRequest{Name: id} + return run(ctx, func(ctx context.Context) error { + return c.raw.DeleteNotificationConfig(ctx, req, s.gax...) 
+ }, s.retry, s.idempotent) } // setUserProjectMetadata appends a project ID to the outgoing Context metadata @@ -1329,6 +1378,7 @@ type readStreamResponse struct { type gRPCReader struct { seen, size int64 + zeroRange bool stream storagepb.Storage_ReadObjectClient reopen func(seen int64) (*readStreamResponse, context.CancelFunc, error) leftovers []byte @@ -1338,7 +1388,12 @@ type gRPCReader struct { // Read reads bytes into the user's buffer from an open gRPC stream. func (r *gRPCReader) Read(p []byte) (int, error) { - // No stream to read from, either never initiliazed or Close was called. + // The entire object has been read by this reader, return EOF. + if r.size == r.seen || r.zeroRange { + return 0, io.EOF + } + + // No stream to read from, either never initialized or Close was called. // Note: There is a potential concurrency issue if multiple routines are // using the same reader. One encounters an error and the stream is closed // and then reopened while the other routine attempts to read from it. @@ -1346,11 +1401,6 @@ func (r *gRPCReader) Read(p []byte) (int, error) { return 0, fmt.Errorf("reader has been closed") } - // The entire object has been read by this reader, return EOF. - if r.size != 0 && r.size == r.seen { - return 0, io.EOF - } - var n int // Read leftovers and return what was available to conform to the Reader // interface: https://pkg.go.dev/io#Reader. @@ -1441,14 +1491,17 @@ func (r *gRPCReader) reopenStream() (*storagepb.ReadObjectResponse, error) { func newGRPCWriter(c *grpcStorageClient, params *openWriterParams, r io.Reader) *gRPCWriter { size := params.chunkSize + + // Round up chunksize to nearest 256KiB + if size%googleapi.MinUploadChunkSize != 0 { + size += googleapi.MinUploadChunkSize - (size % googleapi.MinUploadChunkSize) + } + + // A completely bufferless upload is not possible as it is in JSON because + // the buffer must be provided to the message. However use the minimum size + // possible in this case. if params.chunkSize == 0 { - // TODO: Should we actually use the minimum of 256 KB here when the user - // indicates they want minimal memory usage? We cannot do a zero-copy, - // bufferless upload like HTTP/JSON can. - // TODO: We need to determine if we can avoid starting a - // resumable upload when the user *plans* to send more than bufSize but - // with a bufferless upload. - size = maxPerMessageWriteSize + size = googleapi.MinUploadChunkSize } return &gRPCWriter{ @@ -1461,6 +1514,7 @@ func newGRPCWriter(c *grpcStorageClient, params *openWriterParams, r io.Reader) conds: params.conds, encryptionKey: params.encryptionKey, sendCRC32C: params.sendCRC32C, + chunkSize: params.chunkSize, } } @@ -1480,9 +1534,10 @@ type gRPCWriter struct { settings *settings sendCRC32C bool + chunkSize int // The gRPC client-stream used for sending buffers. - stream storagepb.Storage_WriteObjectClient + stream storagepb.Storage_BidiWriteObjectClient // The Resumable Upload ID started by a gRPC-based Writer. upid string @@ -1503,89 +1558,101 @@ func (w *gRPCWriter) startResumableUpload() error { // the upload, but in the future, we must also support sending it // on the *last* message of the stream. 
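// Editor's note (not part of the patch): newGRPCWriter above now rounds a
// non-zero ChunkSize up to the next multiple of googleapi.MinUploadChunkSize
// (256 KiB) and falls back to that minimum when ChunkSize is zero. A tiny
// standalone illustration of the rounding arithmetic:
package main

import (
	"fmt"

	"google.golang.org/api/googleapi"
)

// roundUpChunkSize mirrors the rounding in the hunk above: sizes that are not
// already a multiple of 256 KiB are bumped up to the next multiple.
func roundUpChunkSize(size int) int {
	if size%googleapi.MinUploadChunkSize != 0 {
		size += googleapi.MinUploadChunkSize - (size % googleapi.MinUploadChunkSize)
	}
	return size
}

func main() {
	fmt.Println(roundUpChunkSize(1))      // 262144
	fmt.Println(roundUpChunkSize(262144)) // 262144 (already aligned)
	fmt.Println(roundUpChunkSize(300000)) // 524288
}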
req.ObjectChecksums = toProtoChecksums(w.sendCRC32C, w.attrs) - return run(w.ctx, func() error { + return run(w.ctx, func(ctx context.Context) error { upres, err := w.c.raw.StartResumableWrite(w.ctx, req) w.upid = upres.GetUploadId() return err - }, w.settings.retry, w.settings.idempotent, setRetryHeaderGRPC(w.ctx)) + }, w.settings.retry, w.settings.idempotent) } // queryProgress is a helper that queries the status of the resumable upload // associated with the given upload ID. func (w *gRPCWriter) queryProgress() (int64, error) { var persistedSize int64 - err := run(w.ctx, func() error { + err := run(w.ctx, func(ctx context.Context) error { q, err := w.c.raw.QueryWriteStatus(w.ctx, &storagepb.QueryWriteStatusRequest{ UploadId: w.upid, }) persistedSize = q.GetPersistedSize() return err - }, w.settings.retry, true, setRetryHeaderGRPC(w.ctx)) + }, w.settings.retry, true) // q.GetCommittedSize() will return 0 if q is nil. return persistedSize, err } -// uploadBuffer opens a Write stream and uploads the buffer at the given offset (if -// uploading a chunk for a resumable uploadBuffer), and will mark the write as -// finished if we are done receiving data from the user. The resulting write -// offset after uploading the buffer is returned, as well as a boolean -// indicating if the Object has been finalized. If it has been finalized, the -// final Object will be returned as well. Finalizing the upload is primarily -// important for Resumable Uploads. A simple or multi-part upload will always -// be finalized once the entire buffer has been written. -func (w *gRPCWriter) uploadBuffer(recvd int, start int64, doneReading bool) (*storagepb.Object, int64, bool, error) { - var err error - var finishWrite bool - var sent, limit int = 0, maxPerMessageWriteSize +// uploadBuffer uploads the buffer at the given offset using a bi-directional +// Write stream. It will open a new stream if necessary (on the first call or +// after resuming from failure). The resulting write offset after uploading the +// buffer is returned, as well as well as the final Object if the upload is +// completed. +// +// Returns object, persisted size, and any error that is not retriable. +func (w *gRPCWriter) uploadBuffer(recvd int, start int64, doneReading bool) (*storagepb.Object, int64, error) { var shouldRetry = ShouldRetry if w.settings.retry != nil && w.settings.retry.shouldRetry != nil { shouldRetry = w.settings.retry.shouldRetry } - offset := start + + var err error + var lastWriteOfEntireObject bool + + sent := 0 + writeOffset := start + toWrite := w.buf[:recvd] + + // Send a request with as many bytes as possible. + // Loop until all bytes are sent. for { - first := sent == 0 - // This indicates that this is the last message and the remaining - // data fits in one message. - belowLimit := recvd-sent <= limit - if belowLimit { - limit = recvd - sent + bytesNotYetSent := recvd - sent + remainingDataFitsInSingleReq := bytesNotYetSent <= maxPerMessageWriteSize + + if remainingDataFitsInSingleReq && doneReading { + lastWriteOfEntireObject = true } - if belowLimit && doneReading { - finishWrite = true + + // Send the maximum amount of bytes we can, unless we don't have that many. + bytesToSendInCurrReq := maxPerMessageWriteSize + if remainingDataFitsInSingleReq { + bytesToSendInCurrReq = bytesNotYetSent } // Prepare chunk section for upload. 
- data := toWrite[sent : sent+limit] - req := &storagepb.WriteObjectRequest{ - Data: &storagepb.WriteObjectRequest_ChecksummedData{ + data := toWrite[sent : sent+bytesToSendInCurrReq] + + req := &storagepb.BidiWriteObjectRequest{ + Data: &storagepb.BidiWriteObjectRequest_ChecksummedData{ ChecksummedData: &storagepb.ChecksummedData{ Content: data, }, }, - WriteOffset: offset, - FinishWrite: finishWrite, + WriteOffset: writeOffset, + FinishWrite: lastWriteOfEntireObject, + Flush: remainingDataFitsInSingleReq, + StateLookup: remainingDataFitsInSingleReq, } - // Open a new stream and set the first_message field on the request. - // The first message on the WriteObject stream must either be the - // Object or the Resumable Upload ID. - if first { - ctx := gapic.InsertMetadata(w.ctx, metadata.Pairs("x-goog-request-params", "bucket="+url.QueryEscape(w.bucket))) - w.stream, err = w.c.raw.WriteObject(ctx) + // Open a new stream if necessary and set the first_message field on + // the request. The first message on the WriteObject stream must either + // be the Object or the Resumable Upload ID. + if w.stream == nil { + hds := []string{"x-goog-request-params", fmt.Sprintf("bucket=projects/_/buckets/%s", url.QueryEscape(w.bucket))} + ctx := gax.InsertMetadataIntoOutgoingContext(w.ctx, hds...) + + w.stream, err = w.c.raw.BidiWriteObject(ctx) if err != nil { - return nil, 0, false, err + return nil, 0, err } - if w.upid != "" { - req.FirstMessage = &storagepb.WriteObjectRequest_UploadId{UploadId: w.upid} - } else { + if w.upid != "" { // resumable upload + req.FirstMessage = &storagepb.BidiWriteObjectRequest_UploadId{UploadId: w.upid} + } else { // non-resumable spec, err := w.writeObjectSpec() if err != nil { - return nil, 0, false, err + return nil, 0, err } - req.FirstMessage = &storagepb.WriteObjectRequest_WriteObjectSpec{ + req.FirstMessage = &storagepb.BidiWriteObjectRequest_WriteObjectSpec{ WriteObjectSpec: spec, } req.CommonObjectRequestParams = toProtoCommonObjectRequestParams(w.encryptionKey) @@ -1595,38 +1662,53 @@ func (w *gRPCWriter) uploadBuffer(recvd int, start int64, doneReading bool) (*st // on the *last* message of the stream (instead of the first). req.ObjectChecksums = toProtoChecksums(w.sendCRC32C, w.attrs) } - } err = w.stream.Send(req) if err == io.EOF { // err was io.EOF. The client-side of a stream only gets an EOF on Send // when the backend closes the stream and wants to return an error - // status. Closing the stream receives the status as an error. - _, err = w.stream.CloseAndRecv() + // status. + + // Receive from the stream Recv() until it returns a non-nil error + // to receive the server's status as an error. We may get multiple + // messages before the error due to buffering. + err = nil + for err == nil { + _, err = w.stream.Recv() + } + // Drop the stream reference as a new one will need to be created if + // we retry. + w.stream = nil + + // Drop the stream reference as a new one will need to be created if + // we can retry the upload + w.stream = nil // Retriable errors mean we should start over and attempt to // resend the entire buffer via a new stream. - // If not retriable, falling through will return the error received - // from closing the stream. + // If not retriable, falling through will return the error received. if shouldRetry(err) { - sent = 0 - finishWrite = false // TODO: Add test case for failure modes of querying progress. 
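The io.EOF handling above relies on a general gRPC convention: when Send on a client stream returns io.EOF, the server has already closed the stream, and the real status only becomes visible by calling Recv until it returns a non-nil error. A minimal, generic sketch of that drain step (the interface below is a stand-in for illustration, not the library's stream type):

    // recvOnly stands in for the receive side of any gRPC client stream.
    type recvOnly interface {
        Recv() (interface{}, error)
    }

    // drainStatus keeps receiving until a non-nil error arrives; that error
    // carries the server's actual status after Send reported io.EOF.
    func drainStatus(s recvOnly) error {
        var err error
        for err == nil {
            _, err = s.Recv()
        }
        return err
    }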
- offset, err = w.determineOffset(start) - if err == nil { - continue + writeOffset, err = w.determineOffset(start) + if err != nil { + return nil, 0, err } + sent = int(writeOffset) - int(start) + + // Continue sending requests, opening a new stream and resending + // any bytes not yet persisted as per QueryWriteStatus + continue } } if err != nil { - return nil, 0, false, err + return nil, 0, err } // Update the immediate stream's sent total and the upload offset with // the data sent. sent += len(data) - offset += int64(len(data)) + writeOffset += int64(len(data)) // Not done sending data, do not attempt to commit it yet, loop around // and send more data. @@ -1634,25 +1716,82 @@ func (w *gRPCWriter) uploadBuffer(recvd int, start int64, doneReading bool) (*st continue } - // Done sending data. Close the stream to "commit" the data sent. - resp, finalized, err := w.commit() + // The buffer has been uploaded and there is still more data to be + // uploaded, but this is not a resumable upload session. Therefore, + // don't check persisted data. + if !lastWriteOfEntireObject && w.chunkSize == 0 { + return nil, writeOffset, nil + } + + // Done sending data (remainingDataFitsInSingleReq should == true if we + // reach this code). Receive from the stream to confirm the persisted data. + resp, err := w.stream.Recv() + // Retriable errors mean we should start over and attempt to // resend the entire buffer via a new stream. // If not retriable, falling through will return the error received // from closing the stream. if shouldRetry(err) { - sent = 0 - finishWrite = false - offset, err = w.determineOffset(start) - if err == nil { - continue + writeOffset, err = w.determineOffset(start) + if err != nil { + return nil, 0, err } + sent = int(writeOffset) - int(start) + + // Drop the stream reference as a new one will need to be created. + w.stream = nil + + continue } if err != nil { - return nil, 0, false, err + return nil, 0, err } - return resp.GetResource(), offset, finalized, nil + // Confirm the persisted data if we have not finished uploading the object. + if !lastWriteOfEntireObject { + if resp.GetPersistedSize() != writeOffset { + // Retry if not all bytes were persisted. + writeOffset = resp.GetPersistedSize() + sent = int(writeOffset) - int(start) + continue + } + } else { + // If the object is done uploading, close the send stream to signal + // to the server that we are done sending so that we can receive + // from the stream without blocking. + err = w.stream.CloseSend() + if err != nil { + // CloseSend() retries the send internally. It never returns an + // error in the current implementation, but we check it anyway in + // case that it does in the future. + return nil, 0, err + } + + // Stream receives do not block once send is closed, but we may not + // receive the response with the object right away; loop until we + // receive the object or error out. + var obj *storagepb.Object + for obj == nil { + resp, err := w.stream.Recv() + if err != nil { + return nil, 0, err + } + + obj = resp.GetResource() + } + + // Even though we received the object response, continue reading + // until we receive a non-nil error, to ensure the stream does not + // leak even if the context isn't cancelled. 
See: + // https://pkg.go.dev/google.golang.org/grpc#ClientConn.NewStream + for err == nil { + _, err = w.stream.Recv() + } + + return obj, writeOffset, nil + } + + return nil, writeOffset, nil } } @@ -1672,26 +1811,6 @@ func (w *gRPCWriter) determineOffset(offset int64) (int64, error) { return offset, nil } -// commit closes the stream to commit the data sent and potentially receive -// the finalized object if finished uploading. If the last request sent -// indicated that writing was finished, the Object will be finalized and -// returned. If not, then the Object will be nil, and the boolean returned will -// be false. -func (w *gRPCWriter) commit() (*storagepb.WriteObjectResponse, bool, error) { - finalized := true - resp, err := w.stream.CloseAndRecv() - if err == io.EOF { - // Closing a stream for a resumable upload finish_write = false results - // in an EOF which can be ignored, as we aren't done uploading yet. - finalized = false - err = nil - } - // Drop the stream reference as it has been closed. - w.stream = nil - - return resp, finalized, err -} - // writeObjectSpec constructs a WriteObjectSpec proto using the Writer's // ObjectAttrs and applies its Conditions. This is only used for gRPC. func (w *gRPCWriter) writeObjectSpec() (*storagepb.WriteObjectSpec, error) { diff --git a/vendor/cloud.google.com/go/storage/hmac.go b/vendor/cloud.google.com/go/storage/hmac.go index 422a7c2335..1b9fbe9dd2 100644 --- a/vendor/cloud.google.com/go/storage/hmac.go +++ b/vendor/cloud.google.com/go/storage/hmac.go @@ -20,14 +20,12 @@ import ( "fmt" "time" - storagepb "cloud.google.com/go/storage/internal/apiv2/stubs" + "cloud.google.com/go/storage/internal/apiv2/storagepb" "google.golang.org/api/iterator" raw "google.golang.org/api/storage/v1" ) // HMACState is the state of the HMAC key. -// -// This type is EXPERIMENTAL and subject to change or removal without notice. type HMACState string const ( @@ -50,8 +48,6 @@ const ( // // HMAC keys are used to authenticate signed access to objects. To enable HMAC key // authentication, please visit https://cloud.google.com/storage/docs/migrating. -// -// This type is EXPERIMENTAL and subject to change or removal without notice. type HMACKey struct { // The HMAC's secret key. Secret string @@ -85,8 +81,6 @@ type HMACKey struct { } // HMACKeyHandle helps provide access and management for HMAC keys. -// -// This type is EXPERIMENTAL and subject to change or removal without notice. type HMACKeyHandle struct { projectID string accessID string @@ -95,8 +89,6 @@ type HMACKeyHandle struct { } // HMACKeyHandle creates a handle that will be used for HMACKey operations. -// -// This method is EXPERIMENTAL and subject to change or removal without notice. func (c *Client) HMACKeyHandle(projectID, accessID string) *HMACKeyHandle { return &HMACKeyHandle{ projectID: projectID, @@ -111,8 +103,6 @@ func (c *Client) HMACKeyHandle(projectID, accessID string) *HMACKeyHandle { // // Options such as UserProjectForHMACKeys can be used to set the // userProject to be billed against for operations. -// -// This method is EXPERIMENTAL and subject to change or removal without notice. func (hkh *HMACKeyHandle) Get(ctx context.Context, opts ...HMACKeyOption) (*HMACKey, error) { desc := new(hmacKeyDesc) for _, opt := range opts { @@ -128,8 +118,6 @@ func (hkh *HMACKeyHandle) Get(ctx context.Context, opts ...HMACKeyOption) (*HMAC // Delete invokes an RPC to delete the key referenced by accessID, on Google Cloud Storage. // Only inactive HMAC keys can be deleted. 
// After deletion, a key cannot be used to authenticate requests. -// -// This method is EXPERIMENTAL and subject to change or removal without notice. func (hkh *HMACKeyHandle) Delete(ctx context.Context, opts ...HMACKeyOption) error { desc := new(hmacKeyDesc) for _, opt := range opts { @@ -187,8 +175,6 @@ func toHMACKeyFromProto(pbmd *storagepb.HmacKeyMetadata) *HMACKey { } // CreateHMACKey invokes an RPC for Google Cloud Storage to create a new HMACKey. -// -// This method is EXPERIMENTAL and subject to change or removal without notice. func (c *Client) CreateHMACKey(ctx context.Context, projectID, serviceAccountEmail string, opts ...HMACKeyOption) (*HMACKey, error) { if projectID == "" { return nil, errors.New("storage: expecting a non-blank projectID") @@ -208,8 +194,6 @@ func (c *Client) CreateHMACKey(ctx context.Context, projectID, serviceAccountEma } // HMACKeyAttrsToUpdate defines the attributes of an HMACKey that will be updated. -// -// This type is EXPERIMENTAL and subject to change or removal without notice. type HMACKeyAttrsToUpdate struct { // State is required and must be either StateActive or StateInactive. State HMACState @@ -219,8 +203,6 @@ type HMACKeyAttrsToUpdate struct { } // Update mutates the HMACKey referred to by accessID. -// -// This method is EXPERIMENTAL and subject to change or removal without notice. func (h *HMACKeyHandle) Update(ctx context.Context, au HMACKeyAttrsToUpdate, opts ...HMACKeyOption) (*HMACKey, error) { if au.State != Active && au.State != Inactive { return nil, fmt.Errorf("storage: invalid state %q for update, must be either %q or %q", au.State, Active, Inactive) @@ -240,8 +222,6 @@ func (h *HMACKeyHandle) Update(ctx context.Context, au HMACKeyAttrsToUpdate, opt // An HMACKeysIterator is an iterator over HMACKeys. // // Note: This iterator is not safe for concurrent operations without explicit synchronization. -// -// This type is EXPERIMENTAL and subject to change or removal without notice. type HMACKeysIterator struct { ctx context.Context raw *raw.ProjectsHmacKeysService @@ -257,8 +237,6 @@ type HMACKeysIterator struct { // ListHMACKeys returns an iterator for listing HMACKeys. // // Note: This iterator is not safe for concurrent operations without explicit synchronization. -// -// This method is EXPERIMENTAL and subject to change or removal without notice. func (c *Client) ListHMACKeys(ctx context.Context, projectID string, opts ...HMACKeyOption) *HMACKeysIterator { desc := new(hmacKeyDesc) for _, opt := range opts { @@ -274,8 +252,6 @@ func (c *Client) ListHMACKeys(ctx context.Context, projectID string, opts ...HMA // calls will return iterator.Done. // // Note: This iterator is not safe for concurrent operations without explicit synchronization. -// -// This method is EXPERIMENTAL and subject to change or removal without notice. func (it *HMACKeysIterator) Next() (*HMACKey, error) { if err := it.nextFunc(); err != nil { return nil, err @@ -290,8 +266,6 @@ func (it *HMACKeysIterator) Next() (*HMACKey, error) { // PageInfo supports pagination. See the google.golang.org/api/iterator package for details. // // Note: This iterator is not safe for concurrent operations without explicit synchronization. -// -// This method is EXPERIMENTAL and subject to change or removal without notice. 
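With the EXPERIMENTAL notices removed, the HMAC key API is now documented as stable. A hedged end-to-end sketch of the lifecycle these methods implement (project and service-account values are placeholders):

    package main

    import (
        "context"
        "log"

        "cloud.google.com/go/storage"
    )

    func main() {
        ctx := context.Background()
        client, err := storage.NewClient(ctx)
        if err != nil {
            log.Fatal(err)
        }
        defer client.Close()

        // Create a key for a service account (placeholder values).
        key, err := client.CreateHMACKey(ctx, "my-project", "sa@my-project.iam.gserviceaccount.com")
        if err != nil {
            log.Fatal(err)
        }

        // Keys must be made inactive before they can be deleted.
        h := client.HMACKeyHandle("my-project", key.AccessID)
        if _, err := h.Update(ctx, storage.HMACKeyAttrsToUpdate{State: storage.Inactive}); err != nil {
            log.Fatal(err)
        }
        if err := h.Delete(ctx); err != nil {
            log.Fatal(err)
        }
    }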
func (it *HMACKeysIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } func (it *HMACKeysIterator) fetch(pageSize int, pageToken string) (token string, err error) { @@ -315,12 +289,11 @@ func (it *HMACKeysIterator) fetch(pageSize int, pageToken string) (token string, call = call.MaxResults(int64(pageSize)) } - ctx := it.ctx var resp *raw.HmacKeysMetadata - err = run(it.ctx, func() error { + err = run(it.ctx, func(ctx context.Context) error { resp, err = call.Context(ctx).Do() return err - }, it.retry, true, setRetryHeaderHTTP(call)) + }, it.retry, true) if err != nil { return "", err } @@ -345,8 +318,6 @@ type hmacKeyDesc struct { } // HMACKeyOption configures the behavior of HMACKey related methods and actions. -// -// This interface is EXPERIMENTAL and subject to change or removal without notice. type HMACKeyOption interface { withHMACKeyDesc(*hmacKeyDesc) } @@ -362,8 +333,6 @@ func (hkdf hmacKeyDescFunc) withHMACKeyDesc(hkd *hmacKeyDesc) { // // Only one service account email can be used as a filter, so if multiple // of these options are applied, the last email to be set will be used. -// -// This option is EXPERIMENTAL and subject to change or removal without notice. func ForHMACKeyServiceAccountEmail(serviceAccountEmail string) HMACKeyOption { return hmacKeyDescFunc(func(hkd *hmacKeyDesc) { hkd.forServiceAccountEmail = serviceAccountEmail @@ -371,8 +340,6 @@ func ForHMACKeyServiceAccountEmail(serviceAccountEmail string) HMACKeyOption { } // ShowDeletedHMACKeys will also list keys whose state is "DELETED". -// -// This option is EXPERIMENTAL and subject to change or removal without notice. func ShowDeletedHMACKeys() HMACKeyOption { return hmacKeyDescFunc(func(hkd *hmacKeyDesc) { hkd.showDeletedKeys = true @@ -383,8 +350,6 @@ func ShowDeletedHMACKeys() HMACKeyOption { // if userProjectID is non-empty. // // Note: This is a noop right now and only provided for API compatibility. -// -// This option is EXPERIMENTAL and subject to change or removal without notice. func UserProjectForHMACKeys(userProjectID string) HMACKeyOption { return hmacKeyDescFunc(func(hkd *hmacKeyDesc) { hkd.userProjectID = userProjectID diff --git a/vendor/cloud.google.com/go/storage/http_client.go b/vendor/cloud.google.com/go/storage/http_client.go index fae96043a9..b62f009da1 100644 --- a/vendor/cloud.google.com/go/storage/http_client.go +++ b/vendor/cloud.google.com/go/storage/http_client.go @@ -29,8 +29,10 @@ import ( "strings" "time" + "cloud.google.com/go/iam/apiv1/iampb" "cloud.google.com/go/internal/optional" "cloud.google.com/go/internal/trace" + "github.com/googleapis/gax-go/v2/callctx" "golang.org/x/oauth2/google" "google.golang.org/api/googleapi" "google.golang.org/api/iterator" @@ -39,29 +41,26 @@ import ( raw "google.golang.org/api/storage/v1" "google.golang.org/api/transport" htransport "google.golang.org/api/transport/http" - iampb "google.golang.org/genproto/googleapis/iam/v1" ) // httpStorageClient is the HTTP-JSON API implementation of the transport-agnostic // storageClient interface. -// -// This is an experimental API and not intended for public use. type httpStorageClient struct { creds *google.Credentials hc *http.Client - readHost string + xmlHost string raw *raw.Service scheme string settings *settings + config *storageConfig } // newHTTPStorageClient initializes a new storageClient that uses the HTTP-JSON // Storage API. -// -// This is an experimental API and not intended for public use. 
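The listing path changed above follows the usual google.golang.org/api/iterator contract. A short consumption sketch, assuming the imports context, cloud.google.com/go/storage and google.golang.org/api/iterator, with placeholder filter values:

    // listKeys walks every HMAC key in a project, including deleted ones,
    // filtered to a single service account.
    func listKeys(ctx context.Context, client *storage.Client) error {
        it := client.ListHMACKeys(ctx, "my-project",
            storage.ForHMACKeyServiceAccountEmail("sa@my-project.iam.gserviceaccount.com"),
            storage.ShowDeletedHMACKeys(),
        )
        for {
            key, err := it.Next()
            if err == iterator.Done {
                return nil
            }
            if err != nil {
                return err
            }
            _ = key // e.g. key.AccessID, key.State
        }
    }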
func newHTTPStorageClient(ctx context.Context, opts ...storageOption) (storageClient, error) { s := initSettings(opts...) o := s.clientOption + config := newStorageConfig(o...) var creds *google.Credentials // In general, it is recommended to use raw.NewService instead of htransport.NewClient @@ -121,7 +120,7 @@ func newHTTPStorageClient(ctx context.Context, opts ...storageOption) (storageCl if err != nil { return nil, fmt.Errorf("storage client: %w", err) } - // Update readHost and scheme with the chosen endpoint. + // Update xmlHost and scheme with the chosen endpoint. u, err := url.Parse(ep) if err != nil { return nil, fmt.Errorf("supplied endpoint %q is not valid: %w", ep, err) @@ -130,10 +129,11 @@ func newHTTPStorageClient(ctx context.Context, opts ...storageOption) (storageCl return &httpStorageClient{ creds: creds, hc: hc, - readHost: u.Host, + xmlHost: u.Host, raw: rawService, scheme: u.Scheme, settings: s, + config: &config, }, nil } @@ -148,11 +148,11 @@ func (c *httpStorageClient) GetServiceAccount(ctx context.Context, project strin s := callSettings(c.settings, opts...) call := c.raw.Projects.ServiceAccount.Get(project) var res *raw.ServiceAccount - err := run(ctx, func() error { + err := run(ctx, func(ctx context.Context) error { var err error res, err = call.Context(ctx).Do() return err - }, s.retry, s.idempotent, setRetryHeaderHTTP(call)) + }, s.retry, s.idempotent) if err != nil { return "", err } @@ -182,14 +182,14 @@ func (c *httpStorageClient) CreateBucket(ctx context.Context, project, bucket st req.PredefinedDefaultObjectAcl(attrs.PredefinedDefaultObjectACL) } var battrs *BucketAttrs - err := run(ctx, func() error { + err := run(ctx, func(ctx context.Context) error { b, err := req.Context(ctx).Do() if err != nil { return err } battrs, err = newBucket(b) return err - }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) + }, s.retry, s.idempotent) return battrs, err } @@ -210,10 +210,10 @@ func (c *httpStorageClient) ListBuckets(ctx context.Context, project string, opt req.MaxResults(int64(pageSize)) } var resp *raw.Buckets - err = run(it.ctx, func() error { - resp, err = req.Context(it.ctx).Do() + err = run(it.ctx, func(ctx context.Context) error { + resp, err = req.Context(ctx).Do() return err - }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) + }, s.retry, s.idempotent) if err != nil { return "", err } @@ -248,7 +248,7 @@ func (c *httpStorageClient) DeleteBucket(ctx context.Context, bucket string, con req.UserProject(s.userProject) } - return run(ctx, func() error { return req.Context(ctx).Do() }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) + return run(ctx, func(ctx context.Context) error { return req.Context(ctx).Do() }, s.retry, s.idempotent) } func (c *httpStorageClient) GetBucket(ctx context.Context, bucket string, conds *BucketConditions, opts ...storageOption) (*BucketAttrs, error) { @@ -264,10 +264,10 @@ func (c *httpStorageClient) GetBucket(ctx context.Context, bucket string, conds } var resp *raw.Bucket - err = run(ctx, func() error { + err = run(ctx, func(ctx context.Context) error { resp, err = req.Context(ctx).Do() return err - }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) + }, s.retry, s.idempotent) var e *googleapi.Error if ok := errors.As(err, &e); ok && e.Code == http.StatusNotFound { @@ -298,10 +298,10 @@ func (c *httpStorageClient) UpdateBucket(ctx context.Context, bucket string, uat } var rawBucket *raw.Bucket - err = run(ctx, func() error { + err = run(ctx, func(ctx context.Context) error { rawBucket, err = req.Context(ctx).Do() return 
err - }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) + }, s.retry, s.idempotent) if err != nil { return nil, err } @@ -317,10 +317,10 @@ func (c *httpStorageClient) LockBucketRetentionPolicy(ctx context.Context, bucke } req := c.raw.Buckets.LockRetentionPolicy(bucket, metageneration) - return run(ctx, func() error { + return run(ctx, func(ctx context.Context) error { _, err := req.Context(ctx).Do() return err - }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) + }, s.retry, s.idempotent) } func (c *httpStorageClient) ListObjects(ctx context.Context, bucket string, q *Query, opts ...storageOption) *ObjectIterator { s := callSettings(c.settings, opts...) @@ -344,6 +344,7 @@ func (c *httpStorageClient) ListObjects(ctx context.Context, bucket string, q *Q req.EndOffset(it.query.EndOffset) req.Versions(it.query.Versions) req.IncludeTrailingDelimiter(it.query.IncludeTrailingDelimiter) + req.MatchGlob(it.query.MatchGlob) if selection := it.query.toFieldSelection(); selection != "" { req.Fields("nextPageToken", googleapi.Field(selection)) } @@ -356,10 +357,10 @@ func (c *httpStorageClient) ListObjects(ctx context.Context, bucket string, q *Q } var resp *raw.Objects var err error - err = run(it.ctx, func() error { - resp, err = req.Context(it.ctx).Do() + err = run(it.ctx, func(ctx context.Context) error { + resp, err = req.Context(ctx).Do() return err - }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) + }, s.retry, s.idempotent) if err != nil { var e *googleapi.Error if ok := errors.As(err, &e); ok && e.Code == http.StatusNotFound { @@ -394,7 +395,7 @@ func (c *httpStorageClient) DeleteObject(ctx context.Context, bucket, object str if s.userProject != "" { req.UserProject(s.userProject) } - err := run(ctx, func() error { return req.Context(ctx).Do() }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) + err := run(ctx, func(ctx context.Context) error { return req.Context(ctx).Do() }, s.retry, s.idempotent) var e *googleapi.Error if ok := errors.As(err, &e); ok && e.Code == http.StatusNotFound { return ErrObjectNotExist @@ -416,10 +417,10 @@ func (c *httpStorageClient) GetObject(ctx context.Context, bucket, object string } var obj *raw.Object var err error - err = run(ctx, func() error { + err = run(ctx, func(ctx context.Context) error { obj, err = req.Context(ctx).Do() return err - }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) + }, s.retry, s.idempotent) var e *googleapi.Error if ok := errors.As(err, &e); ok && e.Code == http.StatusNotFound { return nil, ErrObjectNotExist @@ -498,7 +499,7 @@ func (c *httpStorageClient) UpdateObject(ctx context.Context, bucket, object str rawObj := attrs.toRawObject(bucket) rawObj.ForceSendFields = forceSendFields rawObj.NullFields = nullFields - call := c.raw.Objects.Patch(bucket, object, rawObj).Projection("full").Context(ctx) + call := c.raw.Objects.Patch(bucket, object, rawObj).Projection("full") if err := applyConds("Update", gen, conds, call); err != nil { return nil, err } @@ -513,7 +514,7 @@ func (c *httpStorageClient) UpdateObject(ctx context.Context, bucket, object str } var obj *raw.Object var err error - err = run(ctx, func() error { obj, err = call.Do(); return err }, s.retry, s.idempotent, setRetryHeaderHTTP(call)) + err = run(ctx, func(ctx context.Context) error { obj, err = call.Context(ctx).Do(); return err }, s.retry, s.idempotent) var e *googleapi.Error if errors.As(err, &e) && e.Code == http.StatusNotFound { return nil, ErrObjectNotExist @@ -530,7 +531,7 @@ func (c *httpStorageClient) DeleteDefaultObjectACL(ctx context.Context, bucket s 
s := callSettings(c.settings, opts...) req := c.raw.DefaultObjectAccessControls.Delete(bucket, string(entity)) configureACLCall(ctx, s.userProject, req) - return run(ctx, func() error { return req.Context(ctx).Do() }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) + return run(ctx, func(ctx context.Context) error { return req.Context(ctx).Do() }, s.retry, s.idempotent) } func (c *httpStorageClient) ListDefaultObjectACLs(ctx context.Context, bucket string, opts ...storageOption) ([]ACLRule, error) { @@ -539,10 +540,10 @@ func (c *httpStorageClient) ListDefaultObjectACLs(ctx context.Context, bucket st var err error req := c.raw.DefaultObjectAccessControls.List(bucket) configureACLCall(ctx, s.userProject, req) - err = run(ctx, func() error { - acls, err = req.Do() + err = run(ctx, func(ctx context.Context) error { + acls, err = req.Context(ctx).Do() return err - }, s.retry, true, setRetryHeaderHTTP(req)) + }, s.retry, true) if err != nil { return nil, err } @@ -559,14 +560,13 @@ func (c *httpStorageClient) UpdateDefaultObjectACL(ctx context.Context, bucket s Entity: string(entity), Role: string(role), } - var req setRequest var err error - req = c.raw.DefaultObjectAccessControls.Update(bucket, string(entity), acl) + req := c.raw.DefaultObjectAccessControls.Update(bucket, string(entity), acl) configureACLCall(ctx, s.userProject, req) - return run(ctx, func() error { - _, err = req.Do() + return run(ctx, func(ctx context.Context) error { + _, err = req.Context(ctx).Do() return err - }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) + }, s.retry, s.idempotent) } // Bucket ACL methods. @@ -575,7 +575,7 @@ func (c *httpStorageClient) DeleteBucketACL(ctx context.Context, bucket string, s := callSettings(c.settings, opts...) req := c.raw.BucketAccessControls.Delete(bucket, string(entity)) configureACLCall(ctx, s.userProject, req) - return run(ctx, func() error { return req.Context(ctx).Do() }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) + return run(ctx, func(ctx context.Context) error { return req.Context(ctx).Do() }, s.retry, s.idempotent) } func (c *httpStorageClient) ListBucketACLs(ctx context.Context, bucket string, opts ...storageOption) ([]ACLRule, error) { @@ -584,10 +584,10 @@ func (c *httpStorageClient) ListBucketACLs(ctx context.Context, bucket string, o var err error req := c.raw.BucketAccessControls.List(bucket) configureACLCall(ctx, s.userProject, req) - err = run(ctx, func() error { - acls, err = req.Do() + err = run(ctx, func(ctx context.Context) error { + acls, err = req.Context(ctx).Do() return err - }, s.retry, true, setRetryHeaderHTTP(req)) + }, s.retry, true) if err != nil { return nil, err } @@ -604,10 +604,10 @@ func (c *httpStorageClient) UpdateBucketACL(ctx context.Context, bucket string, req := c.raw.BucketAccessControls.Update(bucket, string(entity), acl) configureACLCall(ctx, s.userProject, req) var err error - return run(ctx, func() error { - _, err = req.Do() + return run(ctx, func(ctx context.Context) error { + _, err = req.Context(ctx).Do() return err - }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) + }, s.retry, s.idempotent) } // configureACLCall sets the context, user project and headers on the apiary library call. @@ -627,7 +627,7 @@ func (c *httpStorageClient) DeleteObjectACL(ctx context.Context, bucket, object s := callSettings(c.settings, opts...) 
req := c.raw.ObjectAccessControls.Delete(bucket, object, string(entity)) configureACLCall(ctx, s.userProject, req) - return run(ctx, func() error { return req.Context(ctx).Do() }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) + return run(ctx, func(ctx context.Context) error { return req.Context(ctx).Do() }, s.retry, s.idempotent) } // ListObjectACLs retrieves object ACL entries. By default, it operates on the latest generation of this object. @@ -638,10 +638,10 @@ func (c *httpStorageClient) ListObjectACLs(ctx context.Context, bucket, object s var err error req := c.raw.ObjectAccessControls.List(bucket, object) configureACLCall(ctx, s.userProject, req) - err = run(ctx, func() error { - acls, err = req.Do() + err = run(ctx, func(ctx context.Context) error { + acls, err = req.Context(ctx).Do() return err - }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) + }, s.retry, s.idempotent) if err != nil { return nil, err } @@ -660,14 +660,13 @@ func (c *httpStorageClient) UpdateObjectACL(ctx context.Context, bucket, object Entity: string(entity), Role: string(role), } - var req setRequest var err error - req = c.raw.ObjectAccessControls.Update(bucket, object, string(entity), acl) + req := c.raw.ObjectAccessControls.Update(bucket, object, string(entity), acl) configureACLCall(ctx, s.userProject, req) - return run(ctx, func() error { - _, err = req.Do() + return run(ctx, func(ctx context.Context) error { + _, err = req.Context(ctx).Do() return err - }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) + }, s.retry, s.idempotent) } // Media operations. @@ -691,7 +690,7 @@ func (c *httpStorageClient) ComposeObject(ctx context.Context, req *composeObjec rawReq.SourceObjects = append(rawReq.SourceObjects, srcObj) } - call := c.raw.Objects.Compose(req.dstBucket, req.dstObject.name, rawReq).Context(ctx) + call := c.raw.Objects.Compose(req.dstBucket, req.dstObject.name, rawReq) if err := applyConds("ComposeFrom destination", defaultGen, req.dstObject.conds, call); err != nil { return nil, err } @@ -708,9 +707,9 @@ func (c *httpStorageClient) ComposeObject(ctx context.Context, req *composeObjec setClientHeader(call.Header()) var err error - retryCall := func() error { obj, err = call.Do(); return err } + retryCall := func(ctx context.Context) error { obj, err = call.Context(ctx).Do(); return err } - if err := run(ctx, retryCall, s.retry, s.idempotent, setRetryHeaderHTTP(call)); err != nil { + if err := run(ctx, retryCall, s.retry, s.idempotent); err != nil { return nil, err } return newObject(obj), nil @@ -720,7 +719,7 @@ func (c *httpStorageClient) RewriteObject(ctx context.Context, req *rewriteObjec rawObject := req.dstObject.attrs.toRawObject("") call := c.raw.Objects.Rewrite(req.srcObject.bucket, req.srcObject.name, req.dstObject.bucket, req.dstObject.name, rawObject) - call.Context(ctx).Projection("full") + call.Projection("full") if req.token != "" { call.RewriteToken(req.token) } @@ -756,9 +755,9 @@ func (c *httpStorageClient) RewriteObject(ctx context.Context, req *rewriteObjec var err error setClientHeader(call.Header()) - retryCall := func() error { res, err = call.Do(); return err } + retryCall := func(ctx context.Context) error { res, err = call.Context(ctx).Do(); return err } - if err := run(ctx, retryCall, s.retry, s.idempotent, setRetryHeaderHTTP(call)); err != nil { + if err := run(ctx, retryCall, s.retry, s.idempotent); err != nil { return nil, err } @@ -779,10 +778,18 @@ func (c *httpStorageClient) NewRangeReader(ctx context.Context, params *newRange s := callSettings(c.settings, opts...) 
+ if c.config.useJSONforReads { + return c.newRangeReaderJSON(ctx, params, s) + } + return c.newRangeReaderXML(ctx, params, s) +} + +func (c *httpStorageClient) newRangeReaderXML(ctx context.Context, params *newRangeReaderParams, s *settings) (r *Reader, err error) { u := &url.URL{ - Scheme: c.scheme, - Host: c.readHost, - Path: fmt.Sprintf("/%s/%s", params.bucket, params.object), + Scheme: c.scheme, + Host: c.xmlHost, + Path: fmt.Sprintf("/%s/%s", params.bucket, params.object), + RawPath: fmt.Sprintf("/%s/%s", params.bucket, url.PathEscape(params.object)), } verb := "GET" if params.length == 0 { @@ -792,187 +799,59 @@ func (c *httpStorageClient) NewRangeReader(ctx context.Context, params *newRange if err != nil { return nil, err } - req = req.WithContext(ctx) + if s.userProject != "" { req.Header.Set("X-Goog-User-Project", s.userProject) } - if params.readCompressed { - req.Header.Set("Accept-Encoding", "gzip") - } - if err := setEncryptionHeaders(req.Header, params.encryptionKey, false); err != nil { + + if err := setRangeReaderHeaders(req.Header, params); err != nil { return nil, err } - // Define a function that initiates a Read with offset and length, assuming we - // have already read seen bytes. - reopen := func(seen int64) (*http.Response, error) { - // If the context has already expired, return immediately without making a - // call. - if err := ctx.Err(); err != nil { - return nil, err - } - start := params.offset + seen - if params.length < 0 && start < 0 { - req.Header.Set("Range", fmt.Sprintf("bytes=%d", start)) - } else if params.length < 0 && start > 0 { - req.Header.Set("Range", fmt.Sprintf("bytes=%d-", start)) - } else if params.length > 0 { - // The end character isn't affected by how many bytes we've seen. - req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", start, params.offset+params.length-1)) - } - // We wait to assign conditions here because the generation number can change in between reopen() runs. - if err := setConditionsHeaders(req.Header, params.conds); err != nil { - return nil, err - } - // If an object generation is specified, include generation as query string parameters. - if params.gen >= 0 { - req.URL.RawQuery = fmt.Sprintf("generation=%d", params.gen) - } - - var res *http.Response - err = run(ctx, func() error { - res, err = c.hc.Do(req) - if err != nil { - return err - } - if res.StatusCode == http.StatusNotFound { - res.Body.Close() - return ErrObjectNotExist - } - if res.StatusCode < 200 || res.StatusCode > 299 { - body, _ := ioutil.ReadAll(res.Body) - res.Body.Close() - return &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - Body: string(body), - } - } - - partialContentNotSatisfied := - !decompressiveTranscoding(res) && - start > 0 && params.length != 0 && - res.StatusCode != http.StatusPartialContent - - if partialContentNotSatisfied { - res.Body.Close() - return errors.New("storage: partial request not satisfied") - } - - // With "Content-Encoding": "gzip" aka decompressive transcoding, GCS serves - // back the whole file regardless of the range count passed in as per: - // https://cloud.google.com/storage/docs/transcoding#range, - // thus we have to manually move the body forward by seen bytes. - if decompressiveTranscoding(res) && seen > 0 { - _, _ = io.CopyN(ioutil.Discard, res.Body, seen) - } - - // If a generation hasn't been specified, and this is the first response we get, let's record the - // generation. In future requests we'll use this generation as a precondition to avoid data races. 
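The useJSONforReads branch added at the top of NewRangeReader is driven by client configuration; this appears to correspond to the storage.WithJSONReads client option, which routes downloads through the JSON API while XML remains the default. A hedged sketch of opting in:

    // Sketch only: the option name is taken from the storage package's public
    // surface, not from this diff. It affects downloads; uploads are unchanged.
    client, err := storage.NewClient(ctx, storage.WithJSONReads())
    if err != nil {
        // handle error
    }
    _ = client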
- if params.gen < 0 && res.Header.Get("X-Goog-Generation") != "" { - gen64, err := strconv.ParseInt(res.Header.Get("X-Goog-Generation"), 10, 64) - if err != nil { - return err - } - params.gen = gen64 - } - return nil - }, s.retry, s.idempotent, setRetryHeaderHTTP(nil)) - if err != nil { - return nil, err + // Set custom headers passed in via the context. This is only required for XML; + // for gRPC & JSON this is handled in the GAPIC and Apiary layers respectively. + ctxHeaders := callctx.HeadersFromContext(ctx) + for k, vals := range ctxHeaders { + for _, v := range vals { + req.Header.Add(k, v) } - return res, nil } + reopen := readerReopen(ctx, req.Header, params, s, + func(ctx context.Context) (*http.Response, error) { return c.hc.Do(req.WithContext(ctx)) }, + func() error { return setConditionsHeaders(req.Header, params.conds) }, + func() { req.URL.RawQuery = fmt.Sprintf("generation=%d", params.gen) }) + res, err := reopen(0) if err != nil { return nil, err } - var ( - size int64 // total size of object, even if a range was requested. - checkCRC bool - crc uint32 - startOffset int64 // non-zero if range request. - ) - if res.StatusCode == http.StatusPartialContent { - cr := strings.TrimSpace(res.Header.Get("Content-Range")) - if !strings.HasPrefix(cr, "bytes ") || !strings.Contains(cr, "/") { - return nil, fmt.Errorf("storage: invalid Content-Range %q", cr) - } - // Content range is formatted -/. We take - // the total size. - size, err = strconv.ParseInt(cr[strings.LastIndex(cr, "/")+1:], 10, 64) - if err != nil { - return nil, fmt.Errorf("storage: invalid Content-Range %q", cr) - } + return parseReadResponse(res, params, reopen) +} - dashIndex := strings.Index(cr, "-") - if dashIndex >= 0 { - startOffset, err = strconv.ParseInt(cr[len("bytes="):dashIndex], 10, 64) - if err != nil { - return nil, fmt.Errorf("storage: invalid Content-Range %q: %w", cr, err) - } - } - } else { - size = res.ContentLength - // Check the CRC iff all of the following hold: - // - We asked for content (length != 0). - // - We got all the content (status != PartialContent). - // - The server sent a CRC header. - // - The Go http stack did not uncompress the file. - // - We were not served compressed data that was uncompressed on download. - // The problem with the last two cases is that the CRC will not match -- GCS - // computes it on the compressed contents, but we compute it on the - // uncompressed contents. 
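The ctxHeaders loop added above exists because the XML download path builds its own http.Request, so headers attached through the gax callctx package must be copied over by hand (the gRPC and JSON paths go through the GAPIC and Apiary layers, which already do this). A hedged caller-side sketch; the header name and bucket handle are illustrative only:

    // Assumes github.com/googleapis/gax-go/v2/callctx is imported and that
    // bucket is an existing *storage.BucketHandle; the header pair is made up.
    ctx = callctx.SetHeaders(ctx, "x-example-custom-header", "some-value")
    r, err := bucket.Object("object-name").NewReader(ctx)
    if err != nil {
        // handle error
    }
    defer r.Close()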
- if params.length != 0 && !res.Uncompressed && !uncompressedByServer(res) { - crc, checkCRC = parseCRC32c(res) - } - } +func (c *httpStorageClient) newRangeReaderJSON(ctx context.Context, params *newRangeReaderParams, s *settings) (r *Reader, err error) { + call := c.raw.Objects.Get(params.bucket, params.object) - remain := res.ContentLength - body := res.Body - if params.length == 0 { - remain = 0 - body.Close() - body = emptyBody - } - var metaGen int64 - if res.Header.Get("X-Goog-Metageneration") != "" { - metaGen, err = strconv.ParseInt(res.Header.Get("X-Goog-Metageneration"), 10, 64) - if err != nil { - return nil, err - } + setClientHeader(call.Header()) + call.Projection("full") + + if s.userProject != "" { + call.UserProject(s.userProject) } - var lm time.Time - if res.Header.Get("Last-Modified") != "" { - lm, err = http.ParseTime(res.Header.Get("Last-Modified")) - if err != nil { - return nil, err - } + if err := setRangeReaderHeaders(call.Header(), params); err != nil { + return nil, err } - attrs := ReaderObjectAttrs{ - Size: size, - ContentType: res.Header.Get("Content-Type"), - ContentEncoding: res.Header.Get("Content-Encoding"), - CacheControl: res.Header.Get("Cache-Control"), - LastModified: lm, - StartOffset: startOffset, - Generation: params.gen, - Metageneration: metaGen, + reopen := readerReopen(ctx, call.Header(), params, s, func(ctx context.Context) (*http.Response, error) { return call.Context(ctx).Download() }, + func() error { return applyConds("NewReader", params.gen, params.conds, call) }, + func() { call.Generation(params.gen) }) + + res, err := reopen(0) + if err != nil { + return nil, err } - return &Reader{ - Attrs: attrs, - size: size, - remain: remain, - wantCRC: crc, - checkCRC: checkCRC, - reader: &httpReader{ - reopen: reopen, - body: body, - }, - }, nil + return parseReadResponse(res, params, reopen) } func (c *httpStorageClient) OpenWriter(params *openWriterParams, opts ...storageOption) (*io.PipeWriter, error) { @@ -1074,11 +953,11 @@ func (c *httpStorageClient) GetIamPolicy(ctx context.Context, resource string, v call.UserProject(s.userProject) } var rp *raw.Policy - err := run(ctx, func() error { + err := run(ctx, func(ctx context.Context) error { var err error rp, err = call.Context(ctx).Do() return err - }, s.retry, s.idempotent, setRetryHeaderHTTP(call)) + }, s.retry, s.idempotent) if err != nil { return nil, err } @@ -1095,10 +974,10 @@ func (c *httpStorageClient) SetIamPolicy(ctx context.Context, resource string, p call.UserProject(s.userProject) } - return run(ctx, func() error { + return run(ctx, func(ctx context.Context) error { _, err := call.Context(ctx).Do() return err - }, s.retry, s.idempotent, setRetryHeaderHTTP(call)) + }, s.retry, s.idempotent) } func (c *httpStorageClient) TestIamPermissions(ctx context.Context, resource string, permissions []string, opts ...storageOption) ([]string, error) { @@ -1109,11 +988,11 @@ func (c *httpStorageClient) TestIamPermissions(ctx context.Context, resource str call.UserProject(s.userProject) } var res *raw.TestIamPermissionsResponse - err := run(ctx, func() error { + err := run(ctx, func(ctx context.Context) error { var err error res, err = call.Context(ctx).Do() return err - }, s.retry, s.idempotent, setRetryHeaderHTTP(call)) + }, s.retry, s.idempotent) if err != nil { return nil, err } @@ -1131,10 +1010,10 @@ func (c *httpStorageClient) GetHMACKey(ctx context.Context, project, accessID st var metadata *raw.HmacKeyMetadata var err error - if err := run(ctx, func() error { + if err := run(ctx, 
func(ctx context.Context) error { metadata, err = call.Context(ctx).Do() return err - }, s.retry, s.idempotent, setRetryHeaderHTTP(call)); err != nil { + }, s.retry, s.idempotent); err != nil { return nil, err } hk := &raw.HmacKey{ @@ -1171,10 +1050,10 @@ func (c *httpStorageClient) ListHMACKeys(ctx context.Context, project, serviceAc } var resp *raw.HmacKeysMetadata - err = run(it.ctx, func() error { - resp, err = call.Context(it.ctx).Do() + err = run(it.ctx, func(ctx context.Context) error { + resp, err = call.Context(ctx).Do() return err - }, s.retry, s.idempotent, setRetryHeaderHTTP(call)) + }, s.retry, s.idempotent) if err != nil { return "", err } @@ -1216,10 +1095,10 @@ func (c *httpStorageClient) UpdateHMACKey(ctx context.Context, project, serviceA var metadata *raw.HmacKeyMetadata var err error - if err := run(ctx, func() error { + if err := run(ctx, func(ctx context.Context) error { metadata, err = call.Context(ctx).Do() return err - }, s.retry, s.idempotent, setRetryHeaderHTTP(call)); err != nil { + }, s.retry, s.idempotent); err != nil { return nil, err } hk := &raw.HmacKey{ @@ -1236,11 +1115,11 @@ func (c *httpStorageClient) CreateHMACKey(ctx context.Context, project, serviceA } var hk *raw.HmacKey - if err := run(ctx, func() error { + if err := run(ctx, func(ctx context.Context) error { h, err := call.Context(ctx).Do() hk = h return err - }, s.retry, s.idempotent, setRetryHeaderHTTP(call)); err != nil { + }, s.retry, s.idempotent); err != nil { return nil, err } return toHMACKeyFromRaw(hk, true) @@ -1252,9 +1131,9 @@ func (c *httpStorageClient) DeleteHMACKey(ctx context.Context, project string, a if s.userProject != "" { call = call.UserProject(s.userProject) } - return run(ctx, func() error { + return run(ctx, func(ctx context.Context) error { return call.Context(ctx).Do() - }, s.retry, s.idempotent, setRetryHeaderHTTP(call)) + }, s.retry, s.idempotent) } // Notification methods. 
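A pattern worth calling out, since it repeats through every hunk in this file: the retry helper run now hands the closure a per-attempt context.Context and no longer takes a setRetryHeaderHTTP/setRetryHeaderGRPC argument, the invocation-ID headers being attached inside run itself. Schematically (excerpted shape drawn from the hunks above, not compilable on its own):

    // Before: the closure ignored the retry context; headers were set via an extra argument.
    err := run(ctx, func() error {
        resp, err = call.Context(ctx).Do()
        return err
    }, s.retry, s.idempotent, setRetryHeaderHTTP(call))

    // After: each attempt receives its own context, already carrying retry headers.
    err = run(ctx, func(ctx context.Context) error {
        resp, err = call.Context(ctx).Do()
        return err
    }, s.retry, s.idempotent)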
@@ -1273,10 +1152,10 @@ func (c *httpStorageClient) ListNotifications(ctx context.Context, bucket string call.UserProject(s.userProject) } var res *raw.Notifications - err = run(ctx, func() error { + err = run(ctx, func(ctx context.Context) error { res, err = call.Context(ctx).Do() return err - }, s.retry, true, setRetryHeaderHTTP(call)) + }, s.retry, true) if err != nil { return nil, err } @@ -1293,10 +1172,10 @@ func (c *httpStorageClient) CreateNotification(ctx context.Context, bucket strin call.UserProject(s.userProject) } var rn *raw.Notification - err = run(ctx, func() error { + err = run(ctx, func(ctx context.Context) error { rn, err = call.Context(ctx).Do() return err - }, s.retry, s.idempotent, setRetryHeaderHTTP(call)) + }, s.retry, s.idempotent) if err != nil { return nil, err } @@ -1312,9 +1191,9 @@ func (c *httpStorageClient) DeleteNotification(ctx context.Context, bucket strin if s.userProject != "" { call.UserProject(s.userProject) } - return run(ctx, func() error { + return run(ctx, func(ctx context.Context) error { return call.Context(ctx).Do() - }, s.retry, s.idempotent, setRetryHeaderHTTP(call)) + }, s.retry, s.idempotent) } type httpReader struct { @@ -1349,3 +1228,197 @@ func (r *httpReader) Read(p []byte) (int, error) { func (r *httpReader) Close() error { return r.body.Close() } + +func setRangeReaderHeaders(h http.Header, params *newRangeReaderParams) error { + if params.readCompressed { + h.Set("Accept-Encoding", "gzip") + } + if err := setEncryptionHeaders(h, params.encryptionKey, false); err != nil { + return err + } + return nil +} + +// readerReopen initiates a Read with offset and length, assuming we +// have already read seen bytes. +func readerReopen(ctx context.Context, header http.Header, params *newRangeReaderParams, s *settings, + doDownload func(context.Context) (*http.Response, error), applyConditions func() error, setGeneration func()) func(int64) (*http.Response, error) { + return func(seen int64) (*http.Response, error) { + // If the context has already expired, return immediately without making a + // call. + if err := ctx.Err(); err != nil { + return nil, err + } + start := params.offset + seen + if params.length < 0 && start < 0 { + header.Set("Range", fmt.Sprintf("bytes=%d", start)) + } else if params.length < 0 && start > 0 { + header.Set("Range", fmt.Sprintf("bytes=%d-", start)) + } else if params.length > 0 { + // The end character isn't affected by how many bytes we've seen. + header.Set("Range", fmt.Sprintf("bytes=%d-%d", start, params.offset+params.length-1)) + } + // We wait to assign conditions here because the generation number can change in between reopen() runs. + if err := applyConditions(); err != nil { + return nil, err + } + // If an object generation is specified, include generation as query string parameters. 
+ if params.gen >= 0 { + setGeneration() + } + + var err error + var res *http.Response + err = run(ctx, func(ctx context.Context) error { + res, err = doDownload(ctx) + if err != nil { + var e *googleapi.Error + if errors.As(err, &e) { + if e.Code == http.StatusNotFound { + return ErrObjectNotExist + } + } + return err + } + + if res.StatusCode == http.StatusNotFound { + // this check is necessary only for XML + res.Body.Close() + return ErrObjectNotExist + } + if res.StatusCode < 200 || res.StatusCode > 299 { + body, _ := ioutil.ReadAll(res.Body) + res.Body.Close() + return &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + Body: string(body), + } + } + + partialContentNotSatisfied := + !decompressiveTranscoding(res) && + start > 0 && params.length != 0 && + res.StatusCode != http.StatusPartialContent + + if partialContentNotSatisfied { + res.Body.Close() + return errors.New("storage: partial request not satisfied") + } + + // With "Content-Encoding": "gzip" aka decompressive transcoding, GCS serves + // back the whole file regardless of the range count passed in as per: + // https://cloud.google.com/storage/docs/transcoding#range, + // thus we have to manually move the body forward by seen bytes. + if decompressiveTranscoding(res) && seen > 0 { + _, _ = io.CopyN(ioutil.Discard, res.Body, seen) + } + + // If a generation hasn't been specified, and this is the first response we get, let's record the + // generation. In future requests we'll use this generation as a precondition to avoid data races. + if params.gen < 0 && res.Header.Get("X-Goog-Generation") != "" { + gen64, err := strconv.ParseInt(res.Header.Get("X-Goog-Generation"), 10, 64) + if err != nil { + return err + } + params.gen = gen64 + } + return nil + }, s.retry, s.idempotent) + if err != nil { + return nil, err + } + return res, nil + } +} + +func parseReadResponse(res *http.Response, params *newRangeReaderParams, reopen func(int64) (*http.Response, error)) (*Reader, error) { + var err error + var ( + size int64 // total size of object, even if a range was requested. + checkCRC bool + crc uint32 + startOffset int64 // non-zero if range request. + ) + if res.StatusCode == http.StatusPartialContent { + cr := strings.TrimSpace(res.Header.Get("Content-Range")) + if !strings.HasPrefix(cr, "bytes ") || !strings.Contains(cr, "/") { + return nil, fmt.Errorf("storage: invalid Content-Range %q", cr) + } + // Content range is formatted -/. We take + // the total size. + size, err = strconv.ParseInt(cr[strings.LastIndex(cr, "/")+1:], 10, 64) + if err != nil { + return nil, fmt.Errorf("storage: invalid Content-Range %q", cr) + } + + dashIndex := strings.Index(cr, "-") + if dashIndex >= 0 { + startOffset, err = strconv.ParseInt(cr[len("bytes="):dashIndex], 10, 64) + if err != nil { + return nil, fmt.Errorf("storage: invalid Content-Range %q: %w", cr, err) + } + } + } else { + size = res.ContentLength + // Check the CRC iff all of the following hold: + // - We asked for content (length != 0). + // - We got all the content (status != PartialContent). + // - The server sent a CRC header. + // - The Go http stack did not uncompress the file. + // - We were not served compressed data that was uncompressed on download. + // The problem with the last two cases is that the CRC will not match -- GCS + // computes it on the compressed contents, but we compute it on the + // uncompressed contents. 
+ if params.length != 0 && !res.Uncompressed && !uncompressedByServer(res) { + crc, checkCRC = parseCRC32c(res) + } + } + + remain := res.ContentLength + body := res.Body + // If the user requested zero bytes, explicitly close and remove the request + // body. + if params.length == 0 { + remain = 0 + body.Close() + body = emptyBody + } + var metaGen int64 + if res.Header.Get("X-Goog-Metageneration") != "" { + metaGen, err = strconv.ParseInt(res.Header.Get("X-Goog-Metageneration"), 10, 64) + if err != nil { + return nil, err + } + } + + var lm time.Time + if res.Header.Get("Last-Modified") != "" { + lm, err = http.ParseTime(res.Header.Get("Last-Modified")) + if err != nil { + return nil, err + } + } + + attrs := ReaderObjectAttrs{ + Size: size, + ContentType: res.Header.Get("Content-Type"), + ContentEncoding: res.Header.Get("Content-Encoding"), + CacheControl: res.Header.Get("Cache-Control"), + LastModified: lm, + StartOffset: startOffset, + Generation: params.gen, + Metageneration: metaGen, + } + return &Reader{ + Attrs: attrs, + size: size, + remain: remain, + wantCRC: crc, + checkCRC: checkCRC, + reader: &httpReader{ + reopen: reopen, + body: body, + }, + }, nil +} diff --git a/vendor/cloud.google.com/go/storage/iam.go b/vendor/cloud.google.com/go/storage/iam.go index 408661718f..4c01bff4f3 100644 --- a/vendor/cloud.google.com/go/storage/iam.go +++ b/vendor/cloud.google.com/go/storage/iam.go @@ -18,9 +18,9 @@ import ( "context" "cloud.google.com/go/iam" + "cloud.google.com/go/iam/apiv1/iampb" "cloud.google.com/go/internal/trace" raw "google.golang.org/api/storage/v1" - iampb "google.golang.org/genproto/googleapis/iam/v1" "google.golang.org/genproto/googleapis/type/expr" ) diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/auxiliary.go b/vendor/cloud.google.com/go/storage/internal/apiv2/auxiliary.go new file mode 100644 index 0000000000..c6fd4b341f --- /dev/null +++ b/vendor/cloud.google.com/go/storage/internal/apiv2/auxiliary.go @@ -0,0 +1,210 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go_gapic. DO NOT EDIT. + +package storage + +import ( + storagepb "cloud.google.com/go/storage/internal/apiv2/storagepb" + "google.golang.org/api/iterator" +) + +// BucketIterator manages a stream of *storagepb.Bucket. +type BucketIterator struct { + items []*storagepb.Bucket + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. 
+ InternalFetch func(pageSize int, pageToken string) (results []*storagepb.Bucket, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *BucketIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *BucketIterator) Next() (*storagepb.Bucket, error) { + var item *storagepb.Bucket + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *BucketIterator) bufLen() int { + return len(it.items) +} + +func (it *BucketIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// HmacKeyMetadataIterator manages a stream of *storagepb.HmacKeyMetadata. +type HmacKeyMetadataIterator struct { + items []*storagepb.HmacKeyMetadata + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*storagepb.HmacKeyMetadata, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *HmacKeyMetadataIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *HmacKeyMetadataIterator) Next() (*storagepb.HmacKeyMetadata, error) { + var item *storagepb.HmacKeyMetadata + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *HmacKeyMetadataIterator) bufLen() int { + return len(it.items) +} + +func (it *HmacKeyMetadataIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// NotificationConfigIterator manages a stream of *storagepb.NotificationConfig. +type NotificationConfigIterator struct { + items []*storagepb.NotificationConfig + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*storagepb.NotificationConfig, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. 
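All of the generated iterators in this file share the same shape and are consumed with the standard iterator.Done loop. A hedged sketch using BucketIterator, assuming a constructed apiv2 GAPIC client and that the parent string follows the projects/{project} form used by the v2 API:

    req := &storagepb.ListBucketsRequest{
        Parent: "projects/my-project", // assumed parent format
    }
    it := gapicClient.ListBuckets(ctx, req)
    for {
        bucket, err := it.Next()
        if err == iterator.Done {
            break
        }
        if err != nil {
            // handle error
        }
        _ = bucket
    }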
+func (it *NotificationConfigIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *NotificationConfigIterator) Next() (*storagepb.NotificationConfig, error) { + var item *storagepb.NotificationConfig + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *NotificationConfigIterator) bufLen() int { + return len(it.items) +} + +func (it *NotificationConfigIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// ObjectIterator manages a stream of *storagepb.Object. +type ObjectIterator struct { + items []*storagepb.Object + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*storagepb.Object, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *ObjectIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *ObjectIterator) Next() (*storagepb.Object, error) { + var item *storagepb.Object + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *ObjectIterator) bufLen() int { + return len(it.items) +} + +func (it *ObjectIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/doc.go b/vendor/cloud.google.com/go/storage/internal/apiv2/doc.go index e33b5222ab..8159589ea9 100644 --- a/vendor/cloud.google.com/go/storage/internal/apiv2/doc.go +++ b/vendor/cloud.google.com/go/storage/internal/apiv2/doc.go @@ -17,14 +17,29 @@ // Package storage is an auto-generated package for the // Cloud Storage API. // -// Lets you store and retrieve potentially-large, immutable data objects. -// -// NOTE: This package is in alpha. It is not stable, and is likely to change. +// Stop. This folder is likely not what you are looking for. This folder +// contains protocol buffer definitions for an unreleased API for accessing +// Cloud Storage. Unless told otherwise by a Google Cloud representative, do +// not use any of the contents of this folder. If you would like to use Cloud +// Storage, please consult our official documentation (at +// https://cloud.google.com/storage/docs/apis) for details on our XML and +// JSON APIs, or else consider one of our client libraries (at +// https://cloud.google.com/storage/docs/reference/libraries). 
This API +// defined in this folder is unreleased and may shut off, break, or fail at +// any time for any users who are not registered as a part of a private +// preview program. // // # General documentation // -// For information about setting deadlines, reusing contexts, and more -// please visit https://pkg.go.dev/cloud.google.com/go. +// For information that is relevant for all client libraries please reference +// https://pkg.go.dev/cloud.google.com/go#pkg-overview. Some information on this +// page includes: +// +// - [Authentication and Authorization] +// - [Timeouts and Cancellation] +// - [Testing against Client Libraries] +// - [Debugging Client Libraries] +// - [Inspecting errors] // // # Example usage // @@ -61,15 +76,32 @@ // // TODO: Handle error. // } // defer c.Close() -// -// req := &storagepb.DeleteBucketRequest{ -// // TODO: Fill request struct fields. -// // See https://pkg.go.dev/cloud.google.com/go/storage/internal/apiv2/stubs#DeleteBucketRequest. -// } -// err = c.DeleteBucket(ctx, req) +// stream, err := c.BidiWriteObject(ctx) // if err != nil { // // TODO: Handle error. // } +// go func() { +// reqs := []*storagepb.BidiWriteObjectRequest{ +// // TODO: Create requests. +// } +// for _, req := range reqs { +// if err := stream.Send(req); err != nil { +// // TODO: Handle error. +// } +// } +// stream.CloseSend() +// }() +// for { +// resp, err := stream.Recv() +// if err == io.EOF { +// break +// } +// if err != nil { +// // TODO: handle error. +// } +// // TODO: Use resp. +// _ = resp +// } // // # Use of Context // @@ -78,18 +110,18 @@ // Individual methods on the client use the ctx given to them. // // To close the open connection, use the Close() method. +// +// [Authentication and Authorization]: https://pkg.go.dev/cloud.google.com/go#hdr-Authentication_and_Authorization +// [Timeouts and Cancellation]: https://pkg.go.dev/cloud.google.com/go#hdr-Timeouts_and_Cancellation +// [Testing against Client Libraries]: https://pkg.go.dev/cloud.google.com/go#hdr-Testing +// [Debugging Client Libraries]: https://pkg.go.dev/cloud.google.com/go#hdr-Debugging +// [Inspecting errors]: https://pkg.go.dev/cloud.google.com/go#hdr-Inspecting_errors package storage // import "cloud.google.com/go/storage/internal/apiv2" import ( "context" - "os" - "runtime" - "strconv" - "strings" - "unicode" "google.golang.org/api/option" - "google.golang.org/grpc/metadata" ) // For more information on implementing a client constructor hook, see @@ -106,27 +138,6 @@ func getVersionClient() string { return versionClient } -func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { - out, _ := metadata.FromOutgoingContext(ctx) - out = out.Copy() - for _, md := range mds { - for k, v := range md { - out[k] = append(out[k], v...) - } - } - return metadata.NewOutgoingContext(ctx, out) -} - -func checkDisableDeadlines() (bool, error) { - raw, ok := os.LookupEnv("GOOGLE_API_GO_EXPERIMENTAL_DISABLE_DEFAULT_DEADLINE") - if !ok { - return false, nil - } - - b, err := strconv.ParseBool(raw) - return b, err -} - // DefaultAuthScopes reports the default set of authentication scopes to use with this package. func DefaultAuthScopes() []string { return []string{ @@ -137,40 +148,3 @@ func DefaultAuthScopes() []string { "https://www.googleapis.com/auth/devstorage.read_write", } } - -// versionGo returns the Go runtime version. The returned string -// has no whitespace, suitable for reporting in header. 
-func versionGo() string { - const develPrefix = "devel +" - - s := runtime.Version() - if strings.HasPrefix(s, develPrefix) { - s = s[len(develPrefix):] - if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 { - s = s[:p] - } - return s - } - - notSemverRune := func(r rune) bool { - return !strings.ContainsRune("0123456789.", r) - } - - if strings.HasPrefix(s, "go1") { - s = s[2:] - var prerelease string - if p := strings.IndexFunc(s, notSemverRune); p >= 0 { - s, prerelease = s[:p], s[p:] - } - if strings.HasSuffix(s, ".") { - s += "0" - } else if strings.Count(s, ".") < 2 { - s += ".0" - } - if prerelease != "" { - s += "-" + prerelease - } - return s - } - return "UNKNOWN" -} diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/gapic_metadata.json b/vendor/cloud.google.com/go/storage/internal/apiv2/gapic_metadata.json index 01103fa93b..56256bb2cc 100644 --- a/vendor/cloud.google.com/go/storage/internal/apiv2/gapic_metadata.json +++ b/vendor/cloud.google.com/go/storage/internal/apiv2/gapic_metadata.json @@ -10,6 +10,11 @@ "grpc": { "libraryClient": "Client", "rpcs": { + "BidiWriteObject": { + "methods": [ + "BidiWriteObject" + ] + }, "CancelResumableWrite": { "methods": [ "CancelResumableWrite" @@ -30,9 +35,9 @@ "CreateHmacKey" ] }, - "CreateNotification": { + "CreateNotificationConfig": { "methods": [ - "CreateNotification" + "CreateNotificationConfig" ] }, "DeleteBucket": { @@ -45,9 +50,9 @@ "DeleteHmacKey" ] }, - "DeleteNotification": { + "DeleteNotificationConfig": { "methods": [ - "DeleteNotification" + "DeleteNotificationConfig" ] }, "DeleteObject": { @@ -70,9 +75,9 @@ "GetIamPolicy" ] }, - "GetNotification": { + "GetNotificationConfig": { "methods": [ - "GetNotification" + "GetNotificationConfig" ] }, "GetObject": { @@ -95,9 +100,9 @@ "ListHmacKeys" ] }, - "ListNotifications": { + "ListNotificationConfigs": { "methods": [ - "ListNotifications" + "ListNotificationConfigs" ] }, "ListObjects": { @@ -120,6 +125,11 @@ "ReadObject" ] }, + "RestoreObject": { + "methods": [ + "RestoreObject" + ] + }, "RewriteObject": { "methods": [ "RewriteObject" diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go b/vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go index aa2c1aff57..6481995060 100644 --- a/vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go +++ b/vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go @@ -23,16 +23,17 @@ import ( "net/url" "regexp" "strings" + "time" - storagepb "cloud.google.com/go/storage/internal/apiv2/stubs" + iampb "cloud.google.com/go/iam/apiv1/iampb" + storagepb "cloud.google.com/go/storage/internal/apiv2/storagepb" gax "github.com/googleapis/gax-go/v2" "google.golang.org/api/iterator" "google.golang.org/api/option" "google.golang.org/api/option/internaloption" gtransport "google.golang.org/api/transport/grpc" - iampb "google.golang.org/genproto/googleapis/iam/v1" "google.golang.org/grpc" - "google.golang.org/grpc/metadata" + "google.golang.org/grpc/codes" "google.golang.org/protobuf/proto" ) @@ -49,17 +50,19 @@ type CallOptions struct { SetIamPolicy []gax.CallOption TestIamPermissions []gax.CallOption UpdateBucket []gax.CallOption - DeleteNotification []gax.CallOption - GetNotification []gax.CallOption - CreateNotification []gax.CallOption - ListNotifications []gax.CallOption + DeleteNotificationConfig []gax.CallOption + GetNotificationConfig []gax.CallOption + CreateNotificationConfig []gax.CallOption + ListNotificationConfigs []gax.CallOption ComposeObject []gax.CallOption 
DeleteObject []gax.CallOption + RestoreObject []gax.CallOption CancelResumableWrite []gax.CallOption GetObject []gax.CallOption ReadObject []gax.CallOption UpdateObject []gax.CallOption WriteObject []gax.CallOption + BidiWriteObject []gax.CallOption ListObjects []gax.CallOption RewriteObject []gax.CallOption StartResumableWrite []gax.CallOption @@ -86,36 +89,419 @@ func defaultGRPCClientOptions() []option.ClientOption { func defaultCallOptions() *CallOptions { return &CallOptions{ - DeleteBucket: []gax.CallOption{}, - GetBucket: []gax.CallOption{}, - CreateBucket: []gax.CallOption{}, - ListBuckets: []gax.CallOption{}, - LockBucketRetentionPolicy: []gax.CallOption{}, - GetIamPolicy: []gax.CallOption{}, - SetIamPolicy: []gax.CallOption{}, - TestIamPermissions: []gax.CallOption{}, - UpdateBucket: []gax.CallOption{}, - DeleteNotification: []gax.CallOption{}, - GetNotification: []gax.CallOption{}, - CreateNotification: []gax.CallOption{}, - ListNotifications: []gax.CallOption{}, - ComposeObject: []gax.CallOption{}, - DeleteObject: []gax.CallOption{}, - CancelResumableWrite: []gax.CallOption{}, - GetObject: []gax.CallOption{}, - ReadObject: []gax.CallOption{}, - UpdateObject: []gax.CallOption{}, - WriteObject: []gax.CallOption{}, - ListObjects: []gax.CallOption{}, - RewriteObject: []gax.CallOption{}, - StartResumableWrite: []gax.CallOption{}, - QueryWriteStatus: []gax.CallOption{}, - GetServiceAccount: []gax.CallOption{}, - CreateHmacKey: []gax.CallOption{}, - DeleteHmacKey: []gax.CallOption{}, - GetHmacKey: []gax.CallOption{}, - ListHmacKeys: []gax.CallOption{}, - UpdateHmacKey: []gax.CallOption{}, + DeleteBucket: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + GetBucket: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + CreateBucket: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + ListBuckets: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + LockBucketRetentionPolicy: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + GetIamPolicy: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + 
}, + SetIamPolicy: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + TestIamPermissions: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + UpdateBucket: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + DeleteNotificationConfig: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + GetNotificationConfig: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + CreateNotificationConfig: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + ListNotificationConfigs: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + ComposeObject: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + DeleteObject: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + RestoreObject: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + CancelResumableWrite: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * 
time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + GetObject: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + ReadObject: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + UpdateObject: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + WriteObject: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + BidiWriteObject: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + ListObjects: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + RewriteObject: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + StartResumableWrite: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + QueryWriteStatus: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + GetServiceAccount: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + CreateHmacKey: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + DeleteHmacKey: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + 
gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + GetHmacKey: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + ListHmacKeys: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + UpdateHmacKey: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, } } @@ -133,17 +519,19 @@ type internalClient interface { SetIamPolicy(context.Context, *iampb.SetIamPolicyRequest, ...gax.CallOption) (*iampb.Policy, error) TestIamPermissions(context.Context, *iampb.TestIamPermissionsRequest, ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) UpdateBucket(context.Context, *storagepb.UpdateBucketRequest, ...gax.CallOption) (*storagepb.Bucket, error) - DeleteNotification(context.Context, *storagepb.DeleteNotificationRequest, ...gax.CallOption) error - GetNotification(context.Context, *storagepb.GetNotificationRequest, ...gax.CallOption) (*storagepb.Notification, error) - CreateNotification(context.Context, *storagepb.CreateNotificationRequest, ...gax.CallOption) (*storagepb.Notification, error) - ListNotifications(context.Context, *storagepb.ListNotificationsRequest, ...gax.CallOption) *NotificationIterator + DeleteNotificationConfig(context.Context, *storagepb.DeleteNotificationConfigRequest, ...gax.CallOption) error + GetNotificationConfig(context.Context, *storagepb.GetNotificationConfigRequest, ...gax.CallOption) (*storagepb.NotificationConfig, error) + CreateNotificationConfig(context.Context, *storagepb.CreateNotificationConfigRequest, ...gax.CallOption) (*storagepb.NotificationConfig, error) + ListNotificationConfigs(context.Context, *storagepb.ListNotificationConfigsRequest, ...gax.CallOption) *NotificationConfigIterator ComposeObject(context.Context, *storagepb.ComposeObjectRequest, ...gax.CallOption) (*storagepb.Object, error) DeleteObject(context.Context, *storagepb.DeleteObjectRequest, ...gax.CallOption) error + RestoreObject(context.Context, *storagepb.RestoreObjectRequest, ...gax.CallOption) (*storagepb.Object, error) CancelResumableWrite(context.Context, *storagepb.CancelResumableWriteRequest, ...gax.CallOption) (*storagepb.CancelResumableWriteResponse, error) GetObject(context.Context, *storagepb.GetObjectRequest, ...gax.CallOption) (*storagepb.Object, error) ReadObject(context.Context, *storagepb.ReadObjectRequest, ...gax.CallOption) (storagepb.Storage_ReadObjectClient, error) UpdateObject(context.Context, *storagepb.UpdateObjectRequest, ...gax.CallOption) (*storagepb.Object, error) WriteObject(context.Context, ...gax.CallOption) (storagepb.Storage_WriteObjectClient, error) + BidiWriteObject(context.Context, ...gax.CallOption) 
(storagepb.Storage_BidiWriteObjectClient, error) ListObjects(context.Context, *storagepb.ListObjectsRequest, ...gax.CallOption) *ObjectIterator RewriteObject(context.Context, *storagepb.RewriteObjectRequest, ...gax.CallOption) (*storagepb.RewriteResponse, error) StartResumableWrite(context.Context, *storagepb.StartResumableWriteRequest, ...gax.CallOption) (*storagepb.StartResumableWriteResponse, error) @@ -239,16 +627,16 @@ func (c *Client) LockBucketRetentionPolicy(ctx context.Context, req *storagepb.L // GetIamPolicy gets the IAM policy for a specified bucket or object. // The resource field in the request should be -// projects//buckets/ for a bucket or -// projects//buckets//objects/ for an object. +// projects/_/buckets/{bucket} for a bucket or +// projects/_/buckets/{bucket}/objects/{object} for an object. func (c *Client) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { return c.internalClient.GetIamPolicy(ctx, req, opts...) } // SetIamPolicy updates an IAM policy for the specified bucket or object. // The resource field in the request should be -// projects//buckets/ for a bucket or -// projects//buckets//objects/ for an object. +// projects/_/buckets/{bucket} for a bucket or +// projects/_/buckets/{bucket}/objects/{object} for an object. func (c *Client) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { return c.internalClient.SetIamPolicy(ctx, req, opts...) } @@ -256,8 +644,8 @@ func (c *Client) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyReques // TestIamPermissions tests a set of permissions on the given bucket or object to see which, if // any, are held by the caller. // The resource field in the request should be -// projects//buckets/ for a bucket or -// projects//buckets//objects/ for an object. +// projects/_/buckets/{bucket} for a bucket or +// projects/_/buckets/{bucket}/objects/{object} for an object. func (c *Client) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) { return c.internalClient.TestIamPermissions(ctx, req, opts...) } @@ -267,27 +655,27 @@ func (c *Client) UpdateBucket(ctx context.Context, req *storagepb.UpdateBucketRe return c.internalClient.UpdateBucket(ctx, req, opts...) } -// DeleteNotification permanently deletes a notification subscription. -func (c *Client) DeleteNotification(ctx context.Context, req *storagepb.DeleteNotificationRequest, opts ...gax.CallOption) error { - return c.internalClient.DeleteNotification(ctx, req, opts...) +// DeleteNotificationConfig permanently deletes a NotificationConfig. +func (c *Client) DeleteNotificationConfig(ctx context.Context, req *storagepb.DeleteNotificationConfigRequest, opts ...gax.CallOption) error { + return c.internalClient.DeleteNotificationConfig(ctx, req, opts...) } -// GetNotification view a notification config. -func (c *Client) GetNotification(ctx context.Context, req *storagepb.GetNotificationRequest, opts ...gax.CallOption) (*storagepb.Notification, error) { - return c.internalClient.GetNotification(ctx, req, opts...) +// GetNotificationConfig view a NotificationConfig. +func (c *Client) GetNotificationConfig(ctx context.Context, req *storagepb.GetNotificationConfigRequest, opts ...gax.CallOption) (*storagepb.NotificationConfig, error) { + return c.internalClient.GetNotificationConfig(ctx, req, opts...) 
} -// CreateNotification creates a notification subscription for a given bucket. -// These notifications, when triggered, publish messages to the specified -// Pub/Sub topics. -// See https://cloud.google.com/storage/docs/pubsub-notifications (at https://cloud.google.com/storage/docs/pubsub-notifications). -func (c *Client) CreateNotification(ctx context.Context, req *storagepb.CreateNotificationRequest, opts ...gax.CallOption) (*storagepb.Notification, error) { - return c.internalClient.CreateNotification(ctx, req, opts...) +// CreateNotificationConfig creates a NotificationConfig for a given bucket. +// These NotificationConfigs, when triggered, publish messages to the +// specified Pub/Sub topics. See +// https://cloud.google.com/storage/docs/pubsub-notifications (at https://cloud.google.com/storage/docs/pubsub-notifications). +func (c *Client) CreateNotificationConfig(ctx context.Context, req *storagepb.CreateNotificationConfigRequest, opts ...gax.CallOption) (*storagepb.NotificationConfig, error) { + return c.internalClient.CreateNotificationConfig(ctx, req, opts...) } -// ListNotifications retrieves a list of notification subscriptions for a given bucket. -func (c *Client) ListNotifications(ctx context.Context, req *storagepb.ListNotificationsRequest, opts ...gax.CallOption) *NotificationIterator { - return c.internalClient.ListNotifications(ctx, req, opts...) +// ListNotificationConfigs retrieves a list of NotificationConfigs for a given bucket. +func (c *Client) ListNotificationConfigs(ctx context.Context, req *storagepb.ListNotificationConfigsRequest, opts ...gax.CallOption) *NotificationConfigIterator { + return c.internalClient.ListNotificationConfigs(ctx, req, opts...) } // ComposeObject concatenates a list of existing objects into a new object in the same @@ -296,13 +684,29 @@ func (c *Client) ComposeObject(ctx context.Context, req *storagepb.ComposeObject return c.internalClient.ComposeObject(ctx, req, opts...) } -// DeleteObject deletes an object and its metadata. Deletions are permanent if versioning -// is not enabled for the bucket, or if the generation parameter is used. +// DeleteObject deletes an object and its metadata. +// +// Deletions are normally permanent when versioning is disabled or whenever +// the generation parameter is used. However, if soft delete is enabled for +// the bucket, deleted objects can be restored using RestoreObject until the +// soft delete retention period has passed. func (c *Client) DeleteObject(ctx context.Context, req *storagepb.DeleteObjectRequest, opts ...gax.CallOption) error { return c.internalClient.DeleteObject(ctx, req, opts...) } +// RestoreObject restores a soft-deleted object. +func (c *Client) RestoreObject(ctx context.Context, req *storagepb.RestoreObjectRequest, opts ...gax.CallOption) (*storagepb.Object, error) { + return c.internalClient.RestoreObject(ctx, req, opts...) +} + // CancelResumableWrite cancels an in-progress resumable upload. +// +// Any attempts to write to the resumable upload after cancelling the upload +// will fail. +// +// The behavior for currently in progress write operations is not guaranteed - +// they could either complete before the cancellation or fail if the +// cancellation completes first. func (c *Client) CancelResumableWrite(ctx context.Context, req *storagepb.CancelResumableWriteRequest, opts ...gax.CallOption) (*storagepb.CancelResumableWriteResponse, error) { return c.internalClient.CancelResumableWrite(ctx, req, opts...) 
} @@ -365,8 +769,12 @@ func (c *Client) UpdateObject(ctx context.Context, req *storagepb.UpdateObjectRe // returned persisted_size; in this case, the service will skip data at // offsets that were already persisted (without checking that it matches // the previously written data), and write only the data starting from the -// persisted offset. This behavior can make client-side handling simpler -// in some cases. +// persisted offset. Even though the data isn’t written, it may still +// incur a performance cost over resuming at the correct write offset. +// This behavior can make client-side handling simpler in some cases. +// +// Clients must only send data that is a multiple of 256 KiB per message, +// unless the object is being finished with finish_write set to true. // // The service will not view the object as complete until the client has // sent a WriteObjectRequest with finish_write set to true. Sending any @@ -378,10 +786,33 @@ func (c *Client) UpdateObject(ctx context.Context, req *storagepb.UpdateObjectRe // Attempting to resume an already finalized object will result in an OK // status, with a WriteObjectResponse containing the finalized object’s // metadata. +// +// Alternatively, the BidiWriteObject operation may be used to write an +// object with controls over flushing and the ability to +// determine the current persisted size. func (c *Client) WriteObject(ctx context.Context, opts ...gax.CallOption) (storagepb.Storage_WriteObjectClient, error) { return c.internalClient.WriteObject(ctx, opts...) } +// BidiWriteObject stores a new object and metadata. +// +// This is similar to the WriteObject call with the added support for +// manual flushing of persisted state, and the ability to determine current +// persisted size without closing the stream. +// +// The client may specify one or both of the state_lookup and flush fields +// in each BidiWriteObjectRequest. If flush is specified, the data written +// so far will be persisted to storage. If state_lookup is specified, the +// service will respond with a BidiWriteObjectResponse that contains the +// persisted size. If both flush and state_lookup are specified, the flush +// will always occur before a state_lookup, so that both may be set in the +// same request and the returned state will be the state of the object +// post-flush. When the stream is closed, a BidiWriteObjectResponse will +// always be sent to the client, regardless of the value of state_lookup. +func (c *Client) BidiWriteObject(ctx context.Context, opts ...gax.CallOption) (storagepb.Storage_BidiWriteObjectClient, error) { + return c.internalClient.BidiWriteObject(ctx, opts...) +} + // ListObjects retrieves a list of objects matching the criteria. func (c *Client) ListObjects(ctx context.Context, req *storagepb.ListObjectsRequest, opts ...gax.CallOption) *ObjectIterator { return c.internalClient.ListObjects(ctx, req, opts...) @@ -454,9 +885,6 @@ type gRPCClient struct { // Connection pool of gRPC connections to the service. connPool gtransport.ConnPool - // flag to opt out of default deadlines via GOOGLE_API_GO_EXPERIMENTAL_DISABLE_DEFAULT_DEADLINE - disableDeadlines bool - // Points back to the CallOptions field of the containing Client CallOptions **CallOptions @@ -464,7 +892,7 @@ type gRPCClient struct { client storagepb.StorageClient // The x-goog-* metadata to be sent with each request. - xGoogMetadata metadata.MD + xGoogHeaders []string } // NewClient creates a new storage client based on gRPC.
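The BidiWriteObject comment above describes driving the stream with the flush and state_lookup fields; the package example earlier in this diff (doc.go) shows the basic Send/CloseSend/Recv loop. As a rough, non-vendored sketch of a single flush-and-query round trip on an already-started resumable upload: uploadID, persistedOffset, and buf are placeholders, and the BidiWriteObjectRequest field names (FirstMessage/UploadId, WriteOffset, ChecksummedData, Flush, StateLookup) are assumptions based on the storagepb types referenced in this diff.

	// Sketch only: resume an upload, persist buffered data, and ask the
	// service for the persisted size in one message. Error handling and
	// request construction are abbreviated, as in the doc.go example.
	stream, err := c.BidiWriteObject(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	err = stream.Send(&storagepb.BidiWriteObjectRequest{
		FirstMessage: &storagepb.BidiWriteObjectRequest_UploadId{UploadId: uploadID},
		WriteOffset:  persistedOffset,
		Data: &storagepb.BidiWriteObjectRequest_ChecksummedData{
			ChecksummedData: &storagepb.ChecksummedData{Content: buf},
		},
		Flush:       true, // persist the data written so far
		StateLookup: true, // respond with the post-flush persisted size
	})
	if err != nil {
		// TODO: Handle error.
	}
	resp, err := stream.Recv() // BidiWriteObjectResponse carrying the persisted size
	if err != nil {
		// TODO: Handle error.
	}
	_ = resp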
@@ -502,11 +930,6 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error clientOpts = append(clientOpts, hookOpts...) } - disableDeadlines, err := checkDisableDeadlines() - if err != nil { - return nil, err - } - connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...) if err != nil { return nil, err @@ -514,10 +937,9 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error client := Client{CallOptions: defaultCallOptions()} c := &gRPCClient{ - connPool: connPool, - disableDeadlines: disableDeadlines, - client: storagepb.NewStorageClient(connPool), - CallOptions: &client.CallOptions, + connPool: connPool, + client: storagepb.NewStorageClient(connPool), + CallOptions: &client.CallOptions, } c.setGoogleClientInfo() @@ -538,9 +960,9 @@ func (c *gRPCClient) Connection() *grpc.ClientConn { // the `x-goog-api-client` header passed on each request. Intended for // use by Google-written clients. func (c *gRPCClient) setGoogleClientInfo(keyval ...string) { - kv := append([]string{"gl-go", versionGo()}, keyval...) + kv := append([]string{"gl-go", gax.GoVersion}, keyval...) kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version) - c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) + c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)} } // Close closes the connection to the API service. The user should invoke this when @@ -559,9 +981,10 @@ func (c *gRPCClient) DeleteBucket(ctx context.Context, req *storagepb.DeleteBuck routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) } routingHeaders = strings.TrimSuffix(routingHeaders, "&") - md := metadata.Pairs("x-goog-request-params", routingHeaders) + hds := []string{"x-goog-request-params", routingHeaders} - ctx = insertMetadata(ctx, c.xGoogMetadata, md) + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) opts = append((*c.CallOptions).DeleteBucket[0:len((*c.CallOptions).DeleteBucket):len((*c.CallOptions).DeleteBucket)], opts...) err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error @@ -581,9 +1004,10 @@ func (c *gRPCClient) GetBucket(ctx context.Context, req *storagepb.GetBucketRequ routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) } routingHeaders = strings.TrimSuffix(routingHeaders, "&") - md := metadata.Pairs("x-goog-request-params", routingHeaders) + hds := []string{"x-goog-request-params", routingHeaders} - ctx = insertMetadata(ctx, c.xGoogMetadata, md) + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) opts = append((*c.CallOptions).GetBucket[0:len((*c.CallOptions).GetBucket):len((*c.CallOptions).GetBucket)], opts...) 
var resp *storagepb.Bucket err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { @@ -603,13 +1027,17 @@ func (c *gRPCClient) CreateBucket(ctx context.Context, req *storagepb.CreateBuck if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetParent()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1])) > 0 { routingHeadersMap["project"] = url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1]) } + if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetBucket().GetProject()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetBucket().GetProject())[1])) > 0 { + routingHeadersMap["project"] = url.QueryEscape(reg.FindStringSubmatch(req.GetBucket().GetProject())[1]) + } for headerName, headerValue := range routingHeadersMap { routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) } routingHeaders = strings.TrimSuffix(routingHeaders, "&") - md := metadata.Pairs("x-goog-request-params", routingHeaders) + hds := []string{"x-goog-request-params", routingHeaders} - ctx = insertMetadata(ctx, c.xGoogMetadata, md) + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) opts = append((*c.CallOptions).CreateBucket[0:len((*c.CallOptions).CreateBucket):len((*c.CallOptions).CreateBucket)], opts...) var resp *storagepb.Bucket err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { @@ -633,9 +1061,10 @@ func (c *gRPCClient) ListBuckets(ctx context.Context, req *storagepb.ListBuckets routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) } routingHeaders = strings.TrimSuffix(routingHeaders, "&") - md := metadata.Pairs("x-goog-request-params", routingHeaders) + hds := []string{"x-goog-request-params", routingHeaders} - ctx = insertMetadata(ctx, c.xGoogMetadata, md) + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) opts = append((*c.CallOptions).ListBuckets[0:len((*c.CallOptions).ListBuckets):len((*c.CallOptions).ListBuckets)], opts...) it := &BucketIterator{} req = proto.Clone(req).(*storagepb.ListBucketsRequest) @@ -687,9 +1116,10 @@ func (c *gRPCClient) LockBucketRetentionPolicy(ctx context.Context, req *storage routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) } routingHeaders = strings.TrimSuffix(routingHeaders, "&") - md := metadata.Pairs("x-goog-request-params", routingHeaders) + hds := []string{"x-goog-request-params", routingHeaders} - ctx = insertMetadata(ctx, c.xGoogMetadata, md) + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) opts = append((*c.CallOptions).LockBucketRetentionPolicy[0:len((*c.CallOptions).LockBucketRetentionPolicy):len((*c.CallOptions).LockBucketRetentionPolicy)], opts...) var resp *storagepb.Bucket err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { @@ -716,9 +1146,10 @@ func (c *gRPCClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRe routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) } routingHeaders = strings.TrimSuffix(routingHeaders, "&") - md := metadata.Pairs("x-goog-request-params", routingHeaders) + hds := []string{"x-goog-request-params", routingHeaders} - ctx = insertMetadata(ctx, c.xGoogMetadata, md) + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) 
opts = append((*c.CallOptions).GetIamPolicy[0:len((*c.CallOptions).GetIamPolicy):len((*c.CallOptions).GetIamPolicy)], opts...) var resp *iampb.Policy err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { @@ -745,9 +1176,10 @@ func (c *gRPCClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRe routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) } routingHeaders = strings.TrimSuffix(routingHeaders, "&") - md := metadata.Pairs("x-goog-request-params", routingHeaders) + hds := []string{"x-goog-request-params", routingHeaders} - ctx = insertMetadata(ctx, c.xGoogMetadata, md) + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) opts = append((*c.CallOptions).SetIamPolicy[0:len((*c.CallOptions).SetIamPolicy):len((*c.CallOptions).SetIamPolicy)], opts...) var resp *iampb.Policy err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { @@ -774,9 +1206,10 @@ func (c *gRPCClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamP routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) } routingHeaders = strings.TrimSuffix(routingHeaders, "&") - md := metadata.Pairs("x-goog-request-params", routingHeaders) + hds := []string{"x-goog-request-params", routingHeaders} - ctx = insertMetadata(ctx, c.xGoogMetadata, md) + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) opts = append((*c.CallOptions).TestIamPermissions[0:len((*c.CallOptions).TestIamPermissions):len((*c.CallOptions).TestIamPermissions)], opts...) var resp *iampb.TestIamPermissionsResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { @@ -800,9 +1233,10 @@ func (c *gRPCClient) UpdateBucket(ctx context.Context, req *storagepb.UpdateBuck routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) } routingHeaders = strings.TrimSuffix(routingHeaders, "&") - md := metadata.Pairs("x-goog-request-params", routingHeaders) + hds := []string{"x-goog-request-params", routingHeaders} - ctx = insertMetadata(ctx, c.xGoogMetadata, md) + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) opts = append((*c.CallOptions).UpdateBucket[0:len((*c.CallOptions).UpdateBucket):len((*c.CallOptions).UpdateBucket)], opts...) 
var resp *storagepb.Bucket err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { @@ -816,7 +1250,7 @@ func (c *gRPCClient) UpdateBucket(ctx context.Context, req *storagepb.UpdateBuck return resp, nil } -func (c *gRPCClient) DeleteNotification(ctx context.Context, req *storagepb.DeleteNotificationRequest, opts ...gax.CallOption) error { +func (c *gRPCClient) DeleteNotificationConfig(ctx context.Context, req *storagepb.DeleteNotificationConfigRequest, opts ...gax.CallOption) error { routingHeaders := "" routingHeadersMap := make(map[string]string) if reg := regexp.MustCompile("(?Pprojects/[^/]+/buckets/[^/]+)(?:/.*)?"); reg.MatchString(req.GetName()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1])) > 0 { @@ -826,19 +1260,20 @@ func (c *gRPCClient) DeleteNotification(ctx context.Context, req *storagepb.Dele routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) } routingHeaders = strings.TrimSuffix(routingHeaders, "&") - md := metadata.Pairs("x-goog-request-params", routingHeaders) + hds := []string{"x-goog-request-params", routingHeaders} - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append((*c.CallOptions).DeleteNotification[0:len((*c.CallOptions).DeleteNotification):len((*c.CallOptions).DeleteNotification)], opts...) + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).DeleteNotificationConfig[0:len((*c.CallOptions).DeleteNotificationConfig):len((*c.CallOptions).DeleteNotificationConfig)], opts...) err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error - _, err = c.client.DeleteNotification(ctx, req, settings.GRPC...) + _, err = c.client.DeleteNotificationConfig(ctx, req, settings.GRPC...) return err }, opts...) return err } -func (c *gRPCClient) GetNotification(ctx context.Context, req *storagepb.GetNotificationRequest, opts ...gax.CallOption) (*storagepb.Notification, error) { +func (c *gRPCClient) GetNotificationConfig(ctx context.Context, req *storagepb.GetNotificationConfigRequest, opts ...gax.CallOption) (*storagepb.NotificationConfig, error) { routingHeaders := "" routingHeadersMap := make(map[string]string) if reg := regexp.MustCompile("(?Pprojects/[^/]+/buckets/[^/]+)(?:/.*)?"); reg.MatchString(req.GetName()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1])) > 0 { @@ -848,14 +1283,15 @@ func (c *gRPCClient) GetNotification(ctx context.Context, req *storagepb.GetNoti routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) } routingHeaders = strings.TrimSuffix(routingHeaders, "&") - md := metadata.Pairs("x-goog-request-params", routingHeaders) + hds := []string{"x-goog-request-params", routingHeaders} - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append((*c.CallOptions).GetNotification[0:len((*c.CallOptions).GetNotification):len((*c.CallOptions).GetNotification)], opts...) - var resp *storagepb.Notification + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).GetNotificationConfig[0:len((*c.CallOptions).GetNotificationConfig):len((*c.CallOptions).GetNotificationConfig)], opts...) + var resp *storagepb.NotificationConfig err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error - resp, err = c.client.GetNotification(ctx, req, settings.GRPC...) 
+ resp, err = c.client.GetNotificationConfig(ctx, req, settings.GRPC...) return err }, opts...) if err != nil { @@ -864,7 +1300,7 @@ func (c *gRPCClient) GetNotification(ctx context.Context, req *storagepb.GetNoti return resp, nil } -func (c *gRPCClient) CreateNotification(ctx context.Context, req *storagepb.CreateNotificationRequest, opts ...gax.CallOption) (*storagepb.Notification, error) { +func (c *gRPCClient) CreateNotificationConfig(ctx context.Context, req *storagepb.CreateNotificationConfigRequest, opts ...gax.CallOption) (*storagepb.NotificationConfig, error) { routingHeaders := "" routingHeadersMap := make(map[string]string) if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetParent()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1])) > 0 { @@ -874,14 +1310,15 @@ func (c *gRPCClient) CreateNotification(ctx context.Context, req *storagepb.Crea routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) } routingHeaders = strings.TrimSuffix(routingHeaders, "&") - md := metadata.Pairs("x-goog-request-params", routingHeaders) + hds := []string{"x-goog-request-params", routingHeaders} - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append((*c.CallOptions).CreateNotification[0:len((*c.CallOptions).CreateNotification):len((*c.CallOptions).CreateNotification)], opts...) - var resp *storagepb.Notification + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).CreateNotificationConfig[0:len((*c.CallOptions).CreateNotificationConfig):len((*c.CallOptions).CreateNotificationConfig)], opts...) + var resp *storagepb.NotificationConfig err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error - resp, err = c.client.CreateNotification(ctx, req, settings.GRPC...) + resp, err = c.client.CreateNotificationConfig(ctx, req, settings.GRPC...) return err }, opts...) if err != nil { @@ -890,7 +1327,7 @@ func (c *gRPCClient) CreateNotification(ctx context.Context, req *storagepb.Crea return resp, nil } -func (c *gRPCClient) ListNotifications(ctx context.Context, req *storagepb.ListNotificationsRequest, opts ...gax.CallOption) *NotificationIterator { +func (c *gRPCClient) ListNotificationConfigs(ctx context.Context, req *storagepb.ListNotificationConfigsRequest, opts ...gax.CallOption) *NotificationConfigIterator { routingHeaders := "" routingHeadersMap := make(map[string]string) if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetParent()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1])) > 0 { @@ -900,14 +1337,15 @@ func (c *gRPCClient) ListNotifications(ctx context.Context, req *storagepb.ListN routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) } routingHeaders = strings.TrimSuffix(routingHeaders, "&") - md := metadata.Pairs("x-goog-request-params", routingHeaders) - - ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append((*c.CallOptions).ListNotifications[0:len((*c.CallOptions).ListNotifications):len((*c.CallOptions).ListNotifications)], opts...) - it := &NotificationIterator{} - req = proto.Clone(req).(*storagepb.ListNotificationsRequest) - it.InternalFetch = func(pageSize int, pageToken string) ([]*storagepb.Notification, string, error) { - resp := &storagepb.ListNotificationsResponse{} + hds := []string{"x-goog-request-params", routingHeaders} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) 
+ opts = append((*c.CallOptions).ListNotificationConfigs[0:len((*c.CallOptions).ListNotificationConfigs):len((*c.CallOptions).ListNotificationConfigs)], opts...) + it := &NotificationConfigIterator{} + req = proto.Clone(req).(*storagepb.ListNotificationConfigsRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*storagepb.NotificationConfig, string, error) { + resp := &storagepb.ListNotificationConfigsResponse{} if pageToken != "" { req.PageToken = pageToken } @@ -918,7 +1356,7 @@ func (c *gRPCClient) ListNotifications(ctx context.Context, req *storagepb.ListN } err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error - resp, err = c.client.ListNotifications(ctx, req, settings.GRPC...) + resp, err = c.client.ListNotificationConfigs(ctx, req, settings.GRPC...) return err }, opts...) if err != nil { @@ -926,7 +1364,7 @@ func (c *gRPCClient) ListNotifications(ctx context.Context, req *storagepb.ListN } it.Response = resp - return resp.GetNotifications(), resp.GetNextPageToken(), nil + return resp.GetNotificationConfigs(), resp.GetNextPageToken(), nil } fetch := func(pageSize int, pageToken string) (string, error) { items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) @@ -954,9 +1392,10 @@ func (c *gRPCClient) ComposeObject(ctx context.Context, req *storagepb.ComposeOb routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) } routingHeaders = strings.TrimSuffix(routingHeaders, "&") - md := metadata.Pairs("x-goog-request-params", routingHeaders) + hds := []string{"x-goog-request-params", routingHeaders} - ctx = insertMetadata(ctx, c.xGoogMetadata, md) + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) opts = append((*c.CallOptions).ComposeObject[0:len((*c.CallOptions).ComposeObject):len((*c.CallOptions).ComposeObject)], opts...) var resp *storagepb.Object err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { @@ -980,9 +1419,10 @@ func (c *gRPCClient) DeleteObject(ctx context.Context, req *storagepb.DeleteObje routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) } routingHeaders = strings.TrimSuffix(routingHeaders, "&") - md := metadata.Pairs("x-goog-request-params", routingHeaders) + hds := []string{"x-goog-request-params", routingHeaders} - ctx = insertMetadata(ctx, c.xGoogMetadata, md) + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) opts = append((*c.CallOptions).DeleteObject[0:len((*c.CallOptions).DeleteObject):len((*c.CallOptions).DeleteObject)], opts...) 
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error @@ -992,6 +1432,33 @@ func (c *gRPCClient) DeleteObject(ctx context.Context, req *storagepb.DeleteObje return err } +func (c *gRPCClient) RestoreObject(ctx context.Context, req *storagepb.RestoreObjectRequest, opts ...gax.CallOption) (*storagepb.Object, error) { + routingHeaders := "" + routingHeadersMap := make(map[string]string) + if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetBucket()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetBucket())[1])) > 0 { + routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetBucket())[1]) + } + for headerName, headerValue := range routingHeadersMap { + routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) + } + routingHeaders = strings.TrimSuffix(routingHeaders, "&") + hds := []string{"x-goog-request-params", routingHeaders} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).RestoreObject[0:len((*c.CallOptions).RestoreObject):len((*c.CallOptions).RestoreObject)], opts...) + var resp *storagepb.Object + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.RestoreObject(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + func (c *gRPCClient) CancelResumableWrite(ctx context.Context, req *storagepb.CancelResumableWriteRequest, opts ...gax.CallOption) (*storagepb.CancelResumableWriteResponse, error) { routingHeaders := "" routingHeadersMap := make(map[string]string) @@ -1002,9 +1469,10 @@ func (c *gRPCClient) CancelResumableWrite(ctx context.Context, req *storagepb.Ca routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) } routingHeaders = strings.TrimSuffix(routingHeaders, "&") - md := metadata.Pairs("x-goog-request-params", routingHeaders) + hds := []string{"x-goog-request-params", routingHeaders} - ctx = insertMetadata(ctx, c.xGoogMetadata, md) + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) opts = append((*c.CallOptions).CancelResumableWrite[0:len((*c.CallOptions).CancelResumableWrite):len((*c.CallOptions).CancelResumableWrite)], opts...) var resp *storagepb.CancelResumableWriteResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { @@ -1028,9 +1496,10 @@ func (c *gRPCClient) GetObject(ctx context.Context, req *storagepb.GetObjectRequ routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) } routingHeaders = strings.TrimSuffix(routingHeaders, "&") - md := metadata.Pairs("x-goog-request-params", routingHeaders) + hds := []string{"x-goog-request-params", routingHeaders} - ctx = insertMetadata(ctx, c.xGoogMetadata, md) + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) opts = append((*c.CallOptions).GetObject[0:len((*c.CallOptions).GetObject):len((*c.CallOptions).GetObject)], opts...) 
var resp *storagepb.Object err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { @@ -1054,9 +1523,10 @@ func (c *gRPCClient) ReadObject(ctx context.Context, req *storagepb.ReadObjectRe routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) } routingHeaders = strings.TrimSuffix(routingHeaders, "&") - md := metadata.Pairs("x-goog-request-params", routingHeaders) + hds := []string{"x-goog-request-params", routingHeaders} - ctx = insertMetadata(ctx, c.xGoogMetadata, md) + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) opts = append((*c.CallOptions).ReadObject[0:len((*c.CallOptions).ReadObject):len((*c.CallOptions).ReadObject)], opts...) var resp storagepb.Storage_ReadObjectClient err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { @@ -1080,9 +1550,10 @@ func (c *gRPCClient) UpdateObject(ctx context.Context, req *storagepb.UpdateObje routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) } routingHeaders = strings.TrimSuffix(routingHeaders, "&") - md := metadata.Pairs("x-goog-request-params", routingHeaders) + hds := []string{"x-goog-request-params", routingHeaders} - ctx = insertMetadata(ctx, c.xGoogMetadata, md) + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) opts = append((*c.CallOptions).UpdateObject[0:len((*c.CallOptions).UpdateObject):len((*c.CallOptions).UpdateObject)], opts...) var resp *storagepb.Object err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { @@ -1097,7 +1568,7 @@ func (c *gRPCClient) UpdateObject(ctx context.Context, req *storagepb.UpdateObje } func (c *gRPCClient) WriteObject(ctx context.Context, opts ...gax.CallOption) (storagepb.Storage_WriteObjectClient, error) { - ctx = insertMetadata(ctx, c.xGoogMetadata) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, c.xGoogHeaders...) var resp storagepb.Storage_WriteObjectClient opts = append((*c.CallOptions).WriteObject[0:len((*c.CallOptions).WriteObject):len((*c.CallOptions).WriteObject)], opts...) err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { @@ -1111,6 +1582,21 @@ func (c *gRPCClient) WriteObject(ctx context.Context, opts ...gax.CallOption) (s return resp, nil } +func (c *gRPCClient) BidiWriteObject(ctx context.Context, opts ...gax.CallOption) (storagepb.Storage_BidiWriteObjectClient, error) { + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, c.xGoogHeaders...) + var resp storagepb.Storage_BidiWriteObjectClient + opts = append((*c.CallOptions).BidiWriteObject[0:len((*c.CallOptions).BidiWriteObject):len((*c.CallOptions).BidiWriteObject)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.BidiWriteObject(ctx, settings.GRPC...) + return err + }, opts...) 
+ if err != nil { + return nil, err + } + return resp, nil +} + func (c *gRPCClient) ListObjects(ctx context.Context, req *storagepb.ListObjectsRequest, opts ...gax.CallOption) *ObjectIterator { routingHeaders := "" routingHeadersMap := make(map[string]string) @@ -1121,9 +1607,10 @@ func (c *gRPCClient) ListObjects(ctx context.Context, req *storagepb.ListObjects routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) } routingHeaders = strings.TrimSuffix(routingHeaders, "&") - md := metadata.Pairs("x-goog-request-params", routingHeaders) + hds := []string{"x-goog-request-params", routingHeaders} - ctx = insertMetadata(ctx, c.xGoogMetadata, md) + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) opts = append((*c.CallOptions).ListObjects[0:len((*c.CallOptions).ListObjects):len((*c.CallOptions).ListObjects)], opts...) it := &ObjectIterator{} req = proto.Clone(req).(*storagepb.ListObjectsRequest) @@ -1178,9 +1665,10 @@ func (c *gRPCClient) RewriteObject(ctx context.Context, req *storagepb.RewriteOb routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) } routingHeaders = strings.TrimSuffix(routingHeaders, "&") - md := metadata.Pairs("x-goog-request-params", routingHeaders) + hds := []string{"x-goog-request-params", routingHeaders} - ctx = insertMetadata(ctx, c.xGoogMetadata, md) + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) opts = append((*c.CallOptions).RewriteObject[0:len((*c.CallOptions).RewriteObject):len((*c.CallOptions).RewriteObject)], opts...) var resp *storagepb.RewriteResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { @@ -1204,9 +1692,10 @@ func (c *gRPCClient) StartResumableWrite(ctx context.Context, req *storagepb.Sta routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) } routingHeaders = strings.TrimSuffix(routingHeaders, "&") - md := metadata.Pairs("x-goog-request-params", routingHeaders) + hds := []string{"x-goog-request-params", routingHeaders} - ctx = insertMetadata(ctx, c.xGoogMetadata, md) + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) opts = append((*c.CallOptions).StartResumableWrite[0:len((*c.CallOptions).StartResumableWrite):len((*c.CallOptions).StartResumableWrite)], opts...) var resp *storagepb.StartResumableWriteResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { @@ -1230,9 +1719,10 @@ func (c *gRPCClient) QueryWriteStatus(ctx context.Context, req *storagepb.QueryW routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) } routingHeaders = strings.TrimSuffix(routingHeaders, "&") - md := metadata.Pairs("x-goog-request-params", routingHeaders) + hds := []string{"x-goog-request-params", routingHeaders} - ctx = insertMetadata(ctx, c.xGoogMetadata, md) + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) opts = append((*c.CallOptions).QueryWriteStatus[0:len((*c.CallOptions).QueryWriteStatus):len((*c.CallOptions).QueryWriteStatus)], opts...) 
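The BidiWriteObject method completed above opens a bidirectional stream; the request and response messages it carries (BidiWriteObjectRequest, BidiWriteObjectResponse) are defined further down in this diff. Purely as a hedged illustration — this client lives under internal/ and is normally driven by the storage package itself — a single-chunk upload over that stream could look like the sketch below; client construction, checksums, and error handling are trimmed, and the ChecksummedData.Content field is assumed from the storage proto rather than shown in this hunk.

// Illustrative sketch, assuming a client value c exposing the generated
// BidiWriteObject method and an already-populated *storagepb.WriteObjectSpec.
func uploadOneChunk(ctx context.Context, c *gRPCClient, spec *storagepb.WriteObjectSpec, data []byte) (*storagepb.Object, error) {
	stream, err := c.BidiWriteObject(ctx)
	if err != nil {
		return nil, err
	}
	// First (and here only) message: carries the WriteObjectSpec, the data
	// chunk at offset 0, and finish_write to finalize the object.
	req := &storagepb.BidiWriteObjectRequest{
		FirstMessage: &storagepb.BidiWriteObjectRequest_WriteObjectSpec{WriteObjectSpec: spec},
		WriteOffset:  0,
		Data: &storagepb.BidiWriteObjectRequest_ChecksummedData{
			ChecksummedData: &storagepb.ChecksummedData{Content: data},
		},
		FinishWrite: true,
	}
	if err := stream.Send(req); err != nil {
		return nil, err
	}
	if err := stream.CloseSend(); err != nil {
		return nil, err
	}
	// After finish_write the server replies with the finalized object resource.
	resp, err := stream.Recv()
	if err != nil {
		return nil, err
	}
	return resp.GetResource(), nil
}

The QueryWriteStatus hunk continues below.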
var resp *storagepb.QueryWriteStatusResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { @@ -1256,9 +1746,10 @@ func (c *gRPCClient) GetServiceAccount(ctx context.Context, req *storagepb.GetSe routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) } routingHeaders = strings.TrimSuffix(routingHeaders, "&") - md := metadata.Pairs("x-goog-request-params", routingHeaders) + hds := []string{"x-goog-request-params", routingHeaders} - ctx = insertMetadata(ctx, c.xGoogMetadata, md) + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) opts = append((*c.CallOptions).GetServiceAccount[0:len((*c.CallOptions).GetServiceAccount):len((*c.CallOptions).GetServiceAccount)], opts...) var resp *storagepb.ServiceAccount err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { @@ -1282,9 +1773,10 @@ func (c *gRPCClient) CreateHmacKey(ctx context.Context, req *storagepb.CreateHma routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) } routingHeaders = strings.TrimSuffix(routingHeaders, "&") - md := metadata.Pairs("x-goog-request-params", routingHeaders) + hds := []string{"x-goog-request-params", routingHeaders} - ctx = insertMetadata(ctx, c.xGoogMetadata, md) + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) opts = append((*c.CallOptions).CreateHmacKey[0:len((*c.CallOptions).CreateHmacKey):len((*c.CallOptions).CreateHmacKey)], opts...) var resp *storagepb.CreateHmacKeyResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { @@ -1308,9 +1800,10 @@ func (c *gRPCClient) DeleteHmacKey(ctx context.Context, req *storagepb.DeleteHma routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) } routingHeaders = strings.TrimSuffix(routingHeaders, "&") - md := metadata.Pairs("x-goog-request-params", routingHeaders) + hds := []string{"x-goog-request-params", routingHeaders} - ctx = insertMetadata(ctx, c.xGoogMetadata, md) + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) opts = append((*c.CallOptions).DeleteHmacKey[0:len((*c.CallOptions).DeleteHmacKey):len((*c.CallOptions).DeleteHmacKey)], opts...) err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error @@ -1330,9 +1823,10 @@ func (c *gRPCClient) GetHmacKey(ctx context.Context, req *storagepb.GetHmacKeyRe routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) } routingHeaders = strings.TrimSuffix(routingHeaders, "&") - md := metadata.Pairs("x-goog-request-params", routingHeaders) + hds := []string{"x-goog-request-params", routingHeaders} - ctx = insertMetadata(ctx, c.xGoogMetadata, md) + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) opts = append((*c.CallOptions).GetHmacKey[0:len((*c.CallOptions).GetHmacKey):len((*c.CallOptions).GetHmacKey)], opts...) 
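RestoreObject, added at the start of this hunk series, takes the RestoreObjectRequest message defined later in this diff; its preconditions are proto3 optional scalars and are therefore set through pointers. A hedged construction sketch (proto.Bool and proto.Int64 come from google.golang.org/protobuf/proto; the bucket and object names are placeholders, and the surrounding helper is hypothetical):

// Illustrative only: restore a soft-deleted generation of an object, copying
// the source object's ACLs, guarded by an if_generation_match precondition.
func restoreGeneration(ctx context.Context, c *gRPCClient, gen int64) (*storagepb.Object, error) {
	req := &storagepb.RestoreObjectRequest{
		Bucket:            "projects/_/buckets/my-bucket", // placeholder name
		Object:            "my-object",                    // placeholder name
		Generation:        gen,
		CopySourceAcl:     proto.Bool(true),
		IfGenerationMatch: proto.Int64(0), // succeed only if no live version exists
	}
	return c.RestoreObject(ctx, req)
}

The GetHmacKey hunk continues below.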
var resp *storagepb.HmacKeyMetadata err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { @@ -1356,9 +1850,10 @@ func (c *gRPCClient) ListHmacKeys(ctx context.Context, req *storagepb.ListHmacKe routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) } routingHeaders = strings.TrimSuffix(routingHeaders, "&") - md := metadata.Pairs("x-goog-request-params", routingHeaders) + hds := []string{"x-goog-request-params", routingHeaders} - ctx = insertMetadata(ctx, c.xGoogMetadata, md) + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) opts = append((*c.CallOptions).ListHmacKeys[0:len((*c.CallOptions).ListHmacKeys):len((*c.CallOptions).ListHmacKeys)], opts...) it := &HmacKeyMetadataIterator{} req = proto.Clone(req).(*storagepb.ListHmacKeysRequest) @@ -1410,9 +1905,10 @@ func (c *gRPCClient) UpdateHmacKey(ctx context.Context, req *storagepb.UpdateHma routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) } routingHeaders = strings.TrimSuffix(routingHeaders, "&") - md := metadata.Pairs("x-goog-request-params", routingHeaders) + hds := []string{"x-goog-request-params", routingHeaders} - ctx = insertMetadata(ctx, c.xGoogMetadata, md) + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) opts = append((*c.CallOptions).UpdateHmacKey[0:len((*c.CallOptions).UpdateHmacKey):len((*c.CallOptions).UpdateHmacKey)], opts...) var resp *storagepb.HmacKeyMetadata err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { @@ -1425,191 +1921,3 @@ func (c *gRPCClient) UpdateHmacKey(ctx context.Context, req *storagepb.UpdateHma } return resp, nil } - -// BucketIterator manages a stream of *storagepb.Bucket. -type BucketIterator struct { - items []*storagepb.Bucket - pageInfo *iterator.PageInfo - nextFunc func() error - - // Response is the raw response for the current page. - // It must be cast to the RPC response type. - // Calling Next() or InternalFetch() updates this value. - Response interface{} - - // InternalFetch is for use by the Google Cloud Libraries only. - // It is not part of the stable interface of this package. - // - // InternalFetch returns results from a single call to the underlying RPC. - // The number of results is no greater than pageSize. - // If there are no more results, nextPageToken is empty and err is nil. - InternalFetch func(pageSize int, pageToken string) (results []*storagepb.Bucket, nextPageToken string, err error) -} - -// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. -func (it *BucketIterator) PageInfo() *iterator.PageInfo { - return it.pageInfo -} - -// Next returns the next result. Its second return value is iterator.Done if there are no more -// results. Once Next returns Done, all subsequent calls will return Done. -func (it *BucketIterator) Next() (*storagepb.Bucket, error) { - var item *storagepb.Bucket - if err := it.nextFunc(); err != nil { - return item, err - } - item = it.items[0] - it.items = it.items[1:] - return item, nil -} - -func (it *BucketIterator) bufLen() int { - return len(it.items) -} - -func (it *BucketIterator) takeBuf() interface{} { - b := it.items - it.items = nil - return b -} - -// HmacKeyMetadataIterator manages a stream of *storagepb.HmacKeyMetadata. 
-type HmacKeyMetadataIterator struct { - items []*storagepb.HmacKeyMetadata - pageInfo *iterator.PageInfo - nextFunc func() error - - // Response is the raw response for the current page. - // It must be cast to the RPC response type. - // Calling Next() or InternalFetch() updates this value. - Response interface{} - - // InternalFetch is for use by the Google Cloud Libraries only. - // It is not part of the stable interface of this package. - // - // InternalFetch returns results from a single call to the underlying RPC. - // The number of results is no greater than pageSize. - // If there are no more results, nextPageToken is empty and err is nil. - InternalFetch func(pageSize int, pageToken string) (results []*storagepb.HmacKeyMetadata, nextPageToken string, err error) -} - -// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. -func (it *HmacKeyMetadataIterator) PageInfo() *iterator.PageInfo { - return it.pageInfo -} - -// Next returns the next result. Its second return value is iterator.Done if there are no more -// results. Once Next returns Done, all subsequent calls will return Done. -func (it *HmacKeyMetadataIterator) Next() (*storagepb.HmacKeyMetadata, error) { - var item *storagepb.HmacKeyMetadata - if err := it.nextFunc(); err != nil { - return item, err - } - item = it.items[0] - it.items = it.items[1:] - return item, nil -} - -func (it *HmacKeyMetadataIterator) bufLen() int { - return len(it.items) -} - -func (it *HmacKeyMetadataIterator) takeBuf() interface{} { - b := it.items - it.items = nil - return b -} - -// NotificationIterator manages a stream of *storagepb.Notification. -type NotificationIterator struct { - items []*storagepb.Notification - pageInfo *iterator.PageInfo - nextFunc func() error - - // Response is the raw response for the current page. - // It must be cast to the RPC response type. - // Calling Next() or InternalFetch() updates this value. - Response interface{} - - // InternalFetch is for use by the Google Cloud Libraries only. - // It is not part of the stable interface of this package. - // - // InternalFetch returns results from a single call to the underlying RPC. - // The number of results is no greater than pageSize. - // If there are no more results, nextPageToken is empty and err is nil. - InternalFetch func(pageSize int, pageToken string) (results []*storagepb.Notification, nextPageToken string, err error) -} - -// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. -func (it *NotificationIterator) PageInfo() *iterator.PageInfo { - return it.pageInfo -} - -// Next returns the next result. Its second return value is iterator.Done if there are no more -// results. Once Next returns Done, all subsequent calls will return Done. -func (it *NotificationIterator) Next() (*storagepb.Notification, error) { - var item *storagepb.Notification - if err := it.nextFunc(); err != nil { - return item, err - } - item = it.items[0] - it.items = it.items[1:] - return item, nil -} - -func (it *NotificationIterator) bufLen() int { - return len(it.items) -} - -func (it *NotificationIterator) takeBuf() interface{} { - b := it.items - it.items = nil - return b -} - -// ObjectIterator manages a stream of *storagepb.Object. -type ObjectIterator struct { - items []*storagepb.Object - pageInfo *iterator.PageInfo - nextFunc func() error - - // Response is the raw response for the current page. - // It must be cast to the RPC response type. - // Calling Next() or InternalFetch() updates this value. 
- Response interface{} - - // InternalFetch is for use by the Google Cloud Libraries only. - // It is not part of the stable interface of this package. - // - // InternalFetch returns results from a single call to the underlying RPC. - // The number of results is no greater than pageSize. - // If there are no more results, nextPageToken is empty and err is nil. - InternalFetch func(pageSize int, pageToken string) (results []*storagepb.Object, nextPageToken string, err error) -} - -// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. -func (it *ObjectIterator) PageInfo() *iterator.PageInfo { - return it.pageInfo -} - -// Next returns the next result. Its second return value is iterator.Done if there are no more -// results. Once Next returns Done, all subsequent calls will return Done. -func (it *ObjectIterator) Next() (*storagepb.Object, error) { - var item *storagepb.Object - if err := it.nextFunc(); err != nil { - return item, err - } - item = it.items[0] - it.items = it.items[1:] - return item, nil -} - -func (it *ObjectIterator) bufLen() int { - return len(it.items) -} - -func (it *ObjectIterator) takeBuf() interface{} { - b := it.items - it.items = nil - return b -} diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/stubs/storage.pb.go b/vendor/cloud.google.com/go/storage/internal/apiv2/storagepb/storage.pb.go similarity index 66% rename from vendor/cloud.google.com/go/storage/internal/apiv2/stubs/storage.pb.go rename to vendor/cloud.google.com/go/storage/internal/apiv2/storagepb/storage.pb.go index f81e216c57..3486fd1533 100644 --- a/vendor/cloud.google.com/go/storage/internal/apiv2/stubs/storage.pb.go +++ b/vendor/cloud.google.com/go/storage/internal/apiv2/storagepb/storage.pb.go @@ -1,4 +1,4 @@ -// Copyright 2022 Google LLC +// Copyright 2023 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -14,19 +14,19 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.26.0 -// protoc v3.21.9 +// protoc-gen-go v1.31.0 +// protoc v4.23.2 // source: google/storage/v2/storage.proto -package storage +package storagepb import ( context "context" reflect "reflect" sync "sync" + iampb "cloud.google.com/go/iam/apiv1/iampb" _ "google.golang.org/genproto/googleapis/api/annotations" - v1 "google.golang.org/genproto/googleapis/iam/v1" date "google.golang.org/genproto/googleapis/type/date" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" @@ -177,7 +177,7 @@ func (x ServiceConstants_Values) Number() protoreflect.EnumNumber { // Deprecated: Use ServiceConstants_Values.Descriptor instead. func (ServiceConstants_Values) EnumDescriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{39, 0} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{42, 0} } // Request message for DeleteBucket. @@ -337,9 +337,12 @@ type CreateBucketRequest struct { // Required. The project to which this bucket will belong. Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` // Properties of the new bucket being inserted. - // The project and name of the bucket are specified in the parent and - // bucket_id fields, respectively. Populating those fields in `bucket` will - // result in an error. + // The name of the bucket is specified in the `bucket_id` field. Populating + // `bucket.name` field will result in an error. 
+ // The project of the bucket must be specified in the `bucket.project` field. + // This field must be in `projects/{projectIdentifier}` format, + // {projectIdentifier} can be the project ID or project number. The `parent` + // field must be either empty or `projects/_`. Bucket *Bucket `protobuf:"bytes,2,opt,name=bucket,proto3" json:"bucket,omitempty"` // Required. The ID to use for this bucket, which will become the final // component of the bucket's resource name. For example, the value `foo` might @@ -663,8 +666,6 @@ type UpdateBucketRequest struct { // may accidentally reset the new field's value. // // Not specifying any fields is an error. - // Not specifying a field while setting that field to a non-default value is - // an error. UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,6,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` } @@ -742,18 +743,18 @@ func (x *UpdateBucketRequest) GetUpdateMask() *fieldmaskpb.FieldMask { return nil } -// Request message for DeleteNotification. -type DeleteNotificationRequest struct { +// Request message for DeleteNotificationConfig. +type DeleteNotificationConfigRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Required. The parent bucket of the notification. + // Required. The parent bucket of the NotificationConfig. Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` } -func (x *DeleteNotificationRequest) Reset() { - *x = DeleteNotificationRequest{} +func (x *DeleteNotificationConfigRequest) Reset() { + *x = DeleteNotificationConfigRequest{} if protoimpl.UnsafeEnabled { mi := &file_google_storage_v2_storage_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -761,13 +762,13 @@ func (x *DeleteNotificationRequest) Reset() { } } -func (x *DeleteNotificationRequest) String() string { +func (x *DeleteNotificationConfigRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*DeleteNotificationRequest) ProtoMessage() {} +func (*DeleteNotificationConfigRequest) ProtoMessage() {} -func (x *DeleteNotificationRequest) ProtoReflect() protoreflect.Message { +func (x *DeleteNotificationConfigRequest) ProtoReflect() protoreflect.Message { mi := &file_google_storage_v2_storage_proto_msgTypes[7] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -779,32 +780,32 @@ func (x *DeleteNotificationRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use DeleteNotificationRequest.ProtoReflect.Descriptor instead. -func (*DeleteNotificationRequest) Descriptor() ([]byte, []int) { +// Deprecated: Use DeleteNotificationConfigRequest.ProtoReflect.Descriptor instead. +func (*DeleteNotificationConfigRequest) Descriptor() ([]byte, []int) { return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{7} } -func (x *DeleteNotificationRequest) GetName() string { +func (x *DeleteNotificationConfigRequest) GetName() string { if x != nil { return x.Name } return "" } -// Request message for GetNotification. -type GetNotificationRequest struct { +// Request message for GetNotificationConfig. +type GetNotificationConfigRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Required. The parent bucket of the notification. + // Required. The parent bucket of the NotificationConfig. 
// Format: - // `projects/{project}/buckets/{bucket}/notificationConfigs/{notification}` + // `projects/{project}/buckets/{bucket}/notificationConfigs/{notificationConfig}` Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` } -func (x *GetNotificationRequest) Reset() { - *x = GetNotificationRequest{} +func (x *GetNotificationConfigRequest) Reset() { + *x = GetNotificationConfigRequest{} if protoimpl.UnsafeEnabled { mi := &file_google_storage_v2_storage_proto_msgTypes[8] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -812,13 +813,13 @@ func (x *GetNotificationRequest) Reset() { } } -func (x *GetNotificationRequest) String() string { +func (x *GetNotificationConfigRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetNotificationRequest) ProtoMessage() {} +func (*GetNotificationConfigRequest) ProtoMessage() {} -func (x *GetNotificationRequest) ProtoReflect() protoreflect.Message { +func (x *GetNotificationConfigRequest) ProtoReflect() protoreflect.Message { mi := &file_google_storage_v2_storage_proto_msgTypes[8] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -830,32 +831,32 @@ func (x *GetNotificationRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetNotificationRequest.ProtoReflect.Descriptor instead. -func (*GetNotificationRequest) Descriptor() ([]byte, []int) { +// Deprecated: Use GetNotificationConfigRequest.ProtoReflect.Descriptor instead. +func (*GetNotificationConfigRequest) Descriptor() ([]byte, []int) { return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{8} } -func (x *GetNotificationRequest) GetName() string { +func (x *GetNotificationConfigRequest) GetName() string { if x != nil { return x.Name } return "" } -// Request message for CreateNotification. -type CreateNotificationRequest struct { +// Request message for CreateNotificationConfig. +type CreateNotificationConfigRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Required. The bucket to which this notification belongs. + // Required. The bucket to which this NotificationConfig belongs. Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` - // Required. Properties of the notification to be inserted. - Notification *Notification `protobuf:"bytes,2,opt,name=notification,proto3" json:"notification,omitempty"` + // Required. Properties of the NotificationConfig to be inserted. 
+ NotificationConfig *NotificationConfig `protobuf:"bytes,2,opt,name=notification_config,json=notificationConfig,proto3" json:"notification_config,omitempty"` } -func (x *CreateNotificationRequest) Reset() { - *x = CreateNotificationRequest{} +func (x *CreateNotificationConfigRequest) Reset() { + *x = CreateNotificationConfigRequest{} if protoimpl.UnsafeEnabled { mi := &file_google_storage_v2_storage_proto_msgTypes[9] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -863,13 +864,13 @@ func (x *CreateNotificationRequest) Reset() { } } -func (x *CreateNotificationRequest) String() string { +func (x *CreateNotificationConfigRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*CreateNotificationRequest) ProtoMessage() {} +func (*CreateNotificationConfigRequest) ProtoMessage() {} -func (x *CreateNotificationRequest) ProtoReflect() protoreflect.Message { +func (x *CreateNotificationConfigRequest) ProtoReflect() protoreflect.Message { mi := &file_google_storage_v2_storage_proto_msgTypes[9] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -881,48 +882,47 @@ func (x *CreateNotificationRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use CreateNotificationRequest.ProtoReflect.Descriptor instead. -func (*CreateNotificationRequest) Descriptor() ([]byte, []int) { +// Deprecated: Use CreateNotificationConfigRequest.ProtoReflect.Descriptor instead. +func (*CreateNotificationConfigRequest) Descriptor() ([]byte, []int) { return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{9} } -func (x *CreateNotificationRequest) GetParent() string { +func (x *CreateNotificationConfigRequest) GetParent() string { if x != nil { return x.Parent } return "" } -func (x *CreateNotificationRequest) GetNotification() *Notification { +func (x *CreateNotificationConfigRequest) GetNotificationConfig() *NotificationConfig { if x != nil { - return x.Notification + return x.NotificationConfig } return nil } // Request message for ListNotifications. -type ListNotificationsRequest struct { +type ListNotificationConfigsRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields // Required. Name of a Google Cloud Storage bucket. Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` - // The maximum number of notifications to return. The service may return fewer - // than this value. - // The default value is 100. Specifying a value above 100 will result in a - // page_size of 100. + // The maximum number of NotificationConfigs to return. The service may + // return fewer than this value. The default value is 100. Specifying a value + // above 100 will result in a page_size of 100. PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` - // A page token, received from a previous `ListNotifications` call. + // A page token, received from a previous `ListNotificationConfigs` call. // Provide this to retrieve the subsequent page. // - // When paginating, all other parameters provided to `ListNotifications` must - // match the call that provided the page token. + // When paginating, all other parameters provided to `ListNotificationConfigs` + // must match the call that provided the page token. 
PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` } -func (x *ListNotificationsRequest) Reset() { - *x = ListNotificationsRequest{} +func (x *ListNotificationConfigsRequest) Reset() { + *x = ListNotificationConfigsRequest{} if protoimpl.UnsafeEnabled { mi := &file_google_storage_v2_storage_proto_msgTypes[10] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -930,13 +930,13 @@ func (x *ListNotificationsRequest) Reset() { } } -func (x *ListNotificationsRequest) String() string { +func (x *ListNotificationConfigsRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ListNotificationsRequest) ProtoMessage() {} +func (*ListNotificationConfigsRequest) ProtoMessage() {} -func (x *ListNotificationsRequest) ProtoReflect() protoreflect.Message { +func (x *ListNotificationConfigsRequest) ProtoReflect() protoreflect.Message { mi := &file_google_storage_v2_storage_proto_msgTypes[10] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -948,47 +948,47 @@ func (x *ListNotificationsRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ListNotificationsRequest.ProtoReflect.Descriptor instead. -func (*ListNotificationsRequest) Descriptor() ([]byte, []int) { +// Deprecated: Use ListNotificationConfigsRequest.ProtoReflect.Descriptor instead. +func (*ListNotificationConfigsRequest) Descriptor() ([]byte, []int) { return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{10} } -func (x *ListNotificationsRequest) GetParent() string { +func (x *ListNotificationConfigsRequest) GetParent() string { if x != nil { return x.Parent } return "" } -func (x *ListNotificationsRequest) GetPageSize() int32 { +func (x *ListNotificationConfigsRequest) GetPageSize() int32 { if x != nil { return x.PageSize } return 0 } -func (x *ListNotificationsRequest) GetPageToken() string { +func (x *ListNotificationConfigsRequest) GetPageToken() string { if x != nil { return x.PageToken } return "" } -// The result of a call to Notifications.ListNotifications -type ListNotificationsResponse struct { +// The result of a call to ListNotificationConfigs +type ListNotificationConfigsResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields // The list of items. - Notifications []*Notification `protobuf:"bytes,1,rep,name=notifications,proto3" json:"notifications,omitempty"` + NotificationConfigs []*NotificationConfig `protobuf:"bytes,1,rep,name=notification_configs,json=notificationConfigs,proto3" json:"notification_configs,omitempty"` // A token, which can be sent as `page_token` to retrieve the next page. // If this field is omitted, there are no subsequent pages. 
NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` } -func (x *ListNotificationsResponse) Reset() { - *x = ListNotificationsResponse{} +func (x *ListNotificationConfigsResponse) Reset() { + *x = ListNotificationConfigsResponse{} if protoimpl.UnsafeEnabled { mi := &file_google_storage_v2_storage_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -996,13 +996,13 @@ func (x *ListNotificationsResponse) Reset() { } } -func (x *ListNotificationsResponse) String() string { +func (x *ListNotificationConfigsResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ListNotificationsResponse) ProtoMessage() {} +func (*ListNotificationConfigsResponse) ProtoMessage() {} -func (x *ListNotificationsResponse) ProtoReflect() protoreflect.Message { +func (x *ListNotificationConfigsResponse) ProtoReflect() protoreflect.Message { mi := &file_google_storage_v2_storage_proto_msgTypes[11] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -1014,19 +1014,19 @@ func (x *ListNotificationsResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ListNotificationsResponse.ProtoReflect.Descriptor instead. -func (*ListNotificationsResponse) Descriptor() ([]byte, []int) { +// Deprecated: Use ListNotificationConfigsResponse.ProtoReflect.Descriptor instead. +func (*ListNotificationConfigsResponse) Descriptor() ([]byte, []int) { return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{11} } -func (x *ListNotificationsResponse) GetNotifications() []*Notification { +func (x *ListNotificationConfigsResponse) GetNotificationConfigs() []*NotificationConfig { if x != nil { - return x.Notifications + return x.NotificationConfigs } return nil } -func (x *ListNotificationsResponse) GetNextPageToken() string { +func (x *ListNotificationConfigsResponse) GetNextPageToken() string { if x != nil { return x.NextPageToken } @@ -1277,6 +1277,137 @@ func (x *DeleteObjectRequest) GetCommonObjectRequestParams() *CommonObjectReques return nil } +// Message for restoring an object. +// `bucket`, `object`, and `generation` **must** be set. +type RestoreObjectRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. Name of the bucket in which the object resides. + Bucket string `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"` + // Required. The name of the object to restore. + Object string `protobuf:"bytes,2,opt,name=object,proto3" json:"object,omitempty"` + // Required. The specific revision of the object to restore. + Generation int64 `protobuf:"varint,3,opt,name=generation,proto3" json:"generation,omitempty"` + // Makes the operation conditional on whether the object's current generation + // matches the given value. Setting to 0 makes the operation succeed only if + // there are no live versions of the object. + IfGenerationMatch *int64 `protobuf:"varint,4,opt,name=if_generation_match,json=ifGenerationMatch,proto3,oneof" json:"if_generation_match,omitempty"` + // Makes the operation conditional on whether the object's live generation + // does not match the given value. If no live object exists, the precondition + // fails. Setting to 0 makes the operation succeed only if there is a live + // version of the object. 
+ IfGenerationNotMatch *int64 `protobuf:"varint,5,opt,name=if_generation_not_match,json=ifGenerationNotMatch,proto3,oneof" json:"if_generation_not_match,omitempty"` + // Makes the operation conditional on whether the object's current + // metageneration matches the given value. + IfMetagenerationMatch *int64 `protobuf:"varint,6,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3,oneof" json:"if_metageneration_match,omitempty"` + // Makes the operation conditional on whether the object's current + // metageneration does not match the given value. + IfMetagenerationNotMatch *int64 `protobuf:"varint,7,opt,name=if_metageneration_not_match,json=ifMetagenerationNotMatch,proto3,oneof" json:"if_metageneration_not_match,omitempty"` + // If false or unset, the bucket's default object ACL will be used. + // If true, copy the source object's access controls. + // Return an error if bucket has UBLA enabled. + CopySourceAcl *bool `protobuf:"varint,9,opt,name=copy_source_acl,json=copySourceAcl,proto3,oneof" json:"copy_source_acl,omitempty"` + // A set of parameters common to Storage API requests concerning an object. + CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,8,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"` +} + +func (x *RestoreObjectRequest) Reset() { + *x = RestoreObjectRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RestoreObjectRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RestoreObjectRequest) ProtoMessage() {} + +func (x *RestoreObjectRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RestoreObjectRequest.ProtoReflect.Descriptor instead. 
+func (*RestoreObjectRequest) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{14} +} + +func (x *RestoreObjectRequest) GetBucket() string { + if x != nil { + return x.Bucket + } + return "" +} + +func (x *RestoreObjectRequest) GetObject() string { + if x != nil { + return x.Object + } + return "" +} + +func (x *RestoreObjectRequest) GetGeneration() int64 { + if x != nil { + return x.Generation + } + return 0 +} + +func (x *RestoreObjectRequest) GetIfGenerationMatch() int64 { + if x != nil && x.IfGenerationMatch != nil { + return *x.IfGenerationMatch + } + return 0 +} + +func (x *RestoreObjectRequest) GetIfGenerationNotMatch() int64 { + if x != nil && x.IfGenerationNotMatch != nil { + return *x.IfGenerationNotMatch + } + return 0 +} + +func (x *RestoreObjectRequest) GetIfMetagenerationMatch() int64 { + if x != nil && x.IfMetagenerationMatch != nil { + return *x.IfMetagenerationMatch + } + return 0 +} + +func (x *RestoreObjectRequest) GetIfMetagenerationNotMatch() int64 { + if x != nil && x.IfMetagenerationNotMatch != nil { + return *x.IfMetagenerationNotMatch + } + return 0 +} + +func (x *RestoreObjectRequest) GetCopySourceAcl() bool { + if x != nil && x.CopySourceAcl != nil { + return *x.CopySourceAcl + } + return false +} + +func (x *RestoreObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams { + if x != nil { + return x.CommonObjectRequestParams + } + return nil +} + // Message for canceling an in-progress resumable upload. // `upload_id` **must** be set. type CancelResumableWriteRequest struct { @@ -1292,7 +1423,7 @@ type CancelResumableWriteRequest struct { func (x *CancelResumableWriteRequest) Reset() { *x = CancelResumableWriteRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[14] + mi := &file_google_storage_v2_storage_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1305,7 +1436,7 @@ func (x *CancelResumableWriteRequest) String() string { func (*CancelResumableWriteRequest) ProtoMessage() {} func (x *CancelResumableWriteRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[14] + mi := &file_google_storage_v2_storage_proto_msgTypes[15] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1318,7 +1449,7 @@ func (x *CancelResumableWriteRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use CancelResumableWriteRequest.ProtoReflect.Descriptor instead. 
func (*CancelResumableWriteRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{14} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{15} } func (x *CancelResumableWriteRequest) GetUploadId() string { @@ -1339,7 +1470,7 @@ type CancelResumableWriteResponse struct { func (x *CancelResumableWriteResponse) Reset() { *x = CancelResumableWriteResponse{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[15] + mi := &file_google_storage_v2_storage_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1352,7 +1483,7 @@ func (x *CancelResumableWriteResponse) String() string { func (*CancelResumableWriteResponse) ProtoMessage() {} func (x *CancelResumableWriteResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[15] + mi := &file_google_storage_v2_storage_proto_msgTypes[16] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1365,7 +1496,7 @@ func (x *CancelResumableWriteResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use CancelResumableWriteResponse.ProtoReflect.Descriptor instead. func (*CancelResumableWriteResponse) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{15} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{16} } // Request message for ReadObject. @@ -1427,7 +1558,7 @@ type ReadObjectRequest struct { func (x *ReadObjectRequest) Reset() { *x = ReadObjectRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[16] + mi := &file_google_storage_v2_storage_proto_msgTypes[17] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1440,7 +1571,7 @@ func (x *ReadObjectRequest) String() string { func (*ReadObjectRequest) ProtoMessage() {} func (x *ReadObjectRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[16] + mi := &file_google_storage_v2_storage_proto_msgTypes[17] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1453,7 +1584,7 @@ func (x *ReadObjectRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ReadObjectRequest.ProtoReflect.Descriptor instead. func (*ReadObjectRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{16} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{17} } func (x *ReadObjectRequest) GetBucket() string { @@ -1546,6 +1677,8 @@ type GetObjectRequest struct { // If present, selects a specific revision of this object (as opposed to the // latest version, the default). Generation int64 `protobuf:"varint,3,opt,name=generation,proto3" json:"generation,omitempty"` + // If true, return the soft-deleted version of this object. + SoftDeleted *bool `protobuf:"varint,11,opt,name=soft_deleted,json=softDeleted,proto3,oneof" json:"soft_deleted,omitempty"` // Makes the operation conditional on whether the object's current generation // matches the given value. Setting to 0 makes the operation succeed only if // there are no live versions of the object. 
@@ -1573,7 +1706,7 @@ type GetObjectRequest struct { func (x *GetObjectRequest) Reset() { *x = GetObjectRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[17] + mi := &file_google_storage_v2_storage_proto_msgTypes[18] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1586,7 +1719,7 @@ func (x *GetObjectRequest) String() string { func (*GetObjectRequest) ProtoMessage() {} func (x *GetObjectRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[17] + mi := &file_google_storage_v2_storage_proto_msgTypes[18] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1599,7 +1732,7 @@ func (x *GetObjectRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetObjectRequest.ProtoReflect.Descriptor instead. func (*GetObjectRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{17} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{18} } func (x *GetObjectRequest) GetBucket() string { @@ -1623,6 +1756,13 @@ func (x *GetObjectRequest) GetGeneration() int64 { return 0 } +func (x *GetObjectRequest) GetSoftDeleted() bool { + if x != nil && x.SoftDeleted != nil { + return *x.SoftDeleted + } + return false +} + func (x *GetObjectRequest) GetIfGenerationMatch() int64 { if x != nil && x.IfGenerationMatch != nil { return *x.IfGenerationMatch @@ -1676,9 +1816,9 @@ type ReadObjectResponse struct { // client that the request is still live while it is running an operation to // generate more data. ChecksummedData *ChecksummedData `protobuf:"bytes,1,opt,name=checksummed_data,json=checksummedData,proto3" json:"checksummed_data,omitempty"` - // The checksums of the complete object. The client should compute one of - // these checksums over the downloaded object and compare it against the value - // provided here. + // The checksums of the complete object. If the object is downloaded in full, + // the client should compute one of these checksums over the downloaded object + // and compare it against the value provided here. ObjectChecksums *ObjectChecksums `protobuf:"bytes,2,opt,name=object_checksums,json=objectChecksums,proto3" json:"object_checksums,omitempty"` // If read_offset and or read_limit was specified on the // ReadObjectRequest, ContentRange will be populated on the first @@ -1692,7 +1832,7 @@ type ReadObjectResponse struct { func (x *ReadObjectResponse) Reset() { *x = ReadObjectResponse{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[18] + mi := &file_google_storage_v2_storage_proto_msgTypes[19] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1705,7 +1845,7 @@ func (x *ReadObjectResponse) String() string { func (*ReadObjectResponse) ProtoMessage() {} func (x *ReadObjectResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[18] + mi := &file_google_storage_v2_storage_proto_msgTypes[19] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1718,7 +1858,7 @@ func (x *ReadObjectResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ReadObjectResponse.ProtoReflect.Descriptor instead. 
func (*ReadObjectResponse) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{18} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{19} } func (x *ReadObjectResponse) GetChecksummedData() *ChecksummedData { @@ -1783,15 +1923,13 @@ type WriteObjectSpec struct { // This situation is considered a client error, and if such an error occurs // you must start the upload over from scratch, this time sending the correct // number of bytes. - // - // The `object_size` value is ignored for one-shot (non-resumable) writes. ObjectSize *int64 `protobuf:"varint,8,opt,name=object_size,json=objectSize,proto3,oneof" json:"object_size,omitempty"` } func (x *WriteObjectSpec) Reset() { *x = WriteObjectSpec{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[19] + mi := &file_google_storage_v2_storage_proto_msgTypes[20] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1804,7 +1942,7 @@ func (x *WriteObjectSpec) String() string { func (*WriteObjectSpec) ProtoMessage() {} func (x *WriteObjectSpec) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[19] + mi := &file_google_storage_v2_storage_proto_msgTypes[20] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1817,7 +1955,7 @@ func (x *WriteObjectSpec) ProtoReflect() protoreflect.Message { // Deprecated: Use WriteObjectSpec.ProtoReflect.Descriptor instead. func (*WriteObjectSpec) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{19} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{20} } func (x *WriteObjectSpec) GetResource() *Object { @@ -1878,7 +2016,6 @@ type WriteObjectRequest struct { // The first message of each stream should set one of the following. // // Types that are assignable to FirstMessage: - // // *WriteObjectRequest_UploadId // *WriteObjectRequest_WriteObjectSpec FirstMessage isWriteObjectRequest_FirstMessage `protobuf_oneof:"first_message"` @@ -1899,11 +2036,10 @@ type WriteObjectRequest struct { // A portion of the data for the object. // // Types that are assignable to Data: - // // *WriteObjectRequest_ChecksummedData Data isWriteObjectRequest_Data `protobuf_oneof:"data"` // Checksums for the complete object. If the checksums computed by the service - // don't match the specifified checksums the call will fail. May only be + // don't match the specified checksums the call will fail. May only be // provided in the first or last request (either with first_message, or // finish_write set). 
ObjectChecksums *ObjectChecksums `protobuf:"bytes,6,opt,name=object_checksums,json=objectChecksums,proto3" json:"object_checksums,omitempty"` @@ -1921,7 +2057,7 @@ type WriteObjectRequest struct { func (x *WriteObjectRequest) Reset() { *x = WriteObjectRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[20] + mi := &file_google_storage_v2_storage_proto_msgTypes[21] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1934,7 +2070,7 @@ func (x *WriteObjectRequest) String() string { func (*WriteObjectRequest) ProtoMessage() {} func (x *WriteObjectRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[20] + mi := &file_google_storage_v2_storage_proto_msgTypes[21] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1947,7 +2083,7 @@ func (x *WriteObjectRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use WriteObjectRequest.ProtoReflect.Descriptor instead. func (*WriteObjectRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{20} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{21} } func (m *WriteObjectRequest) GetFirstMessage() isWriteObjectRequest_FirstMessage { @@ -2054,7 +2190,6 @@ type WriteObjectResponse struct { // The response will set one of the following. // // Types that are assignable to WriteStatus: - // // *WriteObjectResponse_PersistedSize // *WriteObjectResponse_Resource WriteStatus isWriteObjectResponse_WriteStatus `protobuf_oneof:"write_status"` @@ -2063,7 +2198,7 @@ type WriteObjectResponse struct { func (x *WriteObjectResponse) Reset() { *x = WriteObjectResponse{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[21] + mi := &file_google_storage_v2_storage_proto_msgTypes[22] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2076,7 +2211,7 @@ func (x *WriteObjectResponse) String() string { func (*WriteObjectResponse) ProtoMessage() {} func (x *WriteObjectResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[21] + mi := &file_google_storage_v2_storage_proto_msgTypes[22] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2089,7 +2224,7 @@ func (x *WriteObjectResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use WriteObjectResponse.ProtoReflect.Descriptor instead. func (*WriteObjectResponse) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{21} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{22} } func (m *WriteObjectResponse) GetWriteStatus() isWriteObjectResponse_WriteStatus { @@ -2133,74 +2268,83 @@ func (*WriteObjectResponse_PersistedSize) isWriteObjectResponse_WriteStatus() {} func (*WriteObjectResponse_Resource) isWriteObjectResponse_WriteStatus() {} -// Request message for ListObjects. -type ListObjectsRequest struct { +// Request message for BidiWriteObject. +type BidiWriteObjectRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Required. Name of the bucket in which to look for objects. 
- Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` - // Maximum number of `items` plus `prefixes` to return - // in a single page of responses. As duplicate `prefixes` are - // omitted, fewer total results may be returned than requested. The service - // will use this parameter or 1,000 items, whichever is smaller. - PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` - // A previously-returned page token representing part of the larger set of - // results to view. - PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` - // If set, returns results in a directory-like mode. `items` will contain - // only objects whose names, aside from the `prefix`, do not - // contain `delimiter`. Objects whose names, aside from the - // `prefix`, contain `delimiter` will have their name, - // truncated after the `delimiter`, returned in - // `prefixes`. Duplicate `prefixes` are omitted. - Delimiter string `protobuf:"bytes,4,opt,name=delimiter,proto3" json:"delimiter,omitempty"` - // If true, objects that end in exactly one instance of `delimiter` - // will have their metadata included in `items` in addition to - // `prefixes`. - IncludeTrailingDelimiter bool `protobuf:"varint,5,opt,name=include_trailing_delimiter,json=includeTrailingDelimiter,proto3" json:"include_trailing_delimiter,omitempty"` - // Filter results to objects whose names begin with this prefix. - Prefix string `protobuf:"bytes,6,opt,name=prefix,proto3" json:"prefix,omitempty"` - // If `true`, lists all versions of an object as distinct results. - // For more information, see - // [Object - // Versioning](https://cloud.google.com/storage/docs/object-versioning). - Versions bool `protobuf:"varint,7,opt,name=versions,proto3" json:"versions,omitempty"` - // Mask specifying which fields to read from each result. - // If no mask is specified, will default to all fields except items.acl and - // items.owner. - // * may be used to mean "all fields". - ReadMask *fieldmaskpb.FieldMask `protobuf:"bytes,8,opt,name=read_mask,json=readMask,proto3,oneof" json:"read_mask,omitempty"` - // Optional. Filter results to objects whose names are lexicographically equal - // to or after lexicographic_start. If lexicographic_end is also set, the - // objects listed have names between lexicographic_start (inclusive) and - // lexicographic_end (exclusive). - LexicographicStart string `protobuf:"bytes,10,opt,name=lexicographic_start,json=lexicographicStart,proto3" json:"lexicographic_start,omitempty"` - // Optional. Filter results to objects whose names are lexicographically - // before lexicographic_end. If lexicographic_start is also set, the objects - // listed have names between lexicographic_start (inclusive) and - // lexicographic_end (exclusive). - LexicographicEnd string `protobuf:"bytes,11,opt,name=lexicographic_end,json=lexicographicEnd,proto3" json:"lexicographic_end,omitempty"` + // The first message of each stream should set one of the following. + // + // Types that are assignable to FirstMessage: + // *BidiWriteObjectRequest_UploadId + // *BidiWriteObjectRequest_WriteObjectSpec + FirstMessage isBidiWriteObjectRequest_FirstMessage `protobuf_oneof:"first_message"` + // Required. The offset from the beginning of the object at which the data + // should be written. + // + // In the first `WriteObjectRequest` of a `WriteObject()` action, it + // indicates the initial offset for the `Write()` call. 
The value **must** be + // equal to the `persisted_size` that a call to `QueryWriteStatus()` would + // return (0 if this is the first write to the object). + // + // On subsequent calls, this value **must** be no larger than the sum of the + // first `write_offset` and the sizes of all `data` chunks sent previously on + // this stream. + // + // An invalid value will cause an error. + WriteOffset int64 `protobuf:"varint,3,opt,name=write_offset,json=writeOffset,proto3" json:"write_offset,omitempty"` + // A portion of the data for the object. + // + // Types that are assignable to Data: + // *BidiWriteObjectRequest_ChecksummedData + Data isBidiWriteObjectRequest_Data `protobuf_oneof:"data"` + // Checksums for the complete object. If the checksums computed by the service + // don't match the specified checksums the call will fail. May only be + // provided in the first or last request (either with first_message, or + // finish_write set). + ObjectChecksums *ObjectChecksums `protobuf:"bytes,6,opt,name=object_checksums,json=objectChecksums,proto3" json:"object_checksums,omitempty"` + // For each BidiWriteObjectRequest where state_lookup is `true` or the client + // closes the stream, the service will send a BidiWriteObjectResponse + // containing the current persisted size. The persisted size sent in responses + // covers all the bytes the server has persisted thus far and can be used to + // decide what data is safe for the client to drop. Note that the object's + // current size reported by the BidiWriteObjectResponse may lag behind the + // number of bytes written by the client. + StateLookup bool `protobuf:"varint,7,opt,name=state_lookup,json=stateLookup,proto3" json:"state_lookup,omitempty"` + // Persists data written on the stream, up to and including the current + // message, to permanent storage. This option should be used sparingly as it + // may reduce performance. Ongoing writes will periodically be persisted on + // the server even when `flush` is not set. + Flush bool `protobuf:"varint,8,opt,name=flush,proto3" json:"flush,omitempty"` + // If `true`, this indicates that the write is complete. Sending any + // `WriteObjectRequest`s subsequent to one in which `finish_write` is `true` + // will cause an error. + // For a non-resumable write (where the upload_id was not set in the first + // message), it is an error not to set this field in the final message of the + // stream. + FinishWrite bool `protobuf:"varint,9,opt,name=finish_write,json=finishWrite,proto3" json:"finish_write,omitempty"` + // A set of parameters common to Storage API requests concerning an object. 
+ CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,10,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"` } -func (x *ListObjectsRequest) Reset() { - *x = ListObjectsRequest{} +func (x *BidiWriteObjectRequest) Reset() { + *x = BidiWriteObjectRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[22] + mi := &file_google_storage_v2_storage_proto_msgTypes[23] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *ListObjectsRequest) String() string { +func (x *BidiWriteObjectRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ListObjectsRequest) ProtoMessage() {} +func (*BidiWriteObjectRequest) ProtoMessage() {} -func (x *ListObjectsRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[22] +func (x *BidiWriteObjectRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[23] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2211,16 +2355,303 @@ func (x *ListObjectsRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ListObjectsRequest.ProtoReflect.Descriptor instead. -func (*ListObjectsRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{22} +// Deprecated: Use BidiWriteObjectRequest.ProtoReflect.Descriptor instead. +func (*BidiWriteObjectRequest) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{23} } -func (x *ListObjectsRequest) GetParent() string { - if x != nil { - return x.Parent +func (m *BidiWriteObjectRequest) GetFirstMessage() isBidiWriteObjectRequest_FirstMessage { + if m != nil { + return m.FirstMessage } - return "" + return nil +} + +func (x *BidiWriteObjectRequest) GetUploadId() string { + if x, ok := x.GetFirstMessage().(*BidiWriteObjectRequest_UploadId); ok { + return x.UploadId + } + return "" +} + +func (x *BidiWriteObjectRequest) GetWriteObjectSpec() *WriteObjectSpec { + if x, ok := x.GetFirstMessage().(*BidiWriteObjectRequest_WriteObjectSpec); ok { + return x.WriteObjectSpec + } + return nil +} + +func (x *BidiWriteObjectRequest) GetWriteOffset() int64 { + if x != nil { + return x.WriteOffset + } + return 0 +} + +func (m *BidiWriteObjectRequest) GetData() isBidiWriteObjectRequest_Data { + if m != nil { + return m.Data + } + return nil +} + +func (x *BidiWriteObjectRequest) GetChecksummedData() *ChecksummedData { + if x, ok := x.GetData().(*BidiWriteObjectRequest_ChecksummedData); ok { + return x.ChecksummedData + } + return nil +} + +func (x *BidiWriteObjectRequest) GetObjectChecksums() *ObjectChecksums { + if x != nil { + return x.ObjectChecksums + } + return nil +} + +func (x *BidiWriteObjectRequest) GetStateLookup() bool { + if x != nil { + return x.StateLookup + } + return false +} + +func (x *BidiWriteObjectRequest) GetFlush() bool { + if x != nil { + return x.Flush + } + return false +} + +func (x *BidiWriteObjectRequest) GetFinishWrite() bool { + if x != nil { + return x.FinishWrite + } + return false +} + +func (x *BidiWriteObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams { + if x != nil { + return x.CommonObjectRequestParams + } + return nil +} + +type isBidiWriteObjectRequest_FirstMessage interface { + 
isBidiWriteObjectRequest_FirstMessage() +} + +type BidiWriteObjectRequest_UploadId struct { + // For resumable uploads. This should be the `upload_id` returned from a + // call to `StartResumableWriteResponse`. + UploadId string `protobuf:"bytes,1,opt,name=upload_id,json=uploadId,proto3,oneof"` +} + +type BidiWriteObjectRequest_WriteObjectSpec struct { + // For non-resumable uploads. Describes the overall upload, including the + // destination bucket and object name, preconditions, etc. + WriteObjectSpec *WriteObjectSpec `protobuf:"bytes,2,opt,name=write_object_spec,json=writeObjectSpec,proto3,oneof"` +} + +func (*BidiWriteObjectRequest_UploadId) isBidiWriteObjectRequest_FirstMessage() {} + +func (*BidiWriteObjectRequest_WriteObjectSpec) isBidiWriteObjectRequest_FirstMessage() {} + +type isBidiWriteObjectRequest_Data interface { + isBidiWriteObjectRequest_Data() +} + +type BidiWriteObjectRequest_ChecksummedData struct { + // The data to insert. If a crc32c checksum is provided that doesn't match + // the checksum computed by the service, the request will fail. + ChecksummedData *ChecksummedData `protobuf:"bytes,4,opt,name=checksummed_data,json=checksummedData,proto3,oneof"` +} + +func (*BidiWriteObjectRequest_ChecksummedData) isBidiWriteObjectRequest_Data() {} + +// Response message for BidiWriteObject. +type BidiWriteObjectResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The response will set one of the following. + // + // Types that are assignable to WriteStatus: + // *BidiWriteObjectResponse_PersistedSize + // *BidiWriteObjectResponse_Resource + WriteStatus isBidiWriteObjectResponse_WriteStatus `protobuf_oneof:"write_status"` +} + +func (x *BidiWriteObjectResponse) Reset() { + *x = BidiWriteObjectResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BidiWriteObjectResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BidiWriteObjectResponse) ProtoMessage() {} + +func (x *BidiWriteObjectResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[24] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BidiWriteObjectResponse.ProtoReflect.Descriptor instead. +func (*BidiWriteObjectResponse) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{24} +} + +func (m *BidiWriteObjectResponse) GetWriteStatus() isBidiWriteObjectResponse_WriteStatus { + if m != nil { + return m.WriteStatus + } + return nil +} + +func (x *BidiWriteObjectResponse) GetPersistedSize() int64 { + if x, ok := x.GetWriteStatus().(*BidiWriteObjectResponse_PersistedSize); ok { + return x.PersistedSize + } + return 0 +} + +func (x *BidiWriteObjectResponse) GetResource() *Object { + if x, ok := x.GetWriteStatus().(*BidiWriteObjectResponse_Resource); ok { + return x.Resource + } + return nil +} + +type isBidiWriteObjectResponse_WriteStatus interface { + isBidiWriteObjectResponse_WriteStatus() +} + +type BidiWriteObjectResponse_PersistedSize struct { + // The total number of bytes that have been processed for the given object + // from all `WriteObject` calls. Only set if the upload has not finalized. 
+ PersistedSize int64 `protobuf:"varint,1,opt,name=persisted_size,json=persistedSize,proto3,oneof"` +} + +type BidiWriteObjectResponse_Resource struct { + // A resource containing the metadata for the uploaded object. Only set if + // the upload has finalized. + Resource *Object `protobuf:"bytes,2,opt,name=resource,proto3,oneof"` +} + +func (*BidiWriteObjectResponse_PersistedSize) isBidiWriteObjectResponse_WriteStatus() {} + +func (*BidiWriteObjectResponse_Resource) isBidiWriteObjectResponse_WriteStatus() {} + +// Request message for ListObjects. +type ListObjectsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. Name of the bucket in which to look for objects. + Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + // Maximum number of `items` plus `prefixes` to return + // in a single page of responses. As duplicate `prefixes` are + // omitted, fewer total results may be returned than requested. The service + // will use this parameter or 1,000 items, whichever is smaller. + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // A previously-returned page token representing part of the larger set of + // results to view. + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + // If set, returns results in a directory-like mode. `items` will contain + // only objects whose names, aside from the `prefix`, do not + // contain `delimiter`. Objects whose names, aside from the + // `prefix`, contain `delimiter` will have their name, + // truncated after the `delimiter`, returned in + // `prefixes`. Duplicate `prefixes` are omitted. + Delimiter string `protobuf:"bytes,4,opt,name=delimiter,proto3" json:"delimiter,omitempty"` + // If true, objects that end in exactly one instance of `delimiter` + // will have their metadata included in `items` in addition to + // `prefixes`. + IncludeTrailingDelimiter bool `protobuf:"varint,5,opt,name=include_trailing_delimiter,json=includeTrailingDelimiter,proto3" json:"include_trailing_delimiter,omitempty"` + // Filter results to objects whose names begin with this prefix. + Prefix string `protobuf:"bytes,6,opt,name=prefix,proto3" json:"prefix,omitempty"` + // If `true`, lists all versions of an object as distinct results. + // For more information, see + // [Object + // Versioning](https://cloud.google.com/storage/docs/object-versioning). + Versions bool `protobuf:"varint,7,opt,name=versions,proto3" json:"versions,omitempty"` + // Mask specifying which fields to read from each result. + // If no mask is specified, will default to all fields except items.acl and + // items.owner. + // * may be used to mean "all fields". + ReadMask *fieldmaskpb.FieldMask `protobuf:"bytes,8,opt,name=read_mask,json=readMask,proto3,oneof" json:"read_mask,omitempty"` + // Optional. Filter results to objects whose names are lexicographically equal + // to or after lexicographic_start. If lexicographic_end is also set, the + // objects listed have names between lexicographic_start (inclusive) and + // lexicographic_end (exclusive). + LexicographicStart string `protobuf:"bytes,10,opt,name=lexicographic_start,json=lexicographicStart,proto3" json:"lexicographic_start,omitempty"` + // Optional. Filter results to objects whose names are lexicographically + // before lexicographic_end. 
If lexicographic_start is also set, the objects + // listed have names between lexicographic_start (inclusive) and + // lexicographic_end (exclusive). + LexicographicEnd string `protobuf:"bytes,11,opt,name=lexicographic_end,json=lexicographicEnd,proto3" json:"lexicographic_end,omitempty"` + // Optional. If true, only list all soft-deleted versions of the object. + // Soft delete policy is required to set this option. + SoftDeleted bool `protobuf:"varint,12,opt,name=soft_deleted,json=softDeleted,proto3" json:"soft_deleted,omitempty"` + // Optional. Filter results to objects and prefixes that match this glob + // pattern. See [List Objects Using + // Glob](https://cloud.google.com/storage/docs/json_api/v1/objects/list#list-objects-and-prefixes-using-glob) + // for the full syntax. + MatchGlob string `protobuf:"bytes,14,opt,name=match_glob,json=matchGlob,proto3" json:"match_glob,omitempty"` +} + +func (x *ListObjectsRequest) Reset() { + *x = ListObjectsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListObjectsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListObjectsRequest) ProtoMessage() {} + +func (x *ListObjectsRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[25] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListObjectsRequest.ProtoReflect.Descriptor instead. +func (*ListObjectsRequest) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{25} +} + +func (x *ListObjectsRequest) GetParent() string { + if x != nil { + return x.Parent + } + return "" } func (x *ListObjectsRequest) GetPageSize() int32 { @@ -2286,6 +2717,20 @@ func (x *ListObjectsRequest) GetLexicographicEnd() string { return "" } +func (x *ListObjectsRequest) GetSoftDeleted() bool { + if x != nil { + return x.SoftDeleted + } + return false +} + +func (x *ListObjectsRequest) GetMatchGlob() string { + if x != nil { + return x.MatchGlob + } + return "" +} + // Request object for `QueryWriteStatus`. type QueryWriteStatusRequest struct { state protoimpl.MessageState @@ -2302,7 +2747,7 @@ type QueryWriteStatusRequest struct { func (x *QueryWriteStatusRequest) Reset() { *x = QueryWriteStatusRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[23] + mi := &file_google_storage_v2_storage_proto_msgTypes[26] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2315,7 +2760,7 @@ func (x *QueryWriteStatusRequest) String() string { func (*QueryWriteStatusRequest) ProtoMessage() {} func (x *QueryWriteStatusRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[23] + mi := &file_google_storage_v2_storage_proto_msgTypes[26] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2328,7 +2773,7 @@ func (x *QueryWriteStatusRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use QueryWriteStatusRequest.ProtoReflect.Descriptor instead. 
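As an aside on the listing fields defined just above, here is a minimal, hypothetical sketch of how a caller might populate the new ListObjectsRequest for a directory-style listing. The storagepb alias, bucket name, prefix, and glob value are illustrative assumptions only, and the import path is omitted because it depends on where this generated file is vendored.

// Sketch only: storagepb is an assumed alias for this generated package.
func newLogListingRequest() *storagepb.ListObjectsRequest {
	return &storagepb.ListObjectsRequest{
		Parent:    "projects/_/buckets/example-bucket", // hypothetical bucket resource name
		Prefix:    "logs/2024/",                        // only names beginning with this prefix
		Delimiter: "/",                                 // deeper "directories" are returned as prefixes
		PageSize:  100,                                 // the service uses min(page_size, 1000)
		MatchGlob: "**.txt",                            // optional glob filter added in this revision
	}
}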
func (*QueryWriteStatusRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{23} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{26} } func (x *QueryWriteStatusRequest) GetUploadId() string { @@ -2354,7 +2799,6 @@ type QueryWriteStatusResponse struct { // The response will set one of the following. // // Types that are assignable to WriteStatus: - // // *QueryWriteStatusResponse_PersistedSize // *QueryWriteStatusResponse_Resource WriteStatus isQueryWriteStatusResponse_WriteStatus `protobuf_oneof:"write_status"` @@ -2363,7 +2807,7 @@ type QueryWriteStatusResponse struct { func (x *QueryWriteStatusResponse) Reset() { *x = QueryWriteStatusResponse{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[24] + mi := &file_google_storage_v2_storage_proto_msgTypes[27] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2376,7 +2820,7 @@ func (x *QueryWriteStatusResponse) String() string { func (*QueryWriteStatusResponse) ProtoMessage() {} func (x *QueryWriteStatusResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[24] + mi := &file_google_storage_v2_storage_proto_msgTypes[27] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2389,7 +2833,7 @@ func (x *QueryWriteStatusResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use QueryWriteStatusResponse.ProtoReflect.Descriptor instead. func (*QueryWriteStatusResponse) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{24} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{27} } func (m *QueryWriteStatusResponse) GetWriteStatus() isQueryWriteStatusResponse_WriteStatus { @@ -2547,7 +2991,7 @@ type RewriteObjectRequest struct { func (x *RewriteObjectRequest) Reset() { *x = RewriteObjectRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[25] + mi := &file_google_storage_v2_storage_proto_msgTypes[28] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2560,7 +3004,7 @@ func (x *RewriteObjectRequest) String() string { func (*RewriteObjectRequest) ProtoMessage() {} func (x *RewriteObjectRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[25] + mi := &file_google_storage_v2_storage_proto_msgTypes[28] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2573,7 +3017,7 @@ func (x *RewriteObjectRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use RewriteObjectRequest.ProtoReflect.Descriptor instead. 
func (*RewriteObjectRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{25} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{28} } func (x *RewriteObjectRequest) GetDestinationName() string { @@ -2763,7 +3207,7 @@ type RewriteResponse struct { func (x *RewriteResponse) Reset() { *x = RewriteResponse{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[26] + mi := &file_google_storage_v2_storage_proto_msgTypes[29] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2776,7 +3220,7 @@ func (x *RewriteResponse) String() string { func (*RewriteResponse) ProtoMessage() {} func (x *RewriteResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[26] + mi := &file_google_storage_v2_storage_proto_msgTypes[29] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2789,7 +3233,7 @@ func (x *RewriteResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use RewriteResponse.ProtoReflect.Descriptor instead. func (*RewriteResponse) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{26} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{29} } func (x *RewriteResponse) GetTotalBytesRewritten() int64 { @@ -2848,7 +3292,7 @@ type StartResumableWriteRequest struct { func (x *StartResumableWriteRequest) Reset() { *x = StartResumableWriteRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[27] + mi := &file_google_storage_v2_storage_proto_msgTypes[30] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2861,7 +3305,7 @@ func (x *StartResumableWriteRequest) String() string { func (*StartResumableWriteRequest) ProtoMessage() {} func (x *StartResumableWriteRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[27] + mi := &file_google_storage_v2_storage_proto_msgTypes[30] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2874,7 +3318,7 @@ func (x *StartResumableWriteRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use StartResumableWriteRequest.ProtoReflect.Descriptor instead. 
func (*StartResumableWriteRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{27} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{30} } func (x *StartResumableWriteRequest) GetWriteObjectSpec() *WriteObjectSpec { @@ -2912,7 +3356,7 @@ type StartResumableWriteResponse struct { func (x *StartResumableWriteResponse) Reset() { *x = StartResumableWriteResponse{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[28] + mi := &file_google_storage_v2_storage_proto_msgTypes[31] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2925,7 +3369,7 @@ func (x *StartResumableWriteResponse) String() string { func (*StartResumableWriteResponse) ProtoMessage() {} func (x *StartResumableWriteResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[28] + mi := &file_google_storage_v2_storage_proto_msgTypes[31] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2938,7 +3382,7 @@ func (x *StartResumableWriteResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use StartResumableWriteResponse.ProtoReflect.Descriptor instead. func (*StartResumableWriteResponse) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{28} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{31} } func (x *StartResumableWriteResponse) GetUploadId() string { @@ -2987,8 +3431,6 @@ type UpdateObjectRequest struct { // may accidentally reset the new field's value. // // Not specifying any fields is an error. - // Not specifying a field while setting that field to a non-default value is - // an error. UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,7,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` // A set of parameters common to Storage API requests concerning an object. CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,8,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"` @@ -2997,7 +3439,7 @@ type UpdateObjectRequest struct { func (x *UpdateObjectRequest) Reset() { *x = UpdateObjectRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[29] + mi := &file_google_storage_v2_storage_proto_msgTypes[32] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3010,7 +3452,7 @@ func (x *UpdateObjectRequest) String() string { func (*UpdateObjectRequest) ProtoMessage() {} func (x *UpdateObjectRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[29] + mi := &file_google_storage_v2_storage_proto_msgTypes[32] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3023,7 +3465,7 @@ func (x *UpdateObjectRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use UpdateObjectRequest.ProtoReflect.Descriptor instead. 
func (*UpdateObjectRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{29} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{32} } func (x *UpdateObjectRequest) GetObject() *Object { @@ -3088,15 +3530,15 @@ type GetServiceAccountRequest struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Required. Project ID, in the format of "projects/". - // can be the project ID or project number. + // Required. Project ID, in the format of "projects/{projectIdentifier}". + // {projectIdentifier} can be the project ID or project number. Project string `protobuf:"bytes,1,opt,name=project,proto3" json:"project,omitempty"` } func (x *GetServiceAccountRequest) Reset() { *x = GetServiceAccountRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[30] + mi := &file_google_storage_v2_storage_proto_msgTypes[33] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3109,7 +3551,7 @@ func (x *GetServiceAccountRequest) String() string { func (*GetServiceAccountRequest) ProtoMessage() {} func (x *GetServiceAccountRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[30] + mi := &file_google_storage_v2_storage_proto_msgTypes[33] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3122,7 +3564,7 @@ func (x *GetServiceAccountRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetServiceAccountRequest.ProtoReflect.Descriptor instead. func (*GetServiceAccountRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{30} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{33} } func (x *GetServiceAccountRequest) GetProject() string { @@ -3139,7 +3581,7 @@ type CreateHmacKeyRequest struct { unknownFields protoimpl.UnknownFields // Required. The project that the HMAC-owning service account lives in, in the - // format of "projects/". can be the + // format of "projects/{projectIdentifier}". {projectIdentifier} can be the // project ID or project number. Project string `protobuf:"bytes,1,opt,name=project,proto3" json:"project,omitempty"` // Required. The service account to create the HMAC for. @@ -3149,7 +3591,7 @@ type CreateHmacKeyRequest struct { func (x *CreateHmacKeyRequest) Reset() { *x = CreateHmacKeyRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[31] + mi := &file_google_storage_v2_storage_proto_msgTypes[34] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3162,7 +3604,7 @@ func (x *CreateHmacKeyRequest) String() string { func (*CreateHmacKeyRequest) ProtoMessage() {} func (x *CreateHmacKeyRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[31] + mi := &file_google_storage_v2_storage_proto_msgTypes[34] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3175,7 +3617,7 @@ func (x *CreateHmacKeyRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use CreateHmacKeyRequest.ProtoReflect.Descriptor instead. 
func (*CreateHmacKeyRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{31} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{34} } func (x *CreateHmacKeyRequest) GetProject() string { @@ -3208,7 +3650,7 @@ type CreateHmacKeyResponse struct { func (x *CreateHmacKeyResponse) Reset() { *x = CreateHmacKeyResponse{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[32] + mi := &file_google_storage_v2_storage_proto_msgTypes[35] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3221,7 +3663,7 @@ func (x *CreateHmacKeyResponse) String() string { func (*CreateHmacKeyResponse) ProtoMessage() {} func (x *CreateHmacKeyResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[32] + mi := &file_google_storage_v2_storage_proto_msgTypes[35] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3234,7 +3676,7 @@ func (x *CreateHmacKeyResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use CreateHmacKeyResponse.ProtoReflect.Descriptor instead. func (*CreateHmacKeyResponse) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{32} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{35} } func (x *CreateHmacKeyResponse) GetMetadata() *HmacKeyMetadata { @@ -3260,15 +3702,15 @@ type DeleteHmacKeyRequest struct { // Required. The identifying key for the HMAC to delete. AccessId string `protobuf:"bytes,1,opt,name=access_id,json=accessId,proto3" json:"access_id,omitempty"` // Required. The project that owns the HMAC key, in the format of - // "projects/". - // can be the project ID or project number. + // "projects/{projectIdentifier}". + // {projectIdentifier} can be the project ID or project number. Project string `protobuf:"bytes,2,opt,name=project,proto3" json:"project,omitempty"` } func (x *DeleteHmacKeyRequest) Reset() { *x = DeleteHmacKeyRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[33] + mi := &file_google_storage_v2_storage_proto_msgTypes[36] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3281,7 +3723,7 @@ func (x *DeleteHmacKeyRequest) String() string { func (*DeleteHmacKeyRequest) ProtoMessage() {} func (x *DeleteHmacKeyRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[33] + mi := &file_google_storage_v2_storage_proto_msgTypes[36] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3294,7 +3736,7 @@ func (x *DeleteHmacKeyRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use DeleteHmacKeyRequest.ProtoReflect.Descriptor instead. func (*DeleteHmacKeyRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{33} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{36} } func (x *DeleteHmacKeyRequest) GetAccessId() string { @@ -3320,15 +3762,15 @@ type GetHmacKeyRequest struct { // Required. The identifying key for the HMAC to delete. AccessId string `protobuf:"bytes,1,opt,name=access_id,json=accessId,proto3" json:"access_id,omitempty"` // Required. The project the HMAC key lies in, in the format of - // "projects/". - // can be the project ID or project number. 
+ // "projects/{projectIdentifier}". + // {projectIdentifier} can be the project ID or project number. Project string `protobuf:"bytes,2,opt,name=project,proto3" json:"project,omitempty"` } func (x *GetHmacKeyRequest) Reset() { *x = GetHmacKeyRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[34] + mi := &file_google_storage_v2_storage_proto_msgTypes[37] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3341,7 +3783,7 @@ func (x *GetHmacKeyRequest) String() string { func (*GetHmacKeyRequest) ProtoMessage() {} func (x *GetHmacKeyRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[34] + mi := &file_google_storage_v2_storage_proto_msgTypes[37] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3354,7 +3796,7 @@ func (x *GetHmacKeyRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetHmacKeyRequest.ProtoReflect.Descriptor instead. func (*GetHmacKeyRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{34} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{37} } func (x *GetHmacKeyRequest) GetAccessId() string { @@ -3378,8 +3820,8 @@ type ListHmacKeysRequest struct { unknownFields protoimpl.UnknownFields // Required. The project to list HMAC keys for, in the format of - // "projects/". - // can be the project ID or project number. + // "projects/{projectIdentifier}". + // {projectIdentifier} can be the project ID or project number. Project string `protobuf:"bytes,1,opt,name=project,proto3" json:"project,omitempty"` // The maximum number of keys to return. PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` @@ -3394,7 +3836,7 @@ type ListHmacKeysRequest struct { func (x *ListHmacKeysRequest) Reset() { *x = ListHmacKeysRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[35] + mi := &file_google_storage_v2_storage_proto_msgTypes[38] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3407,7 +3849,7 @@ func (x *ListHmacKeysRequest) String() string { func (*ListHmacKeysRequest) ProtoMessage() {} func (x *ListHmacKeysRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[35] + mi := &file_google_storage_v2_storage_proto_msgTypes[38] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3420,7 +3862,7 @@ func (x *ListHmacKeysRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ListHmacKeysRequest.ProtoReflect.Descriptor instead. 
func (*ListHmacKeysRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{35} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{38} } func (x *ListHmacKeysRequest) GetProject() string { @@ -3474,7 +3916,7 @@ type ListHmacKeysResponse struct { func (x *ListHmacKeysResponse) Reset() { *x = ListHmacKeysResponse{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[36] + mi := &file_google_storage_v2_storage_proto_msgTypes[39] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3487,7 +3929,7 @@ func (x *ListHmacKeysResponse) String() string { func (*ListHmacKeysResponse) ProtoMessage() {} func (x *ListHmacKeysResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[36] + mi := &file_google_storage_v2_storage_proto_msgTypes[39] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3500,7 +3942,7 @@ func (x *ListHmacKeysResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ListHmacKeysResponse.ProtoReflect.Descriptor instead. func (*ListHmacKeysResponse) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{36} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{39} } func (x *ListHmacKeysResponse) GetHmacKeys() []*HmacKeyMetadata { @@ -3540,7 +3982,7 @@ type UpdateHmacKeyRequest struct { func (x *UpdateHmacKeyRequest) Reset() { *x = UpdateHmacKeyRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[37] + mi := &file_google_storage_v2_storage_proto_msgTypes[40] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3553,7 +3995,7 @@ func (x *UpdateHmacKeyRequest) String() string { func (*UpdateHmacKeyRequest) ProtoMessage() {} func (x *UpdateHmacKeyRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[37] + mi := &file_google_storage_v2_storage_proto_msgTypes[40] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3566,7 +4008,7 @@ func (x *UpdateHmacKeyRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use UpdateHmacKeyRequest.ProtoReflect.Descriptor instead. 
func (*UpdateHmacKeyRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{37} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40} } func (x *UpdateHmacKeyRequest) GetHmacKey() *HmacKeyMetadata { @@ -3603,7 +4045,7 @@ type CommonObjectRequestParams struct { func (x *CommonObjectRequestParams) Reset() { *x = CommonObjectRequestParams{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[38] + mi := &file_google_storage_v2_storage_proto_msgTypes[41] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3616,7 +4058,7 @@ func (x *CommonObjectRequestParams) String() string { func (*CommonObjectRequestParams) ProtoMessage() {} func (x *CommonObjectRequestParams) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[38] + mi := &file_google_storage_v2_storage_proto_msgTypes[41] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3629,7 +4071,7 @@ func (x *CommonObjectRequestParams) ProtoReflect() protoreflect.Message { // Deprecated: Use CommonObjectRequestParams.ProtoReflect.Descriptor instead. func (*CommonObjectRequestParams) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{38} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{41} } func (x *CommonObjectRequestParams) GetEncryptionAlgorithm() string { @@ -3663,7 +4105,7 @@ type ServiceConstants struct { func (x *ServiceConstants) Reset() { *x = ServiceConstants{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[39] + mi := &file_google_storage_v2_storage_proto_msgTypes[42] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3676,7 +4118,7 @@ func (x *ServiceConstants) String() string { func (*ServiceConstants) ProtoMessage() {} func (x *ServiceConstants) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[39] + mi := &file_google_storage_v2_storage_proto_msgTypes[42] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3689,7 +4131,7 @@ func (x *ServiceConstants) ProtoReflect() protoreflect.Message { // Deprecated: Use ServiceConstants.ProtoReflect.Descriptor instead. func (*ServiceConstants) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{39} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{42} } // A bucket. @@ -3699,6 +4141,7 @@ type Bucket struct { unknownFields protoimpl.UnknownFields // Immutable. The name of the bucket. + // Format: `projects/{project}/buckets/{bucket}` Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // Output only. The user-chosen part of the bucket name. The `{bucket}` // portion of the `name` field. For globally unique buckets, this is equal to @@ -3709,12 +4152,10 @@ type Bucket struct { // only be performed if the etag matches that of the bucket. Etag string `protobuf:"bytes,29,opt,name=etag,proto3" json:"etag,omitempty"` // Immutable. The project which owns this bucket, in the format of - // "projects/". - // can be the project ID or project number. + // "projects/{projectIdentifier}". + // {projectIdentifier} can be the project ID or project number. 
Project string `protobuf:"bytes,3,opt,name=project,proto3" json:"project,omitempty"` // Output only. The metadata generation of this bucket. - // Attempting to set or update this field will result in a - // [FieldViolation][google.rpc.BadRequest.FieldViolation]. Metageneration int64 `protobuf:"varint,4,opt,name=metageneration,proto3" json:"metageneration,omitempty"` // Immutable. The location of the bucket. Object data for objects in the // bucket resides in physical storage within this region. Defaults to `US`. @@ -3738,7 +4179,7 @@ type Bucket struct { // replication. "ASYNC_TURBO" enables turbo replication, valid for dual-region // buckets only. If rpo is not specified when the bucket is created, it // defaults to "DEFAULT". For more information, see - // https://cloud.google.com/storage/docs/turbo-replication. + // https://cloud.google.com/storage/docs/availability-durability#turbo-replication. Rpo string `protobuf:"bytes,27,opt,name=rpo,proto3" json:"rpo,omitempty"` // Access controls on the bucket. // If iam_config.uniform_bucket_level_access is enabled on this bucket, @@ -3753,15 +4194,11 @@ type Bucket struct { // for more information. Lifecycle *Bucket_Lifecycle `protobuf:"bytes,10,opt,name=lifecycle,proto3" json:"lifecycle,omitempty"` // Output only. The creation time of the bucket. - // Attempting to set or update this field will result in a - // [FieldViolation][google.rpc.BadRequest.FieldViolation]. CreateTime *timestamppb.Timestamp `protobuf:"bytes,11,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"` // The bucket's [https://www.w3.org/TR/cors/][Cross-Origin Resource Sharing] // (CORS) config. Cors []*Bucket_Cors `protobuf:"bytes,12,rep,name=cors,proto3" json:"cors,omitempty"` // Output only. The modification time of the bucket. - // Attempting to set or update this field will result in a - // [FieldViolation][google.rpc.BadRequest.FieldViolation]. UpdateTime *timestamppb.Timestamp `protobuf:"bytes,13,opt,name=update_time,json=updateTime,proto3" json:"update_time,omitempty"` // The default value for event-based hold on newly created objects in this // bucket. Event-based hold is a way to retain objects indefinitely until an @@ -3816,12 +4253,15 @@ type Bucket struct { // The bucket's Autoclass configuration. If there is no configuration, the // Autoclass feature will be disabled and have no effect on the bucket. Autoclass *Bucket_Autoclass `protobuf:"bytes,28,opt,name=autoclass,proto3" json:"autoclass,omitempty"` + // Optional. The bucket's soft delete policy. The soft delete policy prevents + // soft-deleted objects from being permanently deleted. 
+ SoftDeletePolicy *Bucket_SoftDeletePolicy `protobuf:"bytes,31,opt,name=soft_delete_policy,json=softDeletePolicy,proto3" json:"soft_delete_policy,omitempty"` } func (x *Bucket) Reset() { *x = Bucket{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[40] + mi := &file_google_storage_v2_storage_proto_msgTypes[43] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3834,7 +4274,7 @@ func (x *Bucket) String() string { func (*Bucket) ProtoMessage() {} func (x *Bucket) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[40] + mi := &file_google_storage_v2_storage_proto_msgTypes[43] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3847,7 +4287,7 @@ func (x *Bucket) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket.ProtoReflect.Descriptor instead. func (*Bucket) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43} } func (x *Bucket) GetName() string { @@ -4046,6 +4486,13 @@ func (x *Bucket) GetAutoclass() *Bucket_Autoclass { return nil } +func (x *Bucket) GetSoftDeletePolicy() *Bucket_SoftDeletePolicy { + if x != nil { + return x.SoftDeletePolicy + } + return nil +} + // An access-control entry. type BucketAccessControl struct { state protoimpl.MessageState @@ -4096,7 +4543,7 @@ type BucketAccessControl struct { func (x *BucketAccessControl) Reset() { *x = BucketAccessControl{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[41] + mi := &file_google_storage_v2_storage_proto_msgTypes[44] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4109,7 +4556,7 @@ func (x *BucketAccessControl) String() string { func (*BucketAccessControl) ProtoMessage() {} func (x *BucketAccessControl) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[41] + mi := &file_google_storage_v2_storage_proto_msgTypes[44] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4122,7 +4569,7 @@ func (x *BucketAccessControl) ProtoReflect() protoreflect.Message { // Deprecated: Use BucketAccessControl.ProtoReflect.Descriptor instead. func (*BucketAccessControl) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{41} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{44} } func (x *BucketAccessControl) GetRole() string { @@ -4195,7 +4642,7 @@ type ChecksummedData struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // The data. + // Optional. The data. Content []byte `protobuf:"bytes,1,opt,name=content,proto3" json:"content,omitempty"` // If set, the CRC32C digest of the content field. 
Crc32C *uint32 `protobuf:"fixed32,2,opt,name=crc32c,proto3,oneof" json:"crc32c,omitempty"` @@ -4204,7 +4651,7 @@ type ChecksummedData struct { func (x *ChecksummedData) Reset() { *x = ChecksummedData{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[42] + mi := &file_google_storage_v2_storage_proto_msgTypes[45] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4217,7 +4664,7 @@ func (x *ChecksummedData) String() string { func (*ChecksummedData) ProtoMessage() {} func (x *ChecksummedData) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[42] + mi := &file_google_storage_v2_storage_proto_msgTypes[45] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4230,7 +4677,7 @@ func (x *ChecksummedData) ProtoReflect() protoreflect.Message { // Deprecated: Use ChecksummedData.ProtoReflect.Descriptor instead. func (*ChecksummedData) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{42} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{45} } func (x *ChecksummedData) GetContent() []byte { @@ -4255,7 +4702,7 @@ type ObjectChecksums struct { // CRC32C digest of the object data. Computed by the Cloud Storage service for // all written objects. - // If set in an WriteObjectRequest, service will validate that the stored + // If set in a WriteObjectRequest, service will validate that the stored // object matches this checksum. Crc32C *uint32 `protobuf:"fixed32,1,opt,name=crc32c,proto3,oneof" json:"crc32c,omitempty"` // 128 bit MD5 hash of the object data. @@ -4271,7 +4718,7 @@ type ObjectChecksums struct { func (x *ObjectChecksums) Reset() { *x = ObjectChecksums{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[43] + mi := &file_google_storage_v2_storage_proto_msgTypes[46] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4284,7 +4731,7 @@ func (x *ObjectChecksums) String() string { func (*ObjectChecksums) ProtoMessage() {} func (x *ObjectChecksums) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[43] + mi := &file_google_storage_v2_storage_proto_msgTypes[46] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4297,7 +4744,7 @@ func (x *ObjectChecksums) ProtoReflect() protoreflect.Message { // Deprecated: Use ObjectChecksums.ProtoReflect.Descriptor instead. func (*ObjectChecksums) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{46} } func (x *ObjectChecksums) GetCrc32C() uint32 { @@ -4321,14 +4768,14 @@ type HmacKeyMetadata struct { unknownFields protoimpl.UnknownFields // Immutable. Resource name ID of the key in the format - // /. - // can be the project ID or project number. + // {projectIdentifier}/{accessId}. + // {projectIdentifier} can be the project ID or project number. Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` // Immutable. Globally unique id for keys. AccessId string `protobuf:"bytes,2,opt,name=access_id,json=accessId,proto3" json:"access_id,omitempty"` // Immutable. Identifies the project that owns the service account of the - // specified HMAC key, in the format "projects/". 
- // can be the project ID or project number. + // specified HMAC key, in the format "projects/{projectIdentifier}". + // {projectIdentifier} can be the project ID or project number. Project string `protobuf:"bytes,3,opt,name=project,proto3" json:"project,omitempty"` // Output only. Email of the service account the key authenticates as. ServiceAccountEmail string `protobuf:"bytes,4,opt,name=service_account_email,json=serviceAccountEmail,proto3" json:"service_account_email,omitempty"` @@ -4346,7 +4793,7 @@ type HmacKeyMetadata struct { func (x *HmacKeyMetadata) Reset() { *x = HmacKeyMetadata{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[44] + mi := &file_google_storage_v2_storage_proto_msgTypes[47] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4359,7 +4806,7 @@ func (x *HmacKeyMetadata) String() string { func (*HmacKeyMetadata) ProtoMessage() {} func (x *HmacKeyMetadata) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[44] + mi := &file_google_storage_v2_storage_proto_msgTypes[47] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4372,7 +4819,7 @@ func (x *HmacKeyMetadata) ProtoReflect() protoreflect.Message { // Deprecated: Use HmacKeyMetadata.ProtoReflect.Descriptor instead. func (*HmacKeyMetadata) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{44} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{47} } func (x *HmacKeyMetadata) GetId() string { @@ -4432,54 +4879,54 @@ func (x *HmacKeyMetadata) GetEtag() string { } // A directive to publish Pub/Sub notifications upon changes to a bucket. -type Notification struct { +type NotificationConfig struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Required. The resource name of this notification. + // Required. The resource name of this NotificationConfig. // Format: - // `projects/{project}/buckets/{bucket}/notificationConfigs/{notification}` + // `projects/{project}/buckets/{bucket}/notificationConfigs/{notificationConfig}` // The `{project}` portion may be `_` for globally unique buckets. Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // Required. The Pub/Sub topic to which this subscription publishes. Formatted // as: // '//pubsub.googleapis.com/projects/{project-identifier}/topics/{my-topic}' Topic string `protobuf:"bytes,2,opt,name=topic,proto3" json:"topic,omitempty"` - // The etag of the Notification. - // If included in the metadata of GetNotificationRequest, the operation will - // only be performed if the etag matches that of the Notification. + // The etag of the NotificationConfig. + // If included in the metadata of GetNotificationConfigRequest, the operation + // will only be performed if the etag matches that of the NotificationConfig. Etag string `protobuf:"bytes,7,opt,name=etag,proto3" json:"etag,omitempty"` - // If present, only send notifications about listed event types. If empty, - // sent notifications for all event types. + // If present, only send notifications about listed event types. If + // empty, sent notifications for all event types. 
EventTypes []string `protobuf:"bytes,3,rep,name=event_types,json=eventTypes,proto3" json:"event_types,omitempty"` // A list of additional attributes to attach to each Pub/Sub - // message published for this notification subscription. + // message published for this NotificationConfig. CustomAttributes map[string]string `protobuf:"bytes,4,rep,name=custom_attributes,json=customAttributes,proto3" json:"custom_attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - // If present, only apply this notification config to object names that + // If present, only apply this NotificationConfig to object names that // begin with this prefix. ObjectNamePrefix string `protobuf:"bytes,5,opt,name=object_name_prefix,json=objectNamePrefix,proto3" json:"object_name_prefix,omitempty"` // Required. The desired content of the Payload. PayloadFormat string `protobuf:"bytes,6,opt,name=payload_format,json=payloadFormat,proto3" json:"payload_format,omitempty"` } -func (x *Notification) Reset() { - *x = Notification{} +func (x *NotificationConfig) Reset() { + *x = NotificationConfig{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[45] + mi := &file_google_storage_v2_storage_proto_msgTypes[48] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *Notification) String() string { +func (x *NotificationConfig) String() string { return protoimpl.X.MessageStringOf(x) } -func (*Notification) ProtoMessage() {} +func (*NotificationConfig) ProtoMessage() {} -func (x *Notification) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[45] +func (x *NotificationConfig) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[48] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4490,54 +4937,54 @@ func (x *Notification) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use Notification.ProtoReflect.Descriptor instead. -func (*Notification) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{45} +// Deprecated: Use NotificationConfig.ProtoReflect.Descriptor instead. 
+func (*NotificationConfig) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{48} } -func (x *Notification) GetName() string { +func (x *NotificationConfig) GetName() string { if x != nil { return x.Name } return "" } -func (x *Notification) GetTopic() string { +func (x *NotificationConfig) GetTopic() string { if x != nil { return x.Topic } return "" } -func (x *Notification) GetEtag() string { +func (x *NotificationConfig) GetEtag() string { if x != nil { return x.Etag } return "" } -func (x *Notification) GetEventTypes() []string { +func (x *NotificationConfig) GetEventTypes() []string { if x != nil { return x.EventTypes } return nil } -func (x *Notification) GetCustomAttributes() map[string]string { +func (x *NotificationConfig) GetCustomAttributes() map[string]string { if x != nil { return x.CustomAttributes } return nil } -func (x *Notification) GetObjectNamePrefix() string { +func (x *NotificationConfig) GetObjectNamePrefix() string { if x != nil { return x.ObjectNamePrefix } return "" } -func (x *Notification) GetPayloadFormat() string { +func (x *NotificationConfig) GetPayloadFormat() string { if x != nil { return x.PayloadFormat } @@ -4561,7 +5008,7 @@ type CustomerEncryption struct { func (x *CustomerEncryption) Reset() { *x = CustomerEncryption{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[46] + mi := &file_google_storage_v2_storage_proto_msgTypes[49] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4574,7 +5021,7 @@ func (x *CustomerEncryption) String() string { func (*CustomerEncryption) ProtoMessage() {} func (x *CustomerEncryption) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[46] + mi := &file_google_storage_v2_storage_proto_msgTypes[49] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4587,7 +5034,7 @@ func (x *CustomerEncryption) ProtoReflect() protoreflect.Message { // Deprecated: Use CustomerEncryption.ProtoReflect.Descriptor instead. func (*CustomerEncryption) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{46} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{49} } func (x *CustomerEncryption) GetEncryptionAlgorithm() string { @@ -4626,21 +5073,17 @@ type Object struct { // object. Etag string `protobuf:"bytes,27,opt,name=etag,proto3" json:"etag,omitempty"` // Immutable. The content generation of this object. Used for object - // versioning. Attempting to set or update this field will result in a - // [FieldViolation][google.rpc.BadRequest.FieldViolation]. + // versioning. Generation int64 `protobuf:"varint,3,opt,name=generation,proto3" json:"generation,omitempty"` // Output only. The version of the metadata for this generation of this // object. Used for preconditions and for detecting changes in metadata. A // metageneration number is only meaningful in the context of a particular - // generation of a particular object. Attempting to set or update this field - // will result in a [FieldViolation][google.rpc.BadRequest.FieldViolation]. + // generation of a particular object. Metageneration int64 `protobuf:"varint,4,opt,name=metageneration,proto3" json:"metageneration,omitempty"` // Storage class of the object. StorageClass string `protobuf:"bytes,5,opt,name=storage_class,json=storageClass,proto3" json:"storage_class,omitempty"` // Output only. 
Content-Length of the object data in bytes, matching // [https://tools.ietf.org/html/rfc7230#section-3.3.2][RFC 7230 §3.3.2]. - // Attempting to set or update this field will result in a - // [FieldViolation][google.rpc.BadRequest.FieldViolation]. Size int64 `protobuf:"varint,6,opt,name=size,proto3" json:"size,omitempty"` // Content-Encoding of the object data, matching // [https://tools.ietf.org/html/rfc7231#section-3.1.2.2][RFC 7231 §3.1.2.2] @@ -4660,10 +5103,8 @@ type Object struct { // Content-Language of the object data, matching // [https://tools.ietf.org/html/rfc7231#section-3.1.3.2][RFC 7231 §3.1.3.2]. ContentLanguage string `protobuf:"bytes,11,opt,name=content_language,json=contentLanguage,proto3" json:"content_language,omitempty"` - // Output only. The deletion time of the object. Will be returned if and only - // if this version of the object has been deleted. Attempting to set or update - // this field will result in a - // [FieldViolation][google.rpc.BadRequest.FieldViolation]. + // Output only. If this object is noncurrent, this is the time when the object + // became noncurrent. DeleteTime *timestamppb.Timestamp `protobuf:"bytes,12,opt,name=delete_time,json=deleteTime,proto3" json:"delete_time,omitempty"` // Content-Type of the object data, matching // [https://tools.ietf.org/html/rfc7231#section-3.1.1.5][RFC 7231 §3.1.1.5]. @@ -4671,13 +5112,9 @@ type Object struct { // `application/octet-stream`. ContentType string `protobuf:"bytes,13,opt,name=content_type,json=contentType,proto3" json:"content_type,omitempty"` // Output only. The creation time of the object. - // Attempting to set or update this field will result in a - // [FieldViolation][google.rpc.BadRequest.FieldViolation]. CreateTime *timestamppb.Timestamp `protobuf:"bytes,14,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"` // Output only. Number of underlying components that make up this object. - // Components are accumulated by compose operations. Attempting to set or - // update this field will result in a - // [FieldViolation][google.rpc.BadRequest.FieldViolation]. + // Components are accumulated by compose operations. ComponentCount int32 `protobuf:"varint,15,opt,name=component_count,json=componentCount,proto3" json:"component_count,omitempty"` // Output only. Hashes for the data part of this object. This field is used // for output only and will be silently ignored if provided in requests. @@ -4688,16 +5125,12 @@ type Object struct { // such as modifying custom metadata, as well as changes made by Cloud Storage // on behalf of a requester, such as changing the storage class based on an // Object Lifecycle Configuration. - // Attempting to set or update this field will result in a - // [FieldViolation][google.rpc.BadRequest.FieldViolation]. UpdateTime *timestamppb.Timestamp `protobuf:"bytes,17,opt,name=update_time,json=updateTime,proto3" json:"update_time,omitempty"` // Cloud KMS Key used to encrypt this object, if the object is encrypted by // such a key. KmsKey string `protobuf:"bytes,18,opt,name=kms_key,json=kmsKey,proto3" json:"kms_key,omitempty"` // Output only. The time at which the object's storage class was last changed. // When the object is initially created, it will be set to time_created. - // Attempting to set or update this field will result in a - // [FieldViolation][google.rpc.BadRequest.FieldViolation]. 
UpdateStorageClassTime *timestamppb.Timestamp `protobuf:"bytes,19,opt,name=update_storage_class_time,json=updateStorageClassTime,proto3" json:"update_storage_class_time,omitempty"` // Whether an object is under temporary hold. While this flag is set to true, // the object is protected against deletion and overwrites. A common use case @@ -4728,8 +5161,7 @@ type Object struct { // In a response, this field will always be set to true or false. EventBasedHold *bool `protobuf:"varint,23,opt,name=event_based_hold,json=eventBasedHold,proto3,oneof" json:"event_based_hold,omitempty"` // Output only. The owner of the object. This will always be the uploader of - // the object. Attempting to set or update this field will result in a - // [FieldViolation][google.rpc.BadRequest.FieldViolation]. + // the object. Owner *Owner `protobuf:"bytes,24,opt,name=owner,proto3" json:"owner,omitempty"` // Metadata of Customer-Supplied Encryption Key, if the object is encrypted by // such a key. @@ -4741,7 +5173,7 @@ type Object struct { func (x *Object) Reset() { *x = Object{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[47] + mi := &file_google_storage_v2_storage_proto_msgTypes[50] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4754,7 +5186,7 @@ func (x *Object) String() string { func (*Object) ProtoMessage() {} func (x *Object) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[47] + mi := &file_google_storage_v2_storage_proto_msgTypes[50] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4767,7 +5199,7 @@ func (x *Object) ProtoReflect() protoreflect.Message { // Deprecated: Use Object.ProtoReflect.Descriptor instead. func (*Object) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{47} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{50} } func (x *Object) GetName() string { @@ -5009,7 +5441,7 @@ type ObjectAccessControl struct { func (x *ObjectAccessControl) Reset() { *x = ObjectAccessControl{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[48] + mi := &file_google_storage_v2_storage_proto_msgTypes[51] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5022,7 +5454,7 @@ func (x *ObjectAccessControl) String() string { func (*ObjectAccessControl) ProtoMessage() {} func (x *ObjectAccessControl) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[48] + mi := &file_google_storage_v2_storage_proto_msgTypes[51] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5035,7 +5467,7 @@ func (x *ObjectAccessControl) ProtoReflect() protoreflect.Message { // Deprecated: Use ObjectAccessControl.ProtoReflect.Descriptor instead. 
func (*ObjectAccessControl) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{48} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{51} } func (x *ObjectAccessControl) GetRole() string { @@ -5120,7 +5552,7 @@ type ListObjectsResponse struct { func (x *ListObjectsResponse) Reset() { *x = ListObjectsResponse{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[49] + mi := &file_google_storage_v2_storage_proto_msgTypes[52] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5133,7 +5565,7 @@ func (x *ListObjectsResponse) String() string { func (*ListObjectsResponse) ProtoMessage() {} func (x *ListObjectsResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[49] + mi := &file_google_storage_v2_storage_proto_msgTypes[52] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5146,7 +5578,7 @@ func (x *ListObjectsResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ListObjectsResponse.ProtoReflect.Descriptor instead. func (*ListObjectsResponse) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{49} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{52} } func (x *ListObjectsResponse) GetObjects() []*Object { @@ -5185,7 +5617,7 @@ type ProjectTeam struct { func (x *ProjectTeam) Reset() { *x = ProjectTeam{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[50] + mi := &file_google_storage_v2_storage_proto_msgTypes[53] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5198,7 +5630,7 @@ func (x *ProjectTeam) String() string { func (*ProjectTeam) ProtoMessage() {} func (x *ProjectTeam) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[50] + mi := &file_google_storage_v2_storage_proto_msgTypes[53] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5211,7 +5643,7 @@ func (x *ProjectTeam) ProtoReflect() protoreflect.Message { // Deprecated: Use ProjectTeam.ProtoReflect.Descriptor instead. func (*ProjectTeam) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{50} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{53} } func (x *ProjectTeam) GetProjectNumber() string { @@ -5243,7 +5675,7 @@ type ServiceAccount struct { func (x *ServiceAccount) Reset() { *x = ServiceAccount{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[51] + mi := &file_google_storage_v2_storage_proto_msgTypes[54] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5256,7 +5688,7 @@ func (x *ServiceAccount) String() string { func (*ServiceAccount) ProtoMessage() {} func (x *ServiceAccount) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[51] + mi := &file_google_storage_v2_storage_proto_msgTypes[54] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5269,7 +5701,7 @@ func (x *ServiceAccount) ProtoReflect() protoreflect.Message { // Deprecated: Use ServiceAccount.ProtoReflect.Descriptor instead. 
func (*ServiceAccount) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{51} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{54} } func (x *ServiceAccount) GetEmailAddress() string { @@ -5294,7 +5726,7 @@ type Owner struct { func (x *Owner) Reset() { *x = Owner{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[52] + mi := &file_google_storage_v2_storage_proto_msgTypes[55] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5307,7 +5739,7 @@ func (x *Owner) String() string { func (*Owner) ProtoMessage() {} func (x *Owner) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[52] + mi := &file_google_storage_v2_storage_proto_msgTypes[55] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5320,7 +5752,7 @@ func (x *Owner) ProtoReflect() protoreflect.Message { // Deprecated: Use Owner.ProtoReflect.Descriptor instead. func (*Owner) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{52} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{55} } func (x *Owner) GetEntity() string { @@ -5354,7 +5786,7 @@ type ContentRange struct { func (x *ContentRange) Reset() { *x = ContentRange{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[53] + mi := &file_google_storage_v2_storage_proto_msgTypes[56] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5367,7 +5799,7 @@ func (x *ContentRange) String() string { func (*ContentRange) ProtoMessage() {} func (x *ContentRange) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[53] + mi := &file_google_storage_v2_storage_proto_msgTypes[56] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5380,7 +5812,7 @@ func (x *ContentRange) ProtoReflect() protoreflect.Message { // Deprecated: Use ContentRange.ProtoReflect.Descriptor instead. 
func (*ContentRange) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{53} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{56} } func (x *ContentRange) GetStart() int64 { @@ -5422,7 +5854,7 @@ type ComposeObjectRequest_SourceObject struct { func (x *ComposeObjectRequest_SourceObject) Reset() { *x = ComposeObjectRequest_SourceObject{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[54] + mi := &file_google_storage_v2_storage_proto_msgTypes[57] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5435,7 +5867,7 @@ func (x *ComposeObjectRequest_SourceObject) String() string { func (*ComposeObjectRequest_SourceObject) ProtoMessage() {} func (x *ComposeObjectRequest_SourceObject) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[54] + mi := &file_google_storage_v2_storage_proto_msgTypes[57] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5487,7 +5919,7 @@ type ComposeObjectRequest_SourceObject_ObjectPreconditions struct { func (x *ComposeObjectRequest_SourceObject_ObjectPreconditions) Reset() { *x = ComposeObjectRequest_SourceObject_ObjectPreconditions{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[55] + mi := &file_google_storage_v2_storage_proto_msgTypes[58] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5500,7 +5932,7 @@ func (x *ComposeObjectRequest_SourceObject_ObjectPreconditions) String() string func (*ComposeObjectRequest_SourceObject_ObjectPreconditions) ProtoMessage() {} func (x *ComposeObjectRequest_SourceObject_ObjectPreconditions) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[55] + mi := &file_google_storage_v2_storage_proto_msgTypes[58] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5536,7 +5968,7 @@ type Bucket_Billing struct { func (x *Bucket_Billing) Reset() { *x = Bucket_Billing{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[56] + mi := &file_google_storage_v2_storage_proto_msgTypes[59] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5549,7 +5981,7 @@ func (x *Bucket_Billing) String() string { func (*Bucket_Billing) ProtoMessage() {} func (x *Bucket_Billing) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[56] + mi := &file_google_storage_v2_storage_proto_msgTypes[59] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5562,7 +5994,7 @@ func (x *Bucket_Billing) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_Billing.ProtoReflect.Descriptor instead. 
func (*Bucket_Billing) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 0} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 0} } func (x *Bucket_Billing) GetRequesterPays() bool { @@ -5602,7 +6034,7 @@ type Bucket_Cors struct { func (x *Bucket_Cors) Reset() { *x = Bucket_Cors{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[57] + mi := &file_google_storage_v2_storage_proto_msgTypes[60] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5615,7 +6047,7 @@ func (x *Bucket_Cors) String() string { func (*Bucket_Cors) ProtoMessage() {} func (x *Bucket_Cors) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[57] + mi := &file_google_storage_v2_storage_proto_msgTypes[60] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5628,7 +6060,7 @@ func (x *Bucket_Cors) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_Cors.ProtoReflect.Descriptor instead. func (*Bucket_Cors) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 1} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 1} } func (x *Bucket_Cors) GetOrigin() []string { @@ -5673,7 +6105,7 @@ type Bucket_Encryption struct { func (x *Bucket_Encryption) Reset() { *x = Bucket_Encryption{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[58] + mi := &file_google_storage_v2_storage_proto_msgTypes[61] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5686,7 +6118,7 @@ func (x *Bucket_Encryption) String() string { func (*Bucket_Encryption) ProtoMessage() {} func (x *Bucket_Encryption) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[58] + mi := &file_google_storage_v2_storage_proto_msgTypes[61] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5699,7 +6131,7 @@ func (x *Bucket_Encryption) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_Encryption.ProtoReflect.Descriptor instead. func (*Bucket_Encryption) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 2} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 2} } func (x *Bucket_Encryption) GetDefaultKmsKey() string { @@ -5725,7 +6157,7 @@ type Bucket_IamConfig struct { func (x *Bucket_IamConfig) Reset() { *x = Bucket_IamConfig{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[59] + mi := &file_google_storage_v2_storage_proto_msgTypes[62] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5738,7 +6170,7 @@ func (x *Bucket_IamConfig) String() string { func (*Bucket_IamConfig) ProtoMessage() {} func (x *Bucket_IamConfig) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[59] + mi := &file_google_storage_v2_storage_proto_msgTypes[62] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5751,7 +6183,7 @@ func (x *Bucket_IamConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_IamConfig.ProtoReflect.Descriptor instead. 
func (*Bucket_IamConfig) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 3} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 3} } func (x *Bucket_IamConfig) GetUniformBucketLevelAccess() *Bucket_IamConfig_UniformBucketLevelAccess { @@ -5783,7 +6215,7 @@ type Bucket_Lifecycle struct { func (x *Bucket_Lifecycle) Reset() { *x = Bucket_Lifecycle{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[60] + mi := &file_google_storage_v2_storage_proto_msgTypes[63] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5796,7 +6228,7 @@ func (x *Bucket_Lifecycle) String() string { func (*Bucket_Lifecycle) ProtoMessage() {} func (x *Bucket_Lifecycle) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[60] + mi := &file_google_storage_v2_storage_proto_msgTypes[63] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5809,7 +6241,7 @@ func (x *Bucket_Lifecycle) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_Lifecycle.ProtoReflect.Descriptor instead. func (*Bucket_Lifecycle) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 4} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 4} } func (x *Bucket_Lifecycle) GetRule() []*Bucket_Lifecycle_Rule { @@ -5835,7 +6267,7 @@ type Bucket_Logging struct { func (x *Bucket_Logging) Reset() { *x = Bucket_Logging{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[61] + mi := &file_google_storage_v2_storage_proto_msgTypes[64] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5848,7 +6280,7 @@ func (x *Bucket_Logging) String() string { func (*Bucket_Logging) ProtoMessage() {} func (x *Bucket_Logging) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[61] + mi := &file_google_storage_v2_storage_proto_msgTypes[64] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5861,7 +6293,7 @@ func (x *Bucket_Logging) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_Logging.ProtoReflect.Descriptor instead. func (*Bucket_Logging) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 5} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 5} } func (x *Bucket_Logging) GetLogBucket() string { @@ -5889,11 +6321,6 @@ type Bucket_RetentionPolicy struct { EffectiveTime *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=effective_time,json=effectiveTime,proto3" json:"effective_time,omitempty"` // Once locked, an object retention policy cannot be modified. IsLocked bool `protobuf:"varint,2,opt,name=is_locked,json=isLocked,proto3" json:"is_locked,omitempty"` - // The duration in seconds that objects need to be retained. Retention - // duration must be greater than zero and less than 100 years. Note that - // enforcement of retention periods less than a day is not guaranteed. Such - // periods should only be used for testing purposes. - RetentionPeriod *int64 `protobuf:"varint,3,opt,name=retention_period,json=retentionPeriod,proto3,oneof" json:"retention_period,omitempty"` // The duration that objects need to be retained. 
Retention duration must be // greater than zero and less than 100 years. Note that enforcement of // retention periods less than a day is not guaranteed. Such periods should @@ -5905,7 +6332,7 @@ type Bucket_RetentionPolicy struct { func (x *Bucket_RetentionPolicy) Reset() { *x = Bucket_RetentionPolicy{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[62] + mi := &file_google_storage_v2_storage_proto_msgTypes[65] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5918,7 +6345,7 @@ func (x *Bucket_RetentionPolicy) String() string { func (*Bucket_RetentionPolicy) ProtoMessage() {} func (x *Bucket_RetentionPolicy) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[62] + mi := &file_google_storage_v2_storage_proto_msgTypes[65] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5931,7 +6358,7 @@ func (x *Bucket_RetentionPolicy) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_RetentionPolicy.ProtoReflect.Descriptor instead. func (*Bucket_RetentionPolicy) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 6} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 6} } func (x *Bucket_RetentionPolicy) GetEffectiveTime() *timestamppb.Timestamp { @@ -5948,20 +6375,73 @@ func (x *Bucket_RetentionPolicy) GetIsLocked() bool { return false } -func (x *Bucket_RetentionPolicy) GetRetentionPeriod() int64 { - if x != nil && x.RetentionPeriod != nil { - return *x.RetentionPeriod +func (x *Bucket_RetentionPolicy) GetRetentionDuration() *durationpb.Duration { + if x != nil { + return x.RetentionDuration } - return 0 + return nil } -func (x *Bucket_RetentionPolicy) GetRetentionDuration() *durationpb.Duration { +// Soft delete policy properties of a bucket. +type Bucket_SoftDeletePolicy struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The period of time that soft-deleted objects in the bucket must be + // retained and cannot be permanently deleted. The duration must be greater + // than or equal to 7 days and less than 1 year. + RetentionDuration *durationpb.Duration `protobuf:"bytes,1,opt,name=retention_duration,json=retentionDuration,proto3,oneof" json:"retention_duration,omitempty"` + // Time from which the policy was effective. This is service-provided. + EffectiveTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=effective_time,json=effectiveTime,proto3,oneof" json:"effective_time,omitempty"` +} + +func (x *Bucket_SoftDeletePolicy) Reset() { + *x = Bucket_SoftDeletePolicy{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[66] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Bucket_SoftDeletePolicy) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Bucket_SoftDeletePolicy) ProtoMessage() {} + +func (x *Bucket_SoftDeletePolicy) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[66] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Bucket_SoftDeletePolicy.ProtoReflect.Descriptor instead. 
+func (*Bucket_SoftDeletePolicy) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 7} +} + +func (x *Bucket_SoftDeletePolicy) GetRetentionDuration() *durationpb.Duration { if x != nil { return x.RetentionDuration } return nil } +func (x *Bucket_SoftDeletePolicy) GetEffectiveTime() *timestamppb.Timestamp { + if x != nil { + return x.EffectiveTime + } + return nil +} + // Properties of a bucket related to versioning. // For more on Cloud Storage versioning, see // https://cloud.google.com/storage/docs/object-versioning. @@ -5977,7 +6457,7 @@ type Bucket_Versioning struct { func (x *Bucket_Versioning) Reset() { *x = Bucket_Versioning{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[63] + mi := &file_google_storage_v2_storage_proto_msgTypes[67] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5990,7 +6470,7 @@ func (x *Bucket_Versioning) String() string { func (*Bucket_Versioning) ProtoMessage() {} func (x *Bucket_Versioning) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[63] + mi := &file_google_storage_v2_storage_proto_msgTypes[67] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6003,7 +6483,7 @@ func (x *Bucket_Versioning) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_Versioning.ProtoReflect.Descriptor instead. func (*Bucket_Versioning) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 7} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 8} } func (x *Bucket_Versioning) GetEnabled() bool { @@ -6037,7 +6517,7 @@ type Bucket_Website struct { func (x *Bucket_Website) Reset() { *x = Bucket_Website{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[64] + mi := &file_google_storage_v2_storage_proto_msgTypes[68] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -6050,7 +6530,7 @@ func (x *Bucket_Website) String() string { func (*Bucket_Website) ProtoMessage() {} func (x *Bucket_Website) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[64] + mi := &file_google_storage_v2_storage_proto_msgTypes[68] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6063,7 +6543,7 @@ func (x *Bucket_Website) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_Website.ProtoReflect.Descriptor instead. 
func (*Bucket_Website) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 8} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 9} } func (x *Bucket_Website) GetMainPageSuffix() string { @@ -6095,7 +6575,7 @@ type Bucket_CustomPlacementConfig struct { func (x *Bucket_CustomPlacementConfig) Reset() { *x = Bucket_CustomPlacementConfig{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[65] + mi := &file_google_storage_v2_storage_proto_msgTypes[69] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -6108,7 +6588,7 @@ func (x *Bucket_CustomPlacementConfig) String() string { func (*Bucket_CustomPlacementConfig) ProtoMessage() {} func (x *Bucket_CustomPlacementConfig) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[65] + mi := &file_google_storage_v2_storage_proto_msgTypes[69] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6121,7 +6601,7 @@ func (x *Bucket_CustomPlacementConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_CustomPlacementConfig.ProtoReflect.Descriptor instead. func (*Bucket_CustomPlacementConfig) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 9} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 10} } func (x *Bucket_CustomPlacementConfig) GetDataLocations() []string { @@ -6144,12 +6624,19 @@ type Bucket_Autoclass struct { // Autoclass is enabled when the bucket is created, the toggle_time is set // to the bucket creation time. ToggleTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=toggle_time,json=toggleTime,proto3" json:"toggle_time,omitempty"` + // An object in an Autoclass bucket will eventually cool down to the + // terminal storage class if there is no access to the object. + // The only valid values are NEARLINE and ARCHIVE. + TerminalStorageClass *string `protobuf:"bytes,3,opt,name=terminal_storage_class,json=terminalStorageClass,proto3,oneof" json:"terminal_storage_class,omitempty"` + // Output only. Latest instant at which the autoclass terminal storage class + // was updated. + TerminalStorageClassUpdateTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=terminal_storage_class_update_time,json=terminalStorageClassUpdateTime,proto3,oneof" json:"terminal_storage_class_update_time,omitempty"` } func (x *Bucket_Autoclass) Reset() { *x = Bucket_Autoclass{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[66] + mi := &file_google_storage_v2_storage_proto_msgTypes[70] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -6162,7 +6649,7 @@ func (x *Bucket_Autoclass) String() string { func (*Bucket_Autoclass) ProtoMessage() {} func (x *Bucket_Autoclass) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[66] + mi := &file_google_storage_v2_storage_proto_msgTypes[70] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6175,7 +6662,7 @@ func (x *Bucket_Autoclass) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_Autoclass.ProtoReflect.Descriptor instead. 
func (*Bucket_Autoclass) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 10} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 11} } func (x *Bucket_Autoclass) GetEnabled() bool { @@ -6192,6 +6679,20 @@ func (x *Bucket_Autoclass) GetToggleTime() *timestamppb.Timestamp { return nil } +func (x *Bucket_Autoclass) GetTerminalStorageClass() string { + if x != nil && x.TerminalStorageClass != nil { + return *x.TerminalStorageClass + } + return "" +} + +func (x *Bucket_Autoclass) GetTerminalStorageClassUpdateTime() *timestamppb.Timestamp { + if x != nil { + return x.TerminalStorageClassUpdateTime + } + return nil +} + // Settings for Uniform Bucket level access. // See https://cloud.google.com/storage/docs/uniform-bucket-level-access. type Bucket_IamConfig_UniformBucketLevelAccess struct { @@ -6211,7 +6712,7 @@ type Bucket_IamConfig_UniformBucketLevelAccess struct { func (x *Bucket_IamConfig_UniformBucketLevelAccess) Reset() { *x = Bucket_IamConfig_UniformBucketLevelAccess{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[68] + mi := &file_google_storage_v2_storage_proto_msgTypes[72] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -6224,7 +6725,7 @@ func (x *Bucket_IamConfig_UniformBucketLevelAccess) String() string { func (*Bucket_IamConfig_UniformBucketLevelAccess) ProtoMessage() {} func (x *Bucket_IamConfig_UniformBucketLevelAccess) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[68] + mi := &file_google_storage_v2_storage_proto_msgTypes[72] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6237,7 +6738,7 @@ func (x *Bucket_IamConfig_UniformBucketLevelAccess) ProtoReflect() protoreflect. // Deprecated: Use Bucket_IamConfig_UniformBucketLevelAccess.ProtoReflect.Descriptor instead. func (*Bucket_IamConfig_UniformBucketLevelAccess) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 3, 0} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 3, 0} } func (x *Bucket_IamConfig_UniformBucketLevelAccess) GetEnabled() bool { @@ -6270,7 +6771,7 @@ type Bucket_Lifecycle_Rule struct { func (x *Bucket_Lifecycle_Rule) Reset() { *x = Bucket_Lifecycle_Rule{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[69] + mi := &file_google_storage_v2_storage_proto_msgTypes[73] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -6283,7 +6784,7 @@ func (x *Bucket_Lifecycle_Rule) String() string { func (*Bucket_Lifecycle_Rule) ProtoMessage() {} func (x *Bucket_Lifecycle_Rule) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[69] + mi := &file_google_storage_v2_storage_proto_msgTypes[73] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6296,7 +6797,7 @@ func (x *Bucket_Lifecycle_Rule) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_Lifecycle_Rule.ProtoReflect.Descriptor instead. 
func (*Bucket_Lifecycle_Rule) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 4, 0} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 4, 0} } func (x *Bucket_Lifecycle_Rule) GetAction() *Bucket_Lifecycle_Rule_Action { @@ -6330,7 +6831,7 @@ type Bucket_Lifecycle_Rule_Action struct { func (x *Bucket_Lifecycle_Rule_Action) Reset() { *x = Bucket_Lifecycle_Rule_Action{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[70] + mi := &file_google_storage_v2_storage_proto_msgTypes[74] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -6343,7 +6844,7 @@ func (x *Bucket_Lifecycle_Rule_Action) String() string { func (*Bucket_Lifecycle_Rule_Action) ProtoMessage() {} func (x *Bucket_Lifecycle_Rule_Action) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[70] + mi := &file_google_storage_v2_storage_proto_msgTypes[74] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6356,7 +6857,7 @@ func (x *Bucket_Lifecycle_Rule_Action) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_Lifecycle_Rule_Action.ProtoReflect.Descriptor instead. func (*Bucket_Lifecycle_Rule_Action) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 4, 0, 0} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 4, 0, 0} } func (x *Bucket_Lifecycle_Rule_Action) GetType() string { @@ -6428,7 +6929,7 @@ type Bucket_Lifecycle_Rule_Condition struct { func (x *Bucket_Lifecycle_Rule_Condition) Reset() { *x = Bucket_Lifecycle_Rule_Condition{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[71] + mi := &file_google_storage_v2_storage_proto_msgTypes[75] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -6441,7 +6942,7 @@ func (x *Bucket_Lifecycle_Rule_Condition) String() string { func (*Bucket_Lifecycle_Rule_Condition) ProtoMessage() {} func (x *Bucket_Lifecycle_Rule_Condition) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[71] + mi := &file_google_storage_v2_storage_proto_msgTypes[75] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6454,7 +6955,7 @@ func (x *Bucket_Lifecycle_Rule_Condition) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_Lifecycle_Rule_Condition.ProtoReflect.Descriptor instead. 
func (*Bucket_Lifecycle_Rule_Condition) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 4, 0, 1} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 4, 0, 1} } func (x *Bucket_Lifecycle_Rule_Condition) GetAgeDays() int32 { @@ -6599,109 +7100,25 @@ var file_google_storage_v2_storage_proto_rawDesc = []byte{ 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, - 0x22, 0xa1, 0x02, 0x0a, 0x13, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, - 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4b, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, - 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, - 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, - 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, - 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x06, 0x70, - 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x31, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, - 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x62, 0x75, 0x63, 0x6b, - 0x65, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, - 0x52, 0x08, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, - 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, - 0x6c, 0x12, 0x41, 0x0a, 0x1d, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, - 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x61, - 0x63, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1a, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, - 0x69, 0x6e, 0x65, 0x64, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x41, 0x63, 0x6c, 0x22, 0x81, 0x02, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, - 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4b, 0x0a, 0x06, 0x70, - 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, - 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, - 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, - 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, - 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, - 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, - 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 
0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x3c, 0x0a, 0x09, - 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x48, 0x00, 0x52, 0x08, 0x72, - 0x65, 0x61, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x72, - 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x22, 0x72, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, - 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x33, 0x0a, 0x07, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x07, 0x62, 0x75, 0x63, - 0x6b, 0x65, 0x74, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, - 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, - 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x9e, 0x01, 0x0a, - 0x20, 0x4c, 0x6f, 0x63, 0x6b, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x74, 0x65, 0x6e, - 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, - 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, - 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x03, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x22, 0xb6, 0x03, - 0x0a, 0x13, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x36, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, - 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x3b, 0x0a, - 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, - 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, - 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, - 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, - 0x01, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x25, - 0x0a, 0x0e, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 
0x64, 0x5f, 0x61, 0x63, 0x6c, - 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, - 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x41, 0x0a, 0x1d, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, - 0x6e, 0x65, 0x64, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x6f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1a, 0x70, 0x72, - 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x4f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63, 0x6c, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, - 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, - 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, - 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, - 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, - 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0x5c, 0x0a, 0x19, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, - 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x42, 0x2b, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x25, 0x0a, 0x23, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, - 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x53, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x39, - 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, - 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, - 0x6b, 0x65, 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xa4, 0x01, 0x0a, 0x19, 0x43, 0x72, - 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, - 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x12, - 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, - 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, - 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x48, 0x0a, 0x0c, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, - 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, - 0x41, 0x02, 0x52, 0x0c, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x22, 0x95, 0x01, 0x0a, 0x18, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 
0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, + 0x22, 0x93, 0x02, 0x0a, 0x13, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, + 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, + 0x12, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, + 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x31, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x62, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, + 0x41, 0x02, 0x52, 0x08, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x0e, + 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, + 0x41, 0x63, 0x6c, 0x12, 0x41, 0x0a, 0x1d, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, + 0x64, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1a, 0x70, 0x72, 0x65, 0x64, + 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x41, 0x63, 0x6c, 0x22, 0xf3, 0x01, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x42, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x12, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, @@ -6709,769 +7126,968 @@ var file_google_storage_v2_storage_proto_rawDesc = []byte{ 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, - 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x8a, 0x01, 0x0a, 0x19, 0x4c, 0x69, 0x73, - 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x45, 0x0a, 0x0d, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, - 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0d, - 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x26, 0x0a, - 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, - 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xc3, 
0x07, 0x0a, 0x14, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, - 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x40, - 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x42, 0x03, - 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x5b, 0x0a, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, - 0x70, 0x6f, 0x73, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x0d, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x3c, 0x0a, - 0x1a, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x65, - 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x18, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, - 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x33, 0x0a, 0x13, 0x69, - 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, - 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, - 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x03, 0x48, 0x01, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3f, 0x0a, - 0x07, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, - 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, - 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x06, 0x6b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x12, 0x6d, - 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, - 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x07, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, - 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x4d, 0x0a, - 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, - 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 
0x2e, 0x4f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, 0x6f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x1a, 0xa8, 0x02, 0x0a, - 0x0c, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x17, 0x0a, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, - 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x7b, 0x0a, 0x14, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x5f, 0x70, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x48, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, - 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x53, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x13, - 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x1a, 0x62, 0x0a, 0x13, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x50, 0x72, 0x65, - 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, - 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, - 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x42, - 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, - 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, - 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0xc0, 0x04, 0x0a, 0x13, - 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, - 0x12, 0x1b, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x1e, 0x0a, - 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x03, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x33, 0x0a, + 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, + 0x69, 0x78, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, + 0x12, 0x3c, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 
0x48, + 0x00, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x42, 0x0c, + 0x0a, 0x0a, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x22, 0x72, 0x0a, 0x13, + 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x33, 0x0a, 0x07, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, + 0x07, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, + 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, + 0x22, 0x9e, 0x01, 0x0a, 0x20, 0x4c, 0x6f, 0x63, 0x6b, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, + 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, + 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x22, 0xb6, 0x03, 0x0a, 0x13, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x36, 0x0a, 0x06, 0x62, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x03, 0x48, 0x00, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, + 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, + 0x01, 0x01, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, + 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, 0x65, 0x64, + 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x41, 0x0a, 0x1d, 0x70, 0x72, 0x65, + 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, + 0x6f, 0x62, 0x6a, 
0x65, 0x63, 0x74, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x1a, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x44, 0x65, 0x66, 0x61, + 0x75, 0x6c, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63, 0x6c, 0x12, 0x40, 0x0a, 0x0b, + 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x03, 0xe0, + 0x41, 0x02, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x1a, + 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, + 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0x68, 0x0a, 0x1f, 0x44, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x45, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x31, 0xe0, 0x41, 0x02, + 0xfa, 0x41, 0x2b, 0x0a, 0x29, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x65, 0x0a, 0x1c, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x45, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x31, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2b, 0x0a, 0x29, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xc9, 0x01, 0x0a, 0x1f, + 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x49, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x31, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2b, 0x12, 0x29, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x5b, 0x0a, 0x13, 0x6e, 0x6f, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x03, + 0xe0, 0x41, 0x02, 0x52, 0x12, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 
0x22, 0xa7, 0x01, 0x0a, 0x1e, 0x4c, 0x69, 0x73, 0x74, + 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x49, 0x0a, 0x06, 0x70, 0x61, + 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x31, 0xe0, 0x41, 0x02, 0xfa, + 0x41, 0x2b, 0x12, 0x29, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x06, 0x70, + 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, + 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, + 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, + 0x6e, 0x22, 0xa3, 0x01, 0x0a, 0x1f, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x58, 0x0a, 0x14, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x13, 0x6e, 0x6f, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, + 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, + 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, + 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xc3, 0x07, 0x0a, 0x14, 0x43, 0x6f, 0x6d, 0x70, + 0x6f, 0x73, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x40, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x5b, 0x0a, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, + 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x52, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, + 0x3c, 0x0a, 0x1a, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, + 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x09, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x18, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x50, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 
0x63, 0x6c, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, - 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, + 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, - 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, - 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, - 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x48, - 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, - 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, - 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, - 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, - 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, - 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, - 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, 0x16, - 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, + 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, + 0x3f, 0x0a, 0x07, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x26, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, + 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x06, 0x6b, 0x6d, 0x73, 0x4b, 0x65, 0x79, + 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 
+ 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, + 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, + 0x4d, 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, + 0x75, 0x6d, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, 0x6f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x1a, 0xa8, + 0x02, 0x0a, 0x0c, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, + 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, + 0x41, 0x02, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x7b, 0x0a, 0x14, 0x6f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x48, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, + 0x73, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, + 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x52, 0x13, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x62, 0x0a, 0x13, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x50, + 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x33, 0x0a, 0x13, + 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, + 0x01, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, + 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0xe2, 0x04, + 0x0a, 0x13, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, + 0x63, 0x6b, 0x65, 0x74, 
0x12, 0x1b, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, + 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, + 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, + 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x08, + 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x88, 0x01, 0x01, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, + 0x61, 0x6d, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, + 0x6d, 0x73, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, + 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, + 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, + 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, - 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, - 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 
0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0x3f, + 0x63, 0x68, 0x22, 0xa9, 0x05, 0x0a, 0x14, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x62, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, + 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x6f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, + 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x23, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x02, + 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x33, 0x0a, 0x13, + 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, + 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, + 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, + 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, + 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x48, + 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x2b, + 0x0a, 0x0f, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x61, 0x63, + 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x48, 0x04, 0x52, 0x0d, 0x63, 0x6f, 0x70, 0x79, 0x53, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x41, 0x63, 0x6c, 0x88, 0x01, 0x01, 0x12, 0x6d, 0x0a, 0x1c, 0x63, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, + 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, + 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 
0x5f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, + 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, + 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x12, 0x0a, 0x10, 0x5f, 0x63, + 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x6c, 0x22, 0x3f, 0x0a, 0x1b, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x22, 0x1e, 0x0a, 0x1c, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0xca, 0x05, 0x0a, 0x11, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, + 0xec, 0x05, 0x0a, 0x11, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x66, 0x66, 0x73, + 0x65, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x72, 0x65, 0x61, 0x64, 0x4c, 0x69, 0x6d, 0x69, + 0x74, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, + 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, + 0x01, 0x01, 
0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, + 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x09, + 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x88, 0x01, 0x01, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, + 0x61, 0x6d, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, + 0x6d, 0x73, 0x12, 0x3c, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, + 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, + 0x6b, 0x48, 0x04, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, + 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x22, 0xe4, + 0x05, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, - 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x61, 0x64, 
0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, - 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x72, 0x65, 0x61, 0x64, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, + 0x26, 0x0a, 0x0c, 0x73, 0x6f, 0x66, 0x74, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x18, + 0x0b, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x6f, 0x66, 0x74, 0x44, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x64, 0x88, 0x01, 0x01, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, + 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, + 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, + 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, + 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x15, 0x69, 0x66, 0x4d, + 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x48, 0x04, 0x52, 0x18, 0x69, 0x66, + 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, + 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x3c, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, + 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x48, 0x05, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4d, + 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x42, 0x0f, 0x0a, 0x0d, 0x5f, 0x73, 0x6f, 0x66, 0x74, 0x5f, + 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, + 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 
0x42, 0x1a, 0x0a, 0x18, 0x5f, + 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, + 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, + 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x72, 0x65, 0x61, 0x64, + 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x22, 0xaf, 0x02, 0x0a, 0x12, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4d, 0x0a, 0x10, + 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x5f, 0x64, 0x61, 0x74, 0x61, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, + 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x52, 0x0f, 0x63, 0x68, 0x65, 0x63, + 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x12, 0x4d, 0x0a, 0x10, 0x6f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12, 0x44, 0x0a, 0x0d, 0x63, 0x6f, + 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x61, 0x6e, + 0x67, 0x65, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, + 0x12, 0x35, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x08, 0x6d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x8c, 0x04, 0x0a, 0x0f, 0x57, 0x72, 0x69, 0x74, + 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x12, 0x3a, 0x0a, 0x08, 0x72, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, + 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x72, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x65, 0x64, 0x65, + 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0d, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x33, + 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, + 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 
0x04, + 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, + 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, + 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, + 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, + 0x12, 0x24, 0x0a, 0x0b, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, + 0x08, 0x20, 0x01, 0x28, 0x03, 0x48, 0x04, 0x52, 0x0a, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, + 0x69, 0x7a, 0x65, 0x88, 0x01, 0x01, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, + 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, + 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, + 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, + 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x0e, 0x0a, 0x0c, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x22, 0xf8, 0x03, 0x0a, 0x12, 0x57, 0x72, 0x69, 0x74, 0x65, + 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, + 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x48, 0x00, 0x52, 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x12, 0x50, 0x0a, 0x11, + 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x70, 0x65, + 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74, + 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x48, 0x00, 0x52, 0x0f, 0x77, + 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x12, 0x26, + 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x77, 0x72, 0x69, 0x74, 0x65, + 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x4f, 0x0a, 0x10, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, + 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, + 0x44, 0x61, 0x74, 0x61, 0x48, 0x01, 0x52, 0x0f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, + 0x6d, 0x65, 0x64, 
0x44, 0x61, 0x74, 0x61, 0x12, 0x4d, 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, + 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, + 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, + 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x66, 0x69, + 0x6e, 0x69, 0x73, 0x68, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, 0x0f, 0x0a, 0x0d, 0x66, 0x69, 0x72, 0x73, + 0x74, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x06, 0x0a, 0x04, 0x64, 0x61, 0x74, + 0x61, 0x22, 0x87, 0x01, 0x0a, 0x13, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x27, 0x0a, 0x0e, 0x70, 0x65, 0x72, + 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x03, 0x48, 0x00, 0x52, 0x0d, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x53, 0x69, + 0x7a, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x48, + 0x00, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x0e, 0x0a, 0x0c, 0x77, + 0x72, 0x69, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0xb5, 0x04, 0x0a, 0x16, + 0x42, 0x69, 0x64, 0x69, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, + 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, 0x75, 0x70, 0x6c, + 0x6f, 0x61, 0x64, 0x49, 0x64, 0x12, 0x50, 0x0a, 0x11, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x53, 0x70, 0x65, 0x63, 0x48, 0x00, 0x52, 0x0f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x12, 0x26, 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, 0x65, + 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, + 0x41, 0x02, 0x52, 0x0b, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, + 0x4f, 0x0a, 0x10, 0x63, 0x68, 0x65, 0x63, 
0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x5f, 0x64, + 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x68, + 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x48, 0x01, 0x52, + 0x0f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, + 0x12, 0x4d, 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, + 0x73, 0x75, 0x6d, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, + 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12, + 0x21, 0x0a, 0x0c, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, + 0x75, 0x70, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6c, 0x75, 0x73, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x05, 0x66, 0x6c, 0x75, 0x73, 0x68, 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x69, 0x6e, 0x69, + 0x73, 0x68, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, + 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x6d, 0x0a, 0x1c, 0x63, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, + 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, 0x0f, 0x0a, 0x0d, 0x66, 0x69, + 0x72, 0x73, 0x74, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x06, 0x0a, 0x04, 0x64, + 0x61, 0x74, 0x61, 0x22, 0x8b, 0x01, 0x0a, 0x17, 0x42, 0x69, 0x64, 0x69, 0x57, 0x72, 0x69, 0x74, + 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x27, 0x0a, 0x0e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x73, 0x69, 0x7a, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0d, 0x70, 0x65, 0x72, 0x73, 0x69, + 0x73, 0x74, 0x65, 0x64, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x42, 0x0e, 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x22, 0x9f, 0x04, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, + 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, + 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 
0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, + 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, + 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, + 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, + 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, + 0x6b, 0x65, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, + 0x72, 0x12, 0x3c, 0x0a, 0x1a, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x74, 0x72, 0x61, + 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x18, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x54, 0x72, + 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x12, + 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x1a, 0x0a, 0x08, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x12, 0x3c, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, + 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, + 0x73, 0x6b, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x88, 0x01, + 0x01, 0x12, 0x34, 0x0a, 0x13, 0x6c, 0x65, 0x78, 0x69, 0x63, 0x6f, 0x67, 0x72, 0x61, 0x70, 0x68, + 0x69, 0x63, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, + 0xe0, 0x41, 0x01, 0x52, 0x12, 0x6c, 0x65, 0x78, 0x69, 0x63, 0x6f, 0x67, 0x72, 0x61, 0x70, 0x68, + 0x69, 0x63, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x30, 0x0a, 0x11, 0x6c, 0x65, 0x78, 0x69, 0x63, + 0x6f, 0x67, 0x72, 0x61, 0x70, 0x68, 0x69, 0x63, 0x5f, 0x65, 0x6e, 0x64, 0x18, 0x0b, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x10, 0x6c, 0x65, 0x78, 0x69, 0x63, 0x6f, 0x67, + 0x72, 0x61, 0x70, 0x68, 0x69, 0x63, 0x45, 0x6e, 0x64, 0x12, 0x26, 0x0a, 0x0c, 0x73, 0x6f, 0x66, + 0x74, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x42, + 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0b, 0x73, 0x6f, 0x66, 0x74, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x64, 0x12, 0x22, 0x0a, 0x0a, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x67, 0x6c, 0x6f, 0x62, 0x18, + 0x0e, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x47, 0x6c, 0x6f, 0x62, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, + 0x61, 0x73, 0x6b, 0x22, 0xaa, 0x01, 0x0a, 0x17, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, + 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x20, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, + 0x64, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 
0x6d, + 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, + 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, + 0x22, 0x8c, 0x01, 0x0a, 0x18, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x27, 0x0a, + 0x0e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0d, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, + 0x65, 0x64, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, + 0x0e, 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, + 0xb5, 0x0e, 0x0a, 0x14, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x31, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x74, + 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x18, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x06, 0xe0, 0x41, 0x02, 0xe0, 0x41, 0x05, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x74, + 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x57, 0x0a, 0x12, 0x64, + 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x18, 0x19, 0x20, 0x01, 0x28, 0x09, 0x42, 0x28, 0xe0, 0x41, 0x02, 0xe0, 0x41, 0x05, 0xfa, + 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x52, 0x11, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x12, 0x56, 0x0a, 0x13, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x1b, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x26, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x11, 0x64, 0x65, 0x73, 0x74, 0x69, + 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x12, 0x3b, 0x0a, 0x0b, + 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x0b, 0x64, 0x65, + 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4a, 0x0a, 0x0d, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x67, 
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x28, 0x0a, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, + 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, + 0x02, 0x52, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, + 0x2b, 0x0a, 0x11, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d, + 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x54, 0x6f, 0x6b, 0x65, + 0x6e, 0x12, 0x3c, 0x0a, 0x1a, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, + 0x1c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x18, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, + 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, + 0x08, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x09, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x09, 0x20, 0x01, + 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, - 0x01, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, - 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 
0x2e, 0x43, 0x6f, 0x6d, 0x6d, - 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, - 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, - 0x12, 0x3c, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x0c, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x48, - 0x04, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x42, 0x16, - 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, - 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, - 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x0c, - 0x0a, 0x0a, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x22, 0x89, 0x05, 0x0a, - 0x10, 0x47, 0x65, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x1b, - 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, - 0xe0, 0x41, 0x02, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x67, - 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x33, 0x0a, 0x13, 0x69, - 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, - 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, - 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, - 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, - 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, - 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, - 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, - 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 
0x69, - 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x6d, 0x0a, + 0x01, 0x12, 0x40, 0x0a, 0x1a, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, + 0x0b, 0x20, 0x01, 0x28, 0x03, 0x48, 0x04, 0x52, 0x17, 0x69, 0x66, 0x53, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x88, 0x01, 0x01, 0x12, 0x47, 0x0a, 0x1e, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x03, 0x48, 0x05, 0x52, 0x1a, 0x69, + 0x66, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x48, 0x0a, 0x1e, + 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0d, + 0x20, 0x01, 0x28, 0x03, 0x48, 0x06, 0x52, 0x1b, 0x69, 0x66, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x4f, 0x0a, 0x22, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0e, 0x20, 0x01, + 0x28, 0x03, 0x48, 0x07, 0x52, 0x1e, 0x69, 0x66, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, + 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3e, 0x0a, 0x1c, 0x6d, 0x61, 0x78, 0x5f, 0x62, + 0x79, 0x74, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x5f, 0x70, + 0x65, 0x72, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x03, 0x52, 0x18, 0x6d, + 0x61, 0x78, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, + 0x50, 0x65, 0x72, 0x43, 0x61, 0x6c, 0x6c, 0x12, 0x47, 0x0a, 0x20, 0x63, 0x6f, 0x70, 0x79, 0x5f, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x10, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x1d, 0x63, 0x6f, 0x70, 0x79, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x45, 0x6e, 0x63, + 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, + 0x12, 0x46, 0x0a, 0x20, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, + 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x62, + 0x79, 0x74, 0x65, 0x73, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x1c, 0x63, 0x6f, 0x70, 0x79, + 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x4b, 0x65, 0x79, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x53, 0x0a, 0x27, 0x63, 0x6f, 0x70, 0x79, + 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x5f, 0x62, 0x79, + 0x74, 0x65, 0x73, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x22, 0x63, 0x6f, 0x70, 0x79, 0x53, + 0x6f, 0x75, 0x72, 
0x63, 0x65, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, + 0x65, 0x79, 0x53, 0x68, 0x61, 0x32, 0x35, 0x36, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x08, 0x20, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x3c, 0x0a, 0x09, - 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x48, 0x04, 0x52, 0x08, 0x72, - 0x65, 0x61, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, - 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, - 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, - 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, - 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x72, - 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x22, 0xaf, 0x02, 0x0a, 0x12, 0x52, 0x65, 0x61, - 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x4d, 0x0a, 0x10, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x5f, 0x64, - 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x68, - 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x52, 0x0f, 0x63, - 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x12, 0x4d, - 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, - 0x6d, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, 0x6f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12, 0x44, 0x0a, - 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, - 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0c, 0x63, 0x6f, 
0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x61, - 0x6e, 0x67, 0x65, 0x12, 0x35, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x8c, 0x04, 0x0a, 0x0f, 0x57, - 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x12, 0x3a, - 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x02, - 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, - 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x07, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, - 0x6c, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, - 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, - 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, - 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, - 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, - 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, - 0x88, 0x01, 0x01, 0x12, 0x24, 0x0a, 0x0b, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x69, - 0x7a, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x48, 0x04, 0x52, 0x0a, 0x6f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x53, 0x69, 0x7a, 0x65, 0x88, 0x01, 0x01, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, - 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, - 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, - 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, - 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, - 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x0e, 0x0a, 0x0c, 
0x5f, 0x6f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x22, 0xf8, 0x03, 0x0a, 0x12, 0x57, 0x72, - 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x1d, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x12, - 0x50, 0x0a, 0x11, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, - 0x73, 0x70, 0x65, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x4d, 0x0a, 0x10, + 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, + 0x18, 0x1d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x42, 0x16, 0x0a, 0x14, 0x5f, + 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, + 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, + 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1d, 0x0a, 0x1b, 0x5f, + 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x21, 0x0a, 0x1f, 0x5f, 0x69, + 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x21, 0x0a, + 0x1f, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x42, 0x25, 0x0a, 0x23, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, + 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, + 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0xd6, 0x01, 0x0a, 0x0f, 0x52, 0x65, 0x77, 0x72, + 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x32, 0x0a, 0x15, 0x74, + 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, + 0x74, 0x74, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x13, 0x74, 0x6f, 0x74, 0x61, + 0x6c, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x12, + 0x1f, 0x0a, 0x0b, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x69, 0x7a, 0x65, + 0x12, 0x12, 0x0a, 0x04, 0x64, 0x6f, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, + 0x64, 0x6f, 0x6e, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, + 0x74, 
0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x77, + 0x72, 0x69, 0x74, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x35, 0x0a, 0x08, 0x72, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, + 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x22, 0xaf, 0x02, 0x0a, 0x1a, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, + 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x53, 0x0a, 0x11, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, + 0x73, 0x70, 0x65, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, - 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x48, 0x00, - 0x52, 0x0f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, - 0x63, 0x12, 0x26, 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, - 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x77, 0x72, - 0x69, 0x74, 0x65, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x4f, 0x0a, 0x10, 0x63, 0x68, 0x65, - 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, - 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x48, 0x01, 0x52, 0x0f, 0x63, 0x68, 0x65, 0x63, 0x6b, - 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x12, 0x4d, 0x0a, 0x10, 0x6f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, - 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x69, 0x6e, - 0x69, 0x73, 0x68, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x0b, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x6d, 0x0a, 0x1c, - 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x08, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, - 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, 0x0f, 0x0a, 0x0d, 0x66, - 0x69, 0x72, 0x73, 0x74, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x06, 0x0a, 0x04, - 0x64, 0x61, 0x74, 0x61, 0x22, 0x87, 0x01, 0x0a, 0x13, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x52, 
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x27, 0x0a, 0x0e, - 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0d, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, - 0x64, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x0e, - 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0xd3, - 0x03, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x12, 0x1d, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, - 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x70, 0x61, - 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, - 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, - 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x12, 0x3c, - 0x0a, 0x1a, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, - 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x18, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x54, 0x72, 0x61, 0x69, 0x6c, - 0x69, 0x6e, 0x67, 0x44, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, - 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x72, - 0x65, 0x66, 0x69, 0x78, 0x12, 0x1a, 0x0a, 0x08, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, - 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, - 0x12, 0x3c, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x08, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x48, - 0x00, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x12, 0x34, - 0x0a, 0x13, 0x6c, 0x65, 0x78, 0x69, 0x63, 0x6f, 0x67, 0x72, 0x61, 0x70, 0x68, 0x69, 0x63, 0x5f, - 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, - 0x52, 0x12, 0x6c, 0x65, 0x78, 0x69, 0x63, 0x6f, 0x67, 0x72, 0x61, 0x70, 0x68, 0x69, 0x63, 0x53, - 0x74, 0x61, 0x72, 0x74, 0x12, 0x30, 0x0a, 0x11, 0x6c, 0x65, 0x78, 0x69, 0x63, 0x6f, 0x67, 0x72, - 0x61, 0x70, 0x68, 0x69, 0x63, 0x5f, 0x65, 0x6e, 0x64, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x42, - 0x03, 0xe0, 0x41, 0x01, 0x52, 0x10, 0x6c, 0x65, 0x78, 0x69, 0x63, 0x6f, 0x67, 0x72, 0x61, 0x70, - 0x68, 0x69, 0x63, 0x45, 0x6e, 0x64, 0x42, 0x0c, 0x0a, 
0x0a, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, - 0x6d, 0x61, 0x73, 0x6b, 0x22, 0xaa, 0x01, 0x0a, 0x17, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, - 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x20, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, - 0x49, 0x64, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, - 0x6d, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, - 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, - 0x73, 0x22, 0x8c, 0x01, 0x0a, 0x18, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, - 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x27, - 0x0a, 0x0e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x73, 0x69, 0x7a, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0d, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, - 0x74, 0x65, 0x64, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x42, 0x0e, 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x22, 0x93, 0x0e, 0x0a, 0x14, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x31, 0x0a, 0x10, 0x64, 0x65, 0x73, - 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x18, 0x20, - 0x01, 0x28, 0x09, 0x42, 0x06, 0xe0, 0x41, 0x02, 0xe0, 0x41, 0x05, 0x52, 0x0f, 0x64, 0x65, 0x73, - 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x57, 0x0a, 0x12, - 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x62, 0x75, 0x63, 0x6b, - 0x65, 0x74, 0x18, 0x19, 0x20, 0x01, 0x28, 0x09, 0x42, 0x28, 0xe0, 0x41, 0x02, 0xe0, 0x41, 0x05, - 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, - 0x65, 0x74, 0x52, 0x11, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, - 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x56, 0x0a, 0x13, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x1b, 0x20, 0x01, - 0x28, 0x09, 0x42, 0x26, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, - 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, - 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x11, 0x64, 0x65, 0x73, 0x74, - 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 
0x12, 0x3b, 0x0a, - 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x0b, 0x64, - 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x0d, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x12, 0x28, 0x0a, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, - 0x52, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x2b, - 0x0a, 0x11, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x72, - 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, - 0x12, 0x3c, 0x0a, 0x1a, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, - 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x1c, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x18, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x50, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x33, + 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x42, 0x03, + 0xe0, 0x41, 0x02, 0x52, 0x0f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x53, 0x70, 0x65, 0x63, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, + 0x72, 0x61, 0x6d, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, + 0x61, 0x6d, 0x73, 0x12, 0x4d, 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, + 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, + 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, + 0x73, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, + 0x6d, 0x73, 0x22, 0x3a, 0x0a, 0x1b, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, + 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x22, 0x87, + 0x05, 
0x0a, 0x13, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x36, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, - 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x08, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x09, 0x20, 0x01, 0x28, 0x03, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0a, 0x20, 0x01, 0x28, + 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, - 0x12, 0x40, 0x0a, 0x1a, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0b, - 0x20, 0x01, 0x28, 0x03, 0x48, 0x04, 0x52, 0x17, 0x69, 0x66, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, - 0x01, 0x01, 0x12, 0x47, 0x0a, 0x1e, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, - 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, - 0x61, 0x74, 0x63, 0x68, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x03, 0x48, 0x05, 0x52, 0x1a, 0x69, 0x66, - 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x48, 0x0a, 0x1e, 0x69, - 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0d, 0x20, - 0x01, 0x28, 0x03, 0x48, 0x06, 0x52, 0x1b, 0x69, 0x66, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, - 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 
0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, - 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x4f, 0x0a, 0x22, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0e, 0x20, 0x01, 0x28, - 0x03, 0x48, 0x07, 0x52, 0x1e, 0x69, 0x66, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, - 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, - 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3e, 0x0a, 0x1c, 0x6d, 0x61, 0x78, 0x5f, 0x62, 0x79, - 0x74, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x5f, 0x70, 0x65, - 0x72, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x03, 0x52, 0x18, 0x6d, 0x61, - 0x78, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x50, - 0x65, 0x72, 0x43, 0x61, 0x6c, 0x6c, 0x12, 0x47, 0x0a, 0x20, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x10, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x1d, 0x63, 0x6f, 0x70, 0x79, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x45, 0x6e, 0x63, 0x72, - 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, - 0x46, 0x0a, 0x20, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x65, - 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x62, 0x79, - 0x74, 0x65, 0x73, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x1c, 0x63, 0x6f, 0x70, 0x79, 0x53, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, - 0x65, 0x79, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x53, 0x0a, 0x27, 0x63, 0x6f, 0x70, 0x79, 0x5f, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x5f, 0x62, 0x79, 0x74, - 0x65, 0x73, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x22, 0x63, 0x6f, 0x70, 0x79, 0x53, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, - 0x79, 0x53, 0x68, 0x61, 0x32, 0x35, 0x36, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x6d, 0x0a, 0x1c, - 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x13, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, - 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x4d, 0x0a, 0x10, 0x6f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, - 0x1d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x42, 0x16, 0x0a, 
0x14, 0x5f, 0x69, - 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, - 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, - 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, - 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1d, 0x0a, 0x1b, 0x5f, 0x69, - 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x21, 0x0a, 0x1f, 0x5f, 0x69, 0x66, - 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x21, 0x0a, 0x1f, - 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, - 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, - 0x25, 0x0a, 0x23, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, - 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, - 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0xd6, 0x01, 0x0a, 0x0f, 0x52, 0x65, 0x77, 0x72, 0x69, - 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x32, 0x0a, 0x15, 0x74, 0x6f, - 0x74, 0x61, 0x6c, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, - 0x74, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x13, 0x74, 0x6f, 0x74, 0x61, 0x6c, - 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x12, 0x1f, - 0x0a, 0x0b, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x03, 0x52, 0x0a, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x69, 0x7a, 0x65, 0x12, - 0x12, 0x0a, 0x04, 0x64, 0x6f, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, 0x64, - 0x6f, 0x6e, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x74, - 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x77, 0x72, - 0x69, 0x74, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x35, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x22, - 0xaf, 0x02, 0x0a, 0x1a, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, - 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x53, - 0x0a, 0x11, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, - 0x70, 0x65, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x72, - 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x42, 0x03, 0xe0, - 0x41, 0x02, 0x52, 0x0f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, - 0x70, 
0x65, 0x63, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, - 0x61, 0x6d, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, - 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, - 0x6d, 0x73, 0x12, 0x4d, 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, - 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, - 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, - 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, - 0x73, 0x22, 0x3a, 0x0a, 0x1b, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, - 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x1b, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x22, 0x87, 0x05, - 0x0a, 0x13, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x36, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x33, 0x0a, - 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, - 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, - 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, - 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, - 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x48, - 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, - 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, - 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, - 0x25, 0x0a, 0x0e, 0x70, 0x72, 
0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, - 0x6c, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, - 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, - 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, - 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x75, 0x70, - 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, - 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, - 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, - 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, - 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, - 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, - 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, - 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, - 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0x69, 0x0a, 0x18, 0x47, 0x65, 0x74, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, + 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, + 0x63, 0x6c, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, + 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, + 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x75, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, 
0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, + 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, + 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, + 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0x69, 0x0a, 0x18, 0x47, 0x65, 0x74, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, + 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x22, 0x9e, 0x01, 0x0a, 0x14, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d, + 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4d, 0x0a, 0x07, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, + 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x37, 0x0a, 0x15, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x65, + 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, + 0x13, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x45, + 0x6d, 0x61, 0x69, 0x6c, 0x22, 0x81, 0x01, 0x0a, 0x15, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, + 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3e, + 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x28, + 0x0a, 0x10, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x62, 0x79, 0x74, + 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0e, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, + 0x4b, 0x65, 0x79, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0x87, 0x01, 0x0a, 0x14, 0x44, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x20, 0x0a, 0x09, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x61, 0x63, 0x63, 0x65, 0x73, + 0x73, 0x49, 0x64, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 
0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, - 0x63, 0x74, 0x22, 0x9e, 0x01, 0x0a, 0x14, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, - 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4d, 0x0a, 0x07, 0x70, - 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, - 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, - 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x37, 0x0a, 0x15, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x65, 0x6d, - 0x61, 0x69, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x13, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x45, 0x6d, - 0x61, 0x69, 0x6c, 0x22, 0x81, 0x01, 0x0a, 0x15, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d, - 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3e, 0x0a, - 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, - 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x28, 0x0a, - 0x10, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x62, 0x79, 0x74, 0x65, - 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0e, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x4b, - 0x65, 0x79, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0x87, 0x01, 0x0a, 0x14, 0x44, 0x65, 0x6c, 0x65, - 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x20, 0x0a, 0x09, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, - 0x49, 0x64, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, - 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, - 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, - 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, - 0x74, 0x22, 0x84, 0x01, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x61, 0x63, 0x63, 0x65, 0x73, - 0x73, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, - 0x08, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x49, 0x64, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, - 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, - 0x41, 0x2d, 
0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, - 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, - 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x80, 0x02, 0x0a, 0x13, 0x4c, 0x69, 0x73, - 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, + 0x63, 0x74, 0x22, 0x84, 0x01, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, + 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x61, 0x63, 0x63, 0x65, + 0x73, 0x73, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, + 0x52, 0x08, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x49, 0x64, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, + 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x80, 0x02, 0x0a, 0x13, 0x4c, 0x69, + 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, + 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, + 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x32, 0x0a, 0x15, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, + 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x45, 0x6d, 0x61, 0x69, 0x6c, + 0x12, 0x2a, 0x0a, 0x11, 0x73, 0x68, 0x6f, 0x77, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, + 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x73, 0x68, 0x6f, + 0x77, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x22, 0x7f, 0x0a, 0x14, + 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3f, 0x0a, 0x09, 0x68, 0x6d, 0x61, 0x63, 0x5f, 0x6b, 0x65, 0x79, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, 0x61, 0x63, + 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 
0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x68, 0x6d, 0x61, + 0x63, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, + 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, + 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x97, 0x01, + 0x0a, 0x14, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x42, 0x0a, 0x08, 0x68, 0x6d, 0x61, 0x63, 0x5f, 0x6b, + 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, 0x61, + 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x42, 0x03, 0xe0, 0x41, + 0x02, 0x52, 0x07, 0x68, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x3b, 0x0a, 0x0b, 0x75, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x52, 0x0a, 0x75, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x22, 0xbf, 0x01, 0x0a, 0x19, 0x43, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, + 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x31, 0x0a, 0x14, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x13, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x41, + 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, 0x30, 0x0a, 0x14, 0x65, 0x6e, 0x63, 0x72, + 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x12, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x3d, 0x0a, 0x1b, 0x65, 0x6e, + 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x68, 0x61, + 0x32, 0x35, 0x36, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x18, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x53, 0x68, + 0x61, 0x32, 0x35, 0x36, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0xca, 0x05, 0x0a, 0x10, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x73, 0x22, 0xb5, + 0x05, 0x0a, 0x06, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x16, 0x0a, 0x12, 0x56, 0x41, 0x4c, + 0x55, 0x45, 0x53, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, + 0x00, 0x12, 0x1b, 0x0a, 0x14, 0x4d, 0x41, 0x58, 0x5f, 0x52, 0x45, 0x41, 0x44, 0x5f, 0x43, 0x48, + 0x55, 0x4e, 0x4b, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x80, 0x80, 0x01, 0x12, 0x1c, + 0x0a, 0x15, 0x4d, 0x41, 0x58, 0x5f, 0x57, 0x52, 0x49, 0x54, 0x45, 0x5f, 0x43, 0x48, 0x55, 0x4e, + 0x4b, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x80, 0x80, 0x01, 0x12, 0x19, 0x0a, 0x12, + 0x4d, 0x41, 0x58, 0x5f, 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x5f, + 0x4d, 0x42, 0x10, 0x80, 0x80, 0xc0, 0x02, 0x12, 0x29, 0x0a, 0x24, 0x4d, 0x41, 0x58, 0x5f, 0x43, + 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x46, + 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x4e, 0x41, 0x4d, 0x45, 0x5f, 
0x42, 0x59, 0x54, 0x45, 0x53, 0x10, + 0x80, 0x08, 0x12, 0x2a, 0x0a, 0x25, 0x4d, 0x41, 0x58, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, + 0x5f, 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, + 0x56, 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x20, 0x12, 0x29, + 0x0a, 0x24, 0x4d, 0x41, 0x58, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x4d, 0x45, 0x54, + 0x41, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x54, 0x4f, 0x54, 0x41, 0x4c, 0x5f, 0x53, 0x49, 0x5a, 0x45, + 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x40, 0x12, 0x2a, 0x0a, 0x24, 0x4d, 0x41, 0x58, + 0x5f, 0x42, 0x55, 0x43, 0x4b, 0x45, 0x54, 0x5f, 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41, 0x54, 0x41, + 0x5f, 0x54, 0x4f, 0x54, 0x41, 0x4c, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, + 0x53, 0x10, 0x80, 0xa0, 0x01, 0x12, 0x27, 0x0a, 0x23, 0x4d, 0x41, 0x58, 0x5f, 0x4e, 0x4f, 0x54, + 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x43, 0x4f, 0x4e, 0x46, 0x49, 0x47, + 0x53, 0x5f, 0x50, 0x45, 0x52, 0x5f, 0x42, 0x55, 0x43, 0x4b, 0x45, 0x54, 0x10, 0x64, 0x12, 0x22, + 0x0a, 0x1e, 0x4d, 0x41, 0x58, 0x5f, 0x4c, 0x49, 0x46, 0x45, 0x43, 0x59, 0x43, 0x4c, 0x45, 0x5f, + 0x52, 0x55, 0x4c, 0x45, 0x53, 0x5f, 0x50, 0x45, 0x52, 0x5f, 0x42, 0x55, 0x43, 0x4b, 0x45, 0x54, + 0x10, 0x64, 0x12, 0x26, 0x0a, 0x22, 0x4d, 0x41, 0x58, 0x5f, 0x4e, 0x4f, 0x54, 0x49, 0x46, 0x49, + 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x41, 0x54, + 0x54, 0x52, 0x49, 0x42, 0x55, 0x54, 0x45, 0x53, 0x10, 0x05, 0x12, 0x31, 0x0a, 0x2c, 0x4d, 0x41, + 0x58, 0x5f, 0x4e, 0x4f, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x43, + 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x41, 0x54, 0x54, 0x52, 0x49, 0x42, 0x55, 0x54, 0x45, 0x5f, + 0x4b, 0x45, 0x59, 0x5f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x10, 0x80, 0x02, 0x12, 0x33, 0x0a, + 0x2e, 0x4d, 0x41, 0x58, 0x5f, 0x4e, 0x4f, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, + 0x4e, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x41, 0x54, 0x54, 0x52, 0x49, 0x42, 0x55, + 0x54, 0x45, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x10, + 0x80, 0x08, 0x12, 0x1c, 0x0a, 0x18, 0x4d, 0x41, 0x58, 0x5f, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x53, + 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x49, 0x45, 0x53, 0x5f, 0x43, 0x4f, 0x55, 0x4e, 0x54, 0x10, 0x40, + 0x12, 0x1f, 0x0a, 0x1b, 0x4d, 0x41, 0x58, 0x5f, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x53, 0x5f, 0x4b, + 0x45, 0x59, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x10, + 0x3f, 0x12, 0x1f, 0x0a, 0x1a, 0x4d, 0x41, 0x58, 0x5f, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x53, 0x5f, + 0x4b, 0x45, 0x59, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, + 0x80, 0x01, 0x12, 0x2e, 0x0a, 0x29, 0x4d, 0x41, 0x58, 0x5f, 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, + 0x5f, 0x49, 0x44, 0x53, 0x5f, 0x50, 0x45, 0x52, 0x5f, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x5f, + 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x53, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x10, + 0xe8, 0x07, 0x12, 0x1e, 0x0a, 0x1a, 0x53, 0x50, 0x4c, 0x49, 0x54, 0x5f, 0x54, 0x4f, 0x4b, 0x45, + 0x4e, 0x5f, 0x4d, 0x41, 0x58, 0x5f, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x44, 0x41, 0x59, 0x53, + 0x10, 0x0e, 0x1a, 0x02, 0x10, 0x01, 0x22, 0xd0, 0x22, 0x0a, 0x06, 0x42, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x03, 0xe0, 0x41, 0x05, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x09, 
0x62, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, + 0x41, 0x03, 0x52, 0x08, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, + 0x65, 0x74, 0x61, 0x67, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, + 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x33, 0xe0, 0x41, 0x05, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, - 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, - 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x32, 0x0a, 0x15, 0x73, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x65, - 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x73, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x45, 0x6d, 0x61, 0x69, 0x6c, 0x12, - 0x2a, 0x0a, 0x11, 0x73, 0x68, 0x6f, 0x77, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x5f, - 0x6b, 0x65, 0x79, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x73, 0x68, 0x6f, 0x77, - 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x22, 0x7f, 0x0a, 0x14, 0x4c, - 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x3f, 0x0a, 0x09, 0x68, 0x6d, 0x61, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x73, - 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, 0x61, 0x63, 0x4b, - 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x68, 0x6d, 0x61, 0x63, - 0x4b, 0x65, 0x79, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, - 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, - 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x97, 0x01, 0x0a, - 0x14, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x42, 0x0a, 0x08, 0x68, 0x6d, 0x61, 0x63, 0x5f, 0x6b, 0x65, - 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, 0x61, 0x63, - 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x42, 0x03, 0xe0, 0x41, 0x02, - 0x52, 0x07, 0x68, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x3b, 0x0a, 0x0b, 0x75, 0x70, 0x64, - 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, - 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x22, 0xbf, 0x01, 0x0a, 0x19, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, - 0x6e, 0x4f, 0x62, 
0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, - 0x72, 0x61, 0x6d, 0x73, 0x12, 0x31, 0x0a, 0x14, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x13, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x6c, - 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, 0x30, 0x0a, 0x14, 0x65, 0x6e, 0x63, 0x72, 0x79, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x12, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x4b, 0x65, 0x79, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x3d, 0x0a, 0x1b, 0x65, 0x6e, 0x63, - 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x32, - 0x35, 0x36, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x18, - 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x53, 0x68, 0x61, - 0x32, 0x35, 0x36, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0xca, 0x05, 0x0a, 0x10, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x73, 0x22, 0xb5, 0x05, - 0x0a, 0x06, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x16, 0x0a, 0x12, 0x56, 0x41, 0x4c, 0x55, - 0x45, 0x53, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, - 0x12, 0x1b, 0x0a, 0x14, 0x4d, 0x41, 0x58, 0x5f, 0x52, 0x45, 0x41, 0x44, 0x5f, 0x43, 0x48, 0x55, - 0x4e, 0x4b, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x80, 0x80, 0x01, 0x12, 0x1c, 0x0a, - 0x15, 0x4d, 0x41, 0x58, 0x5f, 0x57, 0x52, 0x49, 0x54, 0x45, 0x5f, 0x43, 0x48, 0x55, 0x4e, 0x4b, - 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x80, 0x80, 0x01, 0x12, 0x19, 0x0a, 0x12, 0x4d, - 0x41, 0x58, 0x5f, 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x5f, 0x4d, - 0x42, 0x10, 0x80, 0x80, 0xc0, 0x02, 0x12, 0x29, 0x0a, 0x24, 0x4d, 0x41, 0x58, 0x5f, 0x43, 0x55, - 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x46, 0x49, - 0x45, 0x4c, 0x44, 0x5f, 0x4e, 0x41, 0x4d, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, - 0x08, 0x12, 0x2a, 0x0a, 0x25, 0x4d, 0x41, 0x58, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, - 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x56, - 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x20, 0x12, 0x29, 0x0a, - 0x24, 0x4d, 0x41, 0x58, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x4d, 0x45, 0x54, 0x41, - 0x44, 0x41, 0x54, 0x41, 0x5f, 0x54, 0x4f, 0x54, 0x41, 0x4c, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x5f, - 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x40, 0x12, 0x2a, 0x0a, 0x24, 0x4d, 0x41, 0x58, 0x5f, - 0x42, 0x55, 0x43, 0x4b, 0x45, 0x54, 0x5f, 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41, 0x54, 0x41, 0x5f, - 0x54, 0x4f, 0x54, 0x41, 0x4c, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, - 0x10, 0x80, 0xa0, 0x01, 0x12, 0x27, 0x0a, 0x23, 0x4d, 0x41, 0x58, 0x5f, 0x4e, 0x4f, 0x54, 0x49, - 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x43, 0x4f, 0x4e, 0x46, 0x49, 0x47, 0x53, - 0x5f, 0x50, 0x45, 0x52, 0x5f, 0x42, 0x55, 0x43, 0x4b, 0x45, 0x54, 0x10, 0x64, 0x12, 0x22, 0x0a, - 0x1e, 0x4d, 0x41, 0x58, 0x5f, 0x4c, 0x49, 0x46, 0x45, 0x43, 0x59, 0x43, 0x4c, 0x45, 0x5f, 0x52, - 0x55, 0x4c, 0x45, 0x53, 0x5f, 0x50, 0x45, 0x52, 0x5f, 0x42, 0x55, 0x43, 0x4b, 0x45, 0x54, 0x10, - 0x64, 0x12, 0x26, 0x0a, 0x22, 0x4d, 0x41, 
0x58, 0x5f, 0x4e, 0x4f, 0x54, 0x49, 0x46, 0x49, 0x43, - 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x41, 0x54, 0x54, - 0x52, 0x49, 0x42, 0x55, 0x54, 0x45, 0x53, 0x10, 0x05, 0x12, 0x31, 0x0a, 0x2c, 0x4d, 0x41, 0x58, - 0x5f, 0x4e, 0x4f, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x43, 0x55, - 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x41, 0x54, 0x54, 0x52, 0x49, 0x42, 0x55, 0x54, 0x45, 0x5f, 0x4b, - 0x45, 0x59, 0x5f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x10, 0x80, 0x02, 0x12, 0x33, 0x0a, 0x2e, - 0x4d, 0x41, 0x58, 0x5f, 0x4e, 0x4f, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, - 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x41, 0x54, 0x54, 0x52, 0x49, 0x42, 0x55, 0x54, - 0x45, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x10, 0x80, - 0x08, 0x12, 0x1c, 0x0a, 0x18, 0x4d, 0x41, 0x58, 0x5f, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x53, 0x5f, - 0x45, 0x4e, 0x54, 0x52, 0x49, 0x45, 0x53, 0x5f, 0x43, 0x4f, 0x55, 0x4e, 0x54, 0x10, 0x40, 0x12, - 0x1f, 0x0a, 0x1b, 0x4d, 0x41, 0x58, 0x5f, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x53, 0x5f, 0x4b, 0x45, - 0x59, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x10, 0x3f, - 0x12, 0x1f, 0x0a, 0x1a, 0x4d, 0x41, 0x58, 0x5f, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x53, 0x5f, 0x4b, - 0x45, 0x59, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, - 0x01, 0x12, 0x2e, 0x0a, 0x29, 0x4d, 0x41, 0x58, 0x5f, 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x5f, - 0x49, 0x44, 0x53, 0x5f, 0x50, 0x45, 0x52, 0x5f, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x5f, 0x4f, - 0x42, 0x4a, 0x45, 0x43, 0x54, 0x53, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x10, 0xe8, - 0x07, 0x12, 0x1e, 0x0a, 0x1a, 0x53, 0x50, 0x4c, 0x49, 0x54, 0x5f, 0x54, 0x4f, 0x4b, 0x45, 0x4e, - 0x5f, 0x4d, 0x41, 0x58, 0x5f, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x44, 0x41, 0x59, 0x53, 0x10, - 0x0e, 0x1a, 0x02, 0x10, 0x01, 0x22, 0xf0, 0x1e, 0x0a, 0x06, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, - 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, - 0xe0, 0x41, 0x05, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x09, 0x62, 0x75, 0x63, - 0x6b, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, - 0x03, 0x52, 0x08, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x65, - 0x74, 0x61, 0x67, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, - 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x42, 0x33, 0xe0, 0x41, 0x05, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, - 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x2b, - 0x0a, 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0e, 0x6d, 0x65, 0x74, - 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x0a, 0x08, 0x6c, - 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, - 0x41, 0x05, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x0d, - 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x79, 
0x70, 0x65, 0x18, 0x06, 0x20, - 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x72, - 0x70, 0x6f, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x72, 0x70, 0x6f, 0x12, 0x38, 0x0a, - 0x03, 0x61, 0x63, 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, + 0x2b, 0x0a, 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0e, 0x6d, 0x65, + 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x0a, 0x08, + 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, + 0xe0, 0x41, 0x05, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, + 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0c, 0x6c, 0x6f, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x10, 0x0a, 0x03, + 0x72, 0x70, 0x6f, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x72, 0x70, 0x6f, 0x12, 0x38, + 0x0a, 0x03, 0x61, 0x63, 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, + 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, + 0x72, 0x6f, 0x6c, 0x52, 0x03, 0x61, 0x63, 0x6c, 0x12, 0x54, 0x0a, 0x12, 0x64, 0x65, 0x66, 0x61, + 0x75, 0x6c, 0x74, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x09, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, + 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x10, 0x64, 0x65, + 0x66, 0x61, 0x75, 0x6c, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63, 0x6c, 0x12, 0x41, + 0x0a, 0x09, 0x6c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x69, 0x66, + 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x52, 0x09, 0x6c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, + 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, + 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, + 0x69, 0x6d, 0x65, 0x12, 0x32, 0x0a, 0x04, 0x63, 0x6f, 0x72, 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 
0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x43, 0x6f, 0x72, + 0x73, 0x52, 0x04, 0x63, 0x6f, 0x72, 0x73, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x75, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x18, 0x64, 0x65, 0x66, + 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x62, 0x61, 0x73, 0x65, 0x64, + 0x5f, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x64, 0x65, 0x66, + 0x61, 0x75, 0x6c, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x73, 0x65, 0x64, 0x48, 0x6f, + 0x6c, 0x64, 0x12, 0x3d, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x0f, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x61, + 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, + 0x73, 0x12, 0x3b, 0x0a, 0x07, 0x77, 0x65, 0x62, 0x73, 0x69, 0x74, 0x65, 0x18, 0x10, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x57, 0x65, + 0x62, 0x73, 0x69, 0x74, 0x65, 0x52, 0x07, 0x77, 0x65, 0x62, 0x73, 0x69, 0x74, 0x65, 0x12, 0x44, + 0x0a, 0x0a, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x11, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x56, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x52, 0x0a, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x3b, 0x0a, 0x07, 0x6c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x18, + 0x12, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x2e, 0x4c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x52, 0x07, 0x6c, 0x6f, 0x67, 0x67, 0x69, 0x6e, + 0x67, 0x12, 0x33, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, + 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x44, 0x0a, 0x0a, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, - 0x75, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, - 0x6f, 0x6c, 0x52, 0x03, 0x61, 0x63, 0x6c, 0x12, 0x54, 0x0a, 0x12, 0x64, 0x65, 0x66, 0x61, 0x75, - 0x6c, 0x74, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x09, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63, - 0x63, 0x65, 0x73, 
0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x10, 0x64, 0x65, 0x66, - 0x61, 0x75, 0x6c, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63, 0x6c, 0x12, 0x41, 0x0a, - 0x09, 0x6c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x69, 0x66, 0x65, - 0x63, 0x79, 0x63, 0x6c, 0x65, 0x52, 0x09, 0x6c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, - 0x12, 0x40, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, - 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, - 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, - 0x6d, 0x65, 0x12, 0x32, 0x0a, 0x04, 0x63, 0x6f, 0x72, 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x43, 0x6f, 0x72, 0x73, - 0x52, 0x04, 0x63, 0x6f, 0x72, 0x73, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, - 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, - 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x75, 0x70, - 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x18, 0x64, 0x65, 0x66, 0x61, - 0x75, 0x6c, 0x74, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x62, 0x61, 0x73, 0x65, 0x64, 0x5f, - 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x64, 0x65, 0x66, 0x61, - 0x75, 0x6c, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x73, 0x65, 0x64, 0x48, 0x6f, 0x6c, - 0x64, 0x12, 0x3d, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x0f, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x61, 0x62, - 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, - 0x12, 0x3b, 0x0a, 0x07, 0x77, 0x65, 0x62, 0x73, 0x69, 0x74, 0x65, 0x18, 0x10, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x57, 0x65, 0x62, - 0x73, 0x69, 0x74, 0x65, 0x52, 0x07, 0x77, 0x65, 0x62, 0x73, 0x69, 0x74, 0x65, 0x12, 0x44, 0x0a, - 0x0a, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x11, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x56, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x52, 0x0a, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x69, 0x6e, 0x67, 0x12, 0x3b, 0x0a, 0x07, 0x6c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x18, 0x12, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, - 0x4c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 
0x52, 0x07, 0x6c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, - 0x12, 0x33, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x05, - 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x44, 0x0a, 0x0a, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x0a, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3b, 0x0a, 0x07, 0x62, - 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, - 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x42, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x52, - 0x07, 0x62, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x54, 0x0a, 0x10, 0x72, 0x65, 0x74, 0x65, - 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x16, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x52, 0x65, - 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0f, 0x72, - 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x42, - 0x0a, 0x0a, 0x69, 0x61, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x17, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x49, 0x61, - 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x09, 0x69, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x61, 0x74, 0x69, 0x73, 0x66, 0x69, 0x65, 0x73, 0x5f, - 0x70, 0x7a, 0x73, 0x18, 0x19, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x73, 0x61, 0x74, 0x69, 0x73, - 0x66, 0x69, 0x65, 0x73, 0x50, 0x7a, 0x73, 0x12, 0x67, 0x0a, 0x17, 0x63, 0x75, 0x73, 0x74, 0x6f, - 0x6d, 0x5f, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, - 0x6b, 0x65, 0x74, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50, 0x6c, 0x61, 0x63, 0x65, 0x6d, - 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x15, 0x63, 0x75, 0x73, 0x74, 0x6f, - 0x6d, 0x50, 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x12, 0x41, 0x0a, 0x09, 0x61, 0x75, 0x74, 0x6f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x1c, 0x20, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x0a, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3b, 0x0a, 0x07, + 0x62, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, + 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x42, 0x69, 
0x6c, 0x6c, 0x69, 0x6e, 0x67, + 0x52, 0x07, 0x62, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x54, 0x0a, 0x10, 0x72, 0x65, 0x74, + 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x16, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x52, + 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0f, + 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, + 0x42, 0x0a, 0x0a, 0x69, 0x61, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x41, - 0x75, 0x74, 0x6f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x52, 0x09, 0x61, 0x75, 0x74, 0x6f, 0x63, 0x6c, - 0x61, 0x73, 0x73, 0x1a, 0x30, 0x0a, 0x07, 0x42, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x25, + 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x49, + 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x09, 0x69, 0x61, 0x6d, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x61, 0x74, 0x69, 0x73, 0x66, 0x69, 0x65, 0x73, + 0x5f, 0x70, 0x7a, 0x73, 0x18, 0x19, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x73, 0x61, 0x74, 0x69, + 0x73, 0x66, 0x69, 0x65, 0x73, 0x50, 0x7a, 0x73, 0x12, 0x67, 0x0a, 0x17, 0x63, 0x75, 0x73, 0x74, + 0x6f, 0x6d, 0x5f, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50, 0x6c, 0x61, 0x63, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x15, 0x63, 0x75, 0x73, 0x74, + 0x6f, 0x6d, 0x50, 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x12, 0x41, 0x0a, 0x09, 0x61, 0x75, 0x74, 0x6f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x1c, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, + 0x41, 0x75, 0x74, 0x6f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x52, 0x09, 0x61, 0x75, 0x74, 0x6f, 0x63, + 0x6c, 0x61, 0x73, 0x73, 0x12, 0x5d, 0x0a, 0x12, 0x73, 0x6f, 0x66, 0x74, 0x5f, 0x64, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x53, 0x6f, 0x66, 0x74, + 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x03, 0xe0, 0x41, + 0x01, 0x52, 0x10, 0x73, 0x6f, 0x66, 0x74, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x1a, 0x30, 0x0a, 0x07, 0x42, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x25, 0x0a, 0x0e, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x61, 0x79, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x72, 0x50, 0x61, 0x79, 0x73, 0x1a, 0x87, 0x01, 0x0a, 0x04, 0x43, 0x6f, 0x72, 0x73, 0x12, 0x16, @@ 
-7575,593 +8191,642 @@ var file_google_storage_v2_storage_proto_rawDesc = []byte{ 0x6c, 0x6f, 0x67, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x2a, 0x0a, 0x11, 0x6c, 0x6f, 0x67, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x6f, 0x67, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x50, - 0x72, 0x65, 0x66, 0x69, 0x78, 0x1a, 0x80, 0x02, 0x0a, 0x0f, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, + 0x72, 0x65, 0x66, 0x69, 0x78, 0x1a, 0xbb, 0x01, 0x0a, 0x0f, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x41, 0x0a, 0x0e, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0d, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x69, 0x73, 0x5f, 0x6c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x08, 0x69, 0x73, 0x4c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x10, 0x72, 0x65, 0x74, - 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0f, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, - 0x50, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x88, 0x01, 0x01, 0x12, 0x48, 0x0a, 0x12, 0x72, 0x65, 0x74, + 0x08, 0x69, 0x73, 0x4c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x12, 0x48, 0x0a, 0x12, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x75, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x42, 0x13, 0x0a, 0x11, 0x5f, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x1a, 0x26, 0x0a, 0x0a, 0x56, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, - 0x1a, 0x59, 0x0a, 0x07, 0x57, 0x65, 0x62, 0x73, 0x69, 0x74, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x6d, - 0x61, 0x69, 0x6e, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6d, 0x61, 0x69, 0x6e, 0x50, 0x61, 0x67, 0x65, 0x53, - 0x75, 0x66, 0x66, 0x69, 0x78, 0x12, 0x24, 0x0a, 0x0e, 0x6e, 0x6f, 0x74, 0x5f, 0x66, 0x6f, 0x75, - 0x6e, 0x64, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x6e, - 0x6f, 0x74, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x50, 0x61, 0x67, 0x65, 0x1a, 0x3e, 0x0a, 0x15, 0x43, - 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50, 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x12, 0x25, 0x0a, 0x0e, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6c, 0x6f, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x64, 0x61, - 0x74, 0x61, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x67, 0x0a, 0x09, 0x41, - 0x75, 0x74, 0x6f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, - 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 
0x65, 0x6e, 0x61, 0x62, 0x6c, - 0x65, 0x64, 0x12, 0x40, 0x0a, 0x0b, 0x74, 0x6f, 0x67, 0x67, 0x6c, 0x65, 0x5f, 0x74, 0x69, 0x6d, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, - 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x74, 0x6f, 0x67, 0x67, 0x6c, 0x65, - 0x54, 0x69, 0x6d, 0x65, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, + 0x69, 0x6f, 0x6e, 0x1a, 0xd3, 0x01, 0x0a, 0x10, 0x53, 0x6f, 0x66, 0x74, 0x44, 0x65, 0x6c, 0x65, + 0x74, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x4d, 0x0a, 0x12, 0x72, 0x65, 0x74, 0x65, + 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, + 0x00, 0x52, 0x11, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x88, 0x01, 0x01, 0x12, 0x46, 0x0a, 0x0e, 0x65, 0x66, 0x66, 0x65, 0x63, + 0x74, 0x69, 0x76, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x48, 0x01, 0x52, 0x0d, 0x65, + 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x88, 0x01, 0x01, 0x42, + 0x15, 0x0a, 0x13, 0x5f, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x11, 0x0a, 0x0f, 0x5f, 0x65, 0x66, 0x66, 0x65, 0x63, + 0x74, 0x69, 0x76, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x1a, 0x26, 0x0a, 0x0a, 0x56, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, + 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, + 0x64, 0x1a, 0x59, 0x0a, 0x07, 0x57, 0x65, 0x62, 0x73, 0x69, 0x74, 0x65, 0x12, 0x28, 0x0a, 0x10, + 0x6d, 0x61, 0x69, 0x6e, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6d, 0x61, 0x69, 0x6e, 0x50, 0x61, 0x67, 0x65, + 0x53, 0x75, 0x66, 0x66, 0x69, 0x78, 0x12, 0x24, 0x0a, 0x0e, 0x6e, 0x6f, 0x74, 0x5f, 0x66, 0x6f, + 0x75, 0x6e, 0x64, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, + 0x6e, 0x6f, 0x74, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x50, 0x61, 0x67, 0x65, 0x1a, 0x3e, 0x0a, 0x15, + 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50, 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x25, 0x0a, 0x0e, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6c, 0x6f, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x64, + 0x61, 0x74, 0x61, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0xd6, 0x02, 0x0a, + 0x09, 0x41, 0x75, 0x74, 0x6f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, + 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, + 0x62, 0x6c, 0x65, 0x64, 0x12, 0x40, 0x0a, 0x0b, 0x74, 0x6f, 0x67, 0x67, 0x6c, 0x65, 0x5f, 0x74, + 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 
0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x74, 0x6f, 0x67, 0x67, + 0x6c, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x16, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, + 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x14, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, + 0x61, 0x6c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x88, 0x01, + 0x01, 0x12, 0x70, 0x0a, 0x22, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x5f, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x75, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x48, 0x01, + 0x52, 0x1e, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, + 0x88, 0x01, 0x01, 0x42, 0x19, 0x0a, 0x17, 0x5f, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, + 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x42, 0x25, + 0x0a, 0x23, 0x5f, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, + 0x3a, 0x47, 0xea, 0x41, 0x44, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x12, 0x23, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, + 0x2f, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x7d, 0x22, 0x97, 0x02, 0x0a, 0x13, 0x42, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, + 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x22, 0x0a, + 0x0a, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x61, 0x6c, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x41, 0x6c, + 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x64, 0x12, 0x12, + 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, + 0x61, 0x67, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x05, 
0x65, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, + 0x69, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, + 0x12, 0x41, 0x0a, 0x0c, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x74, 0x65, 0x61, 0x6d, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x52, 0x0b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, + 0x65, 0x61, 0x6d, 0x22, 0x5a, 0x0a, 0x0f, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, + 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x12, 0x1f, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, + 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x05, 0xe0, 0x41, 0x01, 0x08, 0x01, 0x52, 0x07, + 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x63, 0x72, 0x63, 0x33, 0x32, + 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x07, 0x48, 0x00, 0x52, 0x06, 0x63, 0x72, 0x63, 0x33, 0x32, + 0x63, 0x88, 0x01, 0x01, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x22, + 0x54, 0x0a, 0x0f, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, + 0x6d, 0x73, 0x12, 0x1b, 0x0a, 0x06, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x07, 0x48, 0x00, 0x52, 0x06, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x88, 0x01, 0x01, 0x12, + 0x19, 0x0a, 0x08, 0x6d, 0x64, 0x35, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x07, 0x6d, 0x64, 0x35, 0x48, 0x61, 0x73, 0x68, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x63, + 0x72, 0x63, 0x33, 0x32, 0x63, 0x22, 0xfe, 0x02, 0x0a, 0x0f, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, + 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x13, 0x0a, 0x02, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x02, 0x69, 0x64, 0x12, 0x20, + 0x0a, 0x09, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x08, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x49, 0x64, + 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x33, 0xe0, 0x41, 0x05, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, + 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, + 0x37, 0x0a, 0x15, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, + 0x6e, 0x74, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, + 0xe0, 0x41, 0x03, 0x52, 0x13, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, + 0x75, 0x6e, 0x74, 0x45, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, + 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x40, + 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, + 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, + 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 
0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, + 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, + 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x22, 0x85, 0x04, 0x0a, 0x12, 0x4e, 0x6f, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x17, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, + 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69, + 0x63, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x1f, 0x0a, 0x0b, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, + 0x79, 0x70, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x65, 0x76, 0x65, 0x6e, + 0x74, 0x54, 0x79, 0x70, 0x65, 0x73, 0x12, 0x68, 0x0a, 0x11, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, + 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x3b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x41, + 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x10, + 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, + 0x12, 0x2c, 0x0a, 0x12, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x5f, + 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x6f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x2a, + 0x0a, 0x0e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0d, 0x70, 0x61, 0x79, + 0x6c, 0x6f, 0x61, 0x64, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x1a, 0x43, 0x0a, 0x15, 0x43, 0x75, + 0x73, 0x74, 0x6f, 0x6d, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, - 0x47, 0xea, 0x41, 0x44, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, - 0x6b, 0x65, 0x74, 0x12, 0x23, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, - 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, - 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x7d, 0x22, 0x97, 0x02, 0x0a, 0x13, 0x42, 0x75, 0x63, - 0x6b, 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, - 0x12, 0x12, 0x0a, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 
0x28, 0x09, 0x52, 0x04, - 0x72, 0x6f, 0x6c, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x22, 0x0a, 0x0a, - 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x61, 0x6c, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, - 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x41, 0x6c, 0x74, - 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x64, 0x12, 0x12, 0x0a, - 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, - 0x67, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, - 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, - 0x41, 0x0a, 0x0c, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x74, 0x65, 0x61, 0x6d, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, - 0x74, 0x54, 0x65, 0x61, 0x6d, 0x52, 0x0b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, - 0x61, 0x6d, 0x22, 0x53, 0x0a, 0x0f, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, - 0x64, 0x44, 0x61, 0x74, 0x61, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x12, - 0x1b, 0x0a, 0x06, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x07, 0x48, - 0x00, 0x52, 0x06, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x88, 0x01, 0x01, 0x42, 0x09, 0x0a, 0x07, - 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x22, 0x54, 0x0a, 0x0f, 0x4f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12, 0x1b, 0x0a, 0x06, 0x63, 0x72, - 0x63, 0x33, 0x32, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x07, 0x48, 0x00, 0x52, 0x06, 0x63, 0x72, - 0x63, 0x33, 0x32, 0x63, 0x88, 0x01, 0x01, 0x12, 0x19, 0x0a, 0x08, 0x6d, 0x64, 0x35, 0x5f, 0x68, - 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6d, 0x64, 0x35, 0x48, 0x61, - 0x73, 0x68, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x22, 0xfe, 0x02, - 0x0a, 0x0f, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, - 0x61, 0x12, 0x13, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, - 0x41, 0x05, 0x52, 0x02, 0x69, 0x64, 0x12, 0x20, 0x0a, 0x09, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, - 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x08, - 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x49, 0x64, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, - 0x65, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x05, 0xfa, 0x41, - 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, - 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, - 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x37, 0x0a, 0x15, 0x73, 0x65, 0x72, 0x76, 0x69, - 
0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x13, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x45, 0x6d, 0x61, 0x69, 0x6c, - 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, - 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, - 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x63, 0x72, - 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, - 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, - 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, - 0x61, 0x67, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x22, 0xec, - 0x03, 0x0a, 0x0c, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, - 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, - 0x41, 0x02, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, - 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x05, 0x74, 0x6f, - 0x70, 0x69, 0x63, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x07, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x1f, 0x0a, 0x0b, 0x65, 0x76, 0x65, 0x6e, 0x74, - 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x65, 0x76, - 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x73, 0x12, 0x62, 0x0a, 0x11, 0x63, 0x75, 0x73, 0x74, - 0x6f, 0x6d, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x41, 0x74, 0x74, 0x72, 0x69, - 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x10, 0x63, 0x75, 0x73, 0x74, - 0x6f, 0x6d, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x2c, 0x0a, 0x12, - 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x70, 0x72, 0x65, 0x66, - 0x69, 0x78, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x2a, 0x0a, 0x0e, 0x70, 0x61, - 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0d, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, - 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x1a, 0x43, 0x0a, 0x15, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, - 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, - 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, - 0x79, 0x12, 0x14, 0x0a, 
0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, 0x70, 0xea, 0x41, 0x6d, - 0x0a, 0x23, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x46, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, - 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, - 0x73, 0x2f, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x7d, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x2f, 0x7b, - 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x22, 0x71, 0x0a, - 0x12, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x31, 0x0a, 0x14, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x13, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x6c, 0x67, - 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, 0x28, 0x0a, 0x10, 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x68, - 0x61, 0x32, 0x35, 0x36, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, - 0x52, 0x0e, 0x6b, 0x65, 0x79, 0x53, 0x68, 0x61, 0x32, 0x35, 0x36, 0x42, 0x79, 0x74, 0x65, 0x73, - 0x22, 0xec, 0x0b, 0x0a, 0x06, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x17, 0x0a, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x05, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, - 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, - 0x6b, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x1b, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x23, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x05, - 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x0e, - 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x67, - 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0c, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x17, - 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, - 0x03, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x74, 0x65, - 0x6e, 0x74, 0x5f, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x07, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, - 0x6e, 0x67, 0x12, 0x2f, 0x0a, 0x13, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x64, 0x69, - 0x73, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 
0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x12, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x44, 0x69, 0x73, 0x70, 0x6f, 0x73, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x61, 0x63, 0x68, 0x65, 0x5f, 0x63, 0x6f, 0x6e, - 0x74, 0x72, 0x6f, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x63, 0x61, 0x63, 0x68, - 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x38, 0x0a, 0x03, 0x61, 0x63, 0x6c, 0x18, - 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x03, 0x61, - 0x63, 0x6c, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x6c, 0x61, - 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x6f, - 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x12, 0x40, 0x0a, - 0x0b, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0c, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, - 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, - 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, - 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x54, 0x79, - 0x70, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, - 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, - 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, - 0x54, 0x69, 0x6d, 0x65, 0x12, 0x2c, 0x0a, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, - 0x74, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, - 0x41, 0x03, 0x52, 0x0e, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x75, - 0x6e, 0x74, 0x12, 0x45, 0x0a, 0x09, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, - 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, - 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, - 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, - 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x07, 0x6b, - 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x12, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xfa, 0x41, - 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, - 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x06, 0x6b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 
0x12, 0x5a, 0x0a, 0x19, - 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, - 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x7d, 0xea, 0x41, 0x7a, 0x0a, 0x29, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, + 0x4d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x7d, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x7b, 0x62, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x7d, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x2f, 0x7b, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x7d, 0x22, 0x71, + 0x0a, 0x12, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x31, 0x0a, 0x14, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x13, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x6c, + 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, 0x28, 0x0a, 0x10, 0x6b, 0x65, 0x79, 0x5f, 0x73, + 0x68, 0x61, 0x32, 0x35, 0x36, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x0e, 0x6b, 0x65, 0x79, 0x53, 0x68, 0x61, 0x32, 0x35, 0x36, 0x42, 0x79, 0x74, 0x65, + 0x73, 0x22, 0xec, 0x0b, 0x0a, 0x06, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x17, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x05, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x1b, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x23, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, + 0x05, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, + 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0e, 0x6d, 0x65, 0x74, 0x61, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0c, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x12, + 0x17, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, + 0x41, 0x03, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x74, + 0x65, 0x6e, 0x74, 0x5f, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x45, 0x6e, 0x63, 0x6f, 0x64, + 
0x69, 0x6e, 0x67, 0x12, 0x2f, 0x0a, 0x13, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x64, + 0x69, 0x73, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x12, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x44, 0x69, 0x73, 0x70, 0x6f, 0x73, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x61, 0x63, 0x68, 0x65, 0x5f, 0x63, 0x6f, + 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x63, 0x61, 0x63, + 0x68, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x38, 0x0a, 0x03, 0x61, 0x63, 0x6c, + 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x03, + 0x61, 0x63, 0x6c, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x6c, + 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, + 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x12, 0x40, + 0x0a, 0x0b, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0c, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, + 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, + 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, + 0x6d, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x2c, 0x0a, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, + 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, + 0xe0, 0x41, 0x03, 0x52, 0x0e, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x43, 0x6f, + 0x75, 0x6e, 0x74, 0x12, 0x45, 0x0a, 0x09, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, + 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, + 0x09, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, - 0x52, 0x16, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, - 0x6c, 0x61, 0x73, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x65, 0x6d, 0x70, - 0x6f, 0x72, 0x61, 0x72, 0x79, 0x5f, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x14, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x0d, 0x74, 0x65, 0x6d, 
0x70, 0x6f, 0x72, 0x61, 0x72, 0x79, 0x48, 0x6f, 0x6c, 0x64, 0x12, - 0x4e, 0x0a, 0x15, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x70, - 0x69, 0x72, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x13, 0x72, 0x65, 0x74, 0x65, - 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, - 0x43, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x16, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x4d, 0x65, 0x74, - 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, - 0x64, 0x61, 0x74, 0x61, 0x12, 0x2d, 0x0a, 0x10, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x62, 0x61, - 0x73, 0x65, 0x64, 0x5f, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x17, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, - 0x52, 0x0e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x73, 0x65, 0x64, 0x48, 0x6f, 0x6c, 0x64, - 0x88, 0x01, 0x01, 0x12, 0x33, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x18, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x42, 0x03, 0xe0, 0x41, - 0x03, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x56, 0x0a, 0x13, 0x63, 0x75, 0x73, 0x74, - 0x6f, 0x6d, 0x65, 0x72, 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x19, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, - 0x65, 0x72, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x12, 0x63, 0x75, - 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x3b, 0x0a, 0x0b, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, - 0x1a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, - 0x70, 0x52, 0x0a, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x1a, 0x3b, 0x0a, - 0x0d, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, - 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, - 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x13, 0x0a, 0x11, 0x5f, 0x65, - 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x62, 0x61, 0x73, 0x65, 0x64, 0x5f, 0x68, 0x6f, 0x6c, 0x64, 0x22, - 0x97, 0x02, 0x0a, 0x13, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, - 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, - 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x65, - 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x6e, 0x74, - 0x69, 0x74, 0x79, 0x12, 0x22, 0x0a, 0x0a, 0x65, 0x6e, 
0x74, 0x69, 0x74, 0x79, 0x5f, 0x61, 0x6c, - 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x65, 0x6e, - 0x74, 0x69, 0x74, 0x79, 0x41, 0x6c, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, - 0x79, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e, 0x74, 0x69, - 0x74, 0x79, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x08, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6d, 0x61, 0x69, - 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x16, - 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, - 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x41, 0x0a, 0x0c, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, - 0x74, 0x5f, 0x74, 0x65, 0x61, 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, - 0x2e, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x52, 0x0b, 0x70, 0x72, - 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x22, 0x8e, 0x01, 0x0a, 0x13, 0x4c, 0x69, - 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x33, 0x0a, 0x07, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x6f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, - 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, - 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, - 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, - 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x48, 0x0a, 0x0b, 0x50, 0x72, - 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x6f, - 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, - 0x12, 0x12, 0x0a, 0x04, 0x74, 0x65, 0x61, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, - 0x74, 0x65, 0x61, 0x6d, 0x22, 0x35, 0x0a, 0x0e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, - 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x5f, - 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, - 0x6d, 0x61, 0x69, 0x6c, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0x3c, 0x0a, 0x05, 0x4f, - 0x77, 0x6e, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x1b, 0x0a, 0x09, - 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x08, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x64, 0x22, 0x5f, 0x0a, 0x0c, 0x43, 0x6f, 0x6e, - 0x74, 0x65, 0x6e, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, - 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, - 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 
0x03, 0x65, 0x6e, - 0x64, 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x6c, 0x65, - 0x6e, 0x67, 0x74, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x63, 0x6f, 0x6d, 0x70, - 0x6c, 0x65, 0x74, 0x65, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x32, 0xaf, 0x25, 0x0a, 0x07, 0x53, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x12, 0x72, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, - 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, - 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x22, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x15, 0x12, 0x13, - 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, - 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x6f, 0x0a, 0x09, 0x47, 0x65, - 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x42, - 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, - 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x22, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x15, 0x12, - 0x13, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, - 0x3d, 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x8b, 0x01, 0x0a, 0x0c, - 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x26, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, - 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, - 0x38, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x18, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, - 0x74, 0x12, 0x0c, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0xda, - 0x41, 0x17, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, - 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x12, 0x85, 0x01, 0x0a, 0x0b, 0x4c, 0x69, - 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x12, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, - 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x27, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x18, - 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0c, 0x7b, 0x70, 0x72, 0x6f, - 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, - 0x74, 0x12, 0x93, 0x01, 0x0a, 0x19, 0x4c, 0x6f, 0x63, 0x6b, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, - 0x52, 
0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, - 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x6f, 0x63, 0x6b, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, - 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, - 0x26, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, - 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0xda, 0x41, - 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0xab, 0x01, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x49, - 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, - 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x22, 0x60, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x4f, 0x12, 0x17, 0x0a, 0x08, 0x72, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, - 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x34, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x12, 0x28, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, - 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, - 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2a, 0xda, 0x41, 0x08, 0x72, 0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0xb2, 0x01, 0x0a, 0x0c, 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, - 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x22, 0x67, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x4f, 0x12, 0x17, 0x0a, 0x08, 0x72, 0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, - 0x2a, 0x7d, 0x12, 0x34, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x28, - 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, - 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x6f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2a, 0xda, 0x41, 0x0f, 0x72, 0x65, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x2c, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0xd7, 0x01, 0x0a, 0x12, 0x54, - 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, - 0x73, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, - 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, - 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, - 0x49, 0x61, 0x6d, 0x50, 0x65, 
0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x6c, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x4f, 0x12, 0x17, - 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, - 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x34, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x12, 0x28, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, - 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, - 0x2a, 0x7d, 0x2f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2a, 0xda, 0x41, 0x14, - 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2c, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, - 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x8a, 0x01, 0x0a, 0x0c, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x42, - 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x07, + 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x12, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xfa, + 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, + 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x06, 0x6b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x12, 0x5a, 0x0a, + 0x19, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, + 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, + 0x03, 0x52, 0x16, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x43, 0x6c, 0x61, 0x73, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x65, 0x6d, + 0x70, 0x6f, 0x72, 0x61, 0x72, 0x79, 0x5f, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x14, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x0d, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x72, 0x79, 0x48, 0x6f, 0x6c, 0x64, + 0x12, 0x4e, 0x0a, 0x15, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, + 0x70, 0x69, 0x72, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x13, 0x72, 0x65, 0x74, + 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x54, 0x69, 0x6d, 0x65, + 0x12, 0x43, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x16, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x4d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x6d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x2d, 0x0a, 0x10, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x62, + 0x61, 0x73, 0x65, 0x64, 0x5f, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x17, 0x20, 0x01, 0x28, 0x08, 0x48, + 0x00, 0x52, 0x0e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x73, 0x65, 0x64, 0x48, 0x6f, 0x6c, + 0x64, 0x88, 0x01, 0x01, 0x12, 0x33, 0x0a, 0x05, 0x6f, 
0x77, 0x6e, 0x65, 0x72, 0x18, 0x18, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x42, 0x03, 0xe0, + 0x41, 0x03, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x56, 0x0a, 0x13, 0x63, 0x75, 0x73, + 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x19, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, + 0x6d, 0x65, 0x72, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x12, 0x63, + 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x3b, 0x0a, 0x0b, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x69, 0x6d, 0x65, + 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x52, 0x0a, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x1a, 0x3b, + 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, + 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, + 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x13, 0x0a, 0x11, 0x5f, + 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x62, 0x61, 0x73, 0x65, 0x64, 0x5f, 0x68, 0x6f, 0x6c, 0x64, + 0x22, 0x97, 0x02, 0x0a, 0x13, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, + 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x6f, 0x6c, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x12, 0x0e, 0x0a, 0x02, + 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, + 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x6e, + 0x74, 0x69, 0x74, 0x79, 0x12, 0x22, 0x0a, 0x0a, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x61, + 0x6c, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x65, + 0x6e, 0x74, 0x69, 0x74, 0x79, 0x41, 0x6c, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x6e, 0x74, 0x69, + 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e, 0x74, + 0x69, 0x74, 0x79, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6d, 0x61, + 0x69, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x12, + 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x41, 0x0a, 0x0c, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x5f, 0x74, 0x65, 0x61, 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, + 0x32, 0x2e, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x52, 0x0b, 0x70, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x22, 0x8e, 0x01, 0x0a, 0x13, 0x4c, + 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x73, 
0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x33, 0x0a, 0x07, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, + 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x65, 0x66, 0x69, + 0x78, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, 0x65, 0x66, 0x69, + 0x78, 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, + 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, + 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x48, 0x0a, 0x0b, 0x50, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x4e, 0x75, 0x6d, 0x62, 0x65, + 0x72, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x65, 0x61, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x74, 0x65, 0x61, 0x6d, 0x22, 0x35, 0x0a, 0x0e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x6d, 0x61, 0x69, 0x6c, + 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, + 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0x3c, 0x0a, 0x05, + 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x1b, 0x0a, + 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x08, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x64, 0x22, 0x5f, 0x0a, 0x0c, 0x43, 0x6f, + 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, + 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, + 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x65, + 0x6e, 0x64, 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x6c, + 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x63, 0x6f, 0x6d, + 0x70, 0x6c, 0x65, 0x74, 0x65, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x32, 0x98, 0x28, 0x0a, 0x07, + 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x12, 0x72, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, + 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, + 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x22, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x15, 0x12, 0x13, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x0b, + 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x6f, 0x0a, 0x09, 0x47, + 0x65, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x42, 
0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, - 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x37, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x1c, - 0x12, 0x1a, 0x0a, 0x0b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x12, - 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x12, 0x62, - 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, - 0x6b, 0x12, 0x93, 0x01, 0x0a, 0x12, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, - 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, - 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x37, - 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x2a, 0x12, 0x28, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, - 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, - 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, - 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x96, 0x01, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x4e, - 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x29, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, - 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x37, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x2a, 0x12, - 0x28, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, - 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, - 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x12, 0x98, 0x01, 0x0a, 0x12, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, - 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x33, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, - 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, - 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x13, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x6e, - 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x96, 0x01, 0x0a, 0x11, - 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x12, 0x2b, 0x2e, 0x67, 
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, - 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x26, 0x8a, 0xd3, - 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0b, - 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x06, 0x70, 0x61, - 0x72, 0x65, 0x6e, 0x74, 0x12, 0x7e, 0x0a, 0x0d, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, 0x4f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, - 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, - 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x29, 0x8a, 0xd3, 0xe4, 0x93, 0x02, - 0x23, 0x12, 0x21, 0x0a, 0x12, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x2e, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, - 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x98, 0x01, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, - 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x48, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, - 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, - 0x3d, 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x0d, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0xda, 0x41, 0x18, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, - 0xba, 0x01, 0x0a, 0x14, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, - 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x61, 0x6e, - 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, - 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x61, 0x6e, - 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, - 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x41, 0x8a, 0xd3, 0xe4, 0x93, 0x02, - 0x2f, 0x12, 0x2d, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x12, 0x20, - 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, - 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 
0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, - 0xda, 0x41, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x12, 0x95, 0x01, 0x0a, - 0x09, 0x47, 0x65, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x23, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, - 0x65, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x48, 0x8a, 0xd3, 0xe4, 0x93, - 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, - 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x0d, 0x62, 0x75, 0x63, 0x6b, - 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0xda, 0x41, 0x18, 0x62, 0x75, 0x63, 0x6b, - 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0xa5, 0x01, 0x0a, 0x0a, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x12, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, - 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x48, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, - 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0xda, - 0x41, 0x0d, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0xda, - 0x41, 0x18, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, - 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x30, 0x01, 0x12, 0x8c, 0x01, 0x0a, - 0x0c, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x26, 0x2e, + 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x22, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x15, 0x12, 0x13, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xab, 0x01, 0x0a, + 0x0c, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, - 0x32, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, + 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x22, 0x39, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x1e, 0x12, 0x1c, 0x0a, 0x0d, 0x6f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x2e, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, - 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x12, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, - 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x12, 0x60, 0x0a, 0x0b, 0x57, - 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x25, 0x2e, 0x67, 
0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, - 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x22, 0x58, 0xda, 0x41, 0x17, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x62, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x2c, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x8a, 0xd3, 0xe4, 0x93, + 0x02, 0x38, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0c, 0x7b, 0x70, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x1e, 0x0a, 0x0e, 0x62, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x0c, 0x7b, 0x70, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x85, 0x01, 0x0a, 0x0b, 0x4c, + 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x12, 0x25, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, + 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x12, 0x84, 0x01, - 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x25, 0x2e, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x27, 0xda, 0x41, 0x06, 0x70, 0x61, + 0x72, 0x65, 0x6e, 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x18, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, + 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0c, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x2a, + 0x2a, 0x7d, 0x12, 0x93, 0x01, 0x0a, 0x19, 0x4c, 0x6f, 0x63, 0x6b, 0x42, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x12, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x6f, 0x63, 0x6b, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, + 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x22, 0x26, 0xda, 0x41, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, + 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xab, 0x01, 0x0a, 0x0c, 0x47, 0x65, 0x74, + 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x22, 0x60, 0xda, 0x41, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x4f, 0x12, 0x17, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 
0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, + 0x12, 0x34, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x28, 0x7b, 0x62, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, + 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x6f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2a, 0x12, 0xb2, 0x01, 0x0a, 0x0c, 0x53, 0x65, 0x74, 0x49, 0x61, + 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x22, 0x67, 0xda, 0x41, 0x0f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2c, + 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x4f, 0x12, 0x17, 0x0a, 0x08, + 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x34, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x12, 0x28, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, + 0x2f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2a, 0x12, 0xd7, 0x01, 0x0a, 0x12, + 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, + 0x76, 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, + 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x65, 0x73, + 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x6c, 0xda, 0x41, 0x14, 0x72, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x2c, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, + 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x4f, 0x12, 0x17, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, + 0x34, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x28, 0x7b, 0x62, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, + 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x73, 0x2f, 0x2a, 0x2a, 0x12, 0x8a, 0x01, 0x0a, 0x0c, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, + 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x37, 0xda, 0x41, 0x12, 0x62, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, + 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x1c, 
0x12, 0x1a, 0x0a, 0x0b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, + 0x2a, 0x7d, 0x12, 0x9f, 0x01, 0x0a, 0x18, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x74, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, + 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x37, 0xda, 0x41, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x2a, 0x12, 0x28, 0x0a, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x20, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, + 0x7d, 0x2f, 0x2a, 0x2a, 0x12, 0xa8, 0x01, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x2f, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, + 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x37, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x8a, + 0xd3, 0xe4, 0x93, 0x02, 0x2a, 0x12, 0x28, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x7b, + 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, + 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, 0x12, + 0xb1, 0x01, 0x0a, 0x18, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x32, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, + 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x3a, 0xda, 0x41, 0x1a, 0x70, 0x61, 0x72, 0x65, + 0x6e, 0x74, 0x2c, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, + 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, + 0x2a, 0x2a, 0x7d, 0x12, 0xa8, 0x01, 0x0a, 0x17, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, + 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 
0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x26, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, + 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, + 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x7e, + 0x0a, 0x0d, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, + 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x22, 0x29, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x23, 0x12, 0x21, 0x0a, 0x12, 0x64, + 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x62, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x98, + 0x01, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, + 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, + 0x48, 0xda, 0x41, 0x0d, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0xda, 0x41, 0x18, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x2c, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x8a, 0xd3, 0xe4, 0x93, + 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x8d, 0x01, 0x0a, 0x0d, 0x52, 0x65, + 0x73, 0x74, 0x6f, 0x72, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x27, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, + 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22, + 0x38, 0xda, 0x41, 0x18, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x2c, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x8a, 0xd3, 0xe4, 0x93, + 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xba, 0x01, 0x0a, 0x14, 0x43, 0x61, + 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 
0x72, 0x69, + 0x74, 0x65, 0x12, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, + 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, + 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x41, 0xda, 0x41, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, + 0x64, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x2f, 0x12, 0x2d, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, + 0x64, 0x5f, 0x69, 0x64, 0x12, 0x20, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, + 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, 0x12, 0x95, 0x01, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x12, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x22, 0x48, 0xda, 0x41, 0x0d, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, + 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0xda, 0x41, 0x18, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, + 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xa5, + 0x01, 0x0a, 0x0a, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, - 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x26, 0x8a, 0xd3, - 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0b, - 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x06, 0x70, 0x61, - 0x72, 0x65, 0x6e, 0x74, 0x12, 0x98, 0x01, 0x0a, 0x0d, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, - 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x77, 0x72, 0x69, + 0x32, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x48, 0xda, 0x41, 0x0d, 0x62, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0xda, 0x41, 0x18, 0x62, + 0x75, 0x63, 
0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, + 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x3d, 0x2a, 0x2a, 0x7d, 0x30, 0x01, 0x12, 0x8c, 0x01, 0x0a, 0x0c, 0x55, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x3a, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x34, 0x12, 0x0f, 0x0a, 0x0d, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x21, 0x0a, 0x12, - 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x62, 0x75, 0x63, 0x6b, - 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, - 0xae, 0x01, 0x0a, 0x13, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, - 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x72, - 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, - 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x38, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x32, 0x12, 0x30, - 0x0a, 0x21, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, - 0x70, 0x65, 0x63, 0x2e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x62, 0x75, 0x63, - 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, - 0x12, 0xae, 0x01, 0x0a, 0x10, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, - 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, - 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x41, - 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x2f, 0x12, 0x2d, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, - 0x5f, 0x69, 0x64, 0x12, 0x20, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, - 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, - 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, 0xda, 0x41, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, - 0x64, 0x12, 0x80, 0x01, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 
0x74, 0x12, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x53, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x1b, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x0b, 0x12, - 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0xda, 0x41, 0x07, 0x70, 0x72, 0x6f, - 0x6a, 0x65, 0x63, 0x74, 0x12, 0x95, 0x01, 0x0a, 0x0d, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, - 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, - 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, - 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x31, 0x8a, 0xd3, 0xe4, 0x93, 0x02, - 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0xda, 0x41, 0x1d, 0x70, - 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, - 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x77, 0x0a, 0x0d, - 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x27, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, - 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x25, - 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, - 0x74, 0xda, 0x41, 0x11, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x69, 0x64, 0x2c, 0x70, 0x72, - 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x7d, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x48, 0x6d, 0x61, 0x63, - 0x4b, 0x65, 0x79, 0x12, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, - 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, - 0x61, 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x25, 0x8a, - 0xd3, 0xe4, 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, - 0xda, 0x41, 0x11, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x69, 0x64, 0x2c, 0x70, 0x72, 0x6f, - 0x6a, 0x65, 0x63, 0x74, 0x12, 0x7c, 0x0a, 0x0c, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, - 0x4b, 0x65, 0x79, 0x73, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, - 0x63, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 
0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, - 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1b, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, - 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0xda, 0x41, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, - 0x63, 0x74, 0x12, 0x9d, 0x01, 0x0a, 0x0d, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, - 0x63, 0x4b, 0x65, 0x79, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x48, - 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, + 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x39, 0xda, 0x41, 0x12, 0x6f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, + 0x6b, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x1e, 0x12, 0x1c, 0x0a, 0x0d, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x2e, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x60, 0x0a, 0x0b, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x12, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, + 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x12, 0x6e, 0x0a, 0x0f, 0x42, 0x69, 0x64, 0x69, 0x57, + 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x29, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, + 0x69, 0x64, 0x69, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x69, 0x64, 0x69, 0x57, 0x72, + 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x12, 0x84, 0x01, 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, + 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, + 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, + 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x26, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, + 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, + 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x98, + 0x01, 0x0a, 0x0d, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 
0x61, 0x67, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, + 0x77, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3a, 0x8a, + 0xd3, 0xe4, 0x93, 0x02, 0x34, 0x12, 0x0f, 0x0a, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, + 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x21, 0x0a, 0x12, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xae, 0x01, 0x0a, 0x13, 0x53, 0x74, + 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, + 0x65, 0x12, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, + 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, + 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x38, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x32, 0x12, 0x30, 0x0a, 0x21, 0x77, 0x72, 0x69, 0x74, + 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x2e, 0x72, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, + 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xae, 0x01, 0x0a, 0x10, 0x51, + 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, + 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, + 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x41, 0xda, 0x41, 0x09, 0x75, 0x70, 0x6c, + 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x2f, 0x12, 0x2d, 0x0a, 0x09, + 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x12, 0x20, 0x7b, 0x62, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, 0x12, 0x80, 0x01, 0x0a, 0x11, + 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, + 0x74, 0x12, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, + 0x76, 0x32, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, + 0x74, 0x22, 
0x1b, 0xda, 0x41, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x8a, 0xd3, 0xe4, + 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x95, + 0x01, 0x0a, 0x0d, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, + 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, + 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, + 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x31, 0xda, 0x41, 0x1d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x2c, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, + 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x77, 0x0a, 0x0d, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, + 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x25, 0xda, 0x41, 0x11, 0x61, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x5f, 0x69, 0x64, 0x2c, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x8a, 0xd3, + 0xe4, 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, + 0x7d, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, - 0x32, 0x2e, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, - 0x61, 0x22, 0x3f, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x22, 0x12, 0x20, 0x0a, 0x10, 0x68, 0x6d, 0x61, - 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x0c, 0x7b, - 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0xda, 0x41, 0x14, 0x68, 0x6d, - 0x61, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, - 0x73, 0x6b, 0x1a, 0xa7, 0x02, 0xca, 0x41, 0x16, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, - 0x8a, 0x02, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, - 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, + 0x32, 0x2e, 0x47, 0x65, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x4d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x25, 0xda, 0x41, 0x11, 0x61, 0x63, 0x63, 0x65, + 0x73, 0x73, 0x5f, 0x69, 0x64, 0x2c, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x8a, 0xd3, 0xe4, + 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, 
0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x7c, + 0x0a, 0x0c, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x26, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, + 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x48, + 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x1b, 0xda, 0x41, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, + 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x9d, 0x01, 0x0a, + 0x0d, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x27, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, + 0x76, 0x32, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, 0x61, 0x63, + 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x3f, 0xda, 0x41, 0x14, + 0x68, 0x6d, 0x61, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, + 0x6d, 0x61, 0x73, 0x6b, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x22, 0x12, 0x20, 0x0a, 0x10, 0x68, 0x6d, + 0x61, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x0c, + 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x1a, 0xa7, 0x02, 0xca, + 0x41, 0x16, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x8a, 0x02, 0x68, 0x74, 0x74, 0x70, + 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, + 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, + 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, + 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, + 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x2d, 0x6f, + 0x6e, 0x6c, 0x79, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, + 0x75, 0x74, 0x68, 0x2f, 0x64, 0x65, 0x76, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x66, + 0x75, 0x6c, 0x6c, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2c, 0x68, 0x74, 0x74, 0x70, + 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, + 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x64, 0x65, 0x76, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, - 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 
0x66, 0x6f, 0x72, 0x6d, 0x2e, - 0x72, 0x65, 0x61, 0x64, 0x2d, 0x6f, 0x6e, 0x6c, 0x79, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, - 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, - 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x64, 0x65, 0x76, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x66, 0x75, 0x6c, 0x6c, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, - 0x6c, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, - 0x68, 0x2f, 0x64, 0x65, 0x76, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x72, 0x65, 0x61, - 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, - 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, - 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x64, 0x65, 0x76, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x42, 0xdc, 0x01, 0x0a, - 0x15, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x42, 0x0c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x38, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, - 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2f, 0x76, 0x32, 0x3b, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0xea, 0x41, 0x78, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, - 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x12, 0x53, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, - 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, - 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x7b, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x69, - 0x6e, 0x67, 0x7d, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x7b, - 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x7d, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x2f, 0x64, 0x65, 0x76, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x72, 0x65, 0x61, 0x64, + 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x42, 0xe2, 0x01, 0xea, 0x41, 0x78, 0x0a, 0x21, 0x63, 0x6c, + 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x12, + 0x53, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6c, + 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, + 0x73, 0x2f, 0x7b, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x7d, 0x2f, 0x63, 0x72, 0x79, + 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x7b, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, + 0x6b, 0x65, 0x79, 0x7d, 0x0a, 0x15, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x42, 0x0c, 0x53, 0x74, 
0x6f, + 0x72, 0x61, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3e, 0x63, 0x6c, 0x6f, + 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, + 0x2f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, + 0x6c, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x70, + 0x62, 0x3b, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } @@ -8178,7 +8843,7 @@ func file_google_storage_v2_storage_proto_rawDescGZIP() []byte { } var file_google_storage_v2_storage_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_google_storage_v2_storage_proto_msgTypes = make([]protoimpl.MessageInfo, 74) +var file_google_storage_v2_storage_proto_msgTypes = make([]protoimpl.MessageInfo, 78) var file_google_storage_v2_storage_proto_goTypes = []interface{}{ (ServiceConstants_Values)(0), // 0: google.storage.v2.ServiceConstants.Values (*DeleteBucketRequest)(nil), // 1: google.storage.v2.DeleteBucketRequest @@ -8188,240 +8853,258 @@ var file_google_storage_v2_storage_proto_goTypes = []interface{}{ (*ListBucketsResponse)(nil), // 5: google.storage.v2.ListBucketsResponse (*LockBucketRetentionPolicyRequest)(nil), // 6: google.storage.v2.LockBucketRetentionPolicyRequest (*UpdateBucketRequest)(nil), // 7: google.storage.v2.UpdateBucketRequest - (*DeleteNotificationRequest)(nil), // 8: google.storage.v2.DeleteNotificationRequest - (*GetNotificationRequest)(nil), // 9: google.storage.v2.GetNotificationRequest - (*CreateNotificationRequest)(nil), // 10: google.storage.v2.CreateNotificationRequest - (*ListNotificationsRequest)(nil), // 11: google.storage.v2.ListNotificationsRequest - (*ListNotificationsResponse)(nil), // 12: google.storage.v2.ListNotificationsResponse + (*DeleteNotificationConfigRequest)(nil), // 8: google.storage.v2.DeleteNotificationConfigRequest + (*GetNotificationConfigRequest)(nil), // 9: google.storage.v2.GetNotificationConfigRequest + (*CreateNotificationConfigRequest)(nil), // 10: google.storage.v2.CreateNotificationConfigRequest + (*ListNotificationConfigsRequest)(nil), // 11: google.storage.v2.ListNotificationConfigsRequest + (*ListNotificationConfigsResponse)(nil), // 12: google.storage.v2.ListNotificationConfigsResponse (*ComposeObjectRequest)(nil), // 13: google.storage.v2.ComposeObjectRequest (*DeleteObjectRequest)(nil), // 14: google.storage.v2.DeleteObjectRequest - (*CancelResumableWriteRequest)(nil), // 15: google.storage.v2.CancelResumableWriteRequest - (*CancelResumableWriteResponse)(nil), // 16: google.storage.v2.CancelResumableWriteResponse - (*ReadObjectRequest)(nil), // 17: google.storage.v2.ReadObjectRequest - (*GetObjectRequest)(nil), // 18: google.storage.v2.GetObjectRequest - (*ReadObjectResponse)(nil), // 19: google.storage.v2.ReadObjectResponse - (*WriteObjectSpec)(nil), // 20: google.storage.v2.WriteObjectSpec - (*WriteObjectRequest)(nil), // 21: google.storage.v2.WriteObjectRequest - (*WriteObjectResponse)(nil), // 22: google.storage.v2.WriteObjectResponse - (*ListObjectsRequest)(nil), // 23: google.storage.v2.ListObjectsRequest - (*QueryWriteStatusRequest)(nil), // 24: google.storage.v2.QueryWriteStatusRequest - (*QueryWriteStatusResponse)(nil), // 25: google.storage.v2.QueryWriteStatusResponse - (*RewriteObjectRequest)(nil), // 26: google.storage.v2.RewriteObjectRequest - (*RewriteResponse)(nil), // 27: google.storage.v2.RewriteResponse - (*StartResumableWriteRequest)(nil), // 28: 
google.storage.v2.StartResumableWriteRequest - (*StartResumableWriteResponse)(nil), // 29: google.storage.v2.StartResumableWriteResponse - (*UpdateObjectRequest)(nil), // 30: google.storage.v2.UpdateObjectRequest - (*GetServiceAccountRequest)(nil), // 31: google.storage.v2.GetServiceAccountRequest - (*CreateHmacKeyRequest)(nil), // 32: google.storage.v2.CreateHmacKeyRequest - (*CreateHmacKeyResponse)(nil), // 33: google.storage.v2.CreateHmacKeyResponse - (*DeleteHmacKeyRequest)(nil), // 34: google.storage.v2.DeleteHmacKeyRequest - (*GetHmacKeyRequest)(nil), // 35: google.storage.v2.GetHmacKeyRequest - (*ListHmacKeysRequest)(nil), // 36: google.storage.v2.ListHmacKeysRequest - (*ListHmacKeysResponse)(nil), // 37: google.storage.v2.ListHmacKeysResponse - (*UpdateHmacKeyRequest)(nil), // 38: google.storage.v2.UpdateHmacKeyRequest - (*CommonObjectRequestParams)(nil), // 39: google.storage.v2.CommonObjectRequestParams - (*ServiceConstants)(nil), // 40: google.storage.v2.ServiceConstants - (*Bucket)(nil), // 41: google.storage.v2.Bucket - (*BucketAccessControl)(nil), // 42: google.storage.v2.BucketAccessControl - (*ChecksummedData)(nil), // 43: google.storage.v2.ChecksummedData - (*ObjectChecksums)(nil), // 44: google.storage.v2.ObjectChecksums - (*HmacKeyMetadata)(nil), // 45: google.storage.v2.HmacKeyMetadata - (*Notification)(nil), // 46: google.storage.v2.Notification - (*CustomerEncryption)(nil), // 47: google.storage.v2.CustomerEncryption - (*Object)(nil), // 48: google.storage.v2.Object - (*ObjectAccessControl)(nil), // 49: google.storage.v2.ObjectAccessControl - (*ListObjectsResponse)(nil), // 50: google.storage.v2.ListObjectsResponse - (*ProjectTeam)(nil), // 51: google.storage.v2.ProjectTeam - (*ServiceAccount)(nil), // 52: google.storage.v2.ServiceAccount - (*Owner)(nil), // 53: google.storage.v2.Owner - (*ContentRange)(nil), // 54: google.storage.v2.ContentRange - (*ComposeObjectRequest_SourceObject)(nil), // 55: google.storage.v2.ComposeObjectRequest.SourceObject - (*ComposeObjectRequest_SourceObject_ObjectPreconditions)(nil), // 56: google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions - (*Bucket_Billing)(nil), // 57: google.storage.v2.Bucket.Billing - (*Bucket_Cors)(nil), // 58: google.storage.v2.Bucket.Cors - (*Bucket_Encryption)(nil), // 59: google.storage.v2.Bucket.Encryption - (*Bucket_IamConfig)(nil), // 60: google.storage.v2.Bucket.IamConfig - (*Bucket_Lifecycle)(nil), // 61: google.storage.v2.Bucket.Lifecycle - (*Bucket_Logging)(nil), // 62: google.storage.v2.Bucket.Logging - (*Bucket_RetentionPolicy)(nil), // 63: google.storage.v2.Bucket.RetentionPolicy - (*Bucket_Versioning)(nil), // 64: google.storage.v2.Bucket.Versioning - (*Bucket_Website)(nil), // 65: google.storage.v2.Bucket.Website - (*Bucket_CustomPlacementConfig)(nil), // 66: google.storage.v2.Bucket.CustomPlacementConfig - (*Bucket_Autoclass)(nil), // 67: google.storage.v2.Bucket.Autoclass - nil, // 68: google.storage.v2.Bucket.LabelsEntry - (*Bucket_IamConfig_UniformBucketLevelAccess)(nil), // 69: google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess - (*Bucket_Lifecycle_Rule)(nil), // 70: google.storage.v2.Bucket.Lifecycle.Rule - (*Bucket_Lifecycle_Rule_Action)(nil), // 71: google.storage.v2.Bucket.Lifecycle.Rule.Action - (*Bucket_Lifecycle_Rule_Condition)(nil), // 72: google.storage.v2.Bucket.Lifecycle.Rule.Condition - nil, // 73: google.storage.v2.Notification.CustomAttributesEntry - nil, // 74: google.storage.v2.Object.MetadataEntry - (*fieldmaskpb.FieldMask)(nil), // 75: 
google.protobuf.FieldMask - (*timestamppb.Timestamp)(nil), // 76: google.protobuf.Timestamp - (*durationpb.Duration)(nil), // 77: google.protobuf.Duration - (*date.Date)(nil), // 78: google.type.Date - (*v1.GetIamPolicyRequest)(nil), // 79: google.iam.v1.GetIamPolicyRequest - (*v1.SetIamPolicyRequest)(nil), // 80: google.iam.v1.SetIamPolicyRequest - (*v1.TestIamPermissionsRequest)(nil), // 81: google.iam.v1.TestIamPermissionsRequest - (*emptypb.Empty)(nil), // 82: google.protobuf.Empty - (*v1.Policy)(nil), // 83: google.iam.v1.Policy - (*v1.TestIamPermissionsResponse)(nil), // 84: google.iam.v1.TestIamPermissionsResponse + (*RestoreObjectRequest)(nil), // 15: google.storage.v2.RestoreObjectRequest + (*CancelResumableWriteRequest)(nil), // 16: google.storage.v2.CancelResumableWriteRequest + (*CancelResumableWriteResponse)(nil), // 17: google.storage.v2.CancelResumableWriteResponse + (*ReadObjectRequest)(nil), // 18: google.storage.v2.ReadObjectRequest + (*GetObjectRequest)(nil), // 19: google.storage.v2.GetObjectRequest + (*ReadObjectResponse)(nil), // 20: google.storage.v2.ReadObjectResponse + (*WriteObjectSpec)(nil), // 21: google.storage.v2.WriteObjectSpec + (*WriteObjectRequest)(nil), // 22: google.storage.v2.WriteObjectRequest + (*WriteObjectResponse)(nil), // 23: google.storage.v2.WriteObjectResponse + (*BidiWriteObjectRequest)(nil), // 24: google.storage.v2.BidiWriteObjectRequest + (*BidiWriteObjectResponse)(nil), // 25: google.storage.v2.BidiWriteObjectResponse + (*ListObjectsRequest)(nil), // 26: google.storage.v2.ListObjectsRequest + (*QueryWriteStatusRequest)(nil), // 27: google.storage.v2.QueryWriteStatusRequest + (*QueryWriteStatusResponse)(nil), // 28: google.storage.v2.QueryWriteStatusResponse + (*RewriteObjectRequest)(nil), // 29: google.storage.v2.RewriteObjectRequest + (*RewriteResponse)(nil), // 30: google.storage.v2.RewriteResponse + (*StartResumableWriteRequest)(nil), // 31: google.storage.v2.StartResumableWriteRequest + (*StartResumableWriteResponse)(nil), // 32: google.storage.v2.StartResumableWriteResponse + (*UpdateObjectRequest)(nil), // 33: google.storage.v2.UpdateObjectRequest + (*GetServiceAccountRequest)(nil), // 34: google.storage.v2.GetServiceAccountRequest + (*CreateHmacKeyRequest)(nil), // 35: google.storage.v2.CreateHmacKeyRequest + (*CreateHmacKeyResponse)(nil), // 36: google.storage.v2.CreateHmacKeyResponse + (*DeleteHmacKeyRequest)(nil), // 37: google.storage.v2.DeleteHmacKeyRequest + (*GetHmacKeyRequest)(nil), // 38: google.storage.v2.GetHmacKeyRequest + (*ListHmacKeysRequest)(nil), // 39: google.storage.v2.ListHmacKeysRequest + (*ListHmacKeysResponse)(nil), // 40: google.storage.v2.ListHmacKeysResponse + (*UpdateHmacKeyRequest)(nil), // 41: google.storage.v2.UpdateHmacKeyRequest + (*CommonObjectRequestParams)(nil), // 42: google.storage.v2.CommonObjectRequestParams + (*ServiceConstants)(nil), // 43: google.storage.v2.ServiceConstants + (*Bucket)(nil), // 44: google.storage.v2.Bucket + (*BucketAccessControl)(nil), // 45: google.storage.v2.BucketAccessControl + (*ChecksummedData)(nil), // 46: google.storage.v2.ChecksummedData + (*ObjectChecksums)(nil), // 47: google.storage.v2.ObjectChecksums + (*HmacKeyMetadata)(nil), // 48: google.storage.v2.HmacKeyMetadata + (*NotificationConfig)(nil), // 49: google.storage.v2.NotificationConfig + (*CustomerEncryption)(nil), // 50: google.storage.v2.CustomerEncryption + (*Object)(nil), // 51: google.storage.v2.Object + (*ObjectAccessControl)(nil), // 52: google.storage.v2.ObjectAccessControl + 
(*ListObjectsResponse)(nil), // 53: google.storage.v2.ListObjectsResponse + (*ProjectTeam)(nil), // 54: google.storage.v2.ProjectTeam + (*ServiceAccount)(nil), // 55: google.storage.v2.ServiceAccount + (*Owner)(nil), // 56: google.storage.v2.Owner + (*ContentRange)(nil), // 57: google.storage.v2.ContentRange + (*ComposeObjectRequest_SourceObject)(nil), // 58: google.storage.v2.ComposeObjectRequest.SourceObject + (*ComposeObjectRequest_SourceObject_ObjectPreconditions)(nil), // 59: google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions + (*Bucket_Billing)(nil), // 60: google.storage.v2.Bucket.Billing + (*Bucket_Cors)(nil), // 61: google.storage.v2.Bucket.Cors + (*Bucket_Encryption)(nil), // 62: google.storage.v2.Bucket.Encryption + (*Bucket_IamConfig)(nil), // 63: google.storage.v2.Bucket.IamConfig + (*Bucket_Lifecycle)(nil), // 64: google.storage.v2.Bucket.Lifecycle + (*Bucket_Logging)(nil), // 65: google.storage.v2.Bucket.Logging + (*Bucket_RetentionPolicy)(nil), // 66: google.storage.v2.Bucket.RetentionPolicy + (*Bucket_SoftDeletePolicy)(nil), // 67: google.storage.v2.Bucket.SoftDeletePolicy + (*Bucket_Versioning)(nil), // 68: google.storage.v2.Bucket.Versioning + (*Bucket_Website)(nil), // 69: google.storage.v2.Bucket.Website + (*Bucket_CustomPlacementConfig)(nil), // 70: google.storage.v2.Bucket.CustomPlacementConfig + (*Bucket_Autoclass)(nil), // 71: google.storage.v2.Bucket.Autoclass + nil, // 72: google.storage.v2.Bucket.LabelsEntry + (*Bucket_IamConfig_UniformBucketLevelAccess)(nil), // 73: google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess + (*Bucket_Lifecycle_Rule)(nil), // 74: google.storage.v2.Bucket.Lifecycle.Rule + (*Bucket_Lifecycle_Rule_Action)(nil), // 75: google.storage.v2.Bucket.Lifecycle.Rule.Action + (*Bucket_Lifecycle_Rule_Condition)(nil), // 76: google.storage.v2.Bucket.Lifecycle.Rule.Condition + nil, // 77: google.storage.v2.NotificationConfig.CustomAttributesEntry + nil, // 78: google.storage.v2.Object.MetadataEntry + (*fieldmaskpb.FieldMask)(nil), // 79: google.protobuf.FieldMask + (*timestamppb.Timestamp)(nil), // 80: google.protobuf.Timestamp + (*durationpb.Duration)(nil), // 81: google.protobuf.Duration + (*date.Date)(nil), // 82: google.type.Date + (*iampb.GetIamPolicyRequest)(nil), // 83: google.iam.v1.GetIamPolicyRequest + (*iampb.SetIamPolicyRequest)(nil), // 84: google.iam.v1.SetIamPolicyRequest + (*iampb.TestIamPermissionsRequest)(nil), // 85: google.iam.v1.TestIamPermissionsRequest + (*emptypb.Empty)(nil), // 86: google.protobuf.Empty + (*iampb.Policy)(nil), // 87: google.iam.v1.Policy + (*iampb.TestIamPermissionsResponse)(nil), // 88: google.iam.v1.TestIamPermissionsResponse } var file_google_storage_v2_storage_proto_depIdxs = []int32{ - 75, // 0: google.storage.v2.GetBucketRequest.read_mask:type_name -> google.protobuf.FieldMask - 41, // 1: google.storage.v2.CreateBucketRequest.bucket:type_name -> google.storage.v2.Bucket - 75, // 2: google.storage.v2.ListBucketsRequest.read_mask:type_name -> google.protobuf.FieldMask - 41, // 3: google.storage.v2.ListBucketsResponse.buckets:type_name -> google.storage.v2.Bucket - 41, // 4: google.storage.v2.UpdateBucketRequest.bucket:type_name -> google.storage.v2.Bucket - 75, // 5: google.storage.v2.UpdateBucketRequest.update_mask:type_name -> google.protobuf.FieldMask - 46, // 6: google.storage.v2.CreateNotificationRequest.notification:type_name -> google.storage.v2.Notification - 46, // 7: google.storage.v2.ListNotificationsResponse.notifications:type_name -> google.storage.v2.Notification 
- 48, // 8: google.storage.v2.ComposeObjectRequest.destination:type_name -> google.storage.v2.Object - 55, // 9: google.storage.v2.ComposeObjectRequest.source_objects:type_name -> google.storage.v2.ComposeObjectRequest.SourceObject - 39, // 10: google.storage.v2.ComposeObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams - 44, // 11: google.storage.v2.ComposeObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums - 39, // 12: google.storage.v2.DeleteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams - 39, // 13: google.storage.v2.ReadObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams - 75, // 14: google.storage.v2.ReadObjectRequest.read_mask:type_name -> google.protobuf.FieldMask - 39, // 15: google.storage.v2.GetObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams - 75, // 16: google.storage.v2.GetObjectRequest.read_mask:type_name -> google.protobuf.FieldMask - 43, // 17: google.storage.v2.ReadObjectResponse.checksummed_data:type_name -> google.storage.v2.ChecksummedData - 44, // 18: google.storage.v2.ReadObjectResponse.object_checksums:type_name -> google.storage.v2.ObjectChecksums - 54, // 19: google.storage.v2.ReadObjectResponse.content_range:type_name -> google.storage.v2.ContentRange - 48, // 20: google.storage.v2.ReadObjectResponse.metadata:type_name -> google.storage.v2.Object - 48, // 21: google.storage.v2.WriteObjectSpec.resource:type_name -> google.storage.v2.Object - 20, // 22: google.storage.v2.WriteObjectRequest.write_object_spec:type_name -> google.storage.v2.WriteObjectSpec - 43, // 23: google.storage.v2.WriteObjectRequest.checksummed_data:type_name -> google.storage.v2.ChecksummedData - 44, // 24: google.storage.v2.WriteObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums - 39, // 25: google.storage.v2.WriteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams - 48, // 26: google.storage.v2.WriteObjectResponse.resource:type_name -> google.storage.v2.Object - 75, // 27: google.storage.v2.ListObjectsRequest.read_mask:type_name -> google.protobuf.FieldMask - 39, // 28: google.storage.v2.QueryWriteStatusRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams - 48, // 29: google.storage.v2.QueryWriteStatusResponse.resource:type_name -> google.storage.v2.Object - 48, // 30: google.storage.v2.RewriteObjectRequest.destination:type_name -> google.storage.v2.Object - 39, // 31: google.storage.v2.RewriteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams - 44, // 32: google.storage.v2.RewriteObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums - 48, // 33: google.storage.v2.RewriteResponse.resource:type_name -> google.storage.v2.Object - 20, // 34: google.storage.v2.StartResumableWriteRequest.write_object_spec:type_name -> google.storage.v2.WriteObjectSpec - 39, // 35: google.storage.v2.StartResumableWriteRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams - 44, // 36: google.storage.v2.StartResumableWriteRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums - 48, // 37: google.storage.v2.UpdateObjectRequest.object:type_name -> google.storage.v2.Object - 75, // 38: google.storage.v2.UpdateObjectRequest.update_mask:type_name -> 
google.protobuf.FieldMask - 39, // 39: google.storage.v2.UpdateObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams - 45, // 40: google.storage.v2.CreateHmacKeyResponse.metadata:type_name -> google.storage.v2.HmacKeyMetadata - 45, // 41: google.storage.v2.ListHmacKeysResponse.hmac_keys:type_name -> google.storage.v2.HmacKeyMetadata - 45, // 42: google.storage.v2.UpdateHmacKeyRequest.hmac_key:type_name -> google.storage.v2.HmacKeyMetadata - 75, // 43: google.storage.v2.UpdateHmacKeyRequest.update_mask:type_name -> google.protobuf.FieldMask - 42, // 44: google.storage.v2.Bucket.acl:type_name -> google.storage.v2.BucketAccessControl - 49, // 45: google.storage.v2.Bucket.default_object_acl:type_name -> google.storage.v2.ObjectAccessControl - 61, // 46: google.storage.v2.Bucket.lifecycle:type_name -> google.storage.v2.Bucket.Lifecycle - 76, // 47: google.storage.v2.Bucket.create_time:type_name -> google.protobuf.Timestamp - 58, // 48: google.storage.v2.Bucket.cors:type_name -> google.storage.v2.Bucket.Cors - 76, // 49: google.storage.v2.Bucket.update_time:type_name -> google.protobuf.Timestamp - 68, // 50: google.storage.v2.Bucket.labels:type_name -> google.storage.v2.Bucket.LabelsEntry - 65, // 51: google.storage.v2.Bucket.website:type_name -> google.storage.v2.Bucket.Website - 64, // 52: google.storage.v2.Bucket.versioning:type_name -> google.storage.v2.Bucket.Versioning - 62, // 53: google.storage.v2.Bucket.logging:type_name -> google.storage.v2.Bucket.Logging - 53, // 54: google.storage.v2.Bucket.owner:type_name -> google.storage.v2.Owner - 59, // 55: google.storage.v2.Bucket.encryption:type_name -> google.storage.v2.Bucket.Encryption - 57, // 56: google.storage.v2.Bucket.billing:type_name -> google.storage.v2.Bucket.Billing - 63, // 57: google.storage.v2.Bucket.retention_policy:type_name -> google.storage.v2.Bucket.RetentionPolicy - 60, // 58: google.storage.v2.Bucket.iam_config:type_name -> google.storage.v2.Bucket.IamConfig - 66, // 59: google.storage.v2.Bucket.custom_placement_config:type_name -> google.storage.v2.Bucket.CustomPlacementConfig - 67, // 60: google.storage.v2.Bucket.autoclass:type_name -> google.storage.v2.Bucket.Autoclass - 51, // 61: google.storage.v2.BucketAccessControl.project_team:type_name -> google.storage.v2.ProjectTeam - 76, // 62: google.storage.v2.HmacKeyMetadata.create_time:type_name -> google.protobuf.Timestamp - 76, // 63: google.storage.v2.HmacKeyMetadata.update_time:type_name -> google.protobuf.Timestamp - 73, // 64: google.storage.v2.Notification.custom_attributes:type_name -> google.storage.v2.Notification.CustomAttributesEntry - 49, // 65: google.storage.v2.Object.acl:type_name -> google.storage.v2.ObjectAccessControl - 76, // 66: google.storage.v2.Object.delete_time:type_name -> google.protobuf.Timestamp - 76, // 67: google.storage.v2.Object.create_time:type_name -> google.protobuf.Timestamp - 44, // 68: google.storage.v2.Object.checksums:type_name -> google.storage.v2.ObjectChecksums - 76, // 69: google.storage.v2.Object.update_time:type_name -> google.protobuf.Timestamp - 76, // 70: google.storage.v2.Object.update_storage_class_time:type_name -> google.protobuf.Timestamp - 76, // 71: google.storage.v2.Object.retention_expire_time:type_name -> google.protobuf.Timestamp - 74, // 72: google.storage.v2.Object.metadata:type_name -> google.storage.v2.Object.MetadataEntry - 53, // 73: google.storage.v2.Object.owner:type_name -> google.storage.v2.Owner - 47, // 74: 
google.storage.v2.Object.customer_encryption:type_name -> google.storage.v2.CustomerEncryption - 76, // 75: google.storage.v2.Object.custom_time:type_name -> google.protobuf.Timestamp - 51, // 76: google.storage.v2.ObjectAccessControl.project_team:type_name -> google.storage.v2.ProjectTeam - 48, // 77: google.storage.v2.ListObjectsResponse.objects:type_name -> google.storage.v2.Object - 56, // 78: google.storage.v2.ComposeObjectRequest.SourceObject.object_preconditions:type_name -> google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions - 69, // 79: google.storage.v2.Bucket.IamConfig.uniform_bucket_level_access:type_name -> google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess - 70, // 80: google.storage.v2.Bucket.Lifecycle.rule:type_name -> google.storage.v2.Bucket.Lifecycle.Rule - 76, // 81: google.storage.v2.Bucket.RetentionPolicy.effective_time:type_name -> google.protobuf.Timestamp - 77, // 82: google.storage.v2.Bucket.RetentionPolicy.retention_duration:type_name -> google.protobuf.Duration - 76, // 83: google.storage.v2.Bucket.Autoclass.toggle_time:type_name -> google.protobuf.Timestamp - 76, // 84: google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess.lock_time:type_name -> google.protobuf.Timestamp - 71, // 85: google.storage.v2.Bucket.Lifecycle.Rule.action:type_name -> google.storage.v2.Bucket.Lifecycle.Rule.Action - 72, // 86: google.storage.v2.Bucket.Lifecycle.Rule.condition:type_name -> google.storage.v2.Bucket.Lifecycle.Rule.Condition - 78, // 87: google.storage.v2.Bucket.Lifecycle.Rule.Condition.created_before:type_name -> google.type.Date - 78, // 88: google.storage.v2.Bucket.Lifecycle.Rule.Condition.custom_time_before:type_name -> google.type.Date - 78, // 89: google.storage.v2.Bucket.Lifecycle.Rule.Condition.noncurrent_time_before:type_name -> google.type.Date - 1, // 90: google.storage.v2.Storage.DeleteBucket:input_type -> google.storage.v2.DeleteBucketRequest - 2, // 91: google.storage.v2.Storage.GetBucket:input_type -> google.storage.v2.GetBucketRequest - 3, // 92: google.storage.v2.Storage.CreateBucket:input_type -> google.storage.v2.CreateBucketRequest - 4, // 93: google.storage.v2.Storage.ListBuckets:input_type -> google.storage.v2.ListBucketsRequest - 6, // 94: google.storage.v2.Storage.LockBucketRetentionPolicy:input_type -> google.storage.v2.LockBucketRetentionPolicyRequest - 79, // 95: google.storage.v2.Storage.GetIamPolicy:input_type -> google.iam.v1.GetIamPolicyRequest - 80, // 96: google.storage.v2.Storage.SetIamPolicy:input_type -> google.iam.v1.SetIamPolicyRequest - 81, // 97: google.storage.v2.Storage.TestIamPermissions:input_type -> google.iam.v1.TestIamPermissionsRequest - 7, // 98: google.storage.v2.Storage.UpdateBucket:input_type -> google.storage.v2.UpdateBucketRequest - 8, // 99: google.storage.v2.Storage.DeleteNotification:input_type -> google.storage.v2.DeleteNotificationRequest - 9, // 100: google.storage.v2.Storage.GetNotification:input_type -> google.storage.v2.GetNotificationRequest - 10, // 101: google.storage.v2.Storage.CreateNotification:input_type -> google.storage.v2.CreateNotificationRequest - 11, // 102: google.storage.v2.Storage.ListNotifications:input_type -> google.storage.v2.ListNotificationsRequest - 13, // 103: google.storage.v2.Storage.ComposeObject:input_type -> google.storage.v2.ComposeObjectRequest - 14, // 104: google.storage.v2.Storage.DeleteObject:input_type -> google.storage.v2.DeleteObjectRequest - 15, // 105: google.storage.v2.Storage.CancelResumableWrite:input_type -> 
google.storage.v2.CancelResumableWriteRequest - 18, // 106: google.storage.v2.Storage.GetObject:input_type -> google.storage.v2.GetObjectRequest - 17, // 107: google.storage.v2.Storage.ReadObject:input_type -> google.storage.v2.ReadObjectRequest - 30, // 108: google.storage.v2.Storage.UpdateObject:input_type -> google.storage.v2.UpdateObjectRequest - 21, // 109: google.storage.v2.Storage.WriteObject:input_type -> google.storage.v2.WriteObjectRequest - 23, // 110: google.storage.v2.Storage.ListObjects:input_type -> google.storage.v2.ListObjectsRequest - 26, // 111: google.storage.v2.Storage.RewriteObject:input_type -> google.storage.v2.RewriteObjectRequest - 28, // 112: google.storage.v2.Storage.StartResumableWrite:input_type -> google.storage.v2.StartResumableWriteRequest - 24, // 113: google.storage.v2.Storage.QueryWriteStatus:input_type -> google.storage.v2.QueryWriteStatusRequest - 31, // 114: google.storage.v2.Storage.GetServiceAccount:input_type -> google.storage.v2.GetServiceAccountRequest - 32, // 115: google.storage.v2.Storage.CreateHmacKey:input_type -> google.storage.v2.CreateHmacKeyRequest - 34, // 116: google.storage.v2.Storage.DeleteHmacKey:input_type -> google.storage.v2.DeleteHmacKeyRequest - 35, // 117: google.storage.v2.Storage.GetHmacKey:input_type -> google.storage.v2.GetHmacKeyRequest - 36, // 118: google.storage.v2.Storage.ListHmacKeys:input_type -> google.storage.v2.ListHmacKeysRequest - 38, // 119: google.storage.v2.Storage.UpdateHmacKey:input_type -> google.storage.v2.UpdateHmacKeyRequest - 82, // 120: google.storage.v2.Storage.DeleteBucket:output_type -> google.protobuf.Empty - 41, // 121: google.storage.v2.Storage.GetBucket:output_type -> google.storage.v2.Bucket - 41, // 122: google.storage.v2.Storage.CreateBucket:output_type -> google.storage.v2.Bucket - 5, // 123: google.storage.v2.Storage.ListBuckets:output_type -> google.storage.v2.ListBucketsResponse - 41, // 124: google.storage.v2.Storage.LockBucketRetentionPolicy:output_type -> google.storage.v2.Bucket - 83, // 125: google.storage.v2.Storage.GetIamPolicy:output_type -> google.iam.v1.Policy - 83, // 126: google.storage.v2.Storage.SetIamPolicy:output_type -> google.iam.v1.Policy - 84, // 127: google.storage.v2.Storage.TestIamPermissions:output_type -> google.iam.v1.TestIamPermissionsResponse - 41, // 128: google.storage.v2.Storage.UpdateBucket:output_type -> google.storage.v2.Bucket - 82, // 129: google.storage.v2.Storage.DeleteNotification:output_type -> google.protobuf.Empty - 46, // 130: google.storage.v2.Storage.GetNotification:output_type -> google.storage.v2.Notification - 46, // 131: google.storage.v2.Storage.CreateNotification:output_type -> google.storage.v2.Notification - 12, // 132: google.storage.v2.Storage.ListNotifications:output_type -> google.storage.v2.ListNotificationsResponse - 48, // 133: google.storage.v2.Storage.ComposeObject:output_type -> google.storage.v2.Object - 82, // 134: google.storage.v2.Storage.DeleteObject:output_type -> google.protobuf.Empty - 16, // 135: google.storage.v2.Storage.CancelResumableWrite:output_type -> google.storage.v2.CancelResumableWriteResponse - 48, // 136: google.storage.v2.Storage.GetObject:output_type -> google.storage.v2.Object - 19, // 137: google.storage.v2.Storage.ReadObject:output_type -> google.storage.v2.ReadObjectResponse - 48, // 138: google.storage.v2.Storage.UpdateObject:output_type -> google.storage.v2.Object - 22, // 139: google.storage.v2.Storage.WriteObject:output_type -> google.storage.v2.WriteObjectResponse - 50, // 140: 
google.storage.v2.Storage.ListObjects:output_type -> google.storage.v2.ListObjectsResponse - 27, // 141: google.storage.v2.Storage.RewriteObject:output_type -> google.storage.v2.RewriteResponse - 29, // 142: google.storage.v2.Storage.StartResumableWrite:output_type -> google.storage.v2.StartResumableWriteResponse - 25, // 143: google.storage.v2.Storage.QueryWriteStatus:output_type -> google.storage.v2.QueryWriteStatusResponse - 52, // 144: google.storage.v2.Storage.GetServiceAccount:output_type -> google.storage.v2.ServiceAccount - 33, // 145: google.storage.v2.Storage.CreateHmacKey:output_type -> google.storage.v2.CreateHmacKeyResponse - 82, // 146: google.storage.v2.Storage.DeleteHmacKey:output_type -> google.protobuf.Empty - 45, // 147: google.storage.v2.Storage.GetHmacKey:output_type -> google.storage.v2.HmacKeyMetadata - 37, // 148: google.storage.v2.Storage.ListHmacKeys:output_type -> google.storage.v2.ListHmacKeysResponse - 45, // 149: google.storage.v2.Storage.UpdateHmacKey:output_type -> google.storage.v2.HmacKeyMetadata - 120, // [120:150] is the sub-list for method output_type - 90, // [90:120] is the sub-list for method input_type - 90, // [90:90] is the sub-list for extension type_name - 90, // [90:90] is the sub-list for extension extendee - 0, // [0:90] is the sub-list for field type_name + 79, // 0: google.storage.v2.GetBucketRequest.read_mask:type_name -> google.protobuf.FieldMask + 44, // 1: google.storage.v2.CreateBucketRequest.bucket:type_name -> google.storage.v2.Bucket + 79, // 2: google.storage.v2.ListBucketsRequest.read_mask:type_name -> google.protobuf.FieldMask + 44, // 3: google.storage.v2.ListBucketsResponse.buckets:type_name -> google.storage.v2.Bucket + 44, // 4: google.storage.v2.UpdateBucketRequest.bucket:type_name -> google.storage.v2.Bucket + 79, // 5: google.storage.v2.UpdateBucketRequest.update_mask:type_name -> google.protobuf.FieldMask + 49, // 6: google.storage.v2.CreateNotificationConfigRequest.notification_config:type_name -> google.storage.v2.NotificationConfig + 49, // 7: google.storage.v2.ListNotificationConfigsResponse.notification_configs:type_name -> google.storage.v2.NotificationConfig + 51, // 8: google.storage.v2.ComposeObjectRequest.destination:type_name -> google.storage.v2.Object + 58, // 9: google.storage.v2.ComposeObjectRequest.source_objects:type_name -> google.storage.v2.ComposeObjectRequest.SourceObject + 42, // 10: google.storage.v2.ComposeObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 47, // 11: google.storage.v2.ComposeObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums + 42, // 12: google.storage.v2.DeleteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 42, // 13: google.storage.v2.RestoreObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 42, // 14: google.storage.v2.ReadObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 79, // 15: google.storage.v2.ReadObjectRequest.read_mask:type_name -> google.protobuf.FieldMask + 42, // 16: google.storage.v2.GetObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 79, // 17: google.storage.v2.GetObjectRequest.read_mask:type_name -> google.protobuf.FieldMask + 46, // 18: google.storage.v2.ReadObjectResponse.checksummed_data:type_name -> google.storage.v2.ChecksummedData + 47, // 19: 
google.storage.v2.ReadObjectResponse.object_checksums:type_name -> google.storage.v2.ObjectChecksums + 57, // 20: google.storage.v2.ReadObjectResponse.content_range:type_name -> google.storage.v2.ContentRange + 51, // 21: google.storage.v2.ReadObjectResponse.metadata:type_name -> google.storage.v2.Object + 51, // 22: google.storage.v2.WriteObjectSpec.resource:type_name -> google.storage.v2.Object + 21, // 23: google.storage.v2.WriteObjectRequest.write_object_spec:type_name -> google.storage.v2.WriteObjectSpec + 46, // 24: google.storage.v2.WriteObjectRequest.checksummed_data:type_name -> google.storage.v2.ChecksummedData + 47, // 25: google.storage.v2.WriteObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums + 42, // 26: google.storage.v2.WriteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 51, // 27: google.storage.v2.WriteObjectResponse.resource:type_name -> google.storage.v2.Object + 21, // 28: google.storage.v2.BidiWriteObjectRequest.write_object_spec:type_name -> google.storage.v2.WriteObjectSpec + 46, // 29: google.storage.v2.BidiWriteObjectRequest.checksummed_data:type_name -> google.storage.v2.ChecksummedData + 47, // 30: google.storage.v2.BidiWriteObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums + 42, // 31: google.storage.v2.BidiWriteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 51, // 32: google.storage.v2.BidiWriteObjectResponse.resource:type_name -> google.storage.v2.Object + 79, // 33: google.storage.v2.ListObjectsRequest.read_mask:type_name -> google.protobuf.FieldMask + 42, // 34: google.storage.v2.QueryWriteStatusRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 51, // 35: google.storage.v2.QueryWriteStatusResponse.resource:type_name -> google.storage.v2.Object + 51, // 36: google.storage.v2.RewriteObjectRequest.destination:type_name -> google.storage.v2.Object + 42, // 37: google.storage.v2.RewriteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 47, // 38: google.storage.v2.RewriteObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums + 51, // 39: google.storage.v2.RewriteResponse.resource:type_name -> google.storage.v2.Object + 21, // 40: google.storage.v2.StartResumableWriteRequest.write_object_spec:type_name -> google.storage.v2.WriteObjectSpec + 42, // 41: google.storage.v2.StartResumableWriteRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 47, // 42: google.storage.v2.StartResumableWriteRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums + 51, // 43: google.storage.v2.UpdateObjectRequest.object:type_name -> google.storage.v2.Object + 79, // 44: google.storage.v2.UpdateObjectRequest.update_mask:type_name -> google.protobuf.FieldMask + 42, // 45: google.storage.v2.UpdateObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 48, // 46: google.storage.v2.CreateHmacKeyResponse.metadata:type_name -> google.storage.v2.HmacKeyMetadata + 48, // 47: google.storage.v2.ListHmacKeysResponse.hmac_keys:type_name -> google.storage.v2.HmacKeyMetadata + 48, // 48: google.storage.v2.UpdateHmacKeyRequest.hmac_key:type_name -> google.storage.v2.HmacKeyMetadata + 79, // 49: google.storage.v2.UpdateHmacKeyRequest.update_mask:type_name -> google.protobuf.FieldMask + 45, // 50: 
google.storage.v2.Bucket.acl:type_name -> google.storage.v2.BucketAccessControl + 52, // 51: google.storage.v2.Bucket.default_object_acl:type_name -> google.storage.v2.ObjectAccessControl + 64, // 52: google.storage.v2.Bucket.lifecycle:type_name -> google.storage.v2.Bucket.Lifecycle + 80, // 53: google.storage.v2.Bucket.create_time:type_name -> google.protobuf.Timestamp + 61, // 54: google.storage.v2.Bucket.cors:type_name -> google.storage.v2.Bucket.Cors + 80, // 55: google.storage.v2.Bucket.update_time:type_name -> google.protobuf.Timestamp + 72, // 56: google.storage.v2.Bucket.labels:type_name -> google.storage.v2.Bucket.LabelsEntry + 69, // 57: google.storage.v2.Bucket.website:type_name -> google.storage.v2.Bucket.Website + 68, // 58: google.storage.v2.Bucket.versioning:type_name -> google.storage.v2.Bucket.Versioning + 65, // 59: google.storage.v2.Bucket.logging:type_name -> google.storage.v2.Bucket.Logging + 56, // 60: google.storage.v2.Bucket.owner:type_name -> google.storage.v2.Owner + 62, // 61: google.storage.v2.Bucket.encryption:type_name -> google.storage.v2.Bucket.Encryption + 60, // 62: google.storage.v2.Bucket.billing:type_name -> google.storage.v2.Bucket.Billing + 66, // 63: google.storage.v2.Bucket.retention_policy:type_name -> google.storage.v2.Bucket.RetentionPolicy + 63, // 64: google.storage.v2.Bucket.iam_config:type_name -> google.storage.v2.Bucket.IamConfig + 70, // 65: google.storage.v2.Bucket.custom_placement_config:type_name -> google.storage.v2.Bucket.CustomPlacementConfig + 71, // 66: google.storage.v2.Bucket.autoclass:type_name -> google.storage.v2.Bucket.Autoclass + 67, // 67: google.storage.v2.Bucket.soft_delete_policy:type_name -> google.storage.v2.Bucket.SoftDeletePolicy + 54, // 68: google.storage.v2.BucketAccessControl.project_team:type_name -> google.storage.v2.ProjectTeam + 80, // 69: google.storage.v2.HmacKeyMetadata.create_time:type_name -> google.protobuf.Timestamp + 80, // 70: google.storage.v2.HmacKeyMetadata.update_time:type_name -> google.protobuf.Timestamp + 77, // 71: google.storage.v2.NotificationConfig.custom_attributes:type_name -> google.storage.v2.NotificationConfig.CustomAttributesEntry + 52, // 72: google.storage.v2.Object.acl:type_name -> google.storage.v2.ObjectAccessControl + 80, // 73: google.storage.v2.Object.delete_time:type_name -> google.protobuf.Timestamp + 80, // 74: google.storage.v2.Object.create_time:type_name -> google.protobuf.Timestamp + 47, // 75: google.storage.v2.Object.checksums:type_name -> google.storage.v2.ObjectChecksums + 80, // 76: google.storage.v2.Object.update_time:type_name -> google.protobuf.Timestamp + 80, // 77: google.storage.v2.Object.update_storage_class_time:type_name -> google.protobuf.Timestamp + 80, // 78: google.storage.v2.Object.retention_expire_time:type_name -> google.protobuf.Timestamp + 78, // 79: google.storage.v2.Object.metadata:type_name -> google.storage.v2.Object.MetadataEntry + 56, // 80: google.storage.v2.Object.owner:type_name -> google.storage.v2.Owner + 50, // 81: google.storage.v2.Object.customer_encryption:type_name -> google.storage.v2.CustomerEncryption + 80, // 82: google.storage.v2.Object.custom_time:type_name -> google.protobuf.Timestamp + 54, // 83: google.storage.v2.ObjectAccessControl.project_team:type_name -> google.storage.v2.ProjectTeam + 51, // 84: google.storage.v2.ListObjectsResponse.objects:type_name -> google.storage.v2.Object + 59, // 85: google.storage.v2.ComposeObjectRequest.SourceObject.object_preconditions:type_name -> 
google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions + 73, // 86: google.storage.v2.Bucket.IamConfig.uniform_bucket_level_access:type_name -> google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess + 74, // 87: google.storage.v2.Bucket.Lifecycle.rule:type_name -> google.storage.v2.Bucket.Lifecycle.Rule + 80, // 88: google.storage.v2.Bucket.RetentionPolicy.effective_time:type_name -> google.protobuf.Timestamp + 81, // 89: google.storage.v2.Bucket.RetentionPolicy.retention_duration:type_name -> google.protobuf.Duration + 81, // 90: google.storage.v2.Bucket.SoftDeletePolicy.retention_duration:type_name -> google.protobuf.Duration + 80, // 91: google.storage.v2.Bucket.SoftDeletePolicy.effective_time:type_name -> google.protobuf.Timestamp + 80, // 92: google.storage.v2.Bucket.Autoclass.toggle_time:type_name -> google.protobuf.Timestamp + 80, // 93: google.storage.v2.Bucket.Autoclass.terminal_storage_class_update_time:type_name -> google.protobuf.Timestamp + 80, // 94: google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess.lock_time:type_name -> google.protobuf.Timestamp + 75, // 95: google.storage.v2.Bucket.Lifecycle.Rule.action:type_name -> google.storage.v2.Bucket.Lifecycle.Rule.Action + 76, // 96: google.storage.v2.Bucket.Lifecycle.Rule.condition:type_name -> google.storage.v2.Bucket.Lifecycle.Rule.Condition + 82, // 97: google.storage.v2.Bucket.Lifecycle.Rule.Condition.created_before:type_name -> google.type.Date + 82, // 98: google.storage.v2.Bucket.Lifecycle.Rule.Condition.custom_time_before:type_name -> google.type.Date + 82, // 99: google.storage.v2.Bucket.Lifecycle.Rule.Condition.noncurrent_time_before:type_name -> google.type.Date + 1, // 100: google.storage.v2.Storage.DeleteBucket:input_type -> google.storage.v2.DeleteBucketRequest + 2, // 101: google.storage.v2.Storage.GetBucket:input_type -> google.storage.v2.GetBucketRequest + 3, // 102: google.storage.v2.Storage.CreateBucket:input_type -> google.storage.v2.CreateBucketRequest + 4, // 103: google.storage.v2.Storage.ListBuckets:input_type -> google.storage.v2.ListBucketsRequest + 6, // 104: google.storage.v2.Storage.LockBucketRetentionPolicy:input_type -> google.storage.v2.LockBucketRetentionPolicyRequest + 83, // 105: google.storage.v2.Storage.GetIamPolicy:input_type -> google.iam.v1.GetIamPolicyRequest + 84, // 106: google.storage.v2.Storage.SetIamPolicy:input_type -> google.iam.v1.SetIamPolicyRequest + 85, // 107: google.storage.v2.Storage.TestIamPermissions:input_type -> google.iam.v1.TestIamPermissionsRequest + 7, // 108: google.storage.v2.Storage.UpdateBucket:input_type -> google.storage.v2.UpdateBucketRequest + 8, // 109: google.storage.v2.Storage.DeleteNotificationConfig:input_type -> google.storage.v2.DeleteNotificationConfigRequest + 9, // 110: google.storage.v2.Storage.GetNotificationConfig:input_type -> google.storage.v2.GetNotificationConfigRequest + 10, // 111: google.storage.v2.Storage.CreateNotificationConfig:input_type -> google.storage.v2.CreateNotificationConfigRequest + 11, // 112: google.storage.v2.Storage.ListNotificationConfigs:input_type -> google.storage.v2.ListNotificationConfigsRequest + 13, // 113: google.storage.v2.Storage.ComposeObject:input_type -> google.storage.v2.ComposeObjectRequest + 14, // 114: google.storage.v2.Storage.DeleteObject:input_type -> google.storage.v2.DeleteObjectRequest + 15, // 115: google.storage.v2.Storage.RestoreObject:input_type -> google.storage.v2.RestoreObjectRequest + 16, // 116: google.storage.v2.Storage.CancelResumableWrite:input_type -> 
google.storage.v2.CancelResumableWriteRequest + 19, // 117: google.storage.v2.Storage.GetObject:input_type -> google.storage.v2.GetObjectRequest + 18, // 118: google.storage.v2.Storage.ReadObject:input_type -> google.storage.v2.ReadObjectRequest + 33, // 119: google.storage.v2.Storage.UpdateObject:input_type -> google.storage.v2.UpdateObjectRequest + 22, // 120: google.storage.v2.Storage.WriteObject:input_type -> google.storage.v2.WriteObjectRequest + 24, // 121: google.storage.v2.Storage.BidiWriteObject:input_type -> google.storage.v2.BidiWriteObjectRequest + 26, // 122: google.storage.v2.Storage.ListObjects:input_type -> google.storage.v2.ListObjectsRequest + 29, // 123: google.storage.v2.Storage.RewriteObject:input_type -> google.storage.v2.RewriteObjectRequest + 31, // 124: google.storage.v2.Storage.StartResumableWrite:input_type -> google.storage.v2.StartResumableWriteRequest + 27, // 125: google.storage.v2.Storage.QueryWriteStatus:input_type -> google.storage.v2.QueryWriteStatusRequest + 34, // 126: google.storage.v2.Storage.GetServiceAccount:input_type -> google.storage.v2.GetServiceAccountRequest + 35, // 127: google.storage.v2.Storage.CreateHmacKey:input_type -> google.storage.v2.CreateHmacKeyRequest + 37, // 128: google.storage.v2.Storage.DeleteHmacKey:input_type -> google.storage.v2.DeleteHmacKeyRequest + 38, // 129: google.storage.v2.Storage.GetHmacKey:input_type -> google.storage.v2.GetHmacKeyRequest + 39, // 130: google.storage.v2.Storage.ListHmacKeys:input_type -> google.storage.v2.ListHmacKeysRequest + 41, // 131: google.storage.v2.Storage.UpdateHmacKey:input_type -> google.storage.v2.UpdateHmacKeyRequest + 86, // 132: google.storage.v2.Storage.DeleteBucket:output_type -> google.protobuf.Empty + 44, // 133: google.storage.v2.Storage.GetBucket:output_type -> google.storage.v2.Bucket + 44, // 134: google.storage.v2.Storage.CreateBucket:output_type -> google.storage.v2.Bucket + 5, // 135: google.storage.v2.Storage.ListBuckets:output_type -> google.storage.v2.ListBucketsResponse + 44, // 136: google.storage.v2.Storage.LockBucketRetentionPolicy:output_type -> google.storage.v2.Bucket + 87, // 137: google.storage.v2.Storage.GetIamPolicy:output_type -> google.iam.v1.Policy + 87, // 138: google.storage.v2.Storage.SetIamPolicy:output_type -> google.iam.v1.Policy + 88, // 139: google.storage.v2.Storage.TestIamPermissions:output_type -> google.iam.v1.TestIamPermissionsResponse + 44, // 140: google.storage.v2.Storage.UpdateBucket:output_type -> google.storage.v2.Bucket + 86, // 141: google.storage.v2.Storage.DeleteNotificationConfig:output_type -> google.protobuf.Empty + 49, // 142: google.storage.v2.Storage.GetNotificationConfig:output_type -> google.storage.v2.NotificationConfig + 49, // 143: google.storage.v2.Storage.CreateNotificationConfig:output_type -> google.storage.v2.NotificationConfig + 12, // 144: google.storage.v2.Storage.ListNotificationConfigs:output_type -> google.storage.v2.ListNotificationConfigsResponse + 51, // 145: google.storage.v2.Storage.ComposeObject:output_type -> google.storage.v2.Object + 86, // 146: google.storage.v2.Storage.DeleteObject:output_type -> google.protobuf.Empty + 51, // 147: google.storage.v2.Storage.RestoreObject:output_type -> google.storage.v2.Object + 17, // 148: google.storage.v2.Storage.CancelResumableWrite:output_type -> google.storage.v2.CancelResumableWriteResponse + 51, // 149: google.storage.v2.Storage.GetObject:output_type -> google.storage.v2.Object + 20, // 150: google.storage.v2.Storage.ReadObject:output_type -> 
google.storage.v2.ReadObjectResponse + 51, // 151: google.storage.v2.Storage.UpdateObject:output_type -> google.storage.v2.Object + 23, // 152: google.storage.v2.Storage.WriteObject:output_type -> google.storage.v2.WriteObjectResponse + 25, // 153: google.storage.v2.Storage.BidiWriteObject:output_type -> google.storage.v2.BidiWriteObjectResponse + 53, // 154: google.storage.v2.Storage.ListObjects:output_type -> google.storage.v2.ListObjectsResponse + 30, // 155: google.storage.v2.Storage.RewriteObject:output_type -> google.storage.v2.RewriteResponse + 32, // 156: google.storage.v2.Storage.StartResumableWrite:output_type -> google.storage.v2.StartResumableWriteResponse + 28, // 157: google.storage.v2.Storage.QueryWriteStatus:output_type -> google.storage.v2.QueryWriteStatusResponse + 55, // 158: google.storage.v2.Storage.GetServiceAccount:output_type -> google.storage.v2.ServiceAccount + 36, // 159: google.storage.v2.Storage.CreateHmacKey:output_type -> google.storage.v2.CreateHmacKeyResponse + 86, // 160: google.storage.v2.Storage.DeleteHmacKey:output_type -> google.protobuf.Empty + 48, // 161: google.storage.v2.Storage.GetHmacKey:output_type -> google.storage.v2.HmacKeyMetadata + 40, // 162: google.storage.v2.Storage.ListHmacKeys:output_type -> google.storage.v2.ListHmacKeysResponse + 48, // 163: google.storage.v2.Storage.UpdateHmacKey:output_type -> google.storage.v2.HmacKeyMetadata + 132, // [132:164] is the sub-list for method output_type + 100, // [100:132] is the sub-list for method input_type + 100, // [100:100] is the sub-list for extension type_name + 100, // [100:100] is the sub-list for extension extendee + 0, // [0:100] is the sub-list for field type_name } func init() { file_google_storage_v2_storage_proto_init() } @@ -8515,7 +9198,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteNotificationRequest); i { + switch v := v.(*DeleteNotificationConfigRequest); i { case 0: return &v.state case 1: @@ -8527,7 +9210,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetNotificationRequest); i { + switch v := v.(*GetNotificationConfigRequest); i { case 0: return &v.state case 1: @@ -8539,7 +9222,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreateNotificationRequest); i { + switch v := v.(*CreateNotificationConfigRequest); i { case 0: return &v.state case 1: @@ -8551,7 +9234,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListNotificationsRequest); i { + switch v := v.(*ListNotificationConfigsRequest); i { case 0: return &v.state case 1: @@ -8563,7 +9246,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListNotificationsResponse); i { + switch v := v.(*ListNotificationConfigsResponse); i { case 0: return &v.state case 1: @@ -8599,7 +9282,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { - switch v := 
v.(*CancelResumableWriteRequest); i { + switch v := v.(*RestoreObjectRequest); i { case 0: return &v.state case 1: @@ -8611,7 +9294,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CancelResumableWriteResponse); i { + switch v := v.(*CancelResumableWriteRequest); i { case 0: return &v.state case 1: @@ -8623,7 +9306,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReadObjectRequest); i { + switch v := v.(*CancelResumableWriteResponse); i { case 0: return &v.state case 1: @@ -8635,7 +9318,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetObjectRequest); i { + switch v := v.(*ReadObjectRequest); i { case 0: return &v.state case 1: @@ -8647,7 +9330,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReadObjectResponse); i { + switch v := v.(*GetObjectRequest); i { case 0: return &v.state case 1: @@ -8659,7 +9342,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*WriteObjectSpec); i { + switch v := v.(*ReadObjectResponse); i { case 0: return &v.state case 1: @@ -8671,7 +9354,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*WriteObjectRequest); i { + switch v := v.(*WriteObjectSpec); i { case 0: return &v.state case 1: @@ -8683,7 +9366,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*WriteObjectResponse); i { + switch v := v.(*WriteObjectRequest); i { case 0: return &v.state case 1: @@ -8695,7 +9378,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListObjectsRequest); i { + switch v := v.(*WriteObjectResponse); i { case 0: return &v.state case 1: @@ -8707,7 +9390,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*QueryWriteStatusRequest); i { + switch v := v.(*BidiWriteObjectRequest); i { case 0: return &v.state case 1: @@ -8719,7 +9402,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*QueryWriteStatusResponse); i { + switch v := v.(*BidiWriteObjectResponse); i { case 0: return &v.state case 1: @@ -8731,7 +9414,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RewriteObjectRequest); i { + switch v := v.(*ListObjectsRequest); i { case 0: return &v.state case 1: @@ -8743,7 +9426,7 @@ func file_google_storage_v2_storage_proto_init() { } } 
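The wholesale renumbering above (the depIdxs table growing from the 90/120/150 boundaries to 100/132/164, and every exporter slot shifting) is the mechanical fallout of the messages added in this regeneration: RestoreObjectRequest, the BidiWriteObject pair, Bucket_SoftDeletePolicy, and the Notification to NotificationConfig renames. Nothing should consume these positional indices directly; a minimal smoke-test sketch that instead resolves the new types by fully-qualified name through the global registry (assuming the regenerated stubs are linked into the binary, e.g. via a blank import of the vendored package):

package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoreflect"
	"google.golang.org/protobuf/reflect/protoregistry"
	// _ "google.golang.org/genproto/googleapis/storage/v2" // assumed import path; use whatever path the vendored stubs live at
)

func main() {
	// Positional indices in depIdxs/msgTypes change between regenerations;
	// lookups by fully-qualified proto name do not.
	for _, name := range []string{
		"google.storage.v2.RestoreObjectRequest",
		"google.storage.v2.BidiWriteObjectRequest",
		"google.storage.v2.NotificationConfig",
	} {
		if _, err := protoregistry.GlobalTypes.FindMessageByName(protoreflect.FullName(name)); err != nil {
			fmt.Printf("%s not registered: %v\n", name, err)
		}
	}
}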
file_google_storage_v2_storage_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RewriteResponse); i { + switch v := v.(*QueryWriteStatusRequest); i { case 0: return &v.state case 1: @@ -8755,7 +9438,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StartResumableWriteRequest); i { + switch v := v.(*QueryWriteStatusResponse); i { case 0: return &v.state case 1: @@ -8767,7 +9450,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StartResumableWriteResponse); i { + switch v := v.(*RewriteObjectRequest); i { case 0: return &v.state case 1: @@ -8779,7 +9462,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UpdateObjectRequest); i { + switch v := v.(*RewriteResponse); i { case 0: return &v.state case 1: @@ -8791,7 +9474,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetServiceAccountRequest); i { + switch v := v.(*StartResumableWriteRequest); i { case 0: return &v.state case 1: @@ -8803,7 +9486,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreateHmacKeyRequest); i { + switch v := v.(*StartResumableWriteResponse); i { case 0: return &v.state case 1: @@ -8815,7 +9498,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreateHmacKeyResponse); i { + switch v := v.(*UpdateObjectRequest); i { case 0: return &v.state case 1: @@ -8827,7 +9510,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteHmacKeyRequest); i { + switch v := v.(*GetServiceAccountRequest); i { case 0: return &v.state case 1: @@ -8839,7 +9522,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetHmacKeyRequest); i { + switch v := v.(*CreateHmacKeyRequest); i { case 0: return &v.state case 1: @@ -8851,7 +9534,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListHmacKeysRequest); i { + switch v := v.(*CreateHmacKeyResponse); i { case 0: return &v.state case 1: @@ -8863,7 +9546,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListHmacKeysResponse); i { + switch v := v.(*DeleteHmacKeyRequest); i { case 0: return &v.state case 1: @@ -8875,7 +9558,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UpdateHmacKeyRequest); i { + switch v := v.(*GetHmacKeyRequest); i { case 0: return &v.state 
case 1: @@ -8887,7 +9570,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CommonObjectRequestParams); i { + switch v := v.(*ListHmacKeysRequest); i { case 0: return &v.state case 1: @@ -8899,7 +9582,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ServiceConstants); i { + switch v := v.(*ListHmacKeysResponse); i { case 0: return &v.state case 1: @@ -8911,7 +9594,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Bucket); i { + switch v := v.(*UpdateHmacKeyRequest); i { case 0: return &v.state case 1: @@ -8923,7 +9606,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*BucketAccessControl); i { + switch v := v.(*CommonObjectRequestParams); i { case 0: return &v.state case 1: @@ -8935,7 +9618,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ChecksummedData); i { + switch v := v.(*ServiceConstants); i { case 0: return &v.state case 1: @@ -8947,7 +9630,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ObjectChecksums); i { + switch v := v.(*Bucket); i { case 0: return &v.state case 1: @@ -8959,7 +9642,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HmacKeyMetadata); i { + switch v := v.(*BucketAccessControl); i { case 0: return &v.state case 1: @@ -8971,7 +9654,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Notification); i { + switch v := v.(*ChecksummedData); i { case 0: return &v.state case 1: @@ -8983,7 +9666,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CustomerEncryption); i { + switch v := v.(*ObjectChecksums); i { case 0: return &v.state case 1: @@ -8995,7 +9678,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Object); i { + switch v := v.(*HmacKeyMetadata); i { case 0: return &v.state case 1: @@ -9007,7 +9690,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ObjectAccessControl); i { + switch v := v.(*NotificationConfig); i { case 0: return &v.state case 1: @@ -9019,7 +9702,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListObjectsResponse); i { + switch v := v.(*CustomerEncryption); i { case 0: return &v.state case 1: @@ -9031,7 +9714,7 @@ 
func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ProjectTeam); i { + switch v := v.(*Object); i { case 0: return &v.state case 1: @@ -9043,7 +9726,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ServiceAccount); i { + switch v := v.(*ObjectAccessControl); i { case 0: return &v.state case 1: @@ -9055,7 +9738,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Owner); i { + switch v := v.(*ListObjectsResponse); i { case 0: return &v.state case 1: @@ -9067,7 +9750,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ContentRange); i { + switch v := v.(*ProjectTeam); i { case 0: return &v.state case 1: @@ -9079,7 +9762,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ComposeObjectRequest_SourceObject); i { + switch v := v.(*ServiceAccount); i { case 0: return &v.state case 1: @@ -9091,7 +9774,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[55].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ComposeObjectRequest_SourceObject_ObjectPreconditions); i { + switch v := v.(*Owner); i { case 0: return &v.state case 1: @@ -9103,7 +9786,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[56].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Bucket_Billing); i { + switch v := v.(*ContentRange); i { case 0: return &v.state case 1: @@ -9115,7 +9798,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[57].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Bucket_Cors); i { + switch v := v.(*ComposeObjectRequest_SourceObject); i { case 0: return &v.state case 1: @@ -9127,7 +9810,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[58].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Bucket_Encryption); i { + switch v := v.(*ComposeObjectRequest_SourceObject_ObjectPreconditions); i { case 0: return &v.state case 1: @@ -9139,7 +9822,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[59].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Bucket_IamConfig); i { + switch v := v.(*Bucket_Billing); i { case 0: return &v.state case 1: @@ -9151,7 +9834,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[60].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Bucket_Lifecycle); i { + switch v := v.(*Bucket_Cors); i { case 0: return &v.state case 1: @@ -9163,7 +9846,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[61].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Bucket_Logging); i { + switch v := v.(*Bucket_Encryption); i { case 0: return &v.state case 1: @@ -9175,7 
+9858,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[62].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Bucket_RetentionPolicy); i { + switch v := v.(*Bucket_IamConfig); i { case 0: return &v.state case 1: @@ -9187,7 +9870,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[63].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Bucket_Versioning); i { + switch v := v.(*Bucket_Lifecycle); i { case 0: return &v.state case 1: @@ -9199,7 +9882,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[64].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Bucket_Website); i { + switch v := v.(*Bucket_Logging); i { case 0: return &v.state case 1: @@ -9211,7 +9894,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[65].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Bucket_CustomPlacementConfig); i { + switch v := v.(*Bucket_RetentionPolicy); i { case 0: return &v.state case 1: @@ -9223,7 +9906,19 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[66].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Bucket_Autoclass); i { + switch v := v.(*Bucket_SoftDeletePolicy); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[67].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Bucket_Versioning); i { case 0: return &v.state case 1: @@ -9235,7 +9930,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[68].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Bucket_IamConfig_UniformBucketLevelAccess); i { + switch v := v.(*Bucket_Website); i { case 0: return &v.state case 1: @@ -9247,7 +9942,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[69].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Bucket_Lifecycle_Rule); i { + switch v := v.(*Bucket_CustomPlacementConfig); i { case 0: return &v.state case 1: @@ -9259,6 +9954,42 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[70].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Bucket_Autoclass); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[72].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Bucket_IamConfig_UniformBucketLevelAccess); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[73].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Bucket_Lifecycle_Rule); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[74].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Bucket_Lifecycle_Rule_Action); i { case 0: return &v.state @@ -9270,7 +10001,7 @@ func 
file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[71].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[75].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Bucket_Lifecycle_Rule_Condition); i { case 0: return &v.state @@ -9289,38 +10020,49 @@ func file_google_storage_v2_storage_proto_init() { file_google_storage_v2_storage_proto_msgTypes[6].OneofWrappers = []interface{}{} file_google_storage_v2_storage_proto_msgTypes[12].OneofWrappers = []interface{}{} file_google_storage_v2_storage_proto_msgTypes[13].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[16].OneofWrappers = []interface{}{} + file_google_storage_v2_storage_proto_msgTypes[14].OneofWrappers = []interface{}{} file_google_storage_v2_storage_proto_msgTypes[17].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[19].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[20].OneofWrappers = []interface{}{ + file_google_storage_v2_storage_proto_msgTypes[18].OneofWrappers = []interface{}{} + file_google_storage_v2_storage_proto_msgTypes[20].OneofWrappers = []interface{}{} + file_google_storage_v2_storage_proto_msgTypes[21].OneofWrappers = []interface{}{ (*WriteObjectRequest_UploadId)(nil), (*WriteObjectRequest_WriteObjectSpec)(nil), (*WriteObjectRequest_ChecksummedData)(nil), } - file_google_storage_v2_storage_proto_msgTypes[21].OneofWrappers = []interface{}{ + file_google_storage_v2_storage_proto_msgTypes[22].OneofWrappers = []interface{}{ (*WriteObjectResponse_PersistedSize)(nil), (*WriteObjectResponse_Resource)(nil), } - file_google_storage_v2_storage_proto_msgTypes[22].OneofWrappers = []interface{}{} + file_google_storage_v2_storage_proto_msgTypes[23].OneofWrappers = []interface{}{ + (*BidiWriteObjectRequest_UploadId)(nil), + (*BidiWriteObjectRequest_WriteObjectSpec)(nil), + (*BidiWriteObjectRequest_ChecksummedData)(nil), + } file_google_storage_v2_storage_proto_msgTypes[24].OneofWrappers = []interface{}{ + (*BidiWriteObjectResponse_PersistedSize)(nil), + (*BidiWriteObjectResponse_Resource)(nil), + } + file_google_storage_v2_storage_proto_msgTypes[25].OneofWrappers = []interface{}{} + file_google_storage_v2_storage_proto_msgTypes[27].OneofWrappers = []interface{}{ (*QueryWriteStatusResponse_PersistedSize)(nil), (*QueryWriteStatusResponse_Resource)(nil), } - file_google_storage_v2_storage_proto_msgTypes[25].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[29].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[42].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[43].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[47].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[55].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[62].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[71].OneofWrappers = []interface{}{} + file_google_storage_v2_storage_proto_msgTypes[28].OneofWrappers = []interface{}{} + file_google_storage_v2_storage_proto_msgTypes[32].OneofWrappers = []interface{}{} + file_google_storage_v2_storage_proto_msgTypes[45].OneofWrappers = []interface{}{} + file_google_storage_v2_storage_proto_msgTypes[46].OneofWrappers = []interface{}{} + file_google_storage_v2_storage_proto_msgTypes[50].OneofWrappers = 
[]interface{}{} + file_google_storage_v2_storage_proto_msgTypes[58].OneofWrappers = []interface{}{} + file_google_storage_v2_storage_proto_msgTypes[66].OneofWrappers = []interface{}{} + file_google_storage_v2_storage_proto_msgTypes[70].OneofWrappers = []interface{}{} + file_google_storage_v2_storage_proto_msgTypes[75].OneofWrappers = []interface{}{} type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_storage_v2_storage_proto_rawDesc, NumEnums: 1, - NumMessages: 74, + NumMessages: 78, NumExtensions: 0, NumServices: 1, }, @@ -9359,40 +10101,53 @@ type StorageClient interface { LockBucketRetentionPolicy(ctx context.Context, in *LockBucketRetentionPolicyRequest, opts ...grpc.CallOption) (*Bucket, error) // Gets the IAM policy for a specified bucket or object. // The `resource` field in the request should be - // projects/_/buckets/ for a bucket or - // projects/_/buckets//objects/ for an object. - GetIamPolicy(ctx context.Context, in *v1.GetIamPolicyRequest, opts ...grpc.CallOption) (*v1.Policy, error) + // `projects/_/buckets/{bucket}` for a bucket or + // `projects/_/buckets/{bucket}/objects/{object}` for an object. + GetIamPolicy(ctx context.Context, in *iampb.GetIamPolicyRequest, opts ...grpc.CallOption) (*iampb.Policy, error) // Updates an IAM policy for the specified bucket or object. // The `resource` field in the request should be - // projects/_/buckets/ for a bucket or - // projects/_/buckets//objects/ for an object. - SetIamPolicy(ctx context.Context, in *v1.SetIamPolicyRequest, opts ...grpc.CallOption) (*v1.Policy, error) + // `projects/_/buckets/{bucket}` for a bucket or + // `projects/_/buckets/{bucket}/objects/{object}` for an object. + SetIamPolicy(ctx context.Context, in *iampb.SetIamPolicyRequest, opts ...grpc.CallOption) (*iampb.Policy, error) // Tests a set of permissions on the given bucket or object to see which, if // any, are held by the caller. // The `resource` field in the request should be - // projects/_/buckets/ for a bucket or - // projects/_/buckets//objects/ for an object. - TestIamPermissions(ctx context.Context, in *v1.TestIamPermissionsRequest, opts ...grpc.CallOption) (*v1.TestIamPermissionsResponse, error) + // `projects/_/buckets/{bucket}` for a bucket or + // `projects/_/buckets/{bucket}/objects/{object}` for an object. + TestIamPermissions(ctx context.Context, in *iampb.TestIamPermissionsRequest, opts ...grpc.CallOption) (*iampb.TestIamPermissionsResponse, error) // Updates a bucket. Equivalent to JSON API's storage.buckets.patch method. UpdateBucket(ctx context.Context, in *UpdateBucketRequest, opts ...grpc.CallOption) (*Bucket, error) - // Permanently deletes a notification subscription. - DeleteNotification(ctx context.Context, in *DeleteNotificationRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) - // View a notification config. - GetNotification(ctx context.Context, in *GetNotificationRequest, opts ...grpc.CallOption) (*Notification, error) - // Creates a notification subscription for a given bucket. - // These notifications, when triggered, publish messages to the specified - // Pub/Sub topics. - // See https://cloud.google.com/storage/docs/pubsub-notifications. - CreateNotification(ctx context.Context, in *CreateNotificationRequest, opts ...grpc.CallOption) (*Notification, error) - // Retrieves a list of notification subscriptions for a given bucket. 
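With the IAM request and response types now referenced through the iampb alias instead of the old v1 alias, the three policy RPCs are called exactly as before; only the import changes. A hedged usage sketch follows: the storagepb/iampb import paths, the grpc connection, and the helper name are placeholders, and the Resource string follows the `projects/_/buckets/{bucket}` form from the updated comments.

import (
	"context"

	iampb "cloud.google.com/go/iam/apiv1/iampb"                   // assumed path; match the vendored stubs
	storagepb "google.golang.org/genproto/googleapis/storage/v2"  // assumed path; match the vendored stubs
	"google.golang.org/grpc"
)

func getBucketPolicy(ctx context.Context, conn *grpc.ClientConn, bucket string) (*iampb.Policy, error) {
	client := storagepb.NewStorageClient(conn)
	// Resource format per the updated method comments:
	// `projects/_/buckets/{bucket}` for a bucket, plus `/objects/{object}` for an object.
	return client.GetIamPolicy(ctx, &iampb.GetIamPolicyRequest{
		Resource: "projects/_/buckets/" + bucket,
	})
}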
- ListNotifications(ctx context.Context, in *ListNotificationsRequest, opts ...grpc.CallOption) (*ListNotificationsResponse, error) + // Permanently deletes a NotificationConfig. + DeleteNotificationConfig(ctx context.Context, in *DeleteNotificationConfigRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + // View a NotificationConfig. + GetNotificationConfig(ctx context.Context, in *GetNotificationConfigRequest, opts ...grpc.CallOption) (*NotificationConfig, error) + // Creates a NotificationConfig for a given bucket. + // These NotificationConfigs, when triggered, publish messages to the + // specified Pub/Sub topics. See + // https://cloud.google.com/storage/docs/pubsub-notifications. + CreateNotificationConfig(ctx context.Context, in *CreateNotificationConfigRequest, opts ...grpc.CallOption) (*NotificationConfig, error) + // Retrieves a list of NotificationConfigs for a given bucket. + ListNotificationConfigs(ctx context.Context, in *ListNotificationConfigsRequest, opts ...grpc.CallOption) (*ListNotificationConfigsResponse, error) // Concatenates a list of existing objects into a new object in the same // bucket. ComposeObject(ctx context.Context, in *ComposeObjectRequest, opts ...grpc.CallOption) (*Object, error) - // Deletes an object and its metadata. Deletions are permanent if versioning - // is not enabled for the bucket, or if the `generation` parameter is used. + // Deletes an object and its metadata. + // + // Deletions are normally permanent when versioning is disabled or whenever + // the generation parameter is used. However, if soft delete is enabled for + // the bucket, deleted objects can be restored using RestoreObject until the + // soft delete retention period has passed. DeleteObject(ctx context.Context, in *DeleteObjectRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + // Restores a soft-deleted object. + RestoreObject(ctx context.Context, in *RestoreObjectRequest, opts ...grpc.CallOption) (*Object, error) // Cancels an in-progress resumable upload. + // + // Any attempts to write to the resumable upload after cancelling the upload + // will fail. + // + // The behavior for currently in progress write operations is not guaranteed - + // they could either complete before the cancellation or fail if the + // cancellation completes first. CancelResumableWrite(ctx context.Context, in *CancelResumableWriteRequest, opts ...grpc.CallOption) (*CancelResumableWriteResponse, error) // Retrieves an object's metadata. GetObject(ctx context.Context, in *GetObjectRequest, opts ...grpc.CallOption) (*Object, error) @@ -9440,8 +10195,11 @@ type StorageClient interface { // returned `persisted_size`; in this case, the service will skip data at // offsets that were already persisted (without checking that it matches // the previously written data), and write only the data starting from the - // persisted offset. This behavior can make client-side handling simpler - // in some cases. + // persisted offset. Even though the data isn't written, it may still + // incur a performance cost over resuming at the correct write offset. + // This behavior can make client-side handling simpler in some cases. + // - Clients must only send data that is a multiple of 256 KiB per message, + // unless the object is being finished with `finish_write` set to `true`. // // The service will not view the object as complete until the client has // sent a `WriteObjectRequest` with `finish_write` set to `true`. 
Sending any @@ -9453,7 +10211,27 @@ type StorageClient interface { // Attempting to resume an already finalized object will result in an OK // status, with a WriteObjectResponse containing the finalized object's // metadata. + // + // Alternatively, the BidiWriteObject operation may be used to write an + // object with controls over flushing and the ability to fetch the ability to + // determine the current persisted size. WriteObject(ctx context.Context, opts ...grpc.CallOption) (Storage_WriteObjectClient, error) + // Stores a new object and metadata. + // + // This is similar to the WriteObject call with the added support for + // manual flushing of persisted state, and the ability to determine current + // persisted size without closing the stream. + // + // The client may specify one or both of the `state_lookup` and `flush` fields + // in each BidiWriteObjectRequest. If `flush` is specified, the data written + // so far will be persisted to storage. If `state_lookup` is specified, the + // service will respond with a BidiWriteObjectResponse that contains the + // persisted size. If both `flush` and `state_lookup` are specified, the flush + // will always occur before a `state_lookup`, so that both may be set in the + // same request and the returned state will be the state of the object + // post-flush. When the stream is closed, a BidiWriteObjectResponse will + // always be sent to the client, regardless of the value of `state_lookup`. + BidiWriteObject(ctx context.Context, opts ...grpc.CallOption) (Storage_BidiWriteObjectClient, error) // Retrieves a list of objects matching the criteria. ListObjects(ctx context.Context, in *ListObjectsRequest, opts ...grpc.CallOption) (*ListObjectsResponse, error) // Rewrites a source object to a destination object. Optionally overrides @@ -9544,8 +10322,8 @@ func (c *storageClient) LockBucketRetentionPolicy(ctx context.Context, in *LockB return out, nil } -func (c *storageClient) GetIamPolicy(ctx context.Context, in *v1.GetIamPolicyRequest, opts ...grpc.CallOption) (*v1.Policy, error) { - out := new(v1.Policy) +func (c *storageClient) GetIamPolicy(ctx context.Context, in *iampb.GetIamPolicyRequest, opts ...grpc.CallOption) (*iampb.Policy, error) { + out := new(iampb.Policy) err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/GetIamPolicy", in, out, opts...) if err != nil { return nil, err @@ -9553,8 +10331,8 @@ func (c *storageClient) GetIamPolicy(ctx context.Context, in *v1.GetIamPolicyReq return out, nil } -func (c *storageClient) SetIamPolicy(ctx context.Context, in *v1.SetIamPolicyRequest, opts ...grpc.CallOption) (*v1.Policy, error) { - out := new(v1.Policy) +func (c *storageClient) SetIamPolicy(ctx context.Context, in *iampb.SetIamPolicyRequest, opts ...grpc.CallOption) (*iampb.Policy, error) { + out := new(iampb.Policy) err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/SetIamPolicy", in, out, opts...) 
if err != nil { return nil, err @@ -9562,8 +10340,8 @@ func (c *storageClient) SetIamPolicy(ctx context.Context, in *v1.SetIamPolicyReq return out, nil } -func (c *storageClient) TestIamPermissions(ctx context.Context, in *v1.TestIamPermissionsRequest, opts ...grpc.CallOption) (*v1.TestIamPermissionsResponse, error) { - out := new(v1.TestIamPermissionsResponse) +func (c *storageClient) TestIamPermissions(ctx context.Context, in *iampb.TestIamPermissionsRequest, opts ...grpc.CallOption) (*iampb.TestIamPermissionsResponse, error) { + out := new(iampb.TestIamPermissionsResponse) err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/TestIamPermissions", in, out, opts...) if err != nil { return nil, err @@ -9580,36 +10358,36 @@ func (c *storageClient) UpdateBucket(ctx context.Context, in *UpdateBucketReques return out, nil } -func (c *storageClient) DeleteNotification(ctx context.Context, in *DeleteNotificationRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { +func (c *storageClient) DeleteNotificationConfig(ctx context.Context, in *DeleteNotificationConfigRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/DeleteNotification", in, out, opts...) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/DeleteNotificationConfig", in, out, opts...) if err != nil { return nil, err } return out, nil } -func (c *storageClient) GetNotification(ctx context.Context, in *GetNotificationRequest, opts ...grpc.CallOption) (*Notification, error) { - out := new(Notification) - err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/GetNotification", in, out, opts...) +func (c *storageClient) GetNotificationConfig(ctx context.Context, in *GetNotificationConfigRequest, opts ...grpc.CallOption) (*NotificationConfig, error) { + out := new(NotificationConfig) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/GetNotificationConfig", in, out, opts...) if err != nil { return nil, err } return out, nil } -func (c *storageClient) CreateNotification(ctx context.Context, in *CreateNotificationRequest, opts ...grpc.CallOption) (*Notification, error) { - out := new(Notification) - err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/CreateNotification", in, out, opts...) +func (c *storageClient) CreateNotificationConfig(ctx context.Context, in *CreateNotificationConfigRequest, opts ...grpc.CallOption) (*NotificationConfig, error) { + out := new(NotificationConfig) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/CreateNotificationConfig", in, out, opts...) if err != nil { return nil, err } return out, nil } -func (c *storageClient) ListNotifications(ctx context.Context, in *ListNotificationsRequest, opts ...grpc.CallOption) (*ListNotificationsResponse, error) { - out := new(ListNotificationsResponse) - err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/ListNotifications", in, out, opts...) +func (c *storageClient) ListNotificationConfigs(ctx context.Context, in *ListNotificationConfigsRequest, opts ...grpc.CallOption) (*ListNotificationConfigsResponse, error) { + out := new(ListNotificationConfigsResponse) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/ListNotificationConfigs", in, out, opts...) 
if err != nil { return nil, err } @@ -9634,6 +10412,15 @@ func (c *storageClient) DeleteObject(ctx context.Context, in *DeleteObjectReques return out, nil } +func (c *storageClient) RestoreObject(ctx context.Context, in *RestoreObjectRequest, opts ...grpc.CallOption) (*Object, error) { + out := new(Object) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/RestoreObject", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *storageClient) CancelResumableWrite(ctx context.Context, in *CancelResumableWriteRequest, opts ...grpc.CallOption) (*CancelResumableWriteResponse, error) { out := new(CancelResumableWriteResponse) err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/CancelResumableWrite", in, out, opts...) @@ -9727,6 +10514,37 @@ func (x *storageWriteObjectClient) CloseAndRecv() (*WriteObjectResponse, error) return m, nil } +func (c *storageClient) BidiWriteObject(ctx context.Context, opts ...grpc.CallOption) (Storage_BidiWriteObjectClient, error) { + stream, err := c.cc.NewStream(ctx, &_Storage_serviceDesc.Streams[2], "/google.storage.v2.Storage/BidiWriteObject", opts...) + if err != nil { + return nil, err + } + x := &storageBidiWriteObjectClient{stream} + return x, nil +} + +type Storage_BidiWriteObjectClient interface { + Send(*BidiWriteObjectRequest) error + Recv() (*BidiWriteObjectResponse, error) + grpc.ClientStream +} + +type storageBidiWriteObjectClient struct { + grpc.ClientStream +} + +func (x *storageBidiWriteObjectClient) Send(m *BidiWriteObjectRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *storageBidiWriteObjectClient) Recv() (*BidiWriteObjectResponse, error) { + m := new(BidiWriteObjectResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + func (c *storageClient) ListObjects(ctx context.Context, in *ListObjectsRequest, opts ...grpc.CallOption) (*ListObjectsResponse, error) { out := new(ListObjectsResponse) err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/ListObjects", in, out, opts...) @@ -9831,40 +10649,53 @@ type StorageServer interface { LockBucketRetentionPolicy(context.Context, *LockBucketRetentionPolicyRequest) (*Bucket, error) // Gets the IAM policy for a specified bucket or object. // The `resource` field in the request should be - // projects/_/buckets/ for a bucket or - // projects/_/buckets//objects/ for an object. - GetIamPolicy(context.Context, *v1.GetIamPolicyRequest) (*v1.Policy, error) + // `projects/_/buckets/{bucket}` for a bucket or + // `projects/_/buckets/{bucket}/objects/{object}` for an object. + GetIamPolicy(context.Context, *iampb.GetIamPolicyRequest) (*iampb.Policy, error) // Updates an IAM policy for the specified bucket or object. // The `resource` field in the request should be - // projects/_/buckets/ for a bucket or - // projects/_/buckets//objects/ for an object. - SetIamPolicy(context.Context, *v1.SetIamPolicyRequest) (*v1.Policy, error) + // `projects/_/buckets/{bucket}` for a bucket or + // `projects/_/buckets/{bucket}/objects/{object}` for an object. + SetIamPolicy(context.Context, *iampb.SetIamPolicyRequest) (*iampb.Policy, error) // Tests a set of permissions on the given bucket or object to see which, if // any, are held by the caller. // The `resource` field in the request should be - // projects/_/buckets/ for a bucket or - // projects/_/buckets//objects/ for an object. 
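The new BidiWriteObject stream wired up above differs from WriteObject mainly in the `flush` and `state_lookup` controls described in the added comments. A hedged sketch of one flush round trip, reusing the storagepb alias and imports from the sketch above; the field names (FirstMessage, Data, WriteOffset, Flush, StateLookup) are assumed to match the regenerated structs, and `uploadID` would come from StartResumableWrite:

// flushChunk appends one chunk to a resumable upload, forces a flush, and
// returns the size the service reports as durably persisted.
func flushChunk(ctx context.Context, client storagepb.StorageClient, uploadID string, offset int64, chunk []byte) (int64, error) {
	stream, err := client.BidiWriteObject(ctx)
	if err != nil {
		return 0, err
	}
	if err := stream.Send(&storagepb.BidiWriteObjectRequest{
		FirstMessage: &storagepb.BidiWriteObjectRequest_UploadId{UploadId: uploadID},
		WriteOffset:  offset, // where this chunk starts within the object
		Data: &storagepb.BidiWriteObjectRequest_ChecksummedData{
			ChecksummedData: &storagepb.ChecksummedData{Content: chunk},
		},
		Flush:       true, // persist everything written so far
		StateLookup: true, // per the doc comment, this elicits a response carrying persisted_size
	}); err != nil {
		return 0, err
	}
	resp, err := stream.Recv()
	if err != nil {
		return 0, err
	}
	return resp.GetPersistedSize(), nil
}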
- TestIamPermissions(context.Context, *v1.TestIamPermissionsRequest) (*v1.TestIamPermissionsResponse, error) + // `projects/_/buckets/{bucket}` for a bucket or + // `projects/_/buckets/{bucket}/objects/{object}` for an object. + TestIamPermissions(context.Context, *iampb.TestIamPermissionsRequest) (*iampb.TestIamPermissionsResponse, error) // Updates a bucket. Equivalent to JSON API's storage.buckets.patch method. UpdateBucket(context.Context, *UpdateBucketRequest) (*Bucket, error) - // Permanently deletes a notification subscription. - DeleteNotification(context.Context, *DeleteNotificationRequest) (*emptypb.Empty, error) - // View a notification config. - GetNotification(context.Context, *GetNotificationRequest) (*Notification, error) - // Creates a notification subscription for a given bucket. - // These notifications, when triggered, publish messages to the specified - // Pub/Sub topics. - // See https://cloud.google.com/storage/docs/pubsub-notifications. - CreateNotification(context.Context, *CreateNotificationRequest) (*Notification, error) - // Retrieves a list of notification subscriptions for a given bucket. - ListNotifications(context.Context, *ListNotificationsRequest) (*ListNotificationsResponse, error) + // Permanently deletes a NotificationConfig. + DeleteNotificationConfig(context.Context, *DeleteNotificationConfigRequest) (*emptypb.Empty, error) + // View a NotificationConfig. + GetNotificationConfig(context.Context, *GetNotificationConfigRequest) (*NotificationConfig, error) + // Creates a NotificationConfig for a given bucket. + // These NotificationConfigs, when triggered, publish messages to the + // specified Pub/Sub topics. See + // https://cloud.google.com/storage/docs/pubsub-notifications. + CreateNotificationConfig(context.Context, *CreateNotificationConfigRequest) (*NotificationConfig, error) + // Retrieves a list of NotificationConfigs for a given bucket. + ListNotificationConfigs(context.Context, *ListNotificationConfigsRequest) (*ListNotificationConfigsResponse, error) // Concatenates a list of existing objects into a new object in the same // bucket. ComposeObject(context.Context, *ComposeObjectRequest) (*Object, error) - // Deletes an object and its metadata. Deletions are permanent if versioning - // is not enabled for the bucket, or if the `generation` parameter is used. + // Deletes an object and its metadata. + // + // Deletions are normally permanent when versioning is disabled or whenever + // the generation parameter is used. However, if soft delete is enabled for + // the bucket, deleted objects can be restored using RestoreObject until the + // soft delete retention period has passed. DeleteObject(context.Context, *DeleteObjectRequest) (*emptypb.Empty, error) + // Restores a soft-deleted object. + RestoreObject(context.Context, *RestoreObjectRequest) (*Object, error) // Cancels an in-progress resumable upload. + // + // Any attempts to write to the resumable upload after cancelling the upload + // will fail. + // + // The behavior for currently in progress write operations is not guaranteed - + // they could either complete before the cancellation or fail if the + // cancellation completes first. CancelResumableWrite(context.Context, *CancelResumableWriteRequest) (*CancelResumableWriteResponse, error) // Retrieves an object's metadata. 
GetObject(context.Context, *GetObjectRequest) (*Object, error) @@ -9912,8 +10743,11 @@ type StorageServer interface { // returned `persisted_size`; in this case, the service will skip data at // offsets that were already persisted (without checking that it matches // the previously written data), and write only the data starting from the - // persisted offset. This behavior can make client-side handling simpler - // in some cases. + // persisted offset. Even though the data isn't written, it may still + // incur a performance cost over resuming at the correct write offset. + // This behavior can make client-side handling simpler in some cases. + // - Clients must only send data that is a multiple of 256 KiB per message, + // unless the object is being finished with `finish_write` set to `true`. // // The service will not view the object as complete until the client has // sent a `WriteObjectRequest` with `finish_write` set to `true`. Sending any @@ -9925,7 +10759,27 @@ type StorageServer interface { // Attempting to resume an already finalized object will result in an OK // status, with a WriteObjectResponse containing the finalized object's // metadata. + // + // Alternatively, the BidiWriteObject operation may be used to write an + // object with controls over flushing and the ability to fetch the ability to + // determine the current persisted size. WriteObject(Storage_WriteObjectServer) error + // Stores a new object and metadata. + // + // This is similar to the WriteObject call with the added support for + // manual flushing of persisted state, and the ability to determine current + // persisted size without closing the stream. + // + // The client may specify one or both of the `state_lookup` and `flush` fields + // in each BidiWriteObjectRequest. If `flush` is specified, the data written + // so far will be persisted to storage. If `state_lookup` is specified, the + // service will respond with a BidiWriteObjectResponse that contains the + // persisted size. If both `flush` and `state_lookup` are specified, the flush + // will always occur before a `state_lookup`, so that both may be set in the + // same request and the returned state will be the state of the object + // post-flush. When the stream is closed, a BidiWriteObjectResponse will + // always be sent to the client, regardless of the value of `state_lookup`. + BidiWriteObject(Storage_BidiWriteObjectServer) error // Retrieves a list of objects matching the criteria. ListObjects(context.Context, *ListObjectsRequest) (*ListObjectsResponse, error) // Rewrites a source object to a destination object. 
Optionally overrides @@ -9982,29 +10836,29 @@ func (*UnimplementedStorageServer) ListBuckets(context.Context, *ListBucketsRequ func (*UnimplementedStorageServer) LockBucketRetentionPolicy(context.Context, *LockBucketRetentionPolicyRequest) (*Bucket, error) { return nil, status.Errorf(codes.Unimplemented, "method LockBucketRetentionPolicy not implemented") } -func (*UnimplementedStorageServer) GetIamPolicy(context.Context, *v1.GetIamPolicyRequest) (*v1.Policy, error) { +func (*UnimplementedStorageServer) GetIamPolicy(context.Context, *iampb.GetIamPolicyRequest) (*iampb.Policy, error) { return nil, status.Errorf(codes.Unimplemented, "method GetIamPolicy not implemented") } -func (*UnimplementedStorageServer) SetIamPolicy(context.Context, *v1.SetIamPolicyRequest) (*v1.Policy, error) { +func (*UnimplementedStorageServer) SetIamPolicy(context.Context, *iampb.SetIamPolicyRequest) (*iampb.Policy, error) { return nil, status.Errorf(codes.Unimplemented, "method SetIamPolicy not implemented") } -func (*UnimplementedStorageServer) TestIamPermissions(context.Context, *v1.TestIamPermissionsRequest) (*v1.TestIamPermissionsResponse, error) { +func (*UnimplementedStorageServer) TestIamPermissions(context.Context, *iampb.TestIamPermissionsRequest) (*iampb.TestIamPermissionsResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method TestIamPermissions not implemented") } func (*UnimplementedStorageServer) UpdateBucket(context.Context, *UpdateBucketRequest) (*Bucket, error) { return nil, status.Errorf(codes.Unimplemented, "method UpdateBucket not implemented") } -func (*UnimplementedStorageServer) DeleteNotification(context.Context, *DeleteNotificationRequest) (*emptypb.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method DeleteNotification not implemented") +func (*UnimplementedStorageServer) DeleteNotificationConfig(context.Context, *DeleteNotificationConfigRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteNotificationConfig not implemented") } -func (*UnimplementedStorageServer) GetNotification(context.Context, *GetNotificationRequest) (*Notification, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetNotification not implemented") +func (*UnimplementedStorageServer) GetNotificationConfig(context.Context, *GetNotificationConfigRequest) (*NotificationConfig, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetNotificationConfig not implemented") } -func (*UnimplementedStorageServer) CreateNotification(context.Context, *CreateNotificationRequest) (*Notification, error) { - return nil, status.Errorf(codes.Unimplemented, "method CreateNotification not implemented") +func (*UnimplementedStorageServer) CreateNotificationConfig(context.Context, *CreateNotificationConfigRequest) (*NotificationConfig, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateNotificationConfig not implemented") } -func (*UnimplementedStorageServer) ListNotifications(context.Context, *ListNotificationsRequest) (*ListNotificationsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ListNotifications not implemented") +func (*UnimplementedStorageServer) ListNotificationConfigs(context.Context, *ListNotificationConfigsRequest) (*ListNotificationConfigsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListNotificationConfigs not implemented") } func (*UnimplementedStorageServer) ComposeObject(context.Context, *ComposeObjectRequest) (*Object, error) { return 
nil, status.Errorf(codes.Unimplemented, "method ComposeObject not implemented") @@ -10012,6 +10866,9 @@ func (*UnimplementedStorageServer) ComposeObject(context.Context, *ComposeObject func (*UnimplementedStorageServer) DeleteObject(context.Context, *DeleteObjectRequest) (*emptypb.Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method DeleteObject not implemented") } +func (*UnimplementedStorageServer) RestoreObject(context.Context, *RestoreObjectRequest) (*Object, error) { + return nil, status.Errorf(codes.Unimplemented, "method RestoreObject not implemented") +} func (*UnimplementedStorageServer) CancelResumableWrite(context.Context, *CancelResumableWriteRequest) (*CancelResumableWriteResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method CancelResumableWrite not implemented") } @@ -10027,6 +10884,9 @@ func (*UnimplementedStorageServer) UpdateObject(context.Context, *UpdateObjectRe func (*UnimplementedStorageServer) WriteObject(Storage_WriteObjectServer) error { return status.Errorf(codes.Unimplemented, "method WriteObject not implemented") } +func (*UnimplementedStorageServer) BidiWriteObject(Storage_BidiWriteObjectServer) error { + return status.Errorf(codes.Unimplemented, "method BidiWriteObject not implemented") +} func (*UnimplementedStorageServer) ListObjects(context.Context, *ListObjectsRequest) (*ListObjectsResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method ListObjects not implemented") } @@ -10153,7 +11013,7 @@ func _Storage_LockBucketRetentionPolicy_Handler(srv interface{}, ctx context.Con } func _Storage_GetIamPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(v1.GetIamPolicyRequest) + in := new(iampb.GetIamPolicyRequest) if err := dec(in); err != nil { return nil, err } @@ -10165,13 +11025,13 @@ func _Storage_GetIamPolicy_Handler(srv interface{}, ctx context.Context, dec fun FullMethod: "/google.storage.v2.Storage/GetIamPolicy", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageServer).GetIamPolicy(ctx, req.(*v1.GetIamPolicyRequest)) + return srv.(StorageServer).GetIamPolicy(ctx, req.(*iampb.GetIamPolicyRequest)) } return interceptor(ctx, in, info, handler) } func _Storage_SetIamPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(v1.SetIamPolicyRequest) + in := new(iampb.SetIamPolicyRequest) if err := dec(in); err != nil { return nil, err } @@ -10183,13 +11043,13 @@ func _Storage_SetIamPolicy_Handler(srv interface{}, ctx context.Context, dec fun FullMethod: "/google.storage.v2.Storage/SetIamPolicy", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageServer).SetIamPolicy(ctx, req.(*v1.SetIamPolicyRequest)) + return srv.(StorageServer).SetIamPolicy(ctx, req.(*iampb.SetIamPolicyRequest)) } return interceptor(ctx, in, info, handler) } func _Storage_TestIamPermissions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(v1.TestIamPermissionsRequest) + in := new(iampb.TestIamPermissionsRequest) if err := dec(in); err != nil { return nil, err } @@ -10201,7 +11061,7 @@ func _Storage_TestIamPermissions_Handler(srv interface{}, ctx context.Context, d FullMethod: "/google.storage.v2.Storage/TestIamPermissions", } handler := 
func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageServer).TestIamPermissions(ctx, req.(*v1.TestIamPermissionsRequest)) + return srv.(StorageServer).TestIamPermissions(ctx, req.(*iampb.TestIamPermissionsRequest)) } return interceptor(ctx, in, info, handler) } @@ -10224,74 +11084,74 @@ func _Storage_UpdateBucket_Handler(srv interface{}, ctx context.Context, dec fun return interceptor(ctx, in, info, handler) } -func _Storage_DeleteNotification_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DeleteNotificationRequest) +func _Storage_DeleteNotificationConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteNotificationConfigRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(StorageServer).DeleteNotification(ctx, in) + return srv.(StorageServer).DeleteNotificationConfig(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/google.storage.v2.Storage/DeleteNotification", + FullMethod: "/google.storage.v2.Storage/DeleteNotificationConfig", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageServer).DeleteNotification(ctx, req.(*DeleteNotificationRequest)) + return srv.(StorageServer).DeleteNotificationConfig(ctx, req.(*DeleteNotificationConfigRequest)) } return interceptor(ctx, in, info, handler) } -func _Storage_GetNotification_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetNotificationRequest) +func _Storage_GetNotificationConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetNotificationConfigRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(StorageServer).GetNotification(ctx, in) + return srv.(StorageServer).GetNotificationConfig(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/google.storage.v2.Storage/GetNotification", + FullMethod: "/google.storage.v2.Storage/GetNotificationConfig", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageServer).GetNotification(ctx, req.(*GetNotificationRequest)) + return srv.(StorageServer).GetNotificationConfig(ctx, req.(*GetNotificationConfigRequest)) } return interceptor(ctx, in, info, handler) } -func _Storage_CreateNotification_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CreateNotificationRequest) +func _Storage_CreateNotificationConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateNotificationConfigRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(StorageServer).CreateNotification(ctx, in) + return srv.(StorageServer).CreateNotificationConfig(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/google.storage.v2.Storage/CreateNotification", + FullMethod: "/google.storage.v2.Storage/CreateNotificationConfig", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return 
srv.(StorageServer).CreateNotification(ctx, req.(*CreateNotificationRequest)) + return srv.(StorageServer).CreateNotificationConfig(ctx, req.(*CreateNotificationConfigRequest)) } return interceptor(ctx, in, info, handler) } -func _Storage_ListNotifications_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListNotificationsRequest) +func _Storage_ListNotificationConfigs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListNotificationConfigsRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(StorageServer).ListNotifications(ctx, in) + return srv.(StorageServer).ListNotificationConfigs(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/google.storage.v2.Storage/ListNotifications", + FullMethod: "/google.storage.v2.Storage/ListNotificationConfigs", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(StorageServer).ListNotifications(ctx, req.(*ListNotificationsRequest)) + return srv.(StorageServer).ListNotificationConfigs(ctx, req.(*ListNotificationConfigsRequest)) } return interceptor(ctx, in, info, handler) } @@ -10332,6 +11192,24 @@ func _Storage_DeleteObject_Handler(srv interface{}, ctx context.Context, dec fun return interceptor(ctx, in, info, handler) } +func _Storage_RestoreObject_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RestoreObjectRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageServer).RestoreObject(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.storage.v2.Storage/RestoreObject", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageServer).RestoreObject(ctx, req.(*RestoreObjectRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _Storage_CancelResumableWrite_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(CancelResumableWriteRequest) if err := dec(in); err != nil { @@ -10433,6 +11311,32 @@ func (x *storageWriteObjectServer) Recv() (*WriteObjectRequest, error) { return m, nil } +func _Storage_BidiWriteObject_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(StorageServer).BidiWriteObject(&storageBidiWriteObjectServer{stream}) +} + +type Storage_BidiWriteObjectServer interface { + Send(*BidiWriteObjectResponse) error + Recv() (*BidiWriteObjectRequest, error) + grpc.ServerStream +} + +type storageBidiWriteObjectServer struct { + grpc.ServerStream +} + +func (x *storageBidiWriteObjectServer) Send(m *BidiWriteObjectResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *storageBidiWriteObjectServer) Recv() (*BidiWriteObjectRequest, error) { + m := new(BidiWriteObjectRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + func _Storage_ListObjects_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(ListObjectsRequest) if err := dec(in); err != nil { @@ -10654,20 +11558,20 @@ var _Storage_serviceDesc = grpc.ServiceDesc{ Handler: 
_Storage_UpdateBucket_Handler, }, { - MethodName: "DeleteNotification", - Handler: _Storage_DeleteNotification_Handler, + MethodName: "DeleteNotificationConfig", + Handler: _Storage_DeleteNotificationConfig_Handler, }, { - MethodName: "GetNotification", - Handler: _Storage_GetNotification_Handler, + MethodName: "GetNotificationConfig", + Handler: _Storage_GetNotificationConfig_Handler, }, { - MethodName: "CreateNotification", - Handler: _Storage_CreateNotification_Handler, + MethodName: "CreateNotificationConfig", + Handler: _Storage_CreateNotificationConfig_Handler, }, { - MethodName: "ListNotifications", - Handler: _Storage_ListNotifications_Handler, + MethodName: "ListNotificationConfigs", + Handler: _Storage_ListNotificationConfigs_Handler, }, { MethodName: "ComposeObject", @@ -10677,6 +11581,10 @@ var _Storage_serviceDesc = grpc.ServiceDesc{ MethodName: "DeleteObject", Handler: _Storage_DeleteObject_Handler, }, + { + MethodName: "RestoreObject", + Handler: _Storage_RestoreObject_Handler, + }, { MethodName: "CancelResumableWrite", Handler: _Storage_CancelResumableWrite_Handler, @@ -10741,6 +11649,12 @@ var _Storage_serviceDesc = grpc.ServiceDesc{ Handler: _Storage_WriteObject_Handler, ClientStreams: true, }, + { + StreamName: "BidiWriteObject", + Handler: _Storage_BidiWriteObject_Handler, + ServerStreams: true, + ClientStreams: true, + }, }, Metadata: "google/storage/v2/storage.proto", } diff --git a/vendor/cloud.google.com/go/storage/internal/version.go b/vendor/cloud.google.com/go/storage/internal/version.go index a08cb7cabc..eca9b294aa 100644 --- a/vendor/cloud.google.com/go/storage/internal/version.go +++ b/vendor/cloud.google.com/go/storage/internal/version.go @@ -15,4 +15,4 @@ package internal // Version is the current tagged release of the library. -const Version = "1.29.0" +const Version = "1.35.1" diff --git a/vendor/cloud.google.com/go/storage/invoke.go b/vendor/cloud.google.com/go/storage/invoke.go index 810d64285d..dc79fd88bb 100644 --- a/vendor/cloud.google.com/go/storage/invoke.go +++ b/vendor/cloud.google.com/go/storage/invoke.go @@ -20,7 +20,6 @@ import ( "fmt" "io" "net" - "net/http" "net/url" "strings" @@ -29,6 +28,7 @@ import ( sinternal "cloud.google.com/go/storage/internal" "github.com/google/uuid" gax "github.com/googleapis/gax-go/v2" + "github.com/googleapis/gax-go/v2/callctx" "google.golang.org/api/googleapi" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" @@ -37,10 +37,15 @@ import ( var defaultRetry *retryConfig = &retryConfig{} var xGoogDefaultHeader = fmt.Sprintf("gl-go/%s gccl/%s", version.Go(), sinternal.Version) +const ( + xGoogHeaderKey = "x-goog-api-client" + idempotencyHeaderKey = "x-goog-gcs-idempotency-token" +) + // run determines whether a retry is necessary based on the config and // idempotency information. It then calls the function with or without retries // as appropriate, using the configured settings. 
-func run(ctx context.Context, call func() error, retry *retryConfig, isIdempotent bool, setHeader func(string, int)) error { +func run(ctx context.Context, call func(ctx context.Context) error, retry *retryConfig, isIdempotent bool) error { attempts := 1 invocationID := uuid.New().String() @@ -48,8 +53,8 @@ func run(ctx context.Context, call func() error, retry *retryConfig, isIdempoten retry = defaultRetry } if (retry.policy == RetryIdempotent && !isIdempotent) || retry.policy == RetryNever { - setHeader(invocationID, attempts) - return call() + ctxWithHeaders := setInvocationHeaders(ctx, invocationID, attempts) + return call(ctxWithHeaders) } bo := gax.Backoff{} if retry.backoff != nil { @@ -63,30 +68,22 @@ func run(ctx context.Context, call func() error, retry *retryConfig, isIdempoten } return internal.Retry(ctx, bo, func() (stop bool, err error) { - setHeader(invocationID, attempts) - err = call() + ctxWithHeaders := setInvocationHeaders(ctx, invocationID, attempts) + err = call(ctxWithHeaders) attempts++ return !errorFunc(err), err }) } -func setRetryHeaderHTTP(req interface{ Header() http.Header }) func(string, int) { - return func(invocationID string, attempts int) { - if req == nil { - return - } - header := req.Header() - invocationHeader := fmt.Sprintf("gccl-invocation-id/%v gccl-attempt-count/%v", invocationID, attempts) - xGoogHeader := strings.Join([]string{invocationHeader, xGoogDefaultHeader}, " ") - header.Set("x-goog-api-client", xGoogHeader) - } -} +// Sets invocation ID headers on the context which will be propagated as +// headers in the call to the service (for both gRPC and HTTP). +func setInvocationHeaders(ctx context.Context, invocationID string, attempts int) context.Context { + invocationHeader := fmt.Sprintf("gccl-invocation-id/%v gccl-attempt-count/%v", invocationID, attempts) + xGoogHeader := strings.Join([]string{invocationHeader, xGoogDefaultHeader}, " ") -// TODO: Implement method setting header via context for gRPC -func setRetryHeaderGRPC(_ context.Context) func(string, int) { - return func(_ string, _ int) { - return - } + ctx = callctx.SetHeaders(ctx, xGoogHeaderKey, xGoogHeader) + ctx = callctx.SetHeaders(ctx, idempotencyHeaderKey, invocationID) + return ctx } // ShouldRetry returns true if an error is retryable, based on best practice @@ -131,12 +128,11 @@ func ShouldRetry(err error) bool { return true } } - // HTTP 429, 502, 503, and 504 all map to gRPC UNAVAILABLE per - // https://grpc.github.io/grpc/core/md_doc_http-grpc-status-mapping.html. - // - // This is only necessary for the experimental gRPC-based media operations. - if st, ok := status.FromError(err); ok && st.Code() == codes.Unavailable { - return true + // UNAVAILABLE, RESOURCE_EXHAUSTED, and INTERNAL codes are all retryable for gRPC. 
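For illustration only (not part of the patch itself): the new `setInvocationHeaders` helper above uses the gax-go `callctx` package to carry per-attempt headers on the context, for both the HTTP and gRPC transports. A minimal standalone sketch of that mechanism, with made-up header values:

```go
package main

import (
	"context"
	"fmt"

	"github.com/googleapis/gax-go/v2/callctx"
)

func main() {
	ctx := context.Background()

	// Attach the same kind of per-attempt headers that setInvocationHeaders
	// builds; whatever is stored via callctx is propagated by the transport
	// layer on each call. The values below are placeholders.
	ctx = callctx.SetHeaders(ctx, "x-goog-api-client",
		"gccl-invocation-id/00000000-0000-0000-0000-000000000000 gccl-attempt-count/1")
	ctx = callctx.SetHeaders(ctx, "x-goog-gcs-idempotency-token",
		"00000000-0000-0000-0000-000000000000")

	// HeadersFromContext shows what a transport would see for this attempt.
	fmt.Println(callctx.HeadersFromContext(ctx))
}
```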
+ if st, ok := status.FromError(err); ok { + if code := st.Code(); code == codes.Unavailable || code == codes.ResourceExhausted || code == codes.Internal { + return true + } } // Unwrap is only supported in go1.13.x+ if e, ok := err.(interface{ Unwrap() error }); ok { diff --git a/vendor/cloud.google.com/go/storage/notifications.go b/vendor/cloud.google.com/go/storage/notifications.go index 614feb7b6d..56f3e3daa5 100644 --- a/vendor/cloud.google.com/go/storage/notifications.go +++ b/vendor/cloud.google.com/go/storage/notifications.go @@ -21,7 +21,7 @@ import ( "regexp" "cloud.google.com/go/internal/trace" - storagepb "cloud.google.com/go/storage/internal/apiv2/stubs" + "cloud.google.com/go/storage/internal/apiv2/storagepb" raw "google.golang.org/api/storage/v1" ) @@ -92,7 +92,7 @@ func toNotification(rn *raw.Notification) *Notification { return n } -func toNotificationFromProto(pbn *storagepb.Notification) *Notification { +func toNotificationFromProto(pbn *storagepb.NotificationConfig) *Notification { n := &Notification{ ID: pbn.GetName(), EventTypes: pbn.GetEventTypes(), @@ -104,8 +104,8 @@ func toNotificationFromProto(pbn *storagepb.Notification) *Notification { return n } -func toProtoNotification(n *Notification) *storagepb.Notification { - return &storagepb.Notification{ +func toProtoNotification(n *Notification) *storagepb.NotificationConfig { + return &storagepb.NotificationConfig{ Name: n.ID, Topic: fmt.Sprintf("//pubsub.googleapis.com/projects/%s/topics/%s", n.TopicProjectID, n.TopicID), @@ -182,7 +182,7 @@ func notificationsToMap(rns []*raw.Notification) map[string]*Notification { return m } -func notificationsToMapFromProto(ns []*storagepb.Notification) map[string]*Notification { +func notificationsToMapFromProto(ns []*storagepb.NotificationConfig) map[string]*Notification { m := map[string]*Notification{} for _, n := range ns { m[n.Name] = toNotificationFromProto(n) diff --git a/vendor/cloud.google.com/go/storage/option.go b/vendor/cloud.google.com/go/storage/option.go new file mode 100644 index 0000000000..e72ceb78f0 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/option.go @@ -0,0 +1,75 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "google.golang.org/api/option" + "google.golang.org/api/option/internaloption" +) + +// storageConfig contains the Storage client option configuration that can be +// set through storageClientOptions. +type storageConfig struct { + useJSONforReads bool + readAPIWasSet bool +} + +// newStorageConfig generates a new storageConfig with all the given +// storageClientOptions applied. +func newStorageConfig(opts ...option.ClientOption) storageConfig { + var conf storageConfig + for _, opt := range opts { + if storageOpt, ok := opt.(storageClientOption); ok { + storageOpt.ApplyStorageOpt(&conf) + } + } + return conf +} + +// A storageClientOption is an option for a Google Storage client. 
+type storageClientOption interface { + option.ClientOption + ApplyStorageOpt(*storageConfig) +} + +// WithJSONReads is an option that may be passed to a Storage Client on creation. +// It sets the client to use the JSON API for object reads. Currently, the +// default API used for reads is XML. +// Setting this option is required to use the GenerationNotMatch condition. +// +// Note that when this option is set, reads will return a zero date for +// [ReaderObjectAttrs].LastModified and may return a different value for +// [ReaderObjectAttrs].CacheControl. +func WithJSONReads() option.ClientOption { + return &withReadAPI{useJSON: true} +} + +// WithXMLReads is an option that may be passed to a Storage Client on creation. +// It sets the client to use the XML API for object reads. +// +// This is the current default. +func WithXMLReads() option.ClientOption { + return &withReadAPI{useJSON: false} +} + +type withReadAPI struct { + internaloption.EmbeddableAdapter + useJSON bool +} + +func (w *withReadAPI) ApplyStorageOpt(c *storageConfig) { + c.useJSONforReads = w.useJSON + c.readAPIWasSet = true +} diff --git a/vendor/cloud.google.com/go/storage/post_policy_v4.go b/vendor/cloud.google.com/go/storage/post_policy_v4.go index 2961aca206..6bc73fb7af 100644 --- a/vendor/cloud.google.com/go/storage/post_policy_v4.go +++ b/vendor/cloud.google.com/go/storage/post_policy_v4.go @@ -32,7 +32,7 @@ import ( // Please see https://cloud.google.com/storage/docs/xml-api/post-object // for reference about the fields. type PostPolicyV4Options struct { - // GoogleAccessID represents the authorizer of the signed URL generation. + // GoogleAccessID represents the authorizer of the signed post policy generation. // It is typically the Google service account client email address from // the Google Developers Console in the form of "xxx@developer.gserviceaccount.com". // Required. @@ -85,7 +85,7 @@ type PostPolicyV4Options struct { // Exactly one of PrivateKey or SignRawBytes must be non-nil. SignRawBytes func(bytes []byte) (signature []byte, err error) - // Expires is the expiration time on the signed URL. + // Expires is the expiration time on the signed post policy. // It must be a time in the future. // Required. Expires time.Time @@ -113,6 +113,12 @@ type PostPolicyV4Options struct { // Optional. Conditions []PostPolicyV4Condition + // Hostname sets the host of the signed post policy. This field overrides + // any endpoint set on a storage Client or through STORAGE_EMULATOR_HOST. + // Only compatible with PathStyle URLStyle. + // Optional. + Hostname string + shouldHashSignBytes bool } @@ -128,6 +134,7 @@ func (opts *PostPolicyV4Options) clone() *PostPolicyV4Options { Fields: opts.Fields, Conditions: opts.Conditions, shouldHashSignBytes: opts.shouldHashSignBytes, + Hostname: opts.Hostname, } } @@ -370,7 +377,7 @@ func GenerateSignedPostPolicyV4(bucket, object string, opts *PostPolicyV4Options u := &url.URL{ Path: path, RawPath: pathEncodeV4(path), - Host: opts.Style.host(bucket), + Host: opts.Style.host(opts.Hostname, bucket), Scheme: scheme, } diff --git a/vendor/cloud.google.com/go/storage/reader.go b/vendor/cloud.google.com/go/storage/reader.go index 46487d2b77..4673a68d07 100644 --- a/vendor/cloud.google.com/go/storage/reader.go +++ b/vendor/cloud.google.com/go/storage/reader.go @@ -87,8 +87,9 @@ func (o *ObjectHandle) NewReader(ctx context.Context) (*Reader, error) { // that file will be served back whole, regardless of the requested range as // Google Cloud Storage dictates. 
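For illustration only: the `WithJSONReads` option introduced above is what enables the `GenerationNotMatch` read condition mentioned elsewhere in this change. A minimal sketch of a client configured that way; the bucket, object, and generation values are placeholders:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()

	// Route object reads through the JSON API instead of the XML default.
	client, err := storage.NewClient(ctx, storage.WithJSONReads())
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Per the doc comments in this change, GenerationNotMatch on reads only
	// works when JSON reads are enabled.
	obj := client.Bucket("my-bucket").Object("my-object").
		If(storage.Conditions{GenerationNotMatch: 1234567890})

	r, err := obj.NewReader(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer r.Close()
	fmt.Println("size:", r.Attrs.Size)
}
```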
func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64) (r *Reader, err error) { - ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Object.NewRangeReader") - defer func() { trace.EndSpan(ctx, err) }() + // This span covers the life of the reader. It is closed via the context + // in Reader.Close. + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Object.Reader") if err := o.validate(); err != nil { return nil, err @@ -117,6 +118,14 @@ func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64) r, err = o.c.tc.NewRangeReader(ctx, params, opts...) + // Pass the context so that the span can be closed in Reader.Close, or close the + // span now if there is an error. + if err == nil { + r.ctx = ctx + } else { + trace.EndSpan(ctx, err) + } + return r, err } @@ -139,15 +148,23 @@ func uncompressedByServer(res *http.Response) bool { res.Header.Get("Content-Encoding") != "gzip" } +// parseCRC32c parses the crc32c hash from the X-Goog-Hash header. +// It can parse headers in the form [crc32c=xxx md5=xxx] (XML responses) or the +// form [crc32c=xxx,md5=xxx] (JSON responses). The md5 hash is ignored. func parseCRC32c(res *http.Response) (uint32, bool) { const prefix = "crc32c=" for _, spec := range res.Header["X-Goog-Hash"] { - if strings.HasPrefix(spec, prefix) { - c, err := decodeUint32(spec[len(prefix):]) - if err == nil { - return c, true + values := strings.Split(spec, ",") + + for _, v := range values { + if strings.HasPrefix(v, prefix) { + c, err := decodeUint32(v[len(prefix):]) + if err == nil { + return c, true + } } } + } return 0, false } @@ -170,16 +187,6 @@ func setConditionsHeaders(headers http.Header, conds *Conditions) error { return nil } -// Wrap a request to look similar to an apiary library request, in order to -// be used by run(). -type readerRequestWrapper struct { - req *http.Request -} - -func (w *readerRequestWrapper) Header() http.Header { - return w.req.Header -} - var emptyBody = ioutil.NopCloser(strings.NewReader("")) // Reader reads a Cloud Storage object. @@ -196,11 +203,14 @@ type Reader struct { gotCRC uint32 // running crc reader io.ReadCloser + ctx context.Context } // Close closes the Reader. It must be called when done reading. func (r *Reader) Close() error { - return r.reader.Close() + err := r.reader.Close() + trace.EndSpan(r.ctx, err) + return err } func (r *Reader) Read(p []byte) (int, error) { diff --git a/vendor/cloud.google.com/go/storage/storage.go b/vendor/cloud.google.com/go/storage/storage.go index 7fc3fc4cb9..a16e512f5e 100644 --- a/vendor/cloud.google.com/go/storage/storage.go +++ b/vendor/cloud.google.com/go/storage/storage.go @@ -41,7 +41,7 @@ import ( "cloud.google.com/go/internal/optional" "cloud.google.com/go/internal/trace" "cloud.google.com/go/storage/internal" - storagepb "cloud.google.com/go/storage/internal/apiv2/stubs" + "cloud.google.com/go/storage/internal/apiv2/storagepb" "github.com/googleapis/gax-go/v2" "golang.org/x/oauth2/google" "google.golang.org/api/googleapi" @@ -109,8 +109,8 @@ type Client struct { raw *raw.Service // Scheme describes the scheme under the current host. scheme string - // ReadHost is the default host used on the reader. - readHost string + // xmlHost is the default host used for XML requests. + xmlHost string // May be nil. creds *google.Credentials retry *retryConfig @@ -123,20 +123,16 @@ type Client struct { useGRPC bool } -// NewClient creates a new Google Cloud Storage client. 
+// NewClient creates a new Google Cloud Storage client using the HTTP transport. // The default scope is ScopeFullControl. To use a different scope, like // ScopeReadOnly, use option.WithScopes. // // Clients should be reused instead of created as needed. The methods of Client // are safe for concurrent use by multiple goroutines. +// +// You may configure the client by passing in options from the [google.golang.org/api/option] +// package. You may also use options defined in this package, such as [WithJSONReads]. func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { - - // Use the experimental gRPC client if the env var is set. - // This is an experimental API and not intended for public use. - if withGRPC := os.Getenv("STORAGE_USE_GRPC"); withGRPC != "" { - return newGRPCClient(ctx, opts...) - } - var creds *google.Credentials // In general, it is recommended to use raw.NewService instead of htransport.NewClient @@ -179,10 +175,12 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error endpoint := hostURL.String() // Append the emulator host as default endpoint for the user - opts = append([]option.ClientOption{option.WithoutAuthentication()}, opts...) - - opts = append(opts, internaloption.WithDefaultEndpoint(endpoint)) - opts = append(opts, internaloption.WithDefaultMTLSEndpoint(endpoint)) + opts = append([]option.ClientOption{ + option.WithoutAuthentication(), + internaloption.SkipDialSettingsValidation(), + internaloption.WithDefaultEndpoint(endpoint), + internaloption.WithDefaultMTLSEndpoint(endpoint), + }, opts...) } // htransport selects the correct endpoint among WithEndpoint (user override), WithDefaultEndpoint, and WithDefaultMTLSEndpoint. @@ -195,7 +193,7 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error if err != nil { return nil, fmt.Errorf("storage client: %w", err) } - // Update readHost and scheme with the chosen endpoint. + // Update xmlHost and scheme with the chosen endpoint. u, err := url.Parse(ep) if err != nil { return nil, fmt.Errorf("supplied endpoint %q is not valid: %w", ep, err) @@ -207,20 +205,29 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error } return &Client{ - hc: hc, - raw: rawService, - scheme: u.Scheme, - readHost: u.Host, - creds: creds, - tc: tc, + hc: hc, + raw: rawService, + scheme: u.Scheme, + xmlHost: u.Host, + creds: creds, + tc: tc, }, nil } -// newGRPCClient creates a new Storage client that initializes a gRPC-based -// client. Calls that have not been implemented in gRPC will panic. +// NewGRPCClient creates a new Storage client using the gRPC transport and API. +// Client methods which have not been implemented in gRPC will return an error. +// In particular, methods for Cloud Pub/Sub notifications are not supported. +// +// The storage gRPC API is still in preview and not yet publicly available. +// If you would like to use the API, please first contact your GCP account rep to +// request access. The API may be subject to breaking changes. +// +// Clients should be reused instead of created as needed. The methods of Client +// are safe for concurrent use by multiple goroutines. // -// This is an experimental API and not intended for public use. -func newGRPCClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { +// You may configure the client by passing in options from the [google.golang.org/api/option] +// package. 
+func NewGRPCClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { opts = append(defaultGRPCOptions(), opts...) tc, err := newGRPCStorageClient(ctx, withClientOptions(opts...)) if err != nil { @@ -258,13 +265,13 @@ const ( SigningSchemeV4 ) -// URLStyle determines the style to use for the signed URL. pathStyle is the +// URLStyle determines the style to use for the signed URL. PathStyle is the // default. All non-default options work with V4 scheme only. See // https://cloud.google.com/storage/docs/request-endpoints for details. type URLStyle interface { // host should return the host portion of the signed URL, not including // the scheme (e.g. storage.googleapis.com). - host(bucket string) string + host(hostname, bucket string) string // path should return the path portion of the signed URL, which may include // both the bucket and object name or only the object name depending on the @@ -280,7 +287,11 @@ type bucketBoundHostname struct { hostname string } -func (s pathStyle) host(bucket string) string { +func (s pathStyle) host(hostname, bucket string) string { + if hostname != "" { + return stripScheme(hostname) + } + if host := os.Getenv("STORAGE_EMULATOR_HOST"); host != "" { return stripScheme(host) } @@ -288,7 +299,7 @@ func (s pathStyle) host(bucket string) string { return "storage.googleapis.com" } -func (s virtualHostedStyle) host(bucket string) string { +func (s virtualHostedStyle) host(_, bucket string) string { if host := os.Getenv("STORAGE_EMULATOR_HOST"); host != "" { return bucket + "." + stripScheme(host) } @@ -296,7 +307,7 @@ func (s virtualHostedStyle) host(bucket string) string { return bucket + ".storage.googleapis.com" } -func (s bucketBoundHostname) host(bucket string) string { +func (s bucketBoundHostname) host(_, bucket string) string { return s.hostname } @@ -317,7 +328,10 @@ func (s bucketBoundHostname) path(bucket, object string) string { } // PathStyle is the default style, and will generate a URL of the form -// "storage.googleapis.com//". +// "//". By default, is +// storage.googleapis.com, but setting an endpoint on the storage Client or +// through STORAGE_EMULATOR_HOST overrides this. Setting Hostname on +// SignedURLOptions or PostPolicyV4Options overrides everything else. func PathStyle() URLStyle { return pathStyle{} } @@ -438,6 +452,12 @@ type SignedURLOptions struct { // Scheme determines the version of URL signing to use. Default is // SigningSchemeV2. Scheme SigningScheme + + // Hostname sets the host of the signed URL. This field overrides any + // endpoint set on a storage Client or through STORAGE_EMULATOR_HOST. + // Only compatible with PathStyle URLStyle. + // Optional. 
+ Hostname string } func (opts *SignedURLOptions) clone() *SignedURLOptions { @@ -454,6 +474,7 @@ func (opts *SignedURLOptions) clone() *SignedURLOptions { Style: opts.Style, Insecure: opts.Insecure, Scheme: opts.Scheme, + Hostname: opts.Hostname, } } @@ -535,7 +556,7 @@ func v4SanitizeHeaders(hdrs []string) []string { sanitizedHeader := strings.TrimSpace(hdr) var key, value string - headerMatches := strings.Split(sanitizedHeader, ":") + headerMatches := strings.SplitN(sanitizedHeader, ":", 2) if len(headerMatches) < 2 { continue } @@ -649,7 +670,7 @@ var utcNow = func() time.Time { func extractHeaderNames(kvs []string) []string { var res []string for _, header := range kvs { - nameValue := strings.Split(header, ":") + nameValue := strings.SplitN(header, ":", 2) res = append(res, nameValue[0]) } return res @@ -712,7 +733,7 @@ func signedURLV4(bucket, name string, opts *SignedURLOptions, now time.Time) (st fmt.Fprintf(buf, "%s\n", escapedQuery) // Fill in the hostname based on the desired URL style. - u.Host = opts.Style.host(bucket) + u.Host = opts.Style.host(opts.Hostname, bucket) // Fill in the URL scheme. if opts.Insecure { @@ -793,7 +814,7 @@ func sortHeadersByKey(hdrs []string) []string { headersMap := map[string]string{} var headersKeys []string for _, h := range hdrs { - parts := strings.Split(h, ":") + parts := strings.SplitN(h, ":", 2) k := parts[0] v := parts[1] headersMap[k] = v @@ -846,7 +867,7 @@ func signedURLV2(bucket, name string, opts *SignedURLOptions) (string, error) { } encoded := base64.StdEncoding.EncodeToString(b) u.Scheme = "https" - u.Host = PathStyle().host(bucket) + u.Host = PathStyle().host(opts.Hostname, bucket) q := u.Query() q.Set("GoogleAccessId", opts.GoogleAccessID) q.Set("Expires", fmt.Sprintf("%d", opts.Expires.Unix())) @@ -889,7 +910,9 @@ func (o *ObjectHandle) Generation(gen int64) *ObjectHandle { } // If returns a new ObjectHandle that applies a set of preconditions. -// Preconditions already set on the ObjectHandle are ignored. +// Preconditions already set on the ObjectHandle are ignored. The supplied +// Conditions must have at least one field set to a non-default value; +// otherwise an error will be returned from any operation on the ObjectHandle. // Operations on the new handle will return an error if the preconditions are not // satisfied. See https://cloud.google.com/storage/docs/generations-preconditions // for more details. @@ -1016,6 +1039,7 @@ func (o *ObjectHandle) ReadCompressed(compressed bool) *ObjectHandle { // It is the caller's responsibility to call Close when writing is done. To // stop writing without saving the data, cancel the context. func (o *ObjectHandle) NewWriter(ctx context.Context) *Writer { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Object.Writer") return &Writer{ ctx: ctx, o: o, @@ -1159,7 +1183,7 @@ func (uattrs *ObjectAttrsToUpdate) toProtoObject(bucket, object string) *storage o.Acl = toProtoObjectACL(uattrs.ACL) } - // TODO(cathyo): Handle metadata. Pending b/230510191. + o.Metadata = uattrs.Metadata return o } @@ -1480,6 +1504,8 @@ type Query struct { // aside from the prefix, contain delimiter will have their name, // truncated after the delimiter, returned in prefixes. // Duplicate prefixes are omitted. + // Must be set to / when used with the MatchGlob parameter to filter results + // in a directory-like mode. // Optional. Delimiter string @@ -1493,9 +1519,9 @@ type Query struct { Versions bool // attrSelection is used to select only specific fields to be returned by - // the query. 
It is set by the user calling calling SetAttrSelection. These + // the query. It is set by the user calling SetAttrSelection. These // are used by toFieldMask and toFieldSelection for gRPC and HTTP/JSON - // clients repsectively. + // clients respectively. attrSelection []string // StartOffset is used to filter results to objects whose names are @@ -1521,6 +1547,12 @@ type Query struct { // true, they will also be included as objects and their metadata will be // populated in the returned ObjectAttrs. IncludeTrailingDelimiter bool + + // MatchGlob is a glob pattern used to filter results (for example, foo*bar). See + // https://cloud.google.com/storage/docs/json_api/v1/objects/list#list-object-glob + // for syntax details. When Delimiter is set in conjunction with MatchGlob, + // it must be set to /. + MatchGlob string } // attrToFieldMap maps the field names of ObjectAttrs to the underlying field @@ -1713,6 +1745,8 @@ type Conditions struct { // GenerationNotMatch specifies that the object must not have the given // generation for the operation to occur. // If GenerationNotMatch is zero, it has no effect. + // This condition only works for object reads if the WithJSONReads client + // option is set. GenerationNotMatch int64 // DoesNotExist specifies that the object must not exist in the bucket for @@ -1731,6 +1765,8 @@ type Conditions struct { // MetagenerationNotMatch specifies that the object must not have the given // metageneration for the operation to occur. // If MetagenerationNotMatch is zero, it has no effect. + // This condition only works for object reads if the WithJSONReads client + // option is set. MetagenerationNotMatch int64 } @@ -2154,8 +2190,6 @@ func toProjectResource(project string) string { // setConditionProtoField uses protobuf reflection to set named condition field // to the given condition value if supported on the protobuf message. -// -// This is an experimental API and not intended for public use. func setConditionProtoField(m protoreflect.Message, f string, v int64) bool { fields := m.Descriptor().Fields() if rf := fields.ByName(protoreflect.Name(f)); rf != nil { @@ -2168,8 +2202,6 @@ func setConditionProtoField(m protoreflect.Message, f string, v int64) bool { // applyCondsProto validates and attempts to set the conditions on a protobuf // message using protobuf reflection. -// -// This is an experimental API and not intended for public use. func applyCondsProto(method string, gen int64, conds *Conditions, msg proto.Message) error { rmsg := msg.ProtoReflect() diff --git a/vendor/cloud.google.com/go/storage/writer.go b/vendor/cloud.google.com/go/storage/writer.go index 3a6a1ce0f5..aeb7ed418c 100644 --- a/vendor/cloud.google.com/go/storage/writer.go +++ b/vendor/cloud.google.com/go/storage/writer.go @@ -22,6 +22,8 @@ import ( "sync" "time" "unicode/utf8" + + "cloud.google.com/go/internal/trace" ) // A Writer writes a Cloud Storage object. @@ -86,7 +88,7 @@ type Writer struct { // cancellation. ChunkRetryDeadline time.Duration - // ProgressFunc can be used to monitor the progress of a large write. + // ProgressFunc can be used to monitor the progress of a large write // operation. 
If ProgressFunc is not nil and writing requires multiple // calls to the underlying service (see // https://cloud.google.com/storage/docs/json_api/v1/how-tos/resumable-upload), @@ -163,6 +165,7 @@ func (w *Writer) Close() error { <-w.donec w.mu.Lock() defer w.mu.Unlock() + trace.EndSpan(w.ctx, w.err) return w.err } diff --git a/vendor/github.com/4meepo/tagalign/.gitignore b/vendor/github.com/4meepo/tagalign/.gitignore new file mode 100644 index 0000000000..e37bb52e49 --- /dev/null +++ b/vendor/github.com/4meepo/tagalign/.gitignore @@ -0,0 +1,75 @@ +# File created using '.gitignore Generator' for Visual Studio Code: https://bit.ly/vscode-gig +# Created by https://www.toptal.com/developers/gitignore/api/visualstudiocode,macos,go +# Edit at https://www.toptal.com/developers/gitignore?templates=visualstudiocode,macos,go + +### Go ### +# If you prefer the allow list template instead of the deny list, see community template: +# https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore +# +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +.vscode + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +# vendor/ + +# Go workspace file +go.work + +### macOS ### +# General +.DS_Store +.AppleDouble +.LSOverride + +# Icon must end with two \r +Icon + + +# Thumbnails +._* + +# Files that might appear in the root of a volume +.DocumentRevisions-V100 +.fseventsd +.Spotlight-V100 +.TemporaryItems +.Trashes +.VolumeIcon.icns +.com.apple.timemachine.donotpresent + +# Directories potentially created on remote AFP share +.AppleDB +.AppleDesktop +Network Trash Folder +Temporary Items +.apdisk + +### macOS Patch ### +# iCloud generated files +*.icloud + +.history/ + +# Built Visual Studio Code Extensions +*.vsix + +### VisualStudioCode Patch ### +# Ignore all local history of files +.history +.ionide + +# End of https://www.toptal.com/developers/gitignore/api/visualstudiocode,macos,go + +# Custom rules (everything added below won't be overriden by 'Generate .gitignore File' if you use 'Update' option) diff --git a/vendor/github.com/4meepo/tagalign/.golangci.yml b/vendor/github.com/4meepo/tagalign/.golangci.yml new file mode 100644 index 0000000000..e65f604fe1 --- /dev/null +++ b/vendor/github.com/4meepo/tagalign/.golangci.yml @@ -0,0 +1,106 @@ +# See https://golangci-lint.run/usage/configuration/ + +linters-settings: + revive: + # see https://github.com/mgechev/revive#available-rules for details. 
+ ignore-generated-header: true + severity: warning + rules: + - name: atomic + - name: blank-imports + - name: bool-literal-in-expr + - name: call-to-gc + - name: confusing-naming + - name: confusing-results + - name: constant-logical-expr + - name: context-as-argument + - name: context-keys-type + - name: deep-exit + - name: defer + - name: dot-imports + - name: duplicated-imports + - name: early-return + - name: empty-block + - name: empty-lines + - name: error-naming + - name: error-return + - name: error-strings + - name: errorf + - name: exported + - name: get-return + - name: identical-branches + - name: if-return + - name: import-shadowing + - name: increment-decrement + - name: indent-error-flow + - name: modifies-parameter + - name: modifies-value-receiver + - name: package-comments + - name: range + - name: range-val-address + - name: range-val-in-closure + - name: receiver-naming + - name: redefines-builtin-id + - name: string-of-int + - name: superfluous-else + - name: time-naming + - name: unconditional-recursion + - name: unexported-naming + - name: unexported-return + - name: unnecessary-stmt + - name: unreachable-code + - name: unused-parameter + - name: var-declaration + - name: var-naming + - name: waitgroup-by-value + +linters: + disable-all: true + enable: + - asciicheck + - bodyclose + - dogsled + - dupl + - durationcheck + - errcheck + - errorlint + - exhaustive + - exportloopref + - forcetypeassert + - gochecknoinits + - gocognit + - goconst + - gocritic + - gocyclo + - godot + - godox + - goimports + - gomoddirectives + - gomodguard + - goprintffuncname + - gosec + - gosimple + # - govet + - importas + - ineffassign + - makezero + - misspell + - nakedret + - nestif + - nilerr + - noctx + - nolintlint + - prealloc + - predeclared + - revive + - rowserrcheck + - sqlclosecheck + - staticcheck + - stylecheck + - thelper + - tparallel + - typecheck + - unconvert + - unparam + - unused + - whitespace diff --git a/vendor/github.com/4meepo/tagalign/.goreleaser.yml b/vendor/github.com/4meepo/tagalign/.goreleaser.yml new file mode 100644 index 0000000000..e7b6f6800e --- /dev/null +++ b/vendor/github.com/4meepo/tagalign/.goreleaser.yml @@ -0,0 +1,32 @@ +--- +project_name: tagalign + +release: + github: + owner: 4meepo + name: tagalign + +builds: + - binary: tagalign + goos: + - darwin + - windows + - linux + - freebsd + goarch: + - amd64 + - arm64 + - arm + goarm: + - 6 + - 7 + gomips: + - hardfloat + env: + - CGO_ENABLED=0 + ignore: + - goos: darwin + goarch: 386 + - goos: freebsd + goarch: arm64 + main: ./cmd/tagalign/ \ No newline at end of file diff --git a/vendor/github.com/mgechev/dots/LICENSE b/vendor/github.com/4meepo/tagalign/LICENSE similarity index 97% rename from vendor/github.com/mgechev/dots/LICENSE rename to vendor/github.com/4meepo/tagalign/LICENSE index c617c7e012..da3ae82706 100644 --- a/vendor/github.com/mgechev/dots/LICENSE +++ b/vendor/github.com/4meepo/tagalign/LICENSE @@ -1,6 +1,6 @@ MIT License -Copyright (c) 2018 Minko Gechev +Copyright (c) 2023 Yifei Liu Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/vendor/github.com/4meepo/tagalign/Makefile b/vendor/github.com/4meepo/tagalign/Makefile new file mode 100644 index 0000000000..614e7773c3 --- /dev/null +++ b/vendor/github.com/4meepo/tagalign/Makefile @@ -0,0 +1,7 @@ +.PHONY: lint +lint: + golangci-lint run ./... 
+ +.PHONY: build +build: + go build -o tagalign cmd/tagalign/tagalign.go \ No newline at end of file diff --git a/vendor/github.com/4meepo/tagalign/README.md b/vendor/github.com/4meepo/tagalign/README.md new file mode 100644 index 0000000000..9d04dccbf2 --- /dev/null +++ b/vendor/github.com/4meepo/tagalign/README.md @@ -0,0 +1,130 @@ +# Go Tag Align + +![GitHub go.mod Go version](https://img.shields.io/github/go-mod/go-version/4meepo/tagalign?style=flat-square) +[![codecov](https://codecov.io/github/4meepo/tagalign/branch/main/graph/badge.svg?token=1R1T61UNBQ)](https://codecov.io/github/4meepo/tagalign) +[![GoDoc](https://godoc.org/github.com/4meepo/tagalign?status.svg)](https://pkg.go.dev/github.com/4meepo/tagalign) +[![Go Report Card](https://goreportcard.com/badge/github.com/4meepo/tagalign)](https://goreportcard.com/report/github.com/4meepo/tagalign) + +TagAlign is used to align and sort tags in Go struct. It can make the struct more readable and easier to maintain. + +For example, this struct + +```go +type FooBar struct { + Foo int `json:"foo" validate:"required"` + Bar string `json:"bar" validate:"required"` + FooFoo int8 `json:"foo_foo" validate:"required"` + BarBar int `json:"bar_bar" validate:"required"` + FooBar struct { + Foo int `json:"foo" yaml:"foo" validate:"required"` + Bar222 string `json:"bar222" validate:"required" yaml:"bar"` + } `json:"foo_bar" validate:"required"` + BarFoo string `json:"bar_foo" validate:"required"` + BarFooBar string `json:"bar_foo_bar" validate:"required"` +} +``` + +can be aligned to: + +```go +type FooBar struct { + Foo int `json:"foo" validate:"required"` + Bar string `json:"bar" validate:"required"` + FooFoo int8 `json:"foo_foo" validate:"required"` + BarBar int `json:"bar_bar" validate:"required"` + FooBar struct { + Foo int `json:"foo" yaml:"foo" validate:"required"` + Bar222 string `json:"bar222" validate:"required" yaml:"bar"` + } `json:"foo_bar" validate:"required"` + BarFoo string `json:"bar_foo" validate:"required"` + BarFooBar string `json:"bar_foo_bar" validate:"required"` +} +``` + +## Usage + +By default tagalign will only align tags, but not sort them. But alignment and [sort feature](https://github.com/4meepo/tagalign#sort-tag) can work together or separately. + +* As a Golangci Linter (Recommended) + + Tagalign is a built-in linter in [Golangci Lint](https://golangci-lint.run/usage/linters/#tagalign) since `v1.53`. + > Note: In order to have the best experience, add the `--fix` flag to `golangci-lint` to enable the autofix feature. + +* Standalone Mode + + Install it using `GO` or download it [here](https://github.com/4meepo/tagalign/releases). + + ```bash + go install github.com/4meepo/tagalign/cmd/tagalign@latest + ``` + + Run it in your terminal. + + ```bash + # Only align tags. + tagalign -fix {package path} + # Only sort tags with fixed order. + tagalign -fix -noalign -sort -order "json,xml" {package path} + # Align and sort together. + tagalign -fix -sort -order "json,xml" {package path} + # Align and sort together in strict style. + tagalign -fix -sort -order "json,xml" -strict {package path} + ``` + +## Advanced Features + +### Sort Tag + +In addition to alignment, it can also sort tags with fixed order. 
If we enable sort with fixed order `json,xml`, the following code + +```go +type SortExample struct { + Foo int `json:"foo,omitempty" yaml:"bar" xml:"baz" binding:"required" gorm:"column:foo" zip:"foo" validate:"required"` + Bar int `validate:"required" yaml:"foo" xml:"bar" binding:"required" json:"bar,omitempty" gorm:"column:bar" zip:"bar" ` + FooBar int `gorm:"column:bar" validate:"required" xml:"bar" binding:"required" json:"bar,omitempty" zip:"bar" yaml:"foo"` +} +``` + +will be sorted and aligned to: + +```go +type SortExample struct { + Foo int `json:"foo,omitempty" xml:"baz" binding:"required" gorm:"column:foo" validate:"required" yaml:"bar" zip:"foo"` + Bar int `json:"bar,omitempty" xml:"bar" binding:"required" gorm:"column:bar" validate:"required" yaml:"foo" zip:"bar"` + FooBar int `json:"bar,omitempty" xml:"bar" binding:"required" gorm:"column:bar" validate:"required" yaml:"foo" zip:"bar"` +} +``` + +The fixed order is `json,xml`, so the tags `json` and `xml` will be sorted and aligned first, and the rest tags will be sorted and aligned in the dictionary order. + +### Strict Style + +Sometimes, you may want to align your tags in strict style. In this style, the tags will be sorted and aligned in the dictionary order, and the tags with the same name will be aligned together. For example, the following code + +```go +type StrictStyleExample struct { + Foo int ` xml:"baz" yaml:"bar" zip:"foo" binding:"required" gorm:"column:foo" validate:"required"` + Bar int `validate:"required" gorm:"column:bar" yaml:"foo" xml:"bar" binding:"required" json:"bar,omitempty" ` +} +``` + +will be aligned to + +```go +type StrictStyleExample struct { + Foo int `binding:"required" gorm:"column:foo" validate:"required" xml:"baz" yaml:"bar" zip:"foo"` + Bar int `binding:"required" gorm:"column:bar" json:"bar,omitempty" validate:"required" xml:"bar" yaml:"foo"` +} +``` + +> ⚠️Note: The strict style can't run without the align or sort feature enabled. + +## References + +[Golang AST Visualizer](http://goast.yuroyoro.net/) + +[Create New Golang CI Linter](https://golangci-lint.run/contributing/new-linters/) + +[Autofix Example](https://github.com/golangci/golangci-lint/pull/2450/files) + +[Integrating](https://disaev.me/p/writing-useful-go-analysis-linter/#integrating) diff --git a/vendor/github.com/4meepo/tagalign/options.go b/vendor/github.com/4meepo/tagalign/options.go new file mode 100644 index 0000000000..ddec98da73 --- /dev/null +++ b/vendor/github.com/4meepo/tagalign/options.go @@ -0,0 +1,37 @@ +package tagalign + +type Option func(*Helper) + +// WithMode specify the mode of tagalign. +func WithMode(mode Mode) Option { + return func(h *Helper) { + h.mode = mode + } +} + +// WithSort enable tags sort. +// fixedOrder specify the order of tags, the other tags will be sorted by name. +// Sory is disabled by default. +func WithSort(fixedOrder ...string) Option { + return func(h *Helper) { + h.sort = true + h.fixedTagOrder = fixedOrder + } +} + +// WithAlign configure whether enable tags align. +// Align is enabled by default. +func WithAlign(enabled bool) Option { + return func(h *Helper) { + h.align = enabled + } +} + +// WithStrictStyle configure whether enable strict style. +// StrictStyle is disabled by default. +// Note: StrictStyle must be used with WithAlign(true) and WithSort(...) together, or it will be ignored. 
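For illustration only: a minimal standalone driver showing how the options above combine when constructing the analyzer (`NewAnalyzer` is defined in tagalign.go later in this change). The use of the standard `singlechecker` harness is an assumption; the repository's own `cmd/tagalign` may wire things differently.

```go
package main

import (
	"github.com/4meepo/tagalign"
	"golang.org/x/tools/go/analysis/singlechecker"
)

func main() {
	// Align tags and sort them with json and xml first; per the note above,
	// strict style only takes effect because align and sort are both enabled.
	singlechecker.Main(tagalign.NewAnalyzer(
		tagalign.WithAlign(true),
		tagalign.WithSort("json", "xml"),
		tagalign.WithStrictStyle(),
	))
}
```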
+func WithStrictStyle() Option { + return func(h *Helper) { + h.style = StrictStyle + } +} diff --git a/vendor/github.com/4meepo/tagalign/tagalign.go b/vendor/github.com/4meepo/tagalign/tagalign.go new file mode 100644 index 0000000000..4734b56661 --- /dev/null +++ b/vendor/github.com/4meepo/tagalign/tagalign.go @@ -0,0 +1,459 @@ +package tagalign + +import ( + "fmt" + "go/ast" + "go/token" + "log" + "reflect" + "sort" + "strconv" + "strings" + + "github.com/fatih/structtag" + + "golang.org/x/tools/go/analysis" +) + +type Mode int + +const ( + StandaloneMode Mode = iota + GolangciLintMode +) + +type Style int + +const ( + DefaultStyle Style = iota + StrictStyle +) + +const ( + errTagValueSyntax = "bad syntax for struct tag value" +) + +func NewAnalyzer(options ...Option) *analysis.Analyzer { + return &analysis.Analyzer{ + Name: "tagalign", + Doc: "check that struct tags are well aligned", + Run: func(p *analysis.Pass) (any, error) { + Run(p, options...) + return nil, nil + }, + } +} + +func Run(pass *analysis.Pass, options ...Option) []Issue { + var issues []Issue + for _, f := range pass.Files { + h := &Helper{ + mode: StandaloneMode, + style: DefaultStyle, + align: true, + } + for _, opt := range options { + opt(h) + } + + // StrictStyle must be used with WithAlign(true) and WithSort(...) together, or it will be ignored. + if h.style == StrictStyle && (!h.align || !h.sort) { + h.style = DefaultStyle + } + + if !h.align && !h.sort { + // do nothing + return nil + } + + ast.Inspect(f, func(n ast.Node) bool { + h.find(pass, n) + return true + }) + h.Process(pass) + issues = append(issues, h.issues...) + } + return issues +} + +type Helper struct { + mode Mode + + style Style + + align bool // whether enable tags align. + sort bool // whether enable tags sort. + fixedTagOrder []string // the order of tags, the other tags will be sorted by name. + + singleFields []*ast.Field + consecutiveFieldsGroups [][]*ast.Field // fields in this group, must be consecutive in struct. + issues []Issue +} + +// Issue is used to integrate with golangci-lint's inline auto fix. +type Issue struct { + Pos token.Position + Message string + InlineFix InlineFix +} +type InlineFix struct { + StartCol int // zero-based + Length int + NewString string +} + +func (w *Helper) find(pass *analysis.Pass, n ast.Node) { + v, ok := n.(*ast.StructType) + if !ok { + return + } + + fields := v.Fields.List + if len(fields) == 0 { + return + } + + fs := make([]*ast.Field, 0) + split := func() { + n := len(fs) + if n > 1 { + w.consecutiveFieldsGroups = append(w.consecutiveFieldsGroups, fs) + } else if n == 1 { + w.singleFields = append(w.singleFields, fs[0]) + } + + fs = nil + } + + for i, field := range fields { + if field.Tag == nil { + // field without tags + split() + continue + } + + if i > 0 { + if fields[i-1].Tag == nil { + // if previous filed do not have a tag + fs = append(fs, field) + continue + } + preLineNum := pass.Fset.Position(fields[i-1].Tag.Pos()).Line + lineNum := pass.Fset.Position(field.Tag.Pos()).Line + if lineNum-preLineNum > 1 { + // fields with tags are not consecutive, including two case: + // 1. splited by lines + // 2. 
splited by a struct + split() + + // check if the field is a struct + if _, ok := field.Type.(*ast.StructType); ok { + continue + } + } + } + + fs = append(fs, field) + } + + split() +} + +func (w *Helper) report(pass *analysis.Pass, field *ast.Field, startCol int, msg, replaceStr string) { + if w.mode == GolangciLintMode { + iss := Issue{ + Pos: pass.Fset.Position(field.Tag.Pos()), + Message: msg, + InlineFix: InlineFix{ + StartCol: startCol, + Length: len(field.Tag.Value), + NewString: replaceStr, + }, + } + w.issues = append(w.issues, iss) + } + + if w.mode == StandaloneMode { + pass.Report(analysis.Diagnostic{ + Pos: field.Tag.Pos(), + End: field.Tag.End(), + Message: msg, + SuggestedFixes: []analysis.SuggestedFix{ + { + Message: msg, + TextEdits: []analysis.TextEdit{ + { + Pos: field.Tag.Pos(), + End: field.Tag.End(), + NewText: []byte(replaceStr), + }, + }, + }, + }, + }) + } +} + +func (w *Helper) Process(pass *analysis.Pass) { //nolint:gocognit + // process grouped fields + for _, fields := range w.consecutiveFieldsGroups { + offsets := make([]int, len(fields)) + + var maxTagNum int + var tagsGroup, notSortedTagsGroup [][]*structtag.Tag + + var uniqueKeys []string + addKey := func(k string) { + for _, key := range uniqueKeys { + if key == k { + return + } + } + uniqueKeys = append(uniqueKeys, k) + } + + for i := 0; i < len(fields); { + field := fields[i] + column := pass.Fset.Position(field.Tag.Pos()).Column - 1 + offsets[i] = column + + tag, err := strconv.Unquote(field.Tag.Value) + if err != nil { + // if tag value is not a valid string, report it directly + w.report(pass, field, column, errTagValueSyntax, field.Tag.Value) + fields = removeField(fields, i) + continue + } + + tags, err := structtag.Parse(tag) + if err != nil { + // if tag value is not a valid struct tag, report it directly + w.report(pass, field, column, err.Error(), field.Tag.Value) + fields = removeField(fields, i) + continue + } + + maxTagNum = max(maxTagNum, tags.Len()) + + if w.sort { + cp := make([]*structtag.Tag, tags.Len()) + for i, tag := range tags.Tags() { + cp[i] = tag + } + notSortedTagsGroup = append(notSortedTagsGroup, cp) + sortBy(w.fixedTagOrder, tags) + } + for _, t := range tags.Tags() { + addKey(t.Key) + } + tagsGroup = append(tagsGroup, tags.Tags()) + + i++ + } + + if w.sort && StrictStyle == w.style { + sortAllKeys(w.fixedTagOrder, uniqueKeys) + maxTagNum = len(uniqueKeys) + } + + // record the max length of each column tag + type tagLen struct { + Key string // present only when sort enabled + Len int + } + tagMaxLens := make([]tagLen, maxTagNum) + for j := 0; j < maxTagNum; j++ { + var maxLength int + var key string + for i := 0; i < len(tagsGroup); i++ { + if w.style == StrictStyle { + key = uniqueKeys[j] + // search by key + for _, tag := range tagsGroup[i] { + if tag.Key == key { + maxLength = max(maxLength, len(tag.String())) + break + } + } + } else { + if len(tagsGroup[i]) <= j { + // in case of index out of range + continue + } + maxLength = max(maxLength, len(tagsGroup[i][j].String())) + } + } + tagMaxLens[j] = tagLen{key, maxLength} + } + + for i, field := range fields { + tags := tagsGroup[i] + + var newTagStr string + if w.align { + // if align enabled, align tags. 
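+				// Each tag is padded to the maximum width recorded for its column; in
+				// strict style, a blank of the same width is written for columns whose
+				// key is missing from this field, so equal keys stay vertically aligned.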
+ newTagBuilder := strings.Builder{} + for i, n := 0, 0; i < len(tags) && n < len(tagMaxLens); { + tag := tags[i] + var format string + if w.style == StrictStyle { + if tagMaxLens[n].Key == tag.Key { + // match + format = alignFormat(tagMaxLens[n].Len + 1) // with an extra space + newTagBuilder.WriteString(fmt.Sprintf(format, tag.String())) + i++ + n++ + } else { + // tag missing + format = alignFormat(tagMaxLens[n].Len + 1) + newTagBuilder.WriteString(fmt.Sprintf(format, "")) + n++ + } + } else { + format = alignFormat(tagMaxLens[n].Len + 1) // with an extra space + newTagBuilder.WriteString(fmt.Sprintf(format, tag.String())) + i++ + n++ + } + } + newTagStr = newTagBuilder.String() + } else { + // otherwise check if tags order changed + if w.sort && reflect.DeepEqual(notSortedTagsGroup[i], tags) { + // if tags order not changed, do nothing + continue + } + tagsStr := make([]string, len(tags)) + for i, tag := range tags { + tagsStr[i] = tag.String() + } + newTagStr = strings.Join(tagsStr, " ") + } + + unquoteTag := strings.TrimRight(newTagStr, " ") + // unquoteTag := newTagStr + newTagValue := fmt.Sprintf("`%s`", unquoteTag) + if field.Tag.Value == newTagValue { + // nothing changed + continue + } + + msg := "tag is not aligned, should be: " + unquoteTag + + w.report(pass, field, offsets[i], msg, newTagValue) + } + } + + // process single fields + for _, field := range w.singleFields { + column := pass.Fset.Position(field.Tag.Pos()).Column - 1 + tag, err := strconv.Unquote(field.Tag.Value) + if err != nil { + w.report(pass, field, column, errTagValueSyntax, field.Tag.Value) + continue + } + + tags, err := structtag.Parse(tag) + if err != nil { + w.report(pass, field, column, err.Error(), field.Tag.Value) + continue + } + originalTags := append([]*structtag.Tag(nil), tags.Tags()...) + if w.sort { + sortBy(w.fixedTagOrder, tags) + } + + newTagValue := fmt.Sprintf("`%s`", tags.String()) + if reflect.DeepEqual(originalTags, tags.Tags()) && field.Tag.Value == newTagValue { + // if tags order not changed, do nothing + continue + } + + msg := "tag is not aligned , should be: " + tags.String() + + w.report(pass, field, column, msg, newTagValue) + } +} + +// Issues returns all issues found by the analyzer. +// It is used to integrate with golangci-lint. +func (w *Helper) Issues() []Issue { + log.Println("tagalign 's Issues() should only be called in golangci-lint mode") + return w.issues +} + +// sortBy sorts tags by fixed order. +// If a tag is not in the fixed order, it will be sorted by name. 
+func sortBy(fixedOrder []string, tags *structtag.Tags) { + // sort by fixed order + sort.Slice(tags.Tags(), func(i, j int) bool { + ti := tags.Tags()[i] + tj := tags.Tags()[j] + + oi := findIndex(fixedOrder, ti.Key) + oj := findIndex(fixedOrder, tj.Key) + + if oi == -1 && oj == -1 { + return ti.Key < tj.Key + } + + if oi == -1 { + return false + } + + if oj == -1 { + return true + } + + return oi < oj + }) +} + +func sortAllKeys(fixedOrder []string, keys []string) { + sort.Slice(keys, func(i, j int) bool { + oi := findIndex(fixedOrder, keys[i]) + oj := findIndex(fixedOrder, keys[j]) + + if oi == -1 && oj == -1 { + return keys[i] < keys[j] + } + + if oi == -1 { + return false + } + + if oj == -1 { + return true + } + + return oi < oj + }) +} + +func findIndex(s []string, e string) int { + for i, a := range s { + if a == e { + return i + } + } + return -1 +} + +func alignFormat(length int) string { + return "%" + fmt.Sprintf("-%ds", length) +} + +func max(a, b int) int { + if a > b { + return a + } + return b +} + +func removeField(fields []*ast.Field, index int) []*ast.Field { + if index < 0 || index >= len(fields) { + return fields + } + + return append(fields[:index], fields[index+1:]...) +} diff --git a/vendor/github.com/Abirdcfly/dupword/.gitignore b/vendor/github.com/Abirdcfly/dupword/.gitignore new file mode 100644 index 0000000000..b5109d2bbf --- /dev/null +++ b/vendor/github.com/Abirdcfly/dupword/.gitignore @@ -0,0 +1,183 @@ + +# Godot-specific ignores +.import/ +export.cfg +export_presets.cfg + +# Mono-specific ignores +.mono/ +data_*/ + +### JetBrains template +# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and Rider +# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 + +# User-specific stuff +.idea/**/workspace.xml +.idea/**/tasks.xml +.idea/**/usage.statistics.xml +.idea/**/dictionaries +.idea/**/shelf + +# Generated files +.idea/**/contentModel.xml + +# Sensitive or high-churn files +.idea/**/dataSources/ +.idea/**/dataSources.ids +.idea/**/dataSources.local.xml +.idea/**/sqlDataSources.xml +.idea/**/dynamic.xml +.idea/**/uiDesigner.xml +.idea/**/dbnavigator.xml + +# Gradle +.idea/**/gradle.xml +.idea/**/libraries + +# Gradle and Maven with auto-import +# When using Gradle or Maven with auto-import, you should exclude module files, +# since they will be recreated, and may cause churn. Uncomment if using +# auto-import. 
+.idea/artifacts +.idea/compiler.xml +.idea/jarRepositories.xml +.idea/modules.xml +.idea/*.iml +.idea/modules +*.iml +*.ipr + +# CMake +cmake-build-*/ + +# Mongo Explorer plugin +.idea/**/mongoSettings.xml + +# File-based project format +*.iws + +# IntelliJ +out/ + +# mpeltonen/sbt-idea plugin +.idea_modules/ + +# JIRA plugin +atlassian-ide-plugin.xml + +# Cursive Clojure plugin +.idea/replstate.xml + +# Crashlytics plugin (for Android Studio and IntelliJ) +com_crashlytics_export_strings.xml +crashlytics.properties +crashlytics-build.properties +fabric.properties + +# Editor-based Rest Client +.idea/httpRequests + +# Android studio 3.1+ serialized cache file +.idea/caches/build_file_checksums.ser + +### Emacs template +# -*- mode: gitignore; -*- +*~ +\#*\# +/.emacs.desktop +/.emacs.desktop.lock +*.elc +auto-save-list +tramp +.\#* + +# Org-mode +.org-id-locations +*_archive + +# flymake-mode +*_flymake.* + +# eshell files +/eshell/history +/eshell/lastdir + +# elpa packages +/elpa/ + +# reftex files +*.rel + +# AUCTeX auto folder +/auto/ + +# cask packages +.cask/ +dist/ + +# Flycheck +flycheck_*.el + +# server auth directory +/server/ + +# projectiles files +.projectile + +# directory configuration +.dir-locals.el + +# network security +/network-security.data + + +### Vim template +# Swap +[._]*.s[a-v][a-z] +!*.svg # comment out if you don't need vector files +[._]*.sw[a-p] +[._]s[a-rt-v][a-z] +[._]ss[a-gi-z] +[._]sw[a-p] + +# Session +Session.vim +Sessionx.vim + +# Temporary +.netrwhist +*~ +# Auto-generated tag files +tags +# Persistent undo +[._]*.un~ + +### macOS template +# General +.DS_Store +.AppleDouble +.LSOverride + +# Icon must end with two \r +Icon + +# Thumbnails +._* + +# Files that might appear in the root of a volume +.DocumentRevisions-V100 +.fseventsd +.Spotlight-V100 +.TemporaryItems +.Trashes +.VolumeIcon.icns +.com.apple.timemachine.donotpresent + +# Directories potentially created on remote AFP share +.AppleDB +.AppleDesktop +Network Trash Folder +Temporary Items +.apdisk + diff --git a/vendor/github.com/Abirdcfly/dupword/.goreleaser.yml b/vendor/github.com/Abirdcfly/dupword/.goreleaser.yml new file mode 100644 index 0000000000..c3401787a0 --- /dev/null +++ b/vendor/github.com/Abirdcfly/dupword/.goreleaser.yml @@ -0,0 +1,72 @@ +--- +project_name: dupword + +release: + github: + owner: Abirdcfly + name: dupword + +builds: + - binary: dupword + goos: + - darwin + - windows + - linux + - freebsd + goarch: + - amd64 + - arm64 + - arm + - 386 + - ppc64le + - s390x + - mips64 + - mips64le + - riscv64 + goarm: + - 6 + - 7 + gomips: + - hardfloat + env: + - CGO_ENABLED=0 + ignore: + - goos: darwin + goarch: 386 + - goos: freebsd + goarch: arm64 + main: ./cmd/dupword/ + flags: + - -trimpath + ldflags: -s -w -X main.version={{.Version}} -X main.commit={{.ShortCommit}} -X main.date={{.Date}} + +archives: + - format: tar.gz + wrap_in_directory: true + format_overrides: + - goos: windows + format: zip + name_template: '{{ .ProjectName }}-{{ .Version }}-{{ .Os }}-{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}' + files: + - LICENSE + - README.md + +snapshot: + name_template: SNAPSHOT-{{ .Commit }} + +checksum: + name_template: '{{ .ProjectName }}-{{ .Version }}-checksums.txt' + +changelog: + sort: asc + filters: + exclude: + - '(?i)^docs?:' + - '(?i)^docs\([^:]+\):' + - '(?i)^docs\[[^:]+\]:' + - '^tests?:' + - '(?i)^dev:' + - '^build\(deps\): bump .* in /docs \(#\d+\)' + - '^build\(deps\): bump .* in /\.github/peril \(#\d+\)' + - Merge pull request + - Merge branch diff --git 
a/vendor/github.com/Abirdcfly/dupword/LICENSE b/vendor/github.com/Abirdcfly/dupword/LICENSE new file mode 100644 index 0000000000..afa64c6e1e --- /dev/null +++ b/vendor/github.com/Abirdcfly/dupword/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2022 Abirdcfly + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/Abirdcfly/dupword/README.md b/vendor/github.com/Abirdcfly/dupword/README.md new file mode 100644 index 0000000000..e6c5b919fa --- /dev/null +++ b/vendor/github.com/Abirdcfly/dupword/README.md @@ -0,0 +1,153 @@ +# dupword + +![GitHub go.mod Go version](https://img.shields.io/github/go-mod/go-version/Abirdcfly/dupword?style=flat-square) +[![GoDoc](https://godoc.org/github.com/Abirdcfly/dupword?status.svg)](https://pkg.go.dev/github.com/Abirdcfly/dupword) +[![Actions Status](https://github.com/Abirdcfly/dupword/actions/workflows/lint.yml/badge.svg)](https://github.com/Abirdcfly/dupword/actions) +[![Go Report Card](https://goreportcard.com/badge/github.com/Abirdcfly/dupword)](https://goreportcard.com/report/github.com/Abirdcfly/dupword) + +A linter that checks for duplicate words in the source code (usually miswritten) + +Examples in real code and related issues can be viewed in [dupword#3](https://github.com/Abirdcfly/dupword/issues/3) + +## example + +1. Repeated words appear on two adjacent lines [commit](https://github.com/golang/go/commit/d8f90ce0f8119bf593efb6fb91825de5b61fcda7) + +```diff +--- a/src/cmd/compile/internal/ssa/schedule.go ++++ b/src/cmd/compile/internal/ssa/schedule.go +@@ -179,7 +179,7 @@ func schedule(f *Func) { + // scored CarryChainTail (and prove w is not a tail). + score[w.ID] = ScoreFlags + } +- // Verify v has not been scored. If v has not been visited, v may be the ++ // Verify v has not been scored. If v has not been visited, v may be + // the final (tail) operation in a carry chain. If v is not, v will be + // rescored above when v's carry-using op is scored. When scoring is done, + // only tail operations will retain the CarryChainTail score. +``` + +2. Repeated words appearing on the same line [commit](https://github.com/golang/go/commit/48da729e8468b630ee003ac51cbaac595d53bec8) + +```diff +--- a/src/net/http/cookiejar/jar.go ++++ b/src/net/http/cookiejar/jar.go +@@ -465,7 +465,7 @@ func (j *Jar) domainAndType(host, domain string) (string, bool, error) { + // dot in the domain-attribute before processing the cookie. 
+ //
+ // Most browsers don't do that for IP addresses, only curl
+- // version 7.54) and and IE (version 11) do not reject a
++ // version 7.54) and IE (version 11) do not reject a
+ // Set-Cookie: a=1; domain=.127.0.0.1
+ // This leading dot is optional and serves only as hint for
+ // humans to indicate that a cookie with "domain=.bbc.co.uk"
+```
+
+## Install
+
+```bash
+go install github.com/Abirdcfly/dupword/cmd/dupword@latest
+```
+
+**Or** install the main branch (including the last commit) with:
+
+```bash
+go install github.com/Abirdcfly/dupword/cmd/dupword@main
+```
+
+## Usage
+
+### 1. default
+
+Run with default settings (test files included):
+
+**But note that not all repeated words are wrong**; see [dupword#4](https://github.com/Abirdcfly/dupword/issues/4) for real code examples.
+
+```bash
+$ dupword ./...
+/Users/xxx/go/src/dupword/dupword_test.go:88:10: Duplicate words (the) found
+exit status 3
+```
+
+### 2. skip test file
+
+Skip detecting test files (`*_test.go`):
+
+```bash
+$ dupword -test=false ./...
+```
+
+### 3. auto-fix
+
+```bash
+$ dupword -fix ./...
+```
+
+### 4. all options
+
+All options:
+
+```bash
+$ dupword --help
+dupword: checks for duplicate words in the source code (usually miswritten)
+
+Usage: dupword [-flag] [package]
+
+This analyzer checks miswritten duplicate words in comments or package doc or string declaration
+
+Flags:
+  -V	print version and exit
+  -all
+    	no effect (deprecated)
+  -c int
+    	display offending line with this many lines of context (default -1)
+  -cpuprofile string
+    	write CPU profile to this file
+  -debug string
+    	debug flags, any subset of "fpstv"
+  -fix
+    	apply all suggested fixes
+  -flags
+    	print analyzer flags in JSON
+  -ignore value
+    	ignore words
+  -json
+    	emit JSON output
+  -keyword value
+    	keywords for detecting duplicate words
+  -memprofile string
+    	write memory profile to this file
+  -source
+    	no effect (deprecated)
+  -tags string
+    	no effect (deprecated)
+  -test
+    	indicates whether test files should be analyzed, too (default true)
+  -trace string
+    	write trace log to this file
+  -v	no effect (deprecated)
+```
+
+### 5. my advice
+
+Use `--keyword=the,and,a` and `-fix` together. I think that specifying only commonly repeated prepositions can effectively avoid false positives.
+
+See [dupword#4](https://github.com/Abirdcfly/dupword/issues/4) for a real code example.
+
+```bash
+$ dupword --keyword=the,and,a -fix ./...
+```
+
+## TODO
+
+- [x] add this linter to golangci-lint
+- [ ] rewrite the detection logic to make it more efficient
+
+## Limitation
+
+1. Only `*.go` files are checked, but some miswritten duplicates occur in `*.md` or `*.json` files (Kubernetes is an example). In that case, my advice is to use [rg](https://github.com/BurntSushi/ripgrep) to find and replace them manually.
+2. When `-fix` is used, `go fmt` is effectively run behind the scenes ([this logic is determined upstream](https://github.com/golang/tools/blob/248c34b88a4148128f89e41923498bd86f805b7d/go/analysis/internal/checker/checker.go#L424-L433); this project does not contain that code).
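+
+## Use as a library
+
+dupword is built on the standard `go/analysis` framework, so the analyzer can also be driven programmatically. The snippet below is only an illustrative sketch (the `main` package shown is an assumption, not part of this repository):
+
+```go
+package main
+
+import (
+	"github.com/Abirdcfly/dupword"
+	"golang.org/x/tools/go/analysis/singlechecker"
+)
+
+func main() {
+	// NewAnalyzer returns a *analysis.Analyzer with the -keyword and -ignore flags registered.
+	singlechecker.Main(dupword.NewAnalyzer())
+}
+```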
+ +## License + +MIT diff --git a/vendor/github.com/Abirdcfly/dupword/dupword.go b/vendor/github.com/Abirdcfly/dupword/dupword.go new file mode 100644 index 0000000000..c291eab527 --- /dev/null +++ b/vendor/github.com/Abirdcfly/dupword/dupword.go @@ -0,0 +1,331 @@ +// MIT License +// +// Copyright (c) 2022 Abirdcfly +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +// Package dupword defines an Analyzer that checks that duplicate words +// int the source code. +package dupword + +import ( + "flag" + "fmt" + "go/ast" + "go/token" + "sort" + "strconv" + "strings" + "unicode" + "unicode/utf8" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" +) + +const ( + Name = "dupword" + Doc = `checks for duplicate words in the source code (usually miswritten) + +This analyzer checks miswritten duplicate words in comments or package doc or string declaration` + Message = "Duplicate words (%s) found" + CommentPrefix = `//` +) + +var ( + defaultWord = []string{} + // defaultWord = []string{"the", "and", "a"} + ignoreWord = map[string]bool{} +) + +type analyzer struct { + KeyWord []string +} + +func (a *analyzer) String() string { + return strings.Join(a.KeyWord, ",") +} + +func (a *analyzer) Set(w string) error { + if len(w) != 0 { + a.KeyWord = make([]string, 0) + a.KeyWord = append(a.KeyWord, strings.Split(w, ",")...) 
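+		// Note that setting -keyword replaces the configured keyword list rather than appending to it.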
+ } + return nil +} + +type ignore struct { +} + +func (a *ignore) String() string { + t := make([]string,0, len(ignoreWord)) + for k := range ignoreWord { + t = append(t, k) + } + return strings.Join(t, ",") +} + +func (a *ignore) Set(w string) error { + for _, k := range strings.Split(w, ","){ + ignoreWord[k] = true + } + return nil +} + +// for test only +func ClearIgnoreWord() { + ignoreWord = map[string]bool{} +} + +func NewAnalyzer() *analysis.Analyzer { + ignore := &ignore{} + analyzer := &analyzer{KeyWord: defaultWord} + a := &analysis.Analyzer{ + Name: Name, + Doc: Doc, + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Run: analyzer.run, + RunDespiteErrors: true, + } + a.Flags.Init(Name, flag.ExitOnError) + a.Flags.Var(analyzer, "keyword", "keywords for detecting duplicate words") + a.Flags.Var(ignore, "ignore", "ignore words") + a.Flags.Var(version{}, "V", "print version and exit") + return a +} + +func (a *analyzer) run(pass *analysis.Pass) (interface{}, error) { + for _, file := range pass.Files { + a.fixDuplicateWordInComment(pass, file) + } + inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + nodeFilter := []ast.Node{ + (*ast.BasicLit)(nil), + } + inspect.Preorder(nodeFilter, func(n ast.Node) { + if lit, ok := n.(*ast.BasicLit); ok { + a.fixDuplicateWordInString(pass, lit) + } + }) + return nil, nil +} + +func (a *analyzer) fixDuplicateWordInComment(pass *analysis.Pass, f *ast.File) { + for _, cg := range f.Comments { + var preLine *ast.Comment + for _, c := range cg.List { + update, keyword, find := a.Check(c.Text) + if find { + pass.Report(analysis.Diagnostic{Pos: c.Slash, End: c.End(), Message: fmt.Sprintf(Message, keyword), SuggestedFixes: []analysis.SuggestedFix{{ + Message: "Update", + TextEdits: []analysis.TextEdit{{ + Pos: c.Slash, + End: c.End(), + NewText: []byte(update), + }}, + }}}) + } + if preLine != nil { + fields := strings.Fields(preLine.Text) + if len(fields) < 1 { + continue + } + preLineContent := fields[len(fields)-1] + "\n" + thisLineContent := c.Text + if find { + thisLineContent = update + } + before, after, _ := strings.Cut(thisLineContent, CommentPrefix) + update, keyword, find := a.Check(preLineContent + after) + if find { + var suggestedFixes []analysis.SuggestedFix + if strings.Contains(update, preLineContent) { + update = before + CommentPrefix + strings.TrimPrefix(update, preLineContent) + suggestedFixes = []analysis.SuggestedFix{{ + Message: "Update", + TextEdits: []analysis.TextEdit{{ + Pos: c.Slash, + End: c.End(), + NewText: []byte(update), + }}, + }} + } + pass.Report(analysis.Diagnostic{Pos: c.Slash, End: c.End(), Message: fmt.Sprintf(Message, keyword), SuggestedFixes: suggestedFixes}) + } + } + preLine = c + } + } +} + +func (a *analyzer) fixDuplicateWordInString(pass *analysis.Pass, lit *ast.BasicLit) { + if lit.Kind != token.STRING { + return + } + value, err := strconv.Unquote(lit.Value) + if err != nil { + fmt.Printf("lit.Value:%v, err: %v\n", lit.Value, err) + // fall back to default + value = lit.Value + } + quote := value != lit.Value + update, keyword, find := a.Check(value) + if quote { + update = strconv.Quote(update) + } + if find { + pass.Report(analysis.Diagnostic{Pos: lit.Pos(), End: lit.End(), Message: fmt.Sprintf(Message, keyword), SuggestedFixes: []analysis.SuggestedFix{{ + Message: "Update", + TextEdits: []analysis.TextEdit{{ + Pos: lit.Pos(), + End: lit.End(), + NewText: []byte(update), + }}, + }}}) + } +} + +// CheckOneKey use to check there is a defined duplicate word in a string. 
+// raw is checked line. key is the keyword to check. empty means just check duplicate word. +func CheckOneKey(raw, key string) (new string, findWord string, find bool) { + if key == "" { + has := false + fields := strings.Fields(raw) + for i := range fields { + if i == len(fields)-1 { + break + } + if fields[i] == fields[i+1] { + has = true + } + } + if !has { + return + } + } else { + if x := strings.Split(raw, key); len(x) < 2 { + return + } + } + + findWordMap := make(map[string]bool, 4) + newLine := strings.Builder{} + wordStart, spaceStart := 0, 0 + curWord, preWord := "", "" + lastSpace := "" + var lastRune int32 + for i, r := range raw { + if !unicode.IsSpace(r) && unicode.IsSpace(lastRune) { + // word start position + /* + i + | + hello[ spaceA ]the[ spaceB ]the[ spaceC ]word + ^ ^ + | curWord: the + preWord: the + */ + symbol := raw[spaceStart:i] + if ((key != "" && curWord == key) || key == "") && curWord == preWord && curWord != "" { + if !ExcludeWords(curWord) { + find = true + findWordMap[curWord] = true + newLine.WriteString(lastSpace) + symbol = "" + } + } else { + newLine.WriteString(lastSpace) + newLine.WriteString(curWord) + } + lastSpace = symbol + preWord = curWord + wordStart = i + } else if unicode.IsSpace(r) && !unicode.IsSpace(lastRune) { + // space start position + spaceStart = i + curWord = raw[wordStart:i] + } else if i == len(raw)-1 { + // last position + word := raw[wordStart:] + if ((key != "" && word == key) || key == "") && word == preWord { + if !ExcludeWords(word) { + find = true + findWordMap[word] = true + } + } else { + newLine.WriteString(lastSpace) + newLine.WriteString(word) + } + } + lastRune = r + } + if find { + new = newLine.String() + findWordSlice := make([]string, len(findWordMap)) + i := 0 + for k := range findWordMap { + findWordSlice[i] = k + i++ + } + sort.Strings(findWordSlice) + findWord = strings.Join(findWordSlice, ",") + } + return +} + +func (a *analyzer) Check(raw string) (update string, keyword string, find bool) { + for _, key := range a.KeyWord { + updateOne, _, findOne := CheckOneKey(raw, key) + if findOne { + raw = updateOne + find = findOne + update = updateOne + if keyword == "" { + keyword = key + } else { + keyword = keyword + "," + key + } + } + } + if len(a.KeyWord) == 0 { + return CheckOneKey(raw, "") + } + return +} + +// ExcludeWords determines whether duplicate words should be reported, +// +// e.g. %s, should not be reported. 
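+// Words whose first rune is a digit, punctuation, or a symbol are excluded,
+// as is any word supplied via the -ignore flag.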
+func ExcludeWords(word string) (exclude bool) { + firstRune, _ := utf8.DecodeRuneInString(word) + if unicode.IsDigit(firstRune) { + return true + } + if unicode.IsPunct(firstRune) { + return true + } + if unicode.IsSymbol(firstRune) { + return true + } + if _, exist := ignoreWord[word]; exist { + return true + } + return false +} diff --git a/vendor/github.com/Abirdcfly/dupword/version.go b/vendor/github.com/Abirdcfly/dupword/version.go new file mode 100644 index 0000000000..9d892d0c98 --- /dev/null +++ b/vendor/github.com/Abirdcfly/dupword/version.go @@ -0,0 +1,41 @@ +// MIT License +// +// # Copyright (c) 2022 Abirdcfly +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package dupword + +import ( + "fmt" + "os" +) + +var Version = "dev" + +type version struct{} + +func (version) IsBoolFlag() bool { return true } +func (version) Get() interface{} { return nil } +func (version) String() string { return "" } +func (version) Set(_ string) error { + fmt.Println(Version) + os.Exit(0) + return nil +} diff --git a/vendor/github.com/Antonboom/errname/LICENSE b/vendor/github.com/Antonboom/errname/LICENSE new file mode 100644 index 0000000000..e2002e4d43 --- /dev/null +++ b/vendor/github.com/Antonboom/errname/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2021 Anton Telyshev + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/vendor/github.com/Antonboom/errname/pkg/analyzer/analyzer.go b/vendor/github.com/Antonboom/errname/pkg/analyzer/analyzer.go new file mode 100644 index 0000000000..aa85225108 --- /dev/null +++ b/vendor/github.com/Antonboom/errname/pkg/analyzer/analyzer.go @@ -0,0 +1,135 @@ +package analyzer + +import ( + "fmt" + "go/ast" + "go/token" + "strconv" + "strings" + "unicode" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" +) + +// New returns new errname analyzer. +func New() *analysis.Analyzer { + return &analysis.Analyzer{ + Name: "errname", + Doc: "Checks that sentinel errors are prefixed with the `Err` and error types are suffixed with the `Error`.", + Run: run, + Requires: []*analysis.Analyzer{inspect.Analyzer}, + } +} + +type stringSet = map[string]struct{} + +var ( + importNodes = []ast.Node{(*ast.ImportSpec)(nil)} + typeNodes = []ast.Node{(*ast.TypeSpec)(nil)} + funcNodes = []ast.Node{(*ast.FuncDecl)(nil)} +) + +func run(pass *analysis.Pass) (interface{}, error) { + insp := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + + pkgAliases := map[string]string{} + insp.Preorder(importNodes, func(node ast.Node) { + i := node.(*ast.ImportSpec) + if n := i.Name; n != nil && i.Path != nil { + if path, err := strconv.Unquote(i.Path.Value); err == nil { + pkgAliases[n.Name] = getPkgFromPath(path) + } + } + }) + + allTypes := stringSet{} + typesSpecs := map[string]*ast.TypeSpec{} + insp.Preorder(typeNodes, func(node ast.Node) { + t := node.(*ast.TypeSpec) + allTypes[t.Name.Name] = struct{}{} + typesSpecs[t.Name.Name] = t + }) + + errorTypes := stringSet{} + insp.Preorder(funcNodes, func(node ast.Node) { + f := node.(*ast.FuncDecl) + t, ok := isMethodError(f) + if !ok { + return + } + errorTypes[t] = struct{}{} + + tSpec, ok := typesSpecs[t] + if !ok { + panic(fmt.Sprintf("no specification for type %q", t)) + } + + if _, ok := tSpec.Type.(*ast.ArrayType); ok { + if !isValidErrorArrayTypeName(t) { + reportAboutErrorType(pass, tSpec.Pos(), t, true) + } + } else if !isValidErrorTypeName(t) { + reportAboutErrorType(pass, tSpec.Pos(), t, false) + } + }) + + errorFuncs := stringSet{} + insp.Preorder(funcNodes, func(node ast.Node) { + f := node.(*ast.FuncDecl) + if isFuncReturningErr(f.Type, allTypes, errorTypes) { + errorFuncs[f.Name.Name] = struct{}{} + } + }) + + inspectPkgLevelVarsOnly := func(node ast.Node) bool { + switch v := node.(type) { + case *ast.FuncDecl: + return false + + case *ast.ValueSpec: + if name, ok := isSentinelError(v, pkgAliases, allTypes, errorTypes, errorFuncs); ok && !isValidErrorVarName(name) { + reportAboutErrorVar(pass, v.Pos(), name) + } + } + return true + } + for _, f := range pass.Files { + ast.Inspect(f, inspectPkgLevelVarsOnly) + } + + return nil, nil //nolint:nilnil +} + +func reportAboutErrorType(pass *analysis.Pass, typePos token.Pos, typeName string, isArrayType bool) { + var form string + if unicode.IsLower([]rune(typeName)[0]) { + form = "xxxError" + } else { + form = "XxxError" + } + + if isArrayType { + form += "s" + } + pass.Reportf(typePos, "the type name `%s` should conform to the `%s` format", typeName, form) +} + +func reportAboutErrorVar(pass *analysis.Pass, pos token.Pos, varName string) { + var form string + if unicode.IsLower([]rune(varName)[0]) { + form = "errXxx" + } else { + form = "ErrXxx" + } + pass.Reportf(pos, "the variable name `%s` should conform to the `%s` format", varName, form) +} + +func getPkgFromPath(p string) string { + idx := 
strings.LastIndex(p, "/") + if idx == -1 { + return p + } + return p[idx+1:] +} diff --git a/vendor/github.com/Antonboom/errname/pkg/analyzer/facts.go b/vendor/github.com/Antonboom/errname/pkg/analyzer/facts.go new file mode 100644 index 0000000000..06f8d61d8e --- /dev/null +++ b/vendor/github.com/Antonboom/errname/pkg/analyzer/facts.go @@ -0,0 +1,272 @@ +package analyzer + +import ( + "fmt" + "go/ast" + "go/token" + "go/types" + "strings" + "unicode" +) + +func isMethodError(f *ast.FuncDecl) (typeName string, ok bool) { + if f.Recv == nil || len(f.Recv.List) != 1 { + return "", false + } + if f.Name == nil || f.Name.Name != "Error" { + return "", false + } + + if f.Type == nil || f.Type.Results == nil || len(f.Type.Results.List) != 1 { + return "", false + } + + returnType, ok := f.Type.Results.List[0].Type.(*ast.Ident) + if !ok { + return "", false + } + + var receiverType string + + unwrapIdentName := func(e ast.Expr) string { + switch v := e.(type) { + case *ast.Ident: + return v.Name + case *ast.IndexExpr: + if i, ok := v.X.(*ast.Ident); ok { + return i.Name + } + case *ast.IndexListExpr: + if i, ok := v.X.(*ast.Ident); ok { + return i.Name + } + } + panic(fmt.Errorf("unsupported Error() receiver type %q", types.ExprString(e))) + } + + switch rt := f.Recv.List[0].Type; v := rt.(type) { + case *ast.Ident, *ast.IndexExpr, *ast.IndexListExpr: // SomeError, SomeError[T], SomeError[T1, T2, ...] + receiverType = unwrapIdentName(rt) + + case *ast.StarExpr: // *SomeError, *SomeError[T], *SomeError[T1, T2, ...] + receiverType = unwrapIdentName(v.X) + } + + return receiverType, returnType.Name == "string" +} + +func isValidErrorTypeName(s string) bool { + if isInitialism(s) { + return true + } + + words := split(s) + wordsCnt := wordsCount(words) + + if wordsCnt["error"] != 1 { + return false + } + return words[len(words)-1] == "error" +} + +func isValidErrorArrayTypeName(s string) bool { + if isInitialism(s) { + return true + } + + words := split(s) + wordsCnt := wordsCount(words) + + if wordsCnt["errors"] != 1 { + return false + } + return words[len(words)-1] == "errors" +} + +func isFuncReturningErr(fType *ast.FuncType, allTypes, errorTypes stringSet) bool { + if fType == nil || fType.Results == nil || len(fType.Results.List) != 1 { + return false + } + + var returnTypeName string + switch rt := fType.Results.List[0].Type.(type) { + case *ast.Ident: + returnTypeName = rt.Name + case *ast.StarExpr: + if i, ok := rt.X.(*ast.Ident); ok { + returnTypeName = i.Name + } + } + + return isErrorType(returnTypeName, allTypes, errorTypes) +} + +func isErrorType(tName string, allTypes, errorTypes stringSet) bool { + _, isUserType := allTypes[tName] + _, isErrType := errorTypes[tName] + return isErrType || (tName == "error" && !isUserType) +} + +var knownErrConstructors = stringSet{ + "fmt.Errorf": {}, + "errors.Errorf": {}, + "errors.New": {}, + "errors.Newf": {}, + "errors.NewWithDepth": {}, + "errors.NewWithDepthf": {}, + "errors.NewAssertionErrorWithWrappedErrf": {}, +} + +func isSentinelError( //nolint:gocognit,gocyclo + v *ast.ValueSpec, + pkgAliases map[string]string, + allTypes, errorTypes, errorFuncs stringSet, +) (varName string, ok bool) { + if len(v.Names) != 1 { + return "", false + } + varName = v.Names[0].Name + + switch vv := v.Type.(type) { + // var ErrEndOfFile error + // var ErrEndOfFile SomeErrType + case *ast.Ident: + if isErrorType(vv.Name, allTypes, errorTypes) { + return varName, true + } + + // var ErrEndOfFile *SomeErrType + case *ast.StarExpr: + if i, ok := vv.X.(*ast.Ident); 
ok && isErrorType(i.Name, allTypes, errorTypes) { + return varName, true + } + } + + if len(v.Values) != 1 { + return "", false + } + + switch vv := v.Values[0].(type) { + case *ast.CallExpr: + switch fun := vv.Fun.(type) { + // var ErrEndOfFile = errors.New("end of file") + case *ast.SelectorExpr: + pkg, ok := fun.X.(*ast.Ident) + if !ok { + return "", false + } + pkgFun := fun.Sel + + pkgName := pkg.Name + if a, ok := pkgAliases[pkgName]; ok { + pkgName = a + } + + _, ok = knownErrConstructors[pkgName+"."+pkgFun.Name] + return varName, ok + + // var ErrEndOfFile = newErrEndOfFile() + // var ErrEndOfFile = new(EndOfFileError) + // const ErrEndOfFile = constError("end of file") + // var statusCodeError = new(SomePtrError[string]) + case *ast.Ident: + if isErrorType(fun.Name, allTypes, errorTypes) { + return varName, true + } + + if _, ok := errorFuncs[fun.Name]; ok { + return varName, true + } + + if fun.Name == "new" && len(vv.Args) == 1 { + switch i := vv.Args[0].(type) { + case *ast.Ident: + return varName, isErrorType(i.Name, allTypes, errorTypes) + case *ast.IndexExpr: + if ii, ok := i.X.(*ast.Ident); ok { + return varName, isErrorType(ii.Name, allTypes, errorTypes) + } + } + } + + // var ErrEndOfFile = func() error { ... } + case *ast.FuncLit: + return varName, isFuncReturningErr(fun.Type, allTypes, errorTypes) + } + + // var ErrEndOfFile = &EndOfFileError{} + // var ErrOK = &SomePtrError[string]{Code: "200 OK"} + case *ast.UnaryExpr: + if vv.Op == token.AND { // & + if lit, ok := vv.X.(*ast.CompositeLit); ok { + switch i := lit.Type.(type) { + case *ast.Ident: + return varName, isErrorType(i.Name, allTypes, errorTypes) + case *ast.IndexExpr: + if ii, ok := i.X.(*ast.Ident); ok { + return varName, isErrorType(ii.Name, allTypes, errorTypes) + } + } + } + } + + // var ErrEndOfFile = EndOfFileError{} + // var ErrNotFound = SomeError[string]{Code: "Not Found"} + case *ast.CompositeLit: + switch i := vv.Type.(type) { + case *ast.Ident: + return varName, isErrorType(i.Name, allTypes, errorTypes) + case *ast.IndexExpr: + if ii, ok := i.X.(*ast.Ident); ok { + return varName, isErrorType(ii.Name, allTypes, errorTypes) + } + } + } + + return "", false +} + +func isValidErrorVarName(s string) bool { + if isInitialism(s) { + return true + } + + words := split(s) + wordsCnt := wordsCount(words) + + if wordsCnt["err"] != 1 { + return false + } + return words[0] == "err" +} + +func isInitialism(s string) bool { + return strings.ToLower(s) == s || strings.ToUpper(s) == s +} + +func split(s string) []string { + var words []string + ss := []rune(s) + + var b strings.Builder + b.WriteRune(ss[0]) + + for _, r := range ss[1:] { + if unicode.IsUpper(r) { + words = append(words, strings.ToLower(b.String())) + b.Reset() + } + b.WriteRune(r) + } + + words = append(words, strings.ToLower(b.String())) + return words +} + +func wordsCount(w []string) map[string]int { + result := make(map[string]int, len(w)) + for _, ww := range w { + result[ww]++ + } + return result +} diff --git a/vendor/github.com/Antonboom/nilnil/LICENSE b/vendor/github.com/Antonboom/nilnil/LICENSE new file mode 100644 index 0000000000..e2002e4d43 --- /dev/null +++ b/vendor/github.com/Antonboom/nilnil/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2021 Anton Telyshev + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, 
publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/Antonboom/nilnil/pkg/analyzer/analyzer.go b/vendor/github.com/Antonboom/nilnil/pkg/analyzer/analyzer.go new file mode 100644 index 0000000000..e980db5462 --- /dev/null +++ b/vendor/github.com/Antonboom/nilnil/pkg/analyzer/analyzer.go @@ -0,0 +1,158 @@ +package analyzer + +import ( + "go/ast" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" +) + +const ( + name = "nilnil" + doc = "Checks that there is no simultaneous return of `nil` error and an invalid value." + + reportMsg = "return both the `nil` error and invalid value: use a sentinel error instead" +) + +// New returns new nilnil analyzer. +func New() *analysis.Analyzer { + n := newNilNil() + + a := &analysis.Analyzer{ + Name: name, + Doc: doc, + Run: n.run, + Requires: []*analysis.Analyzer{inspect.Analyzer}, + } + a.Flags.Var(&n.checkedTypes, "checked-types", "coma separated list") + + return a +} + +type nilNil struct { + checkedTypes checkedTypes +} + +func newNilNil() *nilNil { + return &nilNil{ + checkedTypes: newDefaultCheckedTypes(), + } +} + +var ( + types = []ast.Node{(*ast.TypeSpec)(nil)} + + funcAndReturns = []ast.Node{ + (*ast.FuncDecl)(nil), + (*ast.FuncLit)(nil), + (*ast.ReturnStmt)(nil), + } +) + +type typeSpecByName map[string]typer + +func (n *nilNil) run(pass *analysis.Pass) (interface{}, error) { + insp := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + + typeSpecs := typeSpecByName{ + "any": newTyper(new(ast.InterfaceType)), + } + insp.Preorder(types, func(node ast.Node) { + t := node.(*ast.TypeSpec) + typeSpecs[t.Name.Name] = newTyper(t.Type) + }) + + var fs funcTypeStack + insp.Nodes(funcAndReturns, func(node ast.Node, push bool) (proceed bool) { + switch v := node.(type) { + case *ast.FuncLit: + if push { + fs.Push(v.Type) + } else { + fs.Pop() + } + + case *ast.FuncDecl: + if push { + fs.Push(v.Type) + } else { + fs.Pop() + } + + case *ast.ReturnStmt: + ft := fs.Top() // Current function. 
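+			// Only a two-value `return nil, nil` is reported, and only when the enclosing
+			// function declares exactly two results: a checked nilable type (ptr, func,
+			// iface, map, chan, ...) followed by `error`.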
+ + if !push || len(v.Results) != 2 || ft == nil || ft.Results == nil || len(ft.Results.List) != 2 { + return false + } + + fRes1, fRes2 := ft.Results.List[0], ft.Results.List[1] + if !(n.isDangerNilField(fRes1, typeSpecs) && n.isErrorField(fRes2)) { + return false + } + + rRes1, rRes2 := v.Results[0], v.Results[1] + if isNil(rRes1) && isNil(rRes2) { + pass.Reportf(v.Pos(), reportMsg) + } + } + + return true + }) + + return nil, nil //nolint:nilnil +} + +func (n *nilNil) isDangerNilField(f *ast.Field, typeSpecs typeSpecByName) bool { + return n.isDangerNilType(f.Type, typeSpecs) +} + +func (n *nilNil) isDangerNilType(t ast.Expr, typeSpecs typeSpecByName) bool { + switch v := t.(type) { + case *ast.StarExpr: + return n.checkedTypes.Contains(ptrType) + + case *ast.FuncType: + return n.checkedTypes.Contains(funcType) + + case *ast.InterfaceType: + return n.checkedTypes.Contains(ifaceType) + + case *ast.MapType: + return n.checkedTypes.Contains(mapType) + + case *ast.ChanType: + return n.checkedTypes.Contains(chanType) + + case *ast.Ident: + if t, ok := typeSpecs[v.Name]; ok { + return n.isDangerNilType(t.Type(), typeSpecs) + } + } + return false +} + +func (n *nilNil) isErrorField(f *ast.Field) bool { + return isIdent(f.Type, "error") +} + +func isNil(e ast.Expr) bool { + return isIdent(e, "nil") +} + +func isIdent(n ast.Node, name string) bool { + i, ok := n.(*ast.Ident) + if !ok { + return false + } + return i.Name == name +} + +type typer interface { + Type() ast.Expr +} + +func newTyper(t ast.Expr) typer { return typerImpl{t: t} } // +type typerImpl struct{ t ast.Expr } // +func (ti typerImpl) Type() ast.Expr { return ti.t } diff --git a/vendor/github.com/Antonboom/nilnil/pkg/analyzer/config.go b/vendor/github.com/Antonboom/nilnil/pkg/analyzer/config.go new file mode 100644 index 0000000000..520b813a54 --- /dev/null +++ b/vendor/github.com/Antonboom/nilnil/pkg/analyzer/config.go @@ -0,0 +1,77 @@ +package analyzer + +import ( + "fmt" + "sort" + "strings" +) + +func newDefaultCheckedTypes() checkedTypes { + return checkedTypes{ + ptrType: struct{}{}, + funcType: struct{}{}, + ifaceType: struct{}{}, + mapType: struct{}{}, + chanType: struct{}{}, + } +} + +const separator = ',' + +type typeName string + +func (t typeName) S() string { + return string(t) +} + +const ( + ptrType typeName = "ptr" + funcType typeName = "func" + ifaceType typeName = "iface" + mapType typeName = "map" + chanType typeName = "chan" +) + +var knownTypes = []typeName{ptrType, funcType, ifaceType, mapType, chanType} + +type checkedTypes map[typeName]struct{} + +func (c checkedTypes) Contains(t typeName) bool { + _, ok := c[t] + return ok +} + +func (c checkedTypes) String() string { + result := make([]string, 0, len(c)) + for t := range c { + result = append(result, t.S()) + } + + sort.Strings(result) + return strings.Join(result, string(separator)) +} + +func (c checkedTypes) Set(s string) error { + types := strings.FieldsFunc(s, func(c rune) bool { return c == separator }) + if len(types) == 0 { + return nil + } + + c.disableAll() + for _, t := range types { + switch tt := typeName(t); tt { + case ptrType, funcType, ifaceType, mapType, chanType: + c[tt] = struct{}{} + default: + return fmt.Errorf("unknown checked type name %q (see help)", t) + } + } + + return nil +} + +func (c checkedTypes) disableAll() { + for k := range c { + delete(c, k) + } +} diff --git a/vendor/github.com/Antonboom/nilnil/pkg/analyzer/func_type_stack.go b/vendor/github.com/Antonboom/nilnil/pkg/analyzer/func_type_stack.go new file mode 100644 
index 0000000000..0817615470 --- /dev/null +++ b/vendor/github.com/Antonboom/nilnil/pkg/analyzer/func_type_stack.go @@ -0,0 +1,29 @@ +package analyzer + +import ( + "go/ast" +) + +type funcTypeStack []*ast.FuncType + +func (s *funcTypeStack) Push(f *ast.FuncType) { + *s = append(*s, f) +} + +func (s *funcTypeStack) Pop() *ast.FuncType { + if len(*s) == 0 { + return nil + } + + last := len(*s) - 1 + f := (*s)[last] + *s = (*s)[:last] + return f +} + +func (s *funcTypeStack) Top() *ast.FuncType { + if len(*s) == 0 { + return nil + } + return (*s)[len(*s)-1] +} diff --git a/vendor/github.com/Antonboom/testifylint/LICENSE b/vendor/github.com/Antonboom/testifylint/LICENSE new file mode 100644 index 0000000000..9b1cf3a393 --- /dev/null +++ b/vendor/github.com/Antonboom/testifylint/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2022 Anton Telyshev + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/Antonboom/testifylint/analyzer/analyzer.go b/vendor/github.com/Antonboom/testifylint/analyzer/analyzer.go new file mode 100644 index 0000000000..84d7e815dc --- /dev/null +++ b/vendor/github.com/Antonboom/testifylint/analyzer/analyzer.go @@ -0,0 +1,93 @@ +package analyzer + +import ( + "fmt" + "go/ast" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/ast/inspector" + + "github.com/Antonboom/testifylint/internal/analysisutil" + "github.com/Antonboom/testifylint/internal/checkers" + "github.com/Antonboom/testifylint/internal/config" + "github.com/Antonboom/testifylint/internal/testify" +) + +const ( + name = "testifylint" + doc = "Checks usage of " + testify.ModulePath + "." + url = "https://github.com/antonboom/" + name +) + +// New returns new instance of testifylint analyzer. 
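+// Checkers are built inside Run so that flag values bound via config.BindToFlags
+// have already been parsed by the time the analyzer executes.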
+func New() *analysis.Analyzer { + cfg := config.NewDefault() + + analyzer := &analysis.Analyzer{ + Name: name, + Doc: doc, + URL: url, + Run: func(pass *analysis.Pass) (any, error) { + regularCheckers, advancedCheckers, err := newCheckers(cfg) + if err != nil { + return nil, fmt.Errorf("build checkers: %v", err) + } + + tl := &testifyLint{ + regularCheckers: regularCheckers, + advancedCheckers: advancedCheckers, + } + return tl.run(pass) + }, + } + config.BindToFlags(&cfg, &analyzer.Flags) + + return analyzer +} + +type testifyLint struct { + regularCheckers []checkers.RegularChecker + advancedCheckers []checkers.AdvancedChecker +} + +func (tl *testifyLint) run(pass *analysis.Pass) (any, error) { + filesToAnalysis := make([]*ast.File, 0, len(pass.Files)) + for _, f := range pass.Files { + if !analysisutil.Imports(f, testify.AssertPkgPath, testify.RequirePkgPath, testify.SuitePkgPath) { + continue + } + filesToAnalysis = append(filesToAnalysis, f) + } + + insp := inspector.New(filesToAnalysis) + + // Regular checkers. + insp.Preorder([]ast.Node{(*ast.CallExpr)(nil)}, func(node ast.Node) { + tl.regularCheck(pass, node.(*ast.CallExpr)) + }) + + // Advanced checkers. + for _, ch := range tl.advancedCheckers { + for _, d := range ch.Check(pass, insp) { + pass.Report(d) + } + } + + return nil, nil +} + +func (tl *testifyLint) regularCheck(pass *analysis.Pass, ce *ast.CallExpr) { + call := checkers.NewCallMeta(pass, ce) + if nil == call { + return + } + + for _, ch := range tl.regularCheckers { + if d := ch.Check(pass, call); d != nil { + pass.Report(*d) + // NOTE(a.telyshev): I'm not interested in multiple diagnostics per assertion. + // This simplifies the code and also makes the linter more efficient. + return + } + } +} diff --git a/vendor/github.com/Antonboom/testifylint/analyzer/checkers_factory.go b/vendor/github.com/Antonboom/testifylint/analyzer/checkers_factory.go new file mode 100644 index 0000000000..de1d6017f5 --- /dev/null +++ b/vendor/github.com/Antonboom/testifylint/analyzer/checkers_factory.go @@ -0,0 +1,71 @@ +package analyzer + +import ( + "fmt" + + "github.com/Antonboom/testifylint/internal/checkers" + "github.com/Antonboom/testifylint/internal/config" +) + +// newCheckers accepts linter config and returns slices of enabled checkers sorted by priority. 
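+// The enabled set starts from the defaults (or from all/none when enable-all or
+// disable-all is set), is adjusted by the explicit enable/disable lists, and is
+// then split into regular and advanced checkers.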
+func newCheckers(cfg config.Config) ([]checkers.RegularChecker, []checkers.AdvancedChecker, error) { + if err := cfg.Validate(); err != nil { + return nil, nil, err + } + + enabledCheckersSet := make(map[string]struct{}) + + if cfg.EnableAll { + for _, checker := range checkers.All() { + enabledCheckersSet[checker] = struct{}{} + } + } else if !cfg.DisableAll { + for _, checker := range checkers.EnabledByDefault() { + enabledCheckersSet[checker] = struct{}{} + } + } + + for _, checker := range cfg.EnabledCheckers { + enabledCheckersSet[checker] = struct{}{} + } + + for _, checker := range cfg.DisabledCheckers { + delete(enabledCheckersSet, checker) + } + + enabledCheckers := make([]string, 0, len(enabledCheckersSet)) + for v := range enabledCheckersSet { + enabledCheckers = append(enabledCheckers, v) + } + checkers.SortByPriority(enabledCheckers) + + regularCheckers := make([]checkers.RegularChecker, 0, len(enabledCheckers)) + advancedCheckers := make([]checkers.AdvancedChecker, 0, len(enabledCheckers)/2) + + for _, name := range enabledCheckers { + ch, ok := checkers.Get(name) + if !ok { + return nil, nil, fmt.Errorf("unknown checker %q", name) + } + + switch c := ch.(type) { + case *checkers.ExpectedActual: + c.SetExpVarPattern(cfg.ExpectedActual.ExpVarPattern.Regexp) + + case *checkers.RequireError: + c.SetFnPattern(cfg.RequireError.FnPattern.Regexp) + + case *checkers.SuiteExtraAssertCall: + c.SetMode(cfg.SuiteExtraAssertCall.Mode) + } + + switch casted := ch.(type) { + case checkers.RegularChecker: + regularCheckers = append(regularCheckers, casted) + case checkers.AdvancedChecker: + advancedCheckers = append(advancedCheckers, casted) + } + } + + return regularCheckers, advancedCheckers, nil +} diff --git a/vendor/github.com/Antonboom/testifylint/internal/analysisutil/doc.go b/vendor/github.com/Antonboom/testifylint/internal/analysisutil/doc.go new file mode 100644 index 0000000000..b57cbd9384 --- /dev/null +++ b/vendor/github.com/Antonboom/testifylint/internal/analysisutil/doc.go @@ -0,0 +1,9 @@ +// Package analysisutil contains functions common for `analyzer` and `internal/checkers` packages. +// In addition, it is intended to "lighten" these packages. +// +// If the function is common to several packages, or it makes sense to test it separately without +// "polluting" the target package with tests of private functionality, then you can put function in this package. +// +// It's important to avoid dependency on `golang.org/x/tools/go/analysis` in the helpers API. +// This makes the API "narrower" and also allows you to test functions without some "abstraction leaks". +package analysisutil diff --git a/vendor/github.com/Antonboom/testifylint/internal/analysisutil/file.go b/vendor/github.com/Antonboom/testifylint/internal/analysisutil/file.go new file mode 100644 index 0000000000..3fc1f42b86 --- /dev/null +++ b/vendor/github.com/Antonboom/testifylint/internal/analysisutil/file.go @@ -0,0 +1,28 @@ +package analysisutil + +import ( + "go/ast" + "strconv" +) + +// Imports tells if the file imports at least one of the packages. +// If no packages provided then function returns false. +func Imports(file *ast.File, pkgs ...string) bool { + for _, i := range file.Imports { + if i.Path == nil { + continue + } + + path, err := strconv.Unquote(i.Path.Value) + if err != nil { + continue + } + // NOTE(a.telyshev): Don't use `slices.Contains` to keep the minimum module version 1.20. + for _, pkg := range pkgs { // Small O(n). 
+ if pkg == path { + return true + } + } + } + return false +} diff --git a/vendor/github.com/Antonboom/testifylint/internal/analysisutil/format.go b/vendor/github.com/Antonboom/testifylint/internal/analysisutil/format.go new file mode 100644 index 0000000000..fcb4b847f6 --- /dev/null +++ b/vendor/github.com/Antonboom/testifylint/internal/analysisutil/format.go @@ -0,0 +1,34 @@ +package analysisutil + +import ( + "bytes" + "go/ast" + "go/format" + "go/token" +) + +// NodeString is a more powerful analogue of types.ExprString. +// Return empty string if node AST is invalid. +func NodeString(fset *token.FileSet, node ast.Node) string { + if v := formatNode(fset, node); v != nil { + return v.String() + } + return "" +} + +// NodeBytes works as NodeString but returns a byte slice. +// Return nil if node AST is invalid. +func NodeBytes(fset *token.FileSet, node ast.Node) []byte { + if v := formatNode(fset, node); v != nil { + return v.Bytes() + } + return nil +} + +func formatNode(fset *token.FileSet, node ast.Node) *bytes.Buffer { + buf := new(bytes.Buffer) + if err := format.Node(buf, fset, node); err != nil { + return nil + } + return buf +} diff --git a/vendor/github.com/Antonboom/testifylint/internal/analysisutil/object.go b/vendor/github.com/Antonboom/testifylint/internal/analysisutil/object.go new file mode 100644 index 0000000000..4e0346d2ba --- /dev/null +++ b/vendor/github.com/Antonboom/testifylint/internal/analysisutil/object.go @@ -0,0 +1,34 @@ +package analysisutil + +import ( + "go/ast" + "go/types" +) + +// ObjectOf works in context of Golang package and returns types.Object for the given object's package and name. +// The search is based on the provided package and its dependencies (imports). +// Returns nil if the object is not found. +func ObjectOf(pkg *types.Package, objPkg, objName string) types.Object { + if pkg.Path() == objPkg { + return pkg.Scope().Lookup(objName) + } + + for _, i := range pkg.Imports() { + if trimVendor(i.Path()) == objPkg { + return i.Scope().Lookup(objName) + } + } + return nil +} + +// IsObj returns true if expression is identifier which notes to given types.Object. +// Useful in combination with types.Universe objects. +func IsObj(typesInfo *types.Info, expr ast.Expr, expected types.Object) bool { + id, ok := expr.(*ast.Ident) + if !ok { + return false + } + + obj := typesInfo.ObjectOf(id) + return obj == expected +} diff --git a/vendor/github.com/Antonboom/testifylint/internal/analysisutil/pkg.go b/vendor/github.com/Antonboom/testifylint/internal/analysisutil/pkg.go new file mode 100644 index 0000000000..d34be5d341 --- /dev/null +++ b/vendor/github.com/Antonboom/testifylint/internal/analysisutil/pkg.go @@ -0,0 +1,19 @@ +package analysisutil + +import ( + "go/types" + "strings" +) + +// IsPkg checks that package has corresponding objName and path. +// Supports vendored packages. 
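+// For example, a package whose path is "vendor/github.com/stretchr/testify/assert"
+// matches the path "github.com/stretchr/testify/assert".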
+func IsPkg(pkg *types.Package, name, path string) bool { + return pkg.Name() == name && trimVendor(pkg.Path()) == path +} + +func trimVendor(path string) string { + if strings.HasPrefix(path, "vendor/") { + return path[len("vendor/"):] + } + return path +} diff --git a/vendor/github.com/Antonboom/testifylint/internal/checkers/blank_import.go b/vendor/github.com/Antonboom/testifylint/internal/checkers/blank_import.go new file mode 100644 index 0000000000..403691e270 --- /dev/null +++ b/vendor/github.com/Antonboom/testifylint/internal/checkers/blank_import.go @@ -0,0 +1,69 @@ +package checkers + +import ( + "fmt" + "strconv" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/ast/inspector" + + "github.com/Antonboom/testifylint/internal/testify" +) + +// BlankImport detects useless blank imports of testify's packages. +// These imports are useless since testify doesn't do any magic with init() function. +// +// The checker detects situations like +// +// import ( +// "testing" +// +// _ "github.com/stretchr/testify" +// _ "github.com/stretchr/testify/assert" +// _ "github.com/stretchr/testify/http" +// _ "github.com/stretchr/testify/mock" +// _ "github.com/stretchr/testify/require" +// _ "github.com/stretchr/testify/suite" +// ) +// +// and requires +// +// import ( +// "testing" +// ) +type BlankImport struct{} + +// NewBlankImport constructs BlankImport checker. +func NewBlankImport() BlankImport { return BlankImport{} } +func (BlankImport) Name() string { return "blank-import" } + +func (checker BlankImport) Check(pass *analysis.Pass, _ *inspector.Inspector) (diagnostics []analysis.Diagnostic) { + for _, file := range pass.Files { + for _, imp := range file.Imports { + if imp.Name == nil || imp.Name.Name != "_" { + continue + } + + pkg, err := strconv.Unquote(imp.Path.Value) + if err != nil { + continue + } + if _, ok := packagesNotIntendedForBlankImport[pkg]; !ok { + continue + } + + msg := fmt.Sprintf("avoid blank import of %s as it does nothing", pkg) + diagnostics = append(diagnostics, *newDiagnostic(checker.Name(), imp, msg, nil)) + } + } + return diagnostics +} + +var packagesNotIntendedForBlankImport = map[string]struct{}{ + testify.ModulePath: {}, + testify.AssertPkgPath: {}, + testify.HTTPPkgPath: {}, + testify.MockPkgPath: {}, + testify.RequirePkgPath: {}, + testify.SuitePkgPath: {}, +} diff --git a/vendor/github.com/Antonboom/testifylint/internal/checkers/bool_compare.go b/vendor/github.com/Antonboom/testifylint/internal/checkers/bool_compare.go new file mode 100644 index 0000000000..c8db9420e7 --- /dev/null +++ b/vendor/github.com/Antonboom/testifylint/internal/checkers/bool_compare.go @@ -0,0 +1,248 @@ +package checkers + +import ( + "go/ast" + "go/token" + "go/types" + + "golang.org/x/tools/go/analysis" + + "github.com/Antonboom/testifylint/internal/analysisutil" +) + +// BoolCompare detects situations like +// +// assert.Equal(t, false, result) +// assert.EqualValues(t, false, result) +// assert.Exactly(t, false, result) +// assert.NotEqual(t, true, result) +// assert.NotEqualValues(t, true, result) +// assert.False(t, !result) +// assert.True(t, result == true) +// ... +// +// and requires +// +// assert.False(t, result) +// assert.True(t, result) +type BoolCompare struct{} // + +// NewBoolCompare constructs BoolCompare checker. 
+func NewBoolCompare() BoolCompare { return BoolCompare{} } +func (BoolCompare) Name() string { return "bool-compare" } + +func (checker BoolCompare) Check(pass *analysis.Pass, call *CallMeta) *analysis.Diagnostic { + newUseFnDiagnostic := func(proposed string, survivingArg ast.Node, replaceStart, replaceEnd token.Pos) *analysis.Diagnostic { + return newUseFunctionDiagnostic(checker.Name(), call, proposed, + newSuggestedFuncReplacement(call, proposed, analysis.TextEdit{ + Pos: replaceStart, + End: replaceEnd, + NewText: analysisutil.NodeBytes(pass.Fset, survivingArg), + }), + ) + } + + newUseTrueDiagnostic := func(survivingArg ast.Node, replaceStart, replaceEnd token.Pos) *analysis.Diagnostic { + return newUseFnDiagnostic("True", survivingArg, replaceStart, replaceEnd) + } + + newUseFalseDiagnostic := func(survivingArg ast.Node, replaceStart, replaceEnd token.Pos) *analysis.Diagnostic { + return newUseFnDiagnostic("False", survivingArg, replaceStart, replaceEnd) + } + + newNeedSimplifyDiagnostic := func(survivingArg ast.Node, replaceStart, replaceEnd token.Pos) *analysis.Diagnostic { + return newDiagnostic(checker.Name(), call, "need to simplify the assertion", + &analysis.SuggestedFix{ + Message: "Simplify the assertion", + TextEdits: []analysis.TextEdit{{ + Pos: replaceStart, + End: replaceEnd, + NewText: analysisutil.NodeBytes(pass.Fset, survivingArg), + }}, + }, + ) + } + + switch call.Fn.NameFTrimmed { + case "Equal", "EqualValues", "Exactly": + if len(call.Args) < 2 { + return nil + } + + arg1, arg2 := call.Args[0], call.Args[1] + if isEmptyInterface(pass, arg1) || isEmptyInterface(pass, arg2) { + return nil + } + + t1, t2 := isUntypedTrue(pass, arg1), isUntypedTrue(pass, arg2) + f1, f2 := isUntypedFalse(pass, arg1), isUntypedFalse(pass, arg2) + + switch { + case xor(t1, t2): + survivingArg, _ := anyVal([]bool{t1, t2}, arg2, arg1) + return newUseTrueDiagnostic(survivingArg, arg1.Pos(), arg2.End()) + + case xor(f1, f2): + survivingArg, _ := anyVal([]bool{f1, f2}, arg2, arg1) + return newUseFalseDiagnostic(survivingArg, arg1.Pos(), arg2.End()) + } + + case "NotEqual", "NotEqualValues": + if len(call.Args) < 2 { + return nil + } + + arg1, arg2 := call.Args[0], call.Args[1] + if isEmptyInterface(pass, arg1) || isEmptyInterface(pass, arg2) { + return nil + } + + t1, t2 := isUntypedTrue(pass, arg1), isUntypedTrue(pass, arg2) + f1, f2 := isUntypedFalse(pass, arg1), isUntypedFalse(pass, arg2) + + switch { + case xor(t1, t2): + survivingArg, _ := anyVal([]bool{t1, t2}, arg2, arg1) + return newUseFalseDiagnostic(survivingArg, arg1.Pos(), arg2.End()) + + case xor(f1, f2): + survivingArg, _ := anyVal([]bool{f1, f2}, arg2, arg1) + return newUseTrueDiagnostic(survivingArg, arg1.Pos(), arg2.End()) + } + + case "True": + if len(call.Args) < 1 { + return nil + } + expr := call.Args[0] + + { + arg1, ok1 := isComparisonWithTrue(pass, expr, token.EQL) + arg2, ok2 := isComparisonWithFalse(pass, expr, token.NEQ) + + survivingArg, ok := anyVal([]bool{ok1, ok2}, arg1, arg2) + if ok && !isEmptyInterface(pass, survivingArg) { + return newNeedSimplifyDiagnostic(survivingArg, expr.Pos(), expr.End()) + } + } + + { + arg1, ok1 := isComparisonWithTrue(pass, expr, token.NEQ) + arg2, ok2 := isComparisonWithFalse(pass, expr, token.EQL) + arg3, ok3 := isNegation(expr) + + survivingArg, ok := anyVal([]bool{ok1, ok2, ok3}, arg1, arg2, arg3) + if ok && !isEmptyInterface(pass, survivingArg) { + return newUseFalseDiagnostic(survivingArg, expr.Pos(), expr.End()) + } + } + + case "False": + if len(call.Args) < 1 { + return 
nil + } + expr := call.Args[0] + + { + arg1, ok1 := isComparisonWithTrue(pass, expr, token.EQL) + arg2, ok2 := isComparisonWithFalse(pass, expr, token.NEQ) + + survivingArg, ok := anyVal([]bool{ok1, ok2}, arg1, arg2) + if ok && !isEmptyInterface(pass, survivingArg) { + return newNeedSimplifyDiagnostic(survivingArg, expr.Pos(), expr.End()) + } + } + + { + arg1, ok1 := isComparisonWithTrue(pass, expr, token.NEQ) + arg2, ok2 := isComparisonWithFalse(pass, expr, token.EQL) + arg3, ok3 := isNegation(expr) + + survivingArg, ok := anyVal([]bool{ok1, ok2, ok3}, arg1, arg2, arg3) + if ok && !isEmptyInterface(pass, survivingArg) { + return newUseTrueDiagnostic(survivingArg, expr.Pos(), expr.End()) + } + } + } + return nil +} + +var ( + falseObj = types.Universe.Lookup("false") + trueObj = types.Universe.Lookup("true") +) + +func isUntypedTrue(pass *analysis.Pass, e ast.Expr) bool { + return analysisutil.IsObj(pass.TypesInfo, e, trueObj) +} + +func isUntypedFalse(pass *analysis.Pass, e ast.Expr) bool { + return analysisutil.IsObj(pass.TypesInfo, e, falseObj) +} + +func isComparisonWithTrue(pass *analysis.Pass, e ast.Expr, op token.Token) (ast.Expr, bool) { + return isComparisonWith(pass, e, isUntypedTrue, op) +} + +func isComparisonWithFalse(pass *analysis.Pass, e ast.Expr, op token.Token) (ast.Expr, bool) { + return isComparisonWith(pass, e, isUntypedFalse, op) +} + +type predicate func(pass *analysis.Pass, e ast.Expr) bool + +func isComparisonWith(pass *analysis.Pass, e ast.Expr, predicate predicate, op token.Token) (ast.Expr, bool) { + be, ok := e.(*ast.BinaryExpr) + if !ok { + return nil, false + } + if be.Op != op { + return nil, false + } + + t1, t2 := predicate(pass, be.X), predicate(pass, be.Y) + if xor(t1, t2) { + if t1 { + return be.Y, true + } + return be.X, true + } + return nil, false +} + +func isNegation(e ast.Expr) (ast.Expr, bool) { + ue, ok := e.(*ast.UnaryExpr) + if !ok { + return nil, false + } + return ue.X, ue.Op == token.NOT +} + +func xor(a, b bool) bool { + return a != b +} + +// anyVal returns the first value[i] for which bools[i] is true. +func anyVal[T any](bools []bool, vals ...T) (T, bool) { + if len(bools) != len(vals) { + panic("inconsistent usage of valOr") //nolint:forbidigo // Does not depend on the code being analyzed. + } + + for i, b := range bools { + if b { + return vals[i], true + } + } + + var _default T + return _default, false +} + +func isEmptyInterface(pass *analysis.Pass, expr ast.Expr) bool { + t, ok := pass.TypesInfo.Types[expr] + if !ok { + return false + } + + iface, ok := t.Type.Underlying().(*types.Interface) + return ok && iface.NumMethods() == 0 +} diff --git a/vendor/github.com/Antonboom/testifylint/internal/checkers/call_meta.go b/vendor/github.com/Antonboom/testifylint/internal/checkers/call_meta.go new file mode 100644 index 0000000000..44eed49a62 --- /dev/null +++ b/vendor/github.com/Antonboom/testifylint/internal/checkers/call_meta.go @@ -0,0 +1,136 @@ +package checkers + +import ( + "go/ast" + "go/types" + "strings" + + "golang.org/x/tools/go/analysis" + + "github.com/Antonboom/testifylint/internal/analysisutil" + "github.com/Antonboom/testifylint/internal/testify" +) + +// CallMeta stores meta info about assertion function/method call, for example +// +// assert.Equal(t, 42, result, "helpful comment") +type CallMeta struct { + // Range contains start and end position of assertion call. + analysis.Range + // IsPkg true if this is package (not object) call. 
+ IsPkg bool + // IsAssert true if this is "testify/assert" package (or object) call. + IsAssert bool + // Selector is the AST expression of "assert.Equal". + Selector *ast.SelectorExpr + // SelectorXStr is a string representation of Selector's left part – value before point, e.g. "assert". + SelectorXStr string + // Fn stores meta info about assertion function itself. + Fn FnMeta + // Args stores assertion call arguments but without `t *testing.T` argument. + // E.g [42, result, "helpful comment"]. + Args []ast.Expr + // ArgsRaw stores assertion call initial arguments. + // E.g [t, 42, result, "helpful comment"]. + ArgsRaw []ast.Expr +} + +func (c CallMeta) String() string { + return c.SelectorXStr + "." + c.Fn.Name +} + +// FnMeta stores meta info about assertion function itself, for example "Equal". +type FnMeta struct { + // Range contains start and end position of function Name. + analysis.Range + // Name is a function name. + Name string + // NameFTrimmed is a function name without "f" suffix. + NameFTrimmed string + // IsFmt is true if function is formatted, e.g. "Equalf". + IsFmt bool +} + +// NewCallMeta returns meta information about testify assertion call. +// Returns nil if ast.CallExpr is not testify call. +func NewCallMeta(pass *analysis.Pass, ce *ast.CallExpr) *CallMeta { + se, ok := ce.Fun.(*ast.SelectorExpr) + if !ok || se.Sel == nil { + return nil + } + fnName := se.Sel.Name + + initiatorPkg, isPkgCall := func() (*types.Package, bool) { + // Examples: + // s.Assert -> method of *suite.Suite -> package suite ("vendor/github.com/stretchr/testify/suite") + // s.Assert().Equal -> method of *assert.Assertions -> package assert ("vendor/github.com/stretchr/testify/assert") + // s.Equal -> method of *assert.Assertions -> package assert ("vendor/github.com/stretchr/testify/assert") + // reqObj.Falsef -> method of *require.Assertions -> package require ("vendor/github.com/stretchr/testify/require") + if sel, ok := pass.TypesInfo.Selections[se]; ok { + return sel.Obj().Pkg(), false + } + + // Examples: + // assert.False -> assert -> package assert ("vendor/github.com/stretchr/testify/assert") + // require.NotEqualf -> require -> package require ("vendor/github.com/stretchr/testify/require") + if id, ok := se.X.(*ast.Ident); ok { + if selObj := pass.TypesInfo.ObjectOf(id); selObj != nil { + if pkg, ok := selObj.(*types.PkgName); ok { + return pkg.Imported(), true + } + } + } + return nil, false + }() + if initiatorPkg == nil { + return nil + } + + isAssert := analysisutil.IsPkg(initiatorPkg, testify.AssertPkgName, testify.AssertPkgPath) + isRequire := analysisutil.IsPkg(initiatorPkg, testify.RequirePkgName, testify.RequirePkgPath) + if !(isAssert || isRequire) { + return nil + } + + return &CallMeta{ + Range: ce, + IsPkg: isPkgCall, + IsAssert: isAssert, + Selector: se, + SelectorXStr: analysisutil.NodeString(pass.Fset, se.X), + Fn: FnMeta{ + Range: se.Sel, + Name: fnName, + NameFTrimmed: strings.TrimSuffix(fnName, "f"), + IsFmt: strings.HasSuffix(fnName, "f"), + }, + Args: trimTArg(pass, ce.Args), + ArgsRaw: ce.Args, + } +} + +func trimTArg(pass *analysis.Pass, args []ast.Expr) []ast.Expr { + if len(args) == 0 { + return args + } + + if isTestingTPtr(pass, args[0]) { + return args[1:] + } + return args +} + +func isTestingTPtr(pass *analysis.Pass, arg ast.Expr) bool { + assertTestingTObj := analysisutil.ObjectOf(pass.Pkg, testify.AssertPkgPath, "TestingT") + requireTestingTObj := analysisutil.ObjectOf(pass.Pkg, testify.RequirePkgPath, "TestingT") + + argType := 
pass.TypesInfo.TypeOf(arg) + if argType == nil { + return false + } + + return ((assertTestingTObj != nil) && + types.Implements(argType, assertTestingTObj.Type().Underlying().(*types.Interface))) || + ((requireTestingTObj != nil) && + types.Implements(argType, requireTestingTObj.Type().Underlying().(*types.Interface))) +} diff --git a/vendor/github.com/Antonboom/testifylint/internal/checkers/checker.go b/vendor/github.com/Antonboom/testifylint/internal/checkers/checker.go new file mode 100644 index 0000000000..ac23af6f6f --- /dev/null +++ b/vendor/github.com/Antonboom/testifylint/internal/checkers/checker.go @@ -0,0 +1,23 @@ +package checkers + +import ( + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/ast/inspector" +) + +// Checker describes named checker. +type Checker interface { + Name() string +} + +// RegularChecker check assertion call presented in CallMeta form. +type RegularChecker interface { + Checker + Check(pass *analysis.Pass, call *CallMeta) *analysis.Diagnostic +} + +// AdvancedChecker implements complex Check logic different from trivial CallMeta check. +type AdvancedChecker interface { + Checker + Check(pass *analysis.Pass, inspector *inspector.Inspector) []analysis.Diagnostic +} diff --git a/vendor/github.com/Antonboom/testifylint/internal/checkers/checkers_registry.go b/vendor/github.com/Antonboom/testifylint/internal/checkers/checkers_registry.go new file mode 100644 index 0000000000..e34a21bf9c --- /dev/null +++ b/vendor/github.com/Antonboom/testifylint/internal/checkers/checkers_registry.go @@ -0,0 +1,105 @@ +package checkers + +import ( + "sort" +) + +// registry stores checkers meta information in checkers' priority order. +var registry = checkersRegistry{ + // Regular checkers. + {factory: asCheckerFactory(NewFloatCompare), enabledByDefault: true}, + {factory: asCheckerFactory(NewBoolCompare), enabledByDefault: true}, + {factory: asCheckerFactory(NewEmpty), enabledByDefault: true}, + {factory: asCheckerFactory(NewLen), enabledByDefault: true}, + {factory: asCheckerFactory(NewCompares), enabledByDefault: true}, + {factory: asCheckerFactory(NewErrorNil), enabledByDefault: true}, + {factory: asCheckerFactory(NewNilCompare), enabledByDefault: true}, + {factory: asCheckerFactory(NewErrorIsAs), enabledByDefault: true}, + {factory: asCheckerFactory(NewExpectedActual), enabledByDefault: true}, + {factory: asCheckerFactory(NewSuiteExtraAssertCall), enabledByDefault: true}, + {factory: asCheckerFactory(NewSuiteDontUsePkg), enabledByDefault: true}, + {factory: asCheckerFactory(NewUselessAssert), enabledByDefault: true}, + // Advanced checkers. + {factory: asCheckerFactory(NewBlankImport), enabledByDefault: true}, + {factory: asCheckerFactory(NewGoRequire), enabledByDefault: true}, + {factory: asCheckerFactory(NewRequireError), enabledByDefault: true}, + {factory: asCheckerFactory(NewSuiteTHelper), enabledByDefault: false}, +} + +type checkersRegistry []checkerMeta + +type checkerMeta struct { + factory checkerFactory + enabledByDefault bool +} + +type checkerFactory func() Checker + +func asCheckerFactory[T Checker](fn func() T) checkerFactory { + return func() Checker { + return fn() + } +} + +func (r checkersRegistry) get(name string) (m checkerMeta, priority int, found bool) { + for i, meta := range r { + if meta.factory().Name() == name { + return meta, i, true + } + } + return checkerMeta{}, 0, false +} + +// All returns all checkers names sorted by checker's priority. 
+func All() []string { + result := make([]string, 0, len(registry)) + for _, meta := range registry { + result = append(result, meta.factory().Name()) + } + return result +} + +// EnabledByDefault returns checkers enabled by default sorted by checker's priority. +func EnabledByDefault() []string { + result := make([]string, 0, len(registry)) + for _, meta := range registry { + if meta.enabledByDefault { + result = append(result, meta.factory().Name()) + } + } + return result +} + +// Get returns new checker instance by checker's name. +func Get(name string) (Checker, bool) { + meta, _, ok := registry.get(name) + if ok { + return meta.factory(), true + } + return nil, false +} + +// IsKnown checks if there is a checker with that name. +func IsKnown(name string) bool { + _, _, ok := registry.get(name) + return ok +} + +// IsEnabledByDefault returns true if a checker is enabled by default. +// Returns false if there is no such checker in the registry. +// For pre-validation use Get or IsKnown. +func IsEnabledByDefault(name string) bool { + meta, _, ok := registry.get(name) + return ok && meta.enabledByDefault +} + +// SortByPriority mutates the input checkers names by sorting them in checker priority order. +// Ignores unknown checkers. For pre-validation use Get or IsKnown. +func SortByPriority(checkers []string) { + sort.Slice(checkers, func(i, j int) bool { + lhs, rhs := checkers[i], checkers[j] + _, lhsPriority, _ := registry.get(lhs) + _, rhsPriority, _ := registry.get(rhs) + return lhsPriority < rhsPriority + }) +} diff --git a/vendor/github.com/Antonboom/testifylint/internal/checkers/compares.go b/vendor/github.com/Antonboom/testifylint/internal/checkers/compares.go new file mode 100644 index 0000000000..336a345124 --- /dev/null +++ b/vendor/github.com/Antonboom/testifylint/internal/checkers/compares.go @@ -0,0 +1,96 @@ +package checkers + +import ( + "bytes" + "go/ast" + "go/token" + + "golang.org/x/tools/go/analysis" + + "github.com/Antonboom/testifylint/internal/analysisutil" +) + +// Compares detects situations like +// +// assert.True(t, a == b) +// assert.True(t, a != b) +// assert.True(t, a > b) +// assert.True(t, a >= b) +// assert.True(t, a < b) +// assert.True(t, a <= b) +// assert.False(t, a == b) +// ... +// +// and requires +// +// assert.Equal(t, a, b) +// assert.NotEqual(t, a, b) +// assert.Greater(t, a, b) +// assert.GreaterOrEqual(t, a, b) +// assert.Less(t, a, b) +// assert.LessOrEqual(t, a, b) +type Compares struct{} + +// NewCompares constructs Compares checker. 
+func NewCompares() Compares { return Compares{} } +func (Compares) Name() string { return "compares" } + +func (checker Compares) Check(pass *analysis.Pass, call *CallMeta) *analysis.Diagnostic { + if len(call.Args) < 1 { + return nil + } + + be, ok := call.Args[0].(*ast.BinaryExpr) + if !ok { + return nil + } + + var tokenToProposedFn map[token.Token]string + + switch call.Fn.NameFTrimmed { + case "True": + tokenToProposedFn = tokenToProposedFnInsteadOfTrue + case "False": + tokenToProposedFn = tokenToProposedFnInsteadOfFalse + default: + return nil + } + + if proposedFn, ok := tokenToProposedFn[be.Op]; ok { + a, b := be.X, be.Y + return newUseFunctionDiagnostic(checker.Name(), call, proposedFn, + newSuggestedFuncReplacement(call, proposedFn, analysis.TextEdit{ + Pos: be.X.Pos(), + End: be.Y.End(), + NewText: formatAsCallArgs(pass, a, b), + }), + ) + } + return nil +} + +var tokenToProposedFnInsteadOfTrue = map[token.Token]string{ + token.EQL: "Equal", + token.NEQ: "NotEqual", + token.GTR: "Greater", + token.GEQ: "GreaterOrEqual", + token.LSS: "Less", + token.LEQ: "LessOrEqual", +} + +var tokenToProposedFnInsteadOfFalse = map[token.Token]string{ + token.EQL: "NotEqual", + token.NEQ: "Equal", + token.GTR: "LessOrEqual", + token.GEQ: "Less", + token.LSS: "GreaterOrEqual", + token.LEQ: "Greater", +} + +// formatAsCallArgs joins a and b and return bytes like `a, b`. +func formatAsCallArgs(pass *analysis.Pass, a, b ast.Node) []byte { + return bytes.Join([][]byte{ + analysisutil.NodeBytes(pass.Fset, a), + analysisutil.NodeBytes(pass.Fset, b), + }, []byte(", ")) +} diff --git a/vendor/github.com/Antonboom/testifylint/internal/checkers/diagnostic.go b/vendor/github.com/Antonboom/testifylint/internal/checkers/diagnostic.go new file mode 100644 index 0000000000..4ab69c69bb --- /dev/null +++ b/vendor/github.com/Antonboom/testifylint/internal/checkers/diagnostic.go @@ -0,0 +1,60 @@ +package checkers + +import ( + "fmt" + + "golang.org/x/tools/go/analysis" +) + +func newUseFunctionDiagnostic( + checker string, + call *CallMeta, + proposedFn string, + fix *analysis.SuggestedFix, +) *analysis.Diagnostic { + f := proposedFn + if call.Fn.IsFmt { + f += "f" + } + msg := fmt.Sprintf("use %s.%s", call.SelectorXStr, f) + + return newDiagnostic(checker, call, msg, fix) +} + +func newDiagnostic( + checker string, + rng analysis.Range, + msg string, + fix *analysis.SuggestedFix, +) *analysis.Diagnostic { + d := analysis.Diagnostic{ + Pos: rng.Pos(), + End: rng.End(), + Category: checker, + Message: checker + ": " + msg, + } + if fix != nil { + d.SuggestedFixes = []analysis.SuggestedFix{*fix} + } + return &d +} + +func newSuggestedFuncReplacement( + call *CallMeta, + proposedFn string, + additionalEdits ...analysis.TextEdit, +) *analysis.SuggestedFix { + if call.Fn.IsFmt { + proposedFn += "f" + } + return &analysis.SuggestedFix{ + Message: fmt.Sprintf("Replace `%s` with `%s`", call.Fn.Name, proposedFn), + TextEdits: append([]analysis.TextEdit{ + { + Pos: call.Fn.Pos(), + End: call.Fn.End(), + NewText: []byte(proposedFn), + }, + }, additionalEdits...), + } +} diff --git a/vendor/github.com/Antonboom/testifylint/internal/checkers/empty.go b/vendor/github.com/Antonboom/testifylint/internal/checkers/empty.go new file mode 100644 index 0000000000..5ad371bb4f --- /dev/null +++ b/vendor/github.com/Antonboom/testifylint/internal/checkers/empty.go @@ -0,0 +1,172 @@ +package checkers + +import ( + "fmt" + "go/ast" + "go/token" + "go/types" + + "golang.org/x/tools/go/analysis" + + 
"github.com/Antonboom/testifylint/internal/analysisutil" +) + +// Empty detects situations like +// +// assert.Len(t, arr, 0) +// assert.Equal(t, 0, len(arr)) +// assert.EqualValues(t, 0, len(arr)) +// assert.Exactly(t, 0, len(arr)) +// assert.LessOrEqual(t, len(arr), 0) +// assert.GreaterOrEqual(t, 0, len(arr)) +// assert.Less(t, len(arr), 0) +// assert.Greater(t, 0, len(arr)) +// assert.Less(t, len(arr), 1) +// assert.Greater(t, 1, len(arr)) +// +// assert.NotEqual(t, 0, len(arr)) +// assert.NotEqualValues(t, 0, len(arr)) +// assert.Less(t, 0, len(arr)) +// assert.Greater(t, len(arr), 0) +// +// and requires +// +// assert.Empty(t, arr) +// assert.NotEmpty(t, arr) +type Empty struct{} + +// NewEmpty constructs Empty checker. +func NewEmpty() Empty { return Empty{} } +func (Empty) Name() string { return "empty" } + +func (checker Empty) Check(pass *analysis.Pass, call *CallMeta) *analysis.Diagnostic { + if d := checker.checkEmpty(pass, call); d != nil { + return d + } + return checker.checkNotEmpty(pass, call) +} + +func (checker Empty) checkEmpty(pass *analysis.Pass, call *CallMeta) *analysis.Diagnostic { //nolint:gocognit + newUseEmptyDiagnostic := func(replaceStart, replaceEnd token.Pos, replaceWith ast.Expr) *analysis.Diagnostic { + const proposed = "Empty" + return newUseFunctionDiagnostic(checker.Name(), call, proposed, + newSuggestedFuncReplacement(call, proposed, analysis.TextEdit{ + Pos: replaceStart, + End: replaceEnd, + NewText: analysisutil.NodeBytes(pass.Fset, replaceWith), + }), + ) + } + + if len(call.Args) < 2 { + return nil + } + a, b := call.Args[0], call.Args[1] + + switch call.Fn.NameFTrimmed { + case "Len": + if isZero(b) { + return newUseEmptyDiagnostic(a.Pos(), b.End(), a) + } + + case "Equal", "EqualValues", "Exactly": + arg1, ok1 := isLenCallAndZero(pass, a, b) + arg2, ok2 := isLenCallAndZero(pass, b, a) + + if lenArg, ok := anyVal([]bool{ok1, ok2}, arg1, arg2); ok { + return newUseEmptyDiagnostic(a.Pos(), b.End(), lenArg) + } + + case "LessOrEqual": + if lenArg, ok := isBuiltinLenCall(pass, a); ok && isZero(b) { + return newUseEmptyDiagnostic(a.Pos(), b.End(), lenArg) + } + + case "GreaterOrEqual": + if lenArg, ok := isBuiltinLenCall(pass, b); ok && isZero(a) { + return newUseEmptyDiagnostic(a.Pos(), b.End(), lenArg) + } + + case "Less": + if lenArg, ok := isBuiltinLenCall(pass, a); ok && (isOne(b) || isZero(b)) { + return newUseEmptyDiagnostic(a.Pos(), b.End(), lenArg) + } + + case "Greater": + if lenArg, ok := isBuiltinLenCall(pass, b); ok && (isOne(a) || isZero(a)) { + return newUseEmptyDiagnostic(a.Pos(), b.End(), lenArg) + } + } + return nil +} + +func (checker Empty) checkNotEmpty(pass *analysis.Pass, call *CallMeta) *analysis.Diagnostic { //nolint:gocognit + newUseNotEmptyDiagnostic := func(replaceStart, replaceEnd token.Pos, replaceWith ast.Expr) *analysis.Diagnostic { + const proposed = "NotEmpty" + return newUseFunctionDiagnostic(checker.Name(), call, proposed, + newSuggestedFuncReplacement(call, proposed, analysis.TextEdit{ + Pos: replaceStart, + End: replaceEnd, + NewText: analysisutil.NodeBytes(pass.Fset, replaceWith), + }), + ) + } + + if len(call.Args) < 2 { + return nil + } + a, b := call.Args[0], call.Args[1] + + switch call.Fn.NameFTrimmed { + case "NotEqual", "NotEqualValues": + arg1, ok1 := isLenCallAndZero(pass, a, b) + arg2, ok2 := isLenCallAndZero(pass, b, a) + + if lenArg, ok := anyVal([]bool{ok1, ok2}, arg1, arg2); ok { + return newUseNotEmptyDiagnostic(a.Pos(), b.End(), lenArg) + } + + case "Less": + if lenArg, ok := 
isBuiltinLenCall(pass, b); ok && isZero(a) { + return newUseNotEmptyDiagnostic(a.Pos(), b.End(), lenArg) + } + + case "Greater": + if lenArg, ok := isBuiltinLenCall(pass, a); ok && isZero(b) { + return newUseNotEmptyDiagnostic(a.Pos(), b.End(), lenArg) + } + } + return nil +} + +var lenObj = types.Universe.Lookup("len") + +func isLenCallAndZero(pass *analysis.Pass, a, b ast.Expr) (ast.Expr, bool) { + lenArg, ok := isBuiltinLenCall(pass, a) + return lenArg, ok && isZero(b) +} + +func isBuiltinLenCall(pass *analysis.Pass, e ast.Expr) (ast.Expr, bool) { + ce, ok := e.(*ast.CallExpr) + if !ok { + return nil, false + } + + if analysisutil.IsObj(pass.TypesInfo, ce.Fun, lenObj) && len(ce.Args) == 1 { + return ce.Args[0], true + } + return nil, false +} + +func isZero(e ast.Expr) bool { + return isIntNumber(e, 0) +} + +func isOne(e ast.Expr) bool { + return isIntNumber(e, 1) +} + +func isIntNumber(e ast.Expr, v int) bool { + bl, ok := e.(*ast.BasicLit) + return ok && bl.Kind == token.INT && bl.Value == fmt.Sprintf("%d", v) +} diff --git a/vendor/github.com/Antonboom/testifylint/internal/checkers/error_is_as.go b/vendor/github.com/Antonboom/testifylint/internal/checkers/error_is_as.go new file mode 100644 index 0000000000..0363873a63 --- /dev/null +++ b/vendor/github.com/Antonboom/testifylint/internal/checkers/error_is_as.go @@ -0,0 +1,166 @@ +package checkers + +import ( + "fmt" + "go/ast" + "go/types" + + "golang.org/x/tools/go/analysis" + + "github.com/Antonboom/testifylint/internal/analysisutil" +) + +// ErrorIsAs detects situations like +// +// assert.Error(t, err, errSentinel) +// assert.NoError(t, err, errSentinel) +// assert.True(t, errors.Is(err, errSentinel)) +// assert.False(t, errors.Is(err, errSentinel)) +// assert.True(t, errors.As(err, &target)) +// +// and requires +// +// assert.ErrorIs(t, err, errSentinel) +// assert.NotErrorIs(t, err, errSentinel) +// assert.ErrorAs(t, err, &target) +// +// Also ErrorIsAs repeats go vet's "errorsas" check logic. +type ErrorIsAs struct{} + +// NewErrorIsAs constructs ErrorIsAs checker. 
+func NewErrorIsAs() ErrorIsAs { return ErrorIsAs{} } +func (ErrorIsAs) Name() string { return "error-is-as" } + +func (checker ErrorIsAs) Check(pass *analysis.Pass, call *CallMeta) *analysis.Diagnostic { + switch call.Fn.NameFTrimmed { + case "Error": + if len(call.Args) >= 2 && isError(pass, call.Args[1]) { + const proposed = "ErrorIs" + msg := fmt.Sprintf("invalid usage of %[1]s.Error, use %[1]s.%[2]s instead", call.SelectorXStr, proposed) + return newDiagnostic(checker.Name(), call, msg, newSuggestedFuncReplacement(call, proposed)) + } + + case "NoError": + if len(call.Args) >= 2 && isError(pass, call.Args[1]) { + const proposed = "NotErrorIs" + msg := fmt.Sprintf("invalid usage of %[1]s.NoError, use %[1]s.%[2]s instead", call.SelectorXStr, proposed) + return newDiagnostic(checker.Name(), call, msg, newSuggestedFuncReplacement(call, proposed)) + } + + case "True": + if len(call.Args) < 1 { + return nil + } + + ce, ok := call.Args[0].(*ast.CallExpr) + if !ok { + return nil + } + if len(ce.Args) != 2 { + return nil + } + + var proposed string + switch { + case isErrorsIsCall(pass, ce): + proposed = "ErrorIs" + case isErrorsAsCall(pass, ce): + proposed = "ErrorAs" + } + if proposed != "" { + return newUseFunctionDiagnostic(checker.Name(), call, proposed, + newSuggestedFuncReplacement(call, proposed, analysis.TextEdit{ + Pos: ce.Pos(), + End: ce.End(), + NewText: formatAsCallArgs(pass, ce.Args[0], ce.Args[1]), + }), + ) + } + + case "False": + if len(call.Args) < 1 { + return nil + } + + ce, ok := call.Args[0].(*ast.CallExpr) + if !ok { + return nil + } + if len(ce.Args) != 2 { + return nil + } + + if isErrorsIsCall(pass, ce) { + const proposed = "NotErrorIs" + return newUseFunctionDiagnostic(checker.Name(), call, proposed, + newSuggestedFuncReplacement(call, proposed, analysis.TextEdit{ + Pos: ce.Pos(), + End: ce.End(), + NewText: formatAsCallArgs(pass, ce.Args[0], ce.Args[1]), + }), + ) + } + + case "ErrorAs": + if len(call.Args) < 2 { + return nil + } + + // NOTE(a.telyshev): Logic below must be consistent with + // https://cs.opensource.google/go/x/tools/+/master:go/analysis/passes/errorsas/errorsas.go + + var ( + defaultReport = fmt.Sprintf("second argument to %s must be a non-nil pointer to either a type that implements error, or to any interface type", call) //nolint:lll + errorPtrReport = fmt.Sprintf("second argument to %s should not be *error", call) + ) + + target := call.Args[1] + + if isEmptyInterface(pass, target) { + // `any` interface case. It is always allowed, since it often indicates + // a value forwarded from another source. 
+ return nil + } + + tv, ok := pass.TypesInfo.Types[target] + if !ok { + return nil + } + + pt, ok := tv.Type.Underlying().(*types.Pointer) + if !ok { + return newDiagnostic(checker.Name(), call, defaultReport, nil) + } + if pt.Elem() == errorType { + return newDiagnostic(checker.Name(), call, errorPtrReport, nil) + } + + _, isInterface := pt.Elem().Underlying().(*types.Interface) + if !isInterface && !types.Implements(pt.Elem(), errorIface) { + return newDiagnostic(checker.Name(), call, defaultReport, nil) + } + } + return nil +} + +func isErrorsIsCall(pass *analysis.Pass, ce *ast.CallExpr) bool { + return isErrorsPkgFnCall(pass, ce, "Is") +} + +func isErrorsAsCall(pass *analysis.Pass, ce *ast.CallExpr) bool { + return isErrorsPkgFnCall(pass, ce, "As") +} + +func isErrorsPkgFnCall(pass *analysis.Pass, ce *ast.CallExpr, fn string) bool { + se, ok := ce.Fun.(*ast.SelectorExpr) + if !ok { + return false + } + + errorsIsObj := analysisutil.ObjectOf(pass.Pkg, "errors", fn) + if errorsIsObj == nil { + return false + } + + return analysisutil.IsObj(pass.TypesInfo, se.Sel, errorsIsObj) +} diff --git a/vendor/github.com/Antonboom/testifylint/internal/checkers/error_nil.go b/vendor/github.com/Antonboom/testifylint/internal/checkers/error_nil.go new file mode 100644 index 0000000000..5b0af7458f --- /dev/null +++ b/vendor/github.com/Antonboom/testifylint/internal/checkers/error_nil.go @@ -0,0 +1,113 @@ +package checkers + +import ( + "go/ast" + "go/token" + "go/types" + + "golang.org/x/tools/go/analysis" + + "github.com/Antonboom/testifylint/internal/analysisutil" +) + +// ErrorNil detects situations like +// +// assert.Nil(t, err) +// assert.NotNil(t, err) +// assert.Equal(t, nil, err) +// assert.EqualValues(t, nil, err) +// assert.Exactly(t, nil, err) +// assert.ErrorIs(t, err, nil) +// +// assert.NotEqual(t, nil, err) +// assert.NotEqualValues(t, nil, err) +// assert.NotErrorIs(t, err, nil) +// +// and requires +// +// assert.NoError(t, err) +// assert.Error(t, err) +type ErrorNil struct{} + +// NewErrorNil constructs ErrorNil checker. 
+func NewErrorNil() ErrorNil { return ErrorNil{} } +func (ErrorNil) Name() string { return "error-nil" } + +func (checker ErrorNil) Check(pass *analysis.Pass, call *CallMeta) *analysis.Diagnostic { + const ( + errorFn = "Error" + noErrorFn = "NoError" + ) + + proposedFn, survivingArg, replacementEndPos := func() (string, ast.Expr, token.Pos) { + switch call.Fn.NameFTrimmed { + case "Nil": + if len(call.Args) >= 1 && isError(pass, call.Args[0]) { + return noErrorFn, call.Args[0], call.Args[0].End() + } + + case "NotNil": + if len(call.Args) >= 1 && isError(pass, call.Args[0]) { + return errorFn, call.Args[0], call.Args[0].End() + } + + case "Equal", "EqualValues", "Exactly", "ErrorIs": + if len(call.Args) < 2 { + return "", nil, token.NoPos + } + a, b := call.Args[0], call.Args[1] + + switch { + case isError(pass, a) && isNil(b): + return noErrorFn, a, b.End() + case isNil(a) && isError(pass, b): + return noErrorFn, b, b.End() + } + + case "NotEqual", "NotEqualValues", "NotErrorIs": + if len(call.Args) < 2 { + return "", nil, token.NoPos + } + a, b := call.Args[0], call.Args[1] + + switch { + case isError(pass, a) && isNil(b): + return errorFn, a, b.End() + case isNil(a) && isError(pass, b): + return errorFn, b, b.End() + } + } + return "", nil, token.NoPos + }() + + if proposedFn != "" { + return newUseFunctionDiagnostic(checker.Name(), call, proposedFn, + newSuggestedFuncReplacement(call, proposedFn, analysis.TextEdit{ + Pos: call.Args[0].Pos(), + End: replacementEndPos, + NewText: analysisutil.NodeBytes(pass.Fset, survivingArg), + }), + ) + } + return nil +} + +var ( + errorType = types.Universe.Lookup("error").Type() + errorIface = errorType.Underlying().(*types.Interface) +) + +func isError(pass *analysis.Pass, expr ast.Expr) bool { + t := pass.TypesInfo.TypeOf(expr) + if t == nil { + return false + } + + _, ok := t.Underlying().(*types.Interface) + return ok && types.Implements(t, errorIface) +} + +func isNil(expr ast.Expr) bool { + ident, ok := expr.(*ast.Ident) + return ok && ident.Name == "nil" +} diff --git a/vendor/github.com/Antonboom/testifylint/internal/checkers/expected_actual.go b/vendor/github.com/Antonboom/testifylint/internal/checkers/expected_actual.go new file mode 100644 index 0000000000..e6825eaa67 --- /dev/null +++ b/vendor/github.com/Antonboom/testifylint/internal/checkers/expected_actual.go @@ -0,0 +1,215 @@ +package checkers + +import ( + "go/ast" + "go/token" + "go/types" + "regexp" + + "golang.org/x/tools/go/analysis" + + "github.com/Antonboom/testifylint/internal/analysisutil" +) + +// DefaultExpectedVarPattern matches variables with "expected" or "wanted" prefix or suffix in the name. 
+var DefaultExpectedVarPattern = regexp.MustCompile( + `(^(exp(ected)?|want(ed)?)([A-Z]\w*)?$)|(^(\w*[a-z])?(Exp(ected)?|Want(ed)?)$)`) + +// ExpectedActual detects situation like +// +// assert.Equal(t, result, expected) +// assert.EqualExportedValues(t, resultObj, User{Name: "Anton"}) +// assert.EqualValues(t, result, 42) +// assert.Exactly(t, result, int64(42)) +// assert.JSONEq(t, result, `{"version": 3}`) +// assert.InDelta(t, result, 42.42, 1.0) +// assert.InDeltaMapValues(t, result, map[string]float64{"score": 0.99}, 1.0) +// assert.InDeltaSlice(t, result, []float64{0.98, 0.99}, 1.0) +// assert.InEpsilon(t, result, 42.42, 0.0001) +// assert.InEpsilonSlice(t, result, []float64{0.9801, 0.9902}, 0.0001) +// assert.IsType(t, result, (*User)(nil)) +// assert.NotEqual(t, result, "expected") +// assert.NotEqualValues(t, result, "expected") +// assert.NotSame(t, resultPtr, &value) +// assert.Same(t, resultPtr, &value) +// assert.WithinDuration(t, resultTime, time.Date(2023, 01, 12, 11, 46, 33, 0, nil), time.Second) +// assert.YAMLEq(t, result, "version: '3'") +// +// and requires +// +// assert.Equal(t, expected, result) +// assert.EqualExportedValues(t, User{Name: "Anton"}, resultObj) +// assert.EqualValues(t, 42, result) +// ... +type ExpectedActual struct { + expVarPattern *regexp.Regexp +} + +// NewExpectedActual constructs ExpectedActual checker using DefaultExpectedVarPattern. +func NewExpectedActual() *ExpectedActual { + return &ExpectedActual{expVarPattern: DefaultExpectedVarPattern} +} + +func (ExpectedActual) Name() string { return "expected-actual" } + +func (checker *ExpectedActual) SetExpVarPattern(p *regexp.Regexp) *ExpectedActual { + if p != nil { + checker.expVarPattern = p + } + return checker +} + +func (checker ExpectedActual) Check(pass *analysis.Pass, call *CallMeta) *analysis.Diagnostic { + switch call.Fn.NameFTrimmed { + case "Equal", + "EqualExportedValues", + "EqualValues", + "Exactly", + "InDelta", + "InDeltaMapValues", + "InDeltaSlice", + "InEpsilon", + "InEpsilonSlice", + "IsType", + "JSONEq", + "NotEqual", + "NotEqualValues", + "NotSame", + "Same", + "WithinDuration", + "YAMLEq": + default: + return nil + } + + if len(call.Args) < 2 { + return nil + } + first, second := call.Args[0], call.Args[1] + + if checker.isWrongExpectedActualOrder(pass, first, second) { + return newDiagnostic(checker.Name(), call, "need to reverse actual and expected values", &analysis.SuggestedFix{ + Message: "Reverse actual and expected values", + TextEdits: []analysis.TextEdit{ + { + Pos: first.Pos(), + End: second.End(), + NewText: formatAsCallArgs(pass, second, first), + }, + }, + }) + } + return nil +} + +func (checker ExpectedActual) isWrongExpectedActualOrder(pass *analysis.Pass, first, second ast.Expr) bool { + leftIsCandidate := checker.isExpectedValueCandidate(pass, first) + rightIsCandidate := checker.isExpectedValueCandidate(pass, second) + return rightIsCandidate && !leftIsCandidate +} + +func (checker ExpectedActual) isExpectedValueCandidate(pass *analysis.Pass, expr ast.Expr) bool { + switch v := expr.(type) { + case *ast.ParenExpr: + return checker.isExpectedValueCandidate(pass, v.X) + + case *ast.StarExpr: // *value + return checker.isExpectedValueCandidate(pass, v.X) + + case *ast.UnaryExpr: + return (v.Op == token.AND) && checker.isExpectedValueCandidate(pass, v.X) // &value + + case *ast.CompositeLit: + return true + + case *ast.CallExpr: + return isParenExpr(v) || + isCastedBasicLitOrExpectedValue(v, checker.expVarPattern) || + isExpectedValueFactory(pass, v, 
checker.expVarPattern) + } + + return isBasicLit(expr) || + isUntypedConst(pass, expr) || + isTypedConst(pass, expr) || + isIdentNamedAsExpected(checker.expVarPattern, expr) || + isStructVarNamedAsExpected(checker.expVarPattern, expr) || + isStructFieldNamedAsExpected(checker.expVarPattern, expr) +} + +func isParenExpr(ce *ast.CallExpr) bool { + _, ok := ce.Fun.(*ast.ParenExpr) + return ok +} + +func isCastedBasicLitOrExpectedValue(ce *ast.CallExpr, pattern *regexp.Regexp) bool { + if len(ce.Args) != 1 { + return false + } + + fn, ok := ce.Fun.(*ast.Ident) + if !ok { + return false + } + + switch fn.Name { + case "complex64", "complex128": + return true + + case "uint", "uint8", "uint16", "uint32", "uint64", + "int", "int8", "int16", "int32", "int64", + "float32", "float64", + "rune", "string": + return isBasicLit(ce.Args[0]) || isIdentNamedAsExpected(pattern, ce.Args[0]) + } + return false +} + +func isExpectedValueFactory(pass *analysis.Pass, ce *ast.CallExpr, pattern *regexp.Regexp) bool { + switch fn := ce.Fun.(type) { + case *ast.Ident: + return pattern.MatchString(fn.Name) + + case *ast.SelectorExpr: + timeDateFn := analysisutil.ObjectOf(pass.Pkg, "time", "Date") + if timeDateFn != nil && analysisutil.IsObj(pass.TypesInfo, fn.Sel, timeDateFn) { + return true + } + return pattern.MatchString(fn.Sel.Name) + } + return false +} + +func isBasicLit(e ast.Expr) bool { + _, ok := e.(*ast.BasicLit) + return ok +} + +func isUntypedConst(p *analysis.Pass, e ast.Expr) bool { + t := p.TypesInfo.TypeOf(e) + if t == nil { + return false + } + + b, ok := t.(*types.Basic) + return ok && b.Info()&types.IsUntyped > 0 +} + +func isTypedConst(p *analysis.Pass, e ast.Expr) bool { + tt, ok := p.TypesInfo.Types[e] + return ok && tt.IsValue() && tt.Value != nil +} + +func isIdentNamedAsExpected(pattern *regexp.Regexp, e ast.Expr) bool { + id, ok := e.(*ast.Ident) + return ok && pattern.MatchString(id.Name) +} + +func isStructVarNamedAsExpected(pattern *regexp.Regexp, e ast.Expr) bool { + s, ok := e.(*ast.SelectorExpr) + return ok && isIdentNamedAsExpected(pattern, s.X) +} + +func isStructFieldNamedAsExpected(pattern *regexp.Regexp, e ast.Expr) bool { + s, ok := e.(*ast.SelectorExpr) + return ok && isIdentNamedAsExpected(pattern, s.Sel) +} diff --git a/vendor/github.com/Antonboom/testifylint/internal/checkers/float_compare.go b/vendor/github.com/Antonboom/testifylint/internal/checkers/float_compare.go new file mode 100644 index 0000000000..10b1330de8 --- /dev/null +++ b/vendor/github.com/Antonboom/testifylint/internal/checkers/float_compare.go @@ -0,0 +1,70 @@ +package checkers + +import ( + "fmt" + "go/ast" + "go/token" + "go/types" + + "golang.org/x/tools/go/analysis" +) + +// FloatCompare detects situation like +// +// assert.Equal(t, 42.42, result) +// assert.EqualValues(t, 42.42, result) +// assert.Exactly(t, 42.42, result) +// assert.True(t, result == 42.42) +// assert.False(t, result != 42.42) +// +// and requires +// +// assert.InEpsilon(t, 42.42, result, 0.0001) // Or assert.InDelta +type FloatCompare struct{} + +// NewFloatCompare constructs FloatCompare checker. 
+func NewFloatCompare() FloatCompare { return FloatCompare{} } +func (FloatCompare) Name() string { return "float-compare" } + +func (checker FloatCompare) Check(pass *analysis.Pass, call *CallMeta) *analysis.Diagnostic { + invalid := func() bool { + switch call.Fn.NameFTrimmed { + case "Equal", "EqualValues", "Exactly": + return len(call.Args) > 1 && (isFloat(pass, call.Args[0]) || isFloat(pass, call.Args[1])) + + case "True": + return len(call.Args) > 0 && isFloatCompare(pass, call.Args[0], token.EQL) + + case "False": + return len(call.Args) > 0 && isFloatCompare(pass, call.Args[0], token.NEQ) + } + return false + }() + + if invalid { + format := "use %s.InEpsilon (or InDelta)" + if call.Fn.IsFmt { + format = "use %s.InEpsilonf (or InDeltaf)" + } + return newDiagnostic(checker.Name(), call, fmt.Sprintf(format, call.SelectorXStr), nil) + } + return nil +} + +func isFloat(pass *analysis.Pass, expr ast.Expr) bool { + t := pass.TypesInfo.TypeOf(expr) + if t == nil { + return false + } + + bt, ok := t.Underlying().(*types.Basic) + return ok && (bt.Info()&types.IsFloat > 0) +} + +func isFloatCompare(p *analysis.Pass, e ast.Expr, op token.Token) bool { + be, ok := e.(*ast.BinaryExpr) + if !ok { + return false + } + return be.Op == op && (isFloat(p, be.X) || isFloat(p, be.Y)) +} diff --git a/vendor/github.com/Antonboom/testifylint/internal/checkers/go_require.go b/vendor/github.com/Antonboom/testifylint/internal/checkers/go_require.go new file mode 100644 index 0000000000..4c0c9d1fd7 --- /dev/null +++ b/vendor/github.com/Antonboom/testifylint/internal/checkers/go_require.go @@ -0,0 +1,301 @@ +package checkers + +import ( + "fmt" + "go/ast" + "go/types" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/ast/inspector" + + "github.com/Antonboom/testifylint/internal/analysisutil" +) + +const ( + goRequireFnReportFormat = "%s contains assertions that must only be used in the goroutine running the test function" + goRequireCallReportFormat = "%s must only be used in the goroutine running the test function" +) + +// GoRequire takes idea from go vet's "testinggoroutine" check +// and detects usage of require package's functions or assert.FailNow in the non-test goroutines +// +// go func() { +// conn, err = lis.Accept() +// require.NoError(t, err) +// +// if assert.Error(err) { +// assert.FailNow(t, msg) +// } +// }() +type GoRequire struct{} + +// NewGoRequire constructs GoRequire checker. +func NewGoRequire() GoRequire { return GoRequire{} } +func (GoRequire) Name() string { return "go-require" } + +// Check should be consistent with +// https://cs.opensource.google/go/x/tools/+/master:go/analysis/passes/testinggoroutine/testinggoroutine.go +// +// But due to the fact that the Check covers cases missed by go vet, +// the implementation turned out to be terribly complicated. +// +// In simple words, the algorithm is as follows: +// - we walk along the call tree and store the status, whether we are in the test goroutine or not; +// - if we are in a test goroutine, then require is allowed, otherwise not; +// - when we encounter the launch of a subtest or `go` statement, the status changes; +// - in order to correctly handle the return to the correct status when exiting the current function, +// we have to store a stack of statuses (inGoroutineRunningTestFunc). +// +// Other test functions called in the test function are also analyzed to make a verdict about the current function. +// This leads to recursion, which the cache of processed functions (processedFuncs) helps reduce the impact of. 
+// Also, because of this, we have to pre-collect a list of test function declarations (testsDecls). +func (checker GoRequire) Check(pass *analysis.Pass, inspector *inspector.Inspector) (diagnostics []analysis.Diagnostic) { + testsDecls := make(funcDeclarations) + inspector.Preorder([]ast.Node{(*ast.FuncDecl)(nil)}, func(node ast.Node) { + fd := node.(*ast.FuncDecl) + + if isTestingFuncOrMethod(pass, fd) { + if tf, ok := pass.TypesInfo.ObjectOf(fd.Name).(*types.Func); ok { + testsDecls[tf] = fd + } + } + }) + + var inGoroutineRunningTestFunc boolStack + processedFuncs := make(map[*ast.FuncDecl]goRequireVerdict) + + nodesFilter := []ast.Node{ + (*ast.FuncDecl)(nil), + (*ast.GoStmt)(nil), + (*ast.CallExpr)(nil), + } + inspector.Nodes(nodesFilter, func(node ast.Node, push bool) bool { + if fd, ok := node.(*ast.FuncDecl); ok { + if !isTestingFuncOrMethod(pass, fd) { + return false + } + + if push { + inGoroutineRunningTestFunc.Push(true) + } else { + inGoroutineRunningTestFunc.Pop() + } + return true + } + + if _, ok := node.(*ast.GoStmt); ok { + if push { + inGoroutineRunningTestFunc.Push(false) + } else { + inGoroutineRunningTestFunc.Pop() + } + return true + } + + ce := node.(*ast.CallExpr) + if isSubTestRun(pass, ce) { + if push { + // t.Run spawns the new testing goroutine and declines + // possible warnings from previous "simple" goroutine. + inGoroutineRunningTestFunc.Push(true) + } else { + inGoroutineRunningTestFunc.Pop() + } + return true + } + + if !push { + return false + } + if inGoroutineRunningTestFunc.Last() { + // We are in testing goroutine and can skip any assertion checks. + return true + } + + testifyCall := NewCallMeta(pass, ce) + if testifyCall != nil { + switch checker.checkCall(testifyCall) { + case goRequireVerdictRequire: + d := newDiagnostic(checker.Name(), testifyCall, fmt.Sprintf(goRequireCallReportFormat, "require"), nil) + diagnostics = append(diagnostics, *d) + + case goRequireVerdictAssertFailNow: + d := newDiagnostic(checker.Name(), testifyCall, fmt.Sprintf(goRequireCallReportFormat, testifyCall), nil) + diagnostics = append(diagnostics, *d) + + case goRequireVerdictNoExit: + } + return false + } + + // Case of nested function call. + { + calledFd := testsDecls.Get(pass, ce) + if calledFd == nil { + return true + } + + if v := checker.checkFunc(pass, calledFd, testsDecls, processedFuncs); v != goRequireVerdictNoExit { + caller := analysisutil.NodeString(pass.Fset, ce.Fun) + d := newDiagnostic(checker.Name(), ce, fmt.Sprintf(goRequireFnReportFormat, caller), nil) + diagnostics = append(diagnostics, *d) + } + } + return true + }) + + return diagnostics +} + +func (checker GoRequire) checkFunc( + pass *analysis.Pass, + fd *ast.FuncDecl, + testsDecls funcDeclarations, + processedFuncs map[*ast.FuncDecl]goRequireVerdict, +) (result goRequireVerdict) { + if v, ok := processedFuncs[fd]; ok { + return v + } + + ast.Inspect(fd, func(node ast.Node) bool { + if result != goRequireVerdictNoExit { + return false + } + + if _, ok := node.(*ast.GoStmt); ok { + return false + } + + ce, ok := node.(*ast.CallExpr) + if !ok { + return true + } + + testifyCall := NewCallMeta(pass, ce) + if testifyCall != nil { + if v := checker.checkCall(testifyCall); v != goRequireVerdictNoExit { + result, processedFuncs[fd] = v, v + } + return false + } + + // Case of nested function call. + { + calledFd := testsDecls.Get(pass, ce) + if calledFd == nil { + return true + } + if calledFd == fd { + // Recursion. 
+ return true + } + + if v := checker.checkFunc(pass, calledFd, testsDecls, processedFuncs); v != goRequireVerdictNoExit { + result = v + return false + } + return true + } + }) + + return result +} + +type goRequireVerdict int + +const ( + goRequireVerdictNoExit goRequireVerdict = iota + goRequireVerdictRequire + goRequireVerdictAssertFailNow +) + +func (checker GoRequire) checkCall(call *CallMeta) goRequireVerdict { + if !call.IsAssert { + return goRequireVerdictRequire + } + if call.Fn.NameFTrimmed == "FailNow" { + return goRequireVerdictAssertFailNow + } + return goRequireVerdictNoExit +} + +type funcDeclarations map[*types.Func]*ast.FuncDecl + +// Get returns the declaration of a called function or method. +// Currently, only static calls within the same package are supported, otherwise returns nil. +func (fd funcDeclarations) Get(pass *analysis.Pass, ce *ast.CallExpr) *ast.FuncDecl { + var obj types.Object + + switch fun := ce.Fun.(type) { + case *ast.SelectorExpr: + obj = pass.TypesInfo.ObjectOf(fun.Sel) + + case *ast.Ident: + obj = pass.TypesInfo.ObjectOf(fun) + + case *ast.IndexExpr: + if id, ok := fun.X.(*ast.Ident); ok { + obj = pass.TypesInfo.ObjectOf(id) + } + + case *ast.IndexListExpr: + if id, ok := fun.X.(*ast.Ident); ok { + obj = pass.TypesInfo.ObjectOf(id) + } + } + + if tf, ok := obj.(*types.Func); ok { + return fd[tf] + } + return nil +} + +type boolStack []bool + +func (s *boolStack) Push(v bool) { + *s = append(*s, v) +} + +func (s *boolStack) Pop() bool { + n := len(*s) + if n == 0 { + return false + } + + last := (*s)[n-1] + *s = (*s)[:n-1] + return last +} + +func (s boolStack) Last() bool { + n := len(s) + if n == 0 { + return false + } + return s[n-1] +} + +func isSubTestRun(pass *analysis.Pass, ce *ast.CallExpr) bool { + se, ok := ce.Fun.(*ast.SelectorExpr) + if !ok || se.Sel == nil { + return false + } + return (isTestingTPtr(pass, se.X) || implementsTestifySuiteIface(pass, se.X)) && se.Sel.Name == "Run" +} + +func isTestingFuncOrMethod(pass *analysis.Pass, fd *ast.FuncDecl) bool { + return hasTestingTParam(pass, fd) || isTestifySuiteMethod(pass, fd) +} + +func hasTestingTParam(pass *analysis.Pass, fd *ast.FuncDecl) bool { + if fd.Type == nil || fd.Type.Params == nil { + return false + } + + for _, param := range fd.Type.Params.List { + if isTestingTPtr(pass, param.Type) { + return true + } + } + return false +} diff --git a/vendor/github.com/Antonboom/testifylint/internal/checkers/len.go b/vendor/github.com/Antonboom/testifylint/internal/checkers/len.go new file mode 100644 index 0000000000..d4e6a48b5b --- /dev/null +++ b/vendor/github.com/Antonboom/testifylint/internal/checkers/len.go @@ -0,0 +1,97 @@ +package checkers + +import ( + "go/ast" + "go/token" + + "golang.org/x/tools/go/analysis" +) + +// Len detects situations like +// +// assert.Equal(t, 3, len(arr)) +// assert.EqualValues(t, 3, len(arr)) +// assert.Exactly(t, 3, len(arr)) +// assert.True(t, len(arr) == 3) +// +// and requires +// +// assert.Len(t, arr, 3) +type Len struct{} + +// NewLen constructs Len checker. 
+func NewLen() Len { return Len{} } +func (Len) Name() string { return "len" } + +func (checker Len) Check(pass *analysis.Pass, call *CallMeta) *analysis.Diagnostic { + const proposedFn = "Len" + + switch call.Fn.NameFTrimmed { + case "Equal", "EqualValues", "Exactly": + if len(call.Args) < 2 { + return nil + } + a, b := call.Args[0], call.Args[1] + + if lenArg, expectedLen, ok := xorLenCall(pass, a, b); ok { + if expectedLen == b && !isIntBasicLit(expectedLen) { + // https://github.com/Antonboom/testifylint/issues/9 + return nil + } + return newUseFunctionDiagnostic(checker.Name(), call, proposedFn, + newSuggestedFuncReplacement(call, proposedFn, analysis.TextEdit{ + Pos: a.Pos(), + End: b.End(), + NewText: formatAsCallArgs(pass, lenArg, expectedLen), + }), + ) + } + + case "True": + if len(call.Args) < 1 { + return nil + } + expr := call.Args[0] + + if lenArg, expectedLen, ok := isLenEquality(pass, expr); ok && isIntBasicLit(expectedLen) { + return newUseFunctionDiagnostic(checker.Name(), call, proposedFn, + newSuggestedFuncReplacement(call, proposedFn, analysis.TextEdit{ + Pos: expr.Pos(), + End: expr.End(), + NewText: formatAsCallArgs(pass, lenArg, expectedLen), + }), + ) + } + } + return nil +} + +func xorLenCall(pass *analysis.Pass, a, b ast.Expr) (lenArg ast.Expr, expectedLen ast.Expr, ok bool) { + arg1, ok1 := isBuiltinLenCall(pass, a) + arg2, ok2 := isBuiltinLenCall(pass, b) + + if xor(ok1, ok2) { + if ok1 { + return arg1, b, true + } + return arg2, a, true + } + return nil, nil, false +} + +func isLenEquality(pass *analysis.Pass, e ast.Expr) (ast.Expr, ast.Expr, bool) { + be, ok := e.(*ast.BinaryExpr) + if !ok { + return nil, nil, false + } + + if be.Op != token.EQL { + return nil, nil, false + } + return xorLenCall(pass, be.X, be.Y) +} + +func isIntBasicLit(e ast.Expr) bool { + bl, ok := e.(*ast.BasicLit) + return ok && bl.Kind == token.INT +} diff --git a/vendor/github.com/Antonboom/testifylint/internal/checkers/nil_compare.go b/vendor/github.com/Antonboom/testifylint/internal/checkers/nil_compare.go new file mode 100644 index 0000000000..89680a0699 --- /dev/null +++ b/vendor/github.com/Antonboom/testifylint/internal/checkers/nil_compare.go @@ -0,0 +1,69 @@ +package checkers + +import ( + "go/ast" + + "golang.org/x/tools/go/analysis" + + "github.com/Antonboom/testifylint/internal/analysisutil" +) + +// NilCompare detects situations like +// +// assert.Equal(t, nil, value) +// assert.EqualValues(t, nil, value) +// assert.Exactly(t, nil, value) +// +// assert.NotEqual(t, nil, value) +// assert.NotEqualValues(t, nil, value) +// +// and requires +// +// assert.Nil(t, value) +// assert.NotNil(t, value) +type NilCompare struct{} + +// NewNilCompare constructs NilCompare checker. 
+func NewNilCompare() NilCompare { return NilCompare{} } +func (NilCompare) Name() string { return "nil-compare" } + +func (checker NilCompare) Check(pass *analysis.Pass, call *CallMeta) *analysis.Diagnostic { + if len(call.Args) < 2 { + return nil + } + + survivingArg, ok := xorNil(call.Args[0], call.Args[1]) + if !ok { + return nil + } + + var proposedFn string + + switch call.Fn.NameFTrimmed { + case "Equal", "EqualValues", "Exactly": + proposedFn = "Nil" + case "NotEqual", "NotEqualValues": + proposedFn = "NotNil" + default: + return nil + } + + return newUseFunctionDiagnostic(checker.Name(), call, proposedFn, + newSuggestedFuncReplacement(call, proposedFn, analysis.TextEdit{ + Pos: call.Args[0].Pos(), + End: call.Args[1].End(), + NewText: analysisutil.NodeBytes(pass.Fset, survivingArg), + }), + ) +} + +func xorNil(first, second ast.Expr) (ast.Expr, bool) { + a, b := isNil(first), isNil(second) + if xor(a, b) { + if a { + return second, true + } + return first, true + } + return nil, false +} diff --git a/vendor/github.com/Antonboom/testifylint/internal/checkers/require_error.go b/vendor/github.com/Antonboom/testifylint/internal/checkers/require_error.go new file mode 100644 index 0000000000..a76b1df7e2 --- /dev/null +++ b/vendor/github.com/Antonboom/testifylint/internal/checkers/require_error.go @@ -0,0 +1,325 @@ +package checkers + +import ( + "fmt" + "go/ast" + "go/token" + "regexp" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/ast/inspector" +) + +const requireErrorReport = "for error assertions use require" + +// RequireError detects error assertions like +// +// assert.Error(t, err) // s.Error(err), s.Assert().Error(err) +// assert.ErrorIs(t, err, io.EOF) +// assert.ErrorAs(t, err, &target) +// assert.EqualError(t, err, "end of file") +// assert.ErrorContains(t, err, "end of file") +// assert.NoError(t, err) +// assert.NotErrorIs(t, err, io.EOF) +// +// and requires +// +// require.Error(t, err) // s.Require().Error(err), s.Require().Error(err) +// require.ErrorIs(t, err, io.EOF) +// require.ErrorAs(t, err, &target) +// ... +// +// RequireError ignores: +// - assertion in the `if` condition; +// - the entire `if-else[-if]` block, if there is an assertion in any `if` condition; +// - the last assertion in the block, if there are no methods/functions calls after it; +// - assertions in an explicit goroutine; +// - assertions in an explicit testing cleanup function or suite teardown methods; +// - sequence of NoError assertions. +type RequireError struct { + fnPattern *regexp.Regexp +} + +// NewRequireError constructs RequireError checker. +func NewRequireError() *RequireError { return new(RequireError) } +func (RequireError) Name() string { return "require-error" } + +func (checker *RequireError) SetFnPattern(p *regexp.Regexp) *RequireError { + if p != nil { + checker.fnPattern = p + } + return checker +} + +func (checker RequireError) Check(pass *analysis.Pass, inspector *inspector.Inspector) []analysis.Diagnostic { + callsByFunc := make(map[funcID][]*callMeta) + + // Stage 1. Collect meta information about any calls inside functions. 
+ + inspector.WithStack([]ast.Node{(*ast.CallExpr)(nil)}, func(node ast.Node, push bool, stack []ast.Node) bool { + if !push { + return false + } + if len(stack) < 3 { + return true + } + + fID := findSurroundingFunc(pass, stack) + if fID == nil { + return true + } + + _, prevIsIfStmt := stack[len(stack)-2].(*ast.IfStmt) + _, prevIsAssignStmt := stack[len(stack)-2].(*ast.AssignStmt) + _, prevPrevIsIfStmt := stack[len(stack)-3].(*ast.IfStmt) + inIfCond := prevIsIfStmt || (prevPrevIsIfStmt && prevIsAssignStmt) + + callExpr := node.(*ast.CallExpr) + testifyCall := NewCallMeta(pass, callExpr) + + call := &callMeta{ + call: callExpr, + testifyCall: testifyCall, + rootIf: findRootIf(stack), + parentIf: findNearestNode[*ast.IfStmt](stack), + parentBlock: findNearestNode[*ast.BlockStmt](stack), + inIfCond: inIfCond, + inNoErrorSeq: false, // Will be filled in below. + } + + callsByFunc[*fID] = append(callsByFunc[*fID], call) + return testifyCall == nil // Do not support asserts in asserts. + }) + + // Stage 2. Analyze calls and block context. + + var diagnostics []analysis.Diagnostic + + callsByBlock := map[*ast.BlockStmt][]*callMeta{} + for _, calls := range callsByFunc { + for _, c := range calls { + if b := c.parentBlock; b != nil { + callsByBlock[b] = append(callsByBlock[b], c) + } + } + } + + markCallsInNoErrorSequence(callsByBlock) + + for funcInfo, calls := range callsByFunc { + for i, c := range calls { + if funcInfo.isTestCleanup { + continue + } + if funcInfo.isGoroutine { + continue + } + + if c.testifyCall == nil { + continue + } + if !c.testifyCall.IsAssert { + continue + } + switch c.testifyCall.Fn.NameFTrimmed { + default: + continue + case "Error", "ErrorIs", "ErrorAs", "EqualError", "ErrorContains", "NoError", "NotErrorIs": + } + + if needToSkipBasedOnContext(c, i, calls, callsByBlock) { + continue + } + if p := checker.fnPattern; p != nil && !p.MatchString(c.testifyCall.Fn.Name) { + continue + } + + diagnostics = append(diagnostics, + *newDiagnostic(checker.Name(), c.testifyCall, requireErrorReport, nil)) + } + } + + return diagnostics +} + +func needToSkipBasedOnContext( + currCall *callMeta, + currCallIndex int, + otherCalls []*callMeta, + callsByBlock map[*ast.BlockStmt][]*callMeta, +) bool { + if currCall.inNoErrorSeq { + // Skip `assert.NoError` sequence. + return true + } + + if currCall.inIfCond { + // Skip assertions in the "if condition". + return true + } + + if currCall.rootIf != nil { + for _, rootCall := range otherCalls { + if (rootCall.rootIf == currCall.rootIf) && rootCall.inIfCond { + // Skip assertions in the entire if-else[-if] block, if some of "if condition" contains assertion. + return true + } + } + } + + block := currCall.parentBlock + blockCalls := callsByBlock[block] + isLastCallInBlock := blockCalls[len(blockCalls)-1] == currCall + + noCallsAfter := true + + _, blockEndWithReturn := block.List[len(block.List)-1].(*ast.ReturnStmt) + if !blockEndWithReturn { + for i := currCallIndex + 1; i < len(otherCalls); i++ { + if (otherCalls[i].rootIf == nil) || (otherCalls[i].rootIf != currCall.rootIf) { + noCallsAfter = false + break + } + } + } + + // Skip assertion if this is the last operation in the test. 
+ return isLastCallInBlock && noCallsAfter +} + +func findSurroundingFunc(pass *analysis.Pass, stack []ast.Node) *funcID { + for i := len(stack) - 2; i >= 0; i-- { + var fType *ast.FuncType + var fName string + var isTestCleanup bool + var isGoroutine bool + + switch fd := stack[i].(type) { + case *ast.FuncDecl: + fType, fName = fd.Type, fd.Name.Name + + if isTestifySuiteMethod(pass, fd) { + if ident := fd.Name; ident != nil && isAfterTestMethod(ident.Name) { + isTestCleanup = true + } + } + + case *ast.FuncLit: + fType, fName = fd.Type, "anonymous" + + if i >= 2 { //nolint:nestif + if ce, ok := stack[i-1].(*ast.CallExpr); ok { + if se, ok := ce.Fun.(*ast.SelectorExpr); ok { + isTestCleanup = isTestingTPtr(pass, se.X) && se.Sel != nil && (se.Sel.Name == "Cleanup") + } + + if _, ok := stack[i-2].(*ast.GoStmt); ok { + isGoroutine = true + } + } + } + + default: + continue + } + + return &funcID{ + pos: fType.Pos(), + posStr: pass.Fset.Position(fType.Pos()).String(), + name: fName, + isTestCleanup: isTestCleanup, + isGoroutine: isGoroutine, + } + } + return nil +} + +func findRootIf(stack []ast.Node) *ast.IfStmt { + nearestIf, i := findNearestNodeWithIdx[*ast.IfStmt](stack) + for ; i > 0; i-- { + parent, ok := stack[i-1].(*ast.IfStmt) + if ok { + nearestIf = parent + } else { + break + } + } + return nearestIf +} + +func findNearestNode[T ast.Node](stack []ast.Node) (v T) { + v, _ = findNearestNodeWithIdx[T](stack) + return +} + +func findNearestNodeWithIdx[T ast.Node](stack []ast.Node) (v T, index int) { + for i := len(stack) - 2; i >= 0; i-- { + if n, ok := stack[i].(T); ok { + return n, i + } + } + return +} + +func markCallsInNoErrorSequence(callsByBlock map[*ast.BlockStmt][]*callMeta) { + for _, calls := range callsByBlock { + for i, c := range calls { + if c.testifyCall == nil { + continue + } + + var prevIsNoError bool + if i > 0 { + if prev := calls[i-1].testifyCall; prev != nil { + prevIsNoError = isNoErrorAssertion(prev.Fn.Name) + } + } + + var nextIsNoError bool + if i < len(calls)-1 { + if next := calls[i+1].testifyCall; next != nil { + nextIsNoError = isNoErrorAssertion(next.Fn.Name) + } + } + + if isNoErrorAssertion(c.testifyCall.Fn.Name) && (prevIsNoError || nextIsNoError) { + calls[i].inNoErrorSeq = true + } + } + } +} + +type callMeta struct { + call *ast.CallExpr + testifyCall *CallMeta + rootIf *ast.IfStmt // The root `if` in if-else[-if] chain. + parentIf *ast.IfStmt // The nearest `if`, can be equal with rootIf. + parentBlock *ast.BlockStmt + inIfCond bool // True for code like `if assert.ErrorAs(t, err, &target) {`. + inNoErrorSeq bool // True for sequence of `assert.NoError` assertions. 
+} + +type funcID struct { + pos token.Pos + posStr string + name string + isTestCleanup bool + isGoroutine bool +} + +func (id funcID) String() string { + return fmt.Sprintf("%s at %s", id.name, id.posStr) +} + +func isAfterTestMethod(name string) bool { + // https://github.com/stretchr/testify/blob/master/suite/interfaces.go + switch name { + case "TearDownSuite", "TearDownTest", "AfterTest", "HandleStats", "TearDownSubTest": + return true + } + return false +} + +func isNoErrorAssertion(fnName string) bool { + return (fnName == "NoError") || (fnName == "NoErrorf") +} diff --git a/vendor/github.com/Antonboom/testifylint/internal/checkers/suite_dont_use_pkg.go b/vendor/github.com/Antonboom/testifylint/internal/checkers/suite_dont_use_pkg.go new file mode 100644 index 0000000000..bf84f6378e --- /dev/null +++ b/vendor/github.com/Antonboom/testifylint/internal/checkers/suite_dont_use_pkg.go @@ -0,0 +1,96 @@ +package checkers + +import ( + "fmt" + "go/ast" + "go/types" + + "golang.org/x/tools/go/analysis" + + "github.com/Antonboom/testifylint/internal/analysisutil" + "github.com/Antonboom/testifylint/internal/testify" +) + +// SuiteDontUsePkg detects situation like +// +// func (s *MySuite) TestSomething() { +// assert.Equal(s.T(), 42, value) +// } +// +// and requires +// +// func (s *MySuite) TestSomething() { +// s.Equal(42, value) +// } +type SuiteDontUsePkg struct{} + +// NewSuiteDontUsePkg constructs SuiteDontUsePkg checker. +func NewSuiteDontUsePkg() SuiteDontUsePkg { return SuiteDontUsePkg{} } +func (SuiteDontUsePkg) Name() string { return "suite-dont-use-pkg" } + +func (checker SuiteDontUsePkg) Check(pass *analysis.Pass, call *CallMeta) *analysis.Diagnostic { + if !call.IsPkg { + return nil + } + + args := call.ArgsRaw + if len(args) < 2 { + return nil + } + t := args[0] + + ce, ok := t.(*ast.CallExpr) + if !ok { + return nil + } + se, ok := ce.Fun.(*ast.SelectorExpr) + if !ok { + return nil + } + if se.X == nil || !implementsTestifySuiteIface(pass, se.X) { + return nil + } + if se.Sel == nil || se.Sel.Name != "T" { + return nil + } + rcv, ok := se.X.(*ast.Ident) // At this point we ensure that `s.T()` is used as the first argument of assertion. + if !ok { + return nil + } + + newSelector := rcv.Name + if !call.IsAssert { + newSelector += "." + "Require()" + } + + msg := fmt.Sprintf("use %s.%s", newSelector, call.Fn.Name) + return newDiagnostic(checker.Name(), call, msg, &analysis.SuggestedFix{ + Message: fmt.Sprintf("Replace `%s` with `%s`", call.SelectorXStr, newSelector), + TextEdits: []analysis.TextEdit{ + // Replace package function with suite method. + { + Pos: call.Selector.X.Pos(), + End: call.Selector.X.End(), + NewText: []byte(newSelector), + }, + // Remove `s.T()`. 
+ { + Pos: t.Pos(), + End: args[1].Pos(), + NewText: []byte(""), + }, + }, + }) +} + +func implementsTestifySuiteIface(pass *analysis.Pass, rcv ast.Expr) bool { + suiteIface := analysisutil.ObjectOf(pass.Pkg, testify.SuitePkgPath, "TestingSuite") + if suiteIface == nil { + return false + } + + return types.Implements( + pass.TypesInfo.TypeOf(rcv), + suiteIface.Type().Underlying().(*types.Interface), + ) +} diff --git a/vendor/github.com/Antonboom/testifylint/internal/checkers/suite_extra_assert_call.go b/vendor/github.com/Antonboom/testifylint/internal/checkers/suite_extra_assert_call.go new file mode 100644 index 0000000000..791488b651 --- /dev/null +++ b/vendor/github.com/Antonboom/testifylint/internal/checkers/suite_extra_assert_call.go @@ -0,0 +1,99 @@ +package checkers + +import ( + "fmt" + "go/ast" + + "golang.org/x/tools/go/analysis" + + "github.com/Antonboom/testifylint/internal/analysisutil" +) + +// SuiteExtraAssertCallMode reflects different modes of work of SuiteExtraAssertCall checker. +type SuiteExtraAssertCallMode int + +const ( + SuiteExtraAssertCallModeRemove SuiteExtraAssertCallMode = iota + SuiteExtraAssertCallModeRequire +) + +const DefaultSuiteExtraAssertCallMode = SuiteExtraAssertCallModeRemove + +// SuiteExtraAssertCall detects situation like +// +// func (s *MySuite) TestSomething() { +// s.Assert().Equal(42, value) +// } +// +// and requires +// +// func (s *MySuite) TestSomething() { +// s.Equal(42, value) +// } +// +// or vice versa (depending on the configurable mode). +type SuiteExtraAssertCall struct { + mode SuiteExtraAssertCallMode +} + +// NewSuiteExtraAssertCall constructs SuiteExtraAssertCall checker. +func NewSuiteExtraAssertCall() *SuiteExtraAssertCall { + return &SuiteExtraAssertCall{mode: DefaultSuiteExtraAssertCallMode} +} + +func (SuiteExtraAssertCall) Name() string { return "suite-extra-assert-call" } + +func (checker *SuiteExtraAssertCall) SetMode(m SuiteExtraAssertCallMode) *SuiteExtraAssertCall { + checker.mode = m + return checker +} + +func (checker SuiteExtraAssertCall) Check(pass *analysis.Pass, call *CallMeta) *analysis.Diagnostic { + if call.IsPkg { + return nil + } + + switch checker.mode { + case SuiteExtraAssertCallModeRequire: + x, ok := call.Selector.X.(*ast.Ident) // s.True + if !ok || x == nil || !implementsTestifySuiteIface(pass, x) { + return nil + } + + msg := fmt.Sprintf("use an explicit %s.Assert().%s", analysisutil.NodeString(pass.Fset, x), call.Fn.Name) + return newDiagnostic(checker.Name(), call, msg, &analysis.SuggestedFix{ + Message: "Add `Assert()` call", + TextEdits: []analysis.TextEdit{{ + Pos: x.End(), + End: x.End(), // Pure insertion. + NewText: []byte(".Assert()"), + }}, + }) + + case SuiteExtraAssertCallModeRemove: + x, ok := call.Selector.X.(*ast.CallExpr) // s.Assert().True + if !ok { + return nil + } + + se, ok := x.Fun.(*ast.SelectorExpr) + if !ok || se == nil || !implementsTestifySuiteIface(pass, se.X) { + return nil + } + if se.Sel == nil || se.Sel.Name != "Assert" { + return nil + } + + msg := fmt.Sprintf("need to simplify the assertion to %s.%s", analysisutil.NodeString(pass.Fset, se.X), call.Fn.Name) + return newDiagnostic(checker.Name(), call, msg, &analysis.SuggestedFix{ + Message: "Remove `Assert()` call", + TextEdits: []analysis.TextEdit{{ + Pos: se.Sel.Pos(), + End: x.End() + 1, // +1 for dot. 
+ NewText: []byte(""), + }}, + }) + } + + return nil +} diff --git a/vendor/github.com/Antonboom/testifylint/internal/checkers/suite_thelper.go b/vendor/github.com/Antonboom/testifylint/internal/checkers/suite_thelper.go new file mode 100644 index 0000000000..5cadc93ada --- /dev/null +++ b/vendor/github.com/Antonboom/testifylint/internal/checkers/suite_thelper.go @@ -0,0 +1,130 @@ +package checkers + +import ( + "fmt" + "go/ast" + "strings" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/ast/inspector" + + "github.com/Antonboom/testifylint/internal/analysisutil" + "github.com/Antonboom/testifylint/internal/testify" +) + +// SuiteTHelper requires t.Helper() call in suite helpers: +// +// func (s *RoomSuite) assertRoomRound(roundID RoundID) { +// s.T().Helper() +// s.Equal(roundID, s.getRoom().CurrentRound.ID) +// } +type SuiteTHelper struct{} + +// NewSuiteTHelper constructs SuiteTHelper checker. +func NewSuiteTHelper() SuiteTHelper { return SuiteTHelper{} } +func (SuiteTHelper) Name() string { return "suite-thelper" } + +func (checker SuiteTHelper) Check(pass *analysis.Pass, inspector *inspector.Inspector) (diagnostics []analysis.Diagnostic) { + inspector.Preorder([]ast.Node{(*ast.FuncDecl)(nil)}, func(node ast.Node) { + fd := node.(*ast.FuncDecl) + if !isTestifySuiteMethod(pass, fd) { + return + } + + if ident := fd.Name; ident == nil || isTestMethod(ident.Name) || isServiceMethod(ident.Name) { + return + } + + if !containsSuiteAssertions(pass, fd) { + return + } + + rcv := fd.Recv.List[0] + if len(rcv.Names) != 1 || rcv.Names[0] == nil { + return + } + rcvName := rcv.Names[0].Name + + helperCallStr := fmt.Sprintf("%s.T().Helper()", rcvName) + + firstStmt := fd.Body.List[0] + if analysisutil.NodeString(pass.Fset, firstStmt) == helperCallStr { + return + } + + msg := fmt.Sprintf("suite helper method must start with " + helperCallStr) + d := newDiagnostic(checker.Name(), fd, msg, &analysis.SuggestedFix{ + Message: fmt.Sprintf("Insert `%s`", helperCallStr), + TextEdits: []analysis.TextEdit{ + { + Pos: firstStmt.Pos(), + End: firstStmt.Pos(), // Pure insertion. 
+ NewText: []byte(helperCallStr + "\n\n"), + }, + }, + }) + diagnostics = append(diagnostics, *d) + }) + return diagnostics +} + +func isTestifySuiteMethod(pass *analysis.Pass, fDecl *ast.FuncDecl) bool { + if fDecl.Recv == nil || len(fDecl.Recv.List) != 1 { + return false + } + + rcv := fDecl.Recv.List[0] + return implementsTestifySuiteIface(pass, rcv.Type) +} + +func isTestMethod(name string) bool { + return strings.HasPrefix(name, "Test") +} + +func isServiceMethod(name string) bool { + // https://github.com/stretchr/testify/blob/master/suite/interfaces.go + switch name { + case "T", "SetT", "SetS", "SetupSuite", "SetupTest", "TearDownSuite", "TearDownTest", + "BeforeTest", "AfterTest", "HandleStats", "SetupSubTest", "TearDownSubTest": + return true + } + return false +} + +func containsSuiteAssertions(pass *analysis.Pass, fn *ast.FuncDecl) bool { + if fn.Body == nil { + return false + } + + for _, s := range fn.Body.List { + if isSuiteAssertion(pass, s) { + return true + } + } + return false +} + +func isSuiteAssertion(pass *analysis.Pass, stmt ast.Stmt) bool { + expr, ok := stmt.(*ast.ExprStmt) + if !ok { + return false + } + + ce, ok := expr.X.(*ast.CallExpr) + if !ok { + return false + } + + se, ok := ce.Fun.(*ast.SelectorExpr) + if !ok || se.Sel == nil { + return false + } + + if sel, ok := pass.TypesInfo.Selections[se]; ok { + pkg := sel.Obj().Pkg() + isAssert := analysisutil.IsPkg(pkg, testify.AssertPkgName, testify.AssertPkgPath) + isRequire := analysisutil.IsPkg(pkg, testify.RequirePkgName, testify.RequirePkgPath) + return isAssert || isRequire + } + return false +} diff --git a/vendor/github.com/Antonboom/testifylint/internal/checkers/useless_assert.go b/vendor/github.com/Antonboom/testifylint/internal/checkers/useless_assert.go new file mode 100644 index 0000000000..669f9d187d --- /dev/null +++ b/vendor/github.com/Antonboom/testifylint/internal/checkers/useless_assert.go @@ -0,0 +1,71 @@ +package checkers + +import ( + "golang.org/x/tools/go/analysis" + + "github.com/Antonboom/testifylint/internal/analysisutil" +) + +// UselessAssert detects useless asserts like +// +// 1) Asserting of the same variable +// +// assert.Equal(t, tt.value, tt.value) +// assert.ElementsMatch(t, users, users) +// ... +// +// 2) Open for contribution... +type UselessAssert struct{} + +// NewUselessAssert constructs UselessAssert checker. 
+func NewUselessAssert() UselessAssert { return UselessAssert{} } +func (UselessAssert) Name() string { return "useless-assert" } + +func (checker UselessAssert) Check(pass *analysis.Pass, call *CallMeta) *analysis.Diagnostic { + switch call.Fn.NameFTrimmed { + case + "Contains", + "ElementsMatch", + "Equal", + "EqualExportedValues", + "EqualValues", + "ErrorAs", + "ErrorIs", + "Exactly", + "Greater", + "GreaterOrEqual", + "Implements", + "InDelta", + "InDeltaMapValues", + "InDeltaSlice", + "InEpsilon", + "InEpsilonSlice", + "IsType", + "JSONEq", + "Less", + "LessOrEqual", + "NotEqual", + "NotEqualValues", + "NotErrorIs", + "NotRegexp", + "NotSame", + "NotSubset", + "Regexp", + "Same", + "Subset", + "WithinDuration", + "YAMLEq": + default: + return nil + } + + if len(call.Args) < 2 { + return nil + } + first, second := call.Args[0], call.Args[1] + + if analysisutil.NodeString(pass.Fset, first) == analysisutil.NodeString(pass.Fset, second) { + return newDiagnostic(checker.Name(), call, "asserting of the same variable", nil) + } + return nil +} diff --git a/vendor/github.com/Antonboom/testifylint/internal/config/config.go b/vendor/github.com/Antonboom/testifylint/internal/config/config.go new file mode 100644 index 0000000000..6dcfbdb52e --- /dev/null +++ b/vendor/github.com/Antonboom/testifylint/internal/config/config.go @@ -0,0 +1,103 @@ +package config + +import ( + "errors" + "flag" + "fmt" + + "github.com/Antonboom/testifylint/internal/checkers" +) + +// NewDefault builds default testifylint config. +func NewDefault() Config { + return Config{ + EnableAll: false, + DisabledCheckers: nil, + DisableAll: false, + EnabledCheckers: nil, + ExpectedActual: ExpectedActualConfig{ + ExpVarPattern: RegexpValue{checkers.DefaultExpectedVarPattern}, + }, + RequireError: RequireErrorConfig{ + FnPattern: RegexpValue{nil}, + }, + SuiteExtraAssertCall: SuiteExtraAssertCallConfig{ + Mode: checkers.DefaultSuiteExtraAssertCallMode, + }, + } +} + +// Config implements testifylint configuration. +type Config struct { + EnableAll bool + DisabledCheckers KnownCheckersValue + DisableAll bool + EnabledCheckers KnownCheckersValue + + ExpectedActual ExpectedActualConfig + RequireError RequireErrorConfig + SuiteExtraAssertCall SuiteExtraAssertCallConfig +} + +// ExpectedActualConfig implements configuration of checkers.ExpectedActual. +type ExpectedActualConfig struct { + ExpVarPattern RegexpValue +} + +// RequireErrorConfig implements configuration of checkers.RequireError. +type RequireErrorConfig struct { + FnPattern RegexpValue +} + +// SuiteExtraAssertCallConfig implements configuration of checkers.SuiteExtraAssertCall. 
+type SuiteExtraAssertCallConfig struct { + Mode checkers.SuiteExtraAssertCallMode +} + +func (cfg Config) Validate() error { + if cfg.EnableAll { + if cfg.DisableAll { + return errors.New("enable-all and disable-all options must not be combined") + } + + if len(cfg.EnabledCheckers) != 0 { + return errors.New("enable-all and enable options must not be combined") + } + } + + if cfg.DisableAll { + if len(cfg.DisabledCheckers) != 0 { + return errors.New("disable-all and disable options must not be combined") + } + + if len(cfg.EnabledCheckers) == 0 { + return errors.New("all checkers were disabled, but no one checker was enabled: at least one must be enabled") + } + } + + for _, checker := range cfg.DisabledCheckers { + if cfg.EnabledCheckers.Contains(checker) { + return fmt.Errorf("checker %q disabled and enabled at one moment", checker) + } + } + + return nil +} + +// BindToFlags binds Config fields to according flags. +func BindToFlags(cfg *Config, fs *flag.FlagSet) { + fs.BoolVar(&cfg.EnableAll, "enable-all", false, "enable all checkers") + fs.Var(&cfg.DisabledCheckers, "disable", "comma separated list of disabled checkers (to exclude from enabled by default)") + fs.BoolVar(&cfg.DisableAll, "disable-all", false, "disable all checkers") + fs.Var(&cfg.EnabledCheckers, "enable", "comma separated list of enabled checkers (in addition to enabled by default)") + + fs.Var(&cfg.ExpectedActual.ExpVarPattern, "expected-actual.pattern", "regexp for expected variable name") + fs.Var(&cfg.RequireError.FnPattern, "require-error.fn-pattern", "regexp for error assertions that should only be analyzed") + fs.Var(NewEnumValue(suiteExtraAssertCallModeAsString, &cfg.SuiteExtraAssertCall.Mode), + "suite-extra-assert-call.mode", "to require or remove extra Assert() call") +} + +var suiteExtraAssertCallModeAsString = map[string]checkers.SuiteExtraAssertCallMode{ + "remove": checkers.SuiteExtraAssertCallModeRemove, + "require": checkers.SuiteExtraAssertCallModeRequire, +} diff --git a/vendor/github.com/Antonboom/testifylint/internal/config/flag_value_types.go b/vendor/github.com/Antonboom/testifylint/internal/config/flag_value_types.go new file mode 100644 index 0000000000..5b08ec47b1 --- /dev/null +++ b/vendor/github.com/Antonboom/testifylint/internal/config/flag_value_types.go @@ -0,0 +1,114 @@ +package config + +import ( + "flag" + "fmt" + "regexp" + "sort" + "strings" + + "github.com/Antonboom/testifylint/internal/checkers" +) + +var ( + _ flag.Value = (*KnownCheckersValue)(nil) + _ flag.Value = (*RegexpValue)(nil) + _ flag.Value = (*EnumValue[checkers.SuiteExtraAssertCallMode])(nil) +) + +// KnownCheckersValue implements comma separated list of testify checkers. +type KnownCheckersValue []string + +func (kcv KnownCheckersValue) String() string { + return strings.Join(kcv, ",") +} + +func (kcv *KnownCheckersValue) Set(v string) error { + chckrs := strings.Split(v, ",") + for _, checkerName := range chckrs { + if ok := checkers.IsKnown(checkerName); !ok { + return fmt.Errorf("unknown checker %q", checkerName) + } + } + + *kcv = chckrs + return nil +} + +func (kcv KnownCheckersValue) Contains(v string) bool { + for _, checker := range kcv { + if checker == v { + return true + } + } + return false +} + +// RegexpValue is a special wrapper for support of flag.FlagSet over regexp.Regexp. +// Original regexp is available through RegexpValue.Regexp. 
+type RegexpValue struct { + *regexp.Regexp +} + +func (rv RegexpValue) String() string { + if rv.Regexp == nil { + return "" + } + return rv.Regexp.String() +} + +func (rv *RegexpValue) Set(v string) error { + compiled, err := regexp.Compile(v) + if err != nil { + return err + } + + rv.Regexp = compiled + return nil +} + +// EnumValue is a special type for support of flag.FlagSet over user-defined constants. +type EnumValue[EnumT comparable] struct { + mapping map[string]EnumT + keys []string + dst *EnumT +} + +// NewEnumValue takes the "enum-value-name to enum-value" mapping and a destination for the value passed through the CLI. +// Returns an EnumValue instance suitable for flag.FlagSet.Var. +func NewEnumValue[EnumT comparable](mapping map[string]EnumT, dst *EnumT) *EnumValue[EnumT] { + keys := make([]string, 0, len(mapping)) + for k := range mapping { + keys = append(keys, k) + } + sort.Strings(keys) + + return &EnumValue[EnumT]{ + mapping: mapping, + keys: keys, + dst: dst, + } +} + +func (e EnumValue[EnumT]) String() string { + if e.dst == nil { + return "" + } + + for k, v := range e.mapping { + if v == *e.dst { + return k + } + } + return "" +} + +func (e *EnumValue[EnumT]) Set(s string) error { + v, ok := e.mapping[s] + if !ok { + return fmt.Errorf("use one of (%v)", strings.Join(e.keys, " | ")) + } + + *e.dst = v + return nil +} diff --git a/vendor/github.com/Antonboom/testifylint/internal/testify/const.go b/vendor/github.com/Antonboom/testifylint/internal/testify/const.go new file mode 100644 index 0000000000..3476e40402 --- /dev/null +++ b/vendor/github.com/Antonboom/testifylint/internal/testify/const.go @@ -0,0 +1,17 @@ +package testify + +const ( + ModulePath = "github.com/stretchr/testify" + + AssertPkgName = "assert" + HTTPPkgName = "http" + MockPkgName = "mock" + RequirePkgName = "require" + SuitePkgName = "suite" + + AssertPkgPath = ModulePath + "/" + AssertPkgName + HTTPPkgPath = ModulePath + "/" + HTTPPkgName + MockPkgPath = ModulePath + "/" + MockPkgName + RequirePkgPath = ModulePath + "/" + RequirePkgName + SuitePkgPath = ModulePath + "/" + SuitePkgName +) diff --git a/vendor/github.com/BurntSushi/toml/decode.go b/vendor/github.com/BurntSushi/toml/decode.go index 0ca1dc4fee..4d38f3bfce 100644 --- a/vendor/github.com/BurntSushi/toml/decode.go +++ b/vendor/github.com/BurntSushi/toml/decode.go @@ -91,7 +91,7 @@ const ( // UnmarshalText method. See the Unmarshaler example for a demonstration with // email addresses. // -// ### Key mapping +// # Key mapping // // TOML keys can map to either keys in a Go map or field names in a Go struct. // The special `toml` struct tag can be used to map TOML keys to struct fields @@ -248,7 +248,7 @@ func (md *MetaData) unify(data interface{}, rv reflect.Value) error { case reflect.Bool: return md.unifyBool(data, rv) case reflect.Interface: - if rv.NumMethod() > 0 { // Only support empty interfaces are supported. + if rv.NumMethod() > 0 { /// Only empty interfaces are supported. return md.e("unsupported type %s", rv.Type()) } return md.unifyAnything(data, rv) diff --git a/vendor/github.com/BurntSushi/toml/deprecated.go b/vendor/github.com/BurntSushi/toml/deprecated.go index c6af3f239d..b9e309717e 100644 --- a/vendor/github.com/BurntSushi/toml/deprecated.go +++ b/vendor/github.com/BurntSushi/toml/deprecated.go @@ -5,17 +5,25 @@ import ( "io" ) +// TextMarshaler is an alias for encoding.TextMarshaler. 
+// // Deprecated: use encoding.TextMarshaler type TextMarshaler encoding.TextMarshaler +// TextUnmarshaler is an alias for encoding.TextUnmarshaler. +// // Deprecated: use encoding.TextUnmarshaler type TextUnmarshaler encoding.TextUnmarshaler +// PrimitiveDecode is an alias for MetaData.PrimitiveDecode(). +// // Deprecated: use MetaData.PrimitiveDecode. func PrimitiveDecode(primValue Primitive, v interface{}) error { md := MetaData{decoded: make(map[string]struct{})} return md.unify(primValue.undecoded, rvalue(v)) } +// DecodeReader is an alias for NewDecoder(r).Decode(v). +// // Deprecated: use NewDecoder(reader).Decode(&value). func DecodeReader(r io.Reader, v interface{}) (MetaData, error) { return NewDecoder(r).Decode(v) } diff --git a/vendor/github.com/BurntSushi/toml/encode.go b/vendor/github.com/BurntSushi/toml/encode.go index 930e1d521a..9cd25d7571 100644 --- a/vendor/github.com/BurntSushi/toml/encode.go +++ b/vendor/github.com/BurntSushi/toml/encode.go @@ -136,7 +136,8 @@ func NewEncoder(w io.Writer) *Encoder { // document. func (enc *Encoder) Encode(v interface{}) error { rv := eindirect(reflect.ValueOf(v)) - if err := enc.safeEncode(Key([]string{}), rv); err != nil { + err := enc.safeEncode(Key([]string{}), rv) + if err != nil { return err } return enc.w.Flush() @@ -457,6 +458,16 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) { frv := eindirect(rv.Field(i)) + if is32Bit { + // Copy so it works correct on 32bit archs; not clear why this + // is needed. See #314, and https://www.reddit.com/r/golang/comments/pnx8v4 + // This also works fine on 64bit, but 32bit archs are somewhat + // rare and this is a wee bit faster. + copyStart := make([]int, len(start)) + copy(copyStart, start) + start = copyStart + } + // Treat anonymous struct fields with tag names as though they are // not anonymous, like encoding/json does. // @@ -471,17 +482,7 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) { if typeIsTable(tomlTypeOfGo(frv)) { fieldsSub = append(fieldsSub, append(start, f.Index...)) } else { - // Copy so it works correct on 32bit archs; not clear why this - // is needed. See #314, and https://www.reddit.com/r/golang/comments/pnx8v4 - // This also works fine on 64bit, but 32bit archs are somewhat - // rare and this is a wee bit faster. - if is32Bit { - copyStart := make([]int, len(start)) - copy(copyStart, start) - fieldsDirect = append(fieldsDirect, append(copyStart, f.Index...)) - } else { - fieldsDirect = append(fieldsDirect, append(start, f.Index...)) - } + fieldsDirect = append(fieldsDirect, append(start, f.Index...)) } } } @@ -490,24 +491,27 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) { writeFields := func(fields [][]int) { for _, fieldIndex := range fields { fieldType := rt.FieldByIndex(fieldIndex) - fieldVal := eindirect(rv.FieldByIndex(fieldIndex)) + fieldVal := rv.FieldByIndex(fieldIndex) - if isNil(fieldVal) { /// Don't write anything for nil fields. + opts := getOptions(fieldType.Tag) + if opts.skip { + continue + } + if opts.omitempty && isEmpty(fieldVal) { continue } - opts := getOptions(fieldType.Tag) - if opts.skip { + fieldVal = eindirect(fieldVal) + + if isNil(fieldVal) { /// Don't write anything for nil fields. 
continue } + keyName := fieldType.Name if opts.name != "" { keyName = opts.name } - if opts.omitempty && enc.isEmpty(fieldVal) { - continue - } if opts.omitzero && isZero(fieldVal) { continue } @@ -649,7 +653,7 @@ func isZero(rv reflect.Value) bool { return false } -func (enc *Encoder) isEmpty(rv reflect.Value) bool { +func isEmpty(rv reflect.Value) bool { switch rv.Kind() { case reflect.Array, reflect.Slice, reflect.Map, reflect.String: return rv.Len() == 0 @@ -664,13 +668,15 @@ func (enc *Encoder) isEmpty(rv reflect.Value) bool { // type b struct{ s []string } // s := a{field: b{s: []string{"AAA"}}} for i := 0; i < rv.NumField(); i++ { - if !enc.isEmpty(rv.Field(i)) { + if !isEmpty(rv.Field(i)) { return false } } return true case reflect.Bool: return !rv.Bool() + case reflect.Ptr: + return rv.IsNil() } return false } @@ -693,8 +699,11 @@ func (enc *Encoder) newline() { // v v v v vv // key = {k = 1, k2 = 2} func (enc *Encoder) writeKeyValue(key Key, val reflect.Value, inline bool) { + /// Marshaler used on top-level document; call eElement() to just call + /// Marshal{TOML,Text}. if len(key) == 0 { - encPanic(errNoKey) + enc.eElement(val) + return } enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1)) enc.eElement(val) diff --git a/vendor/github.com/BurntSushi/toml/error.go b/vendor/github.com/BurntSushi/toml/error.go index f4f390e647..efd68865bb 100644 --- a/vendor/github.com/BurntSushi/toml/error.go +++ b/vendor/github.com/BurntSushi/toml/error.go @@ -84,7 +84,7 @@ func (pe ParseError) Error() string { pe.Position.Line, pe.LastKey, msg) } -// ErrorWithUsage() returns the error with detailed location context. +// ErrorWithPosition returns the error with detailed location context. // // See the documentation on [ParseError]. func (pe ParseError) ErrorWithPosition() string { @@ -124,7 +124,7 @@ func (pe ParseError) ErrorWithPosition() string { return b.String() } -// ErrorWithUsage() returns the error with detailed location context and usage +// ErrorWithUsage returns the error with detailed location context and usage // guidance. // // See the documentation on [ParseError]. diff --git a/vendor/github.com/BurntSushi/toml/lex.go b/vendor/github.com/BurntSushi/toml/lex.go index d4d70871d8..3545a6ad66 100644 --- a/vendor/github.com/BurntSushi/toml/lex.go +++ b/vendor/github.com/BurntSushi/toml/lex.go @@ -46,12 +46,13 @@ func (p Position) String() string { } type lexer struct { - input string - start int - pos int - line int - state stateFn - items chan item + input string + start int + pos int + line int + state stateFn + items chan item + tomlNext bool // Allow for backing up up to 4 runes. This is necessary because TOML // contains 3-rune tokens (""" and '''). @@ -87,13 +88,14 @@ func (lx *lexer) nextItem() item { } } -func lex(input string) *lexer { +func lex(input string, tomlNext bool) *lexer { lx := &lexer{ - input: input, - state: lexTop, - items: make(chan item, 10), - stack: make([]stateFn, 0, 10), - line: 1, + input: input, + state: lexTop, + items: make(chan item, 10), + stack: make([]stateFn, 0, 10), + line: 1, + tomlNext: tomlNext, } return lx } @@ -408,7 +410,7 @@ func lexTableNameEnd(lx *lexer) stateFn { // Lexes only one part, e.g. only 'a' inside 'a.b'. 
func lexBareName(lx *lexer) stateFn { r := lx.next() - if isBareKeyChar(r) { + if isBareKeyChar(r, lx.tomlNext) { return lexBareName } lx.backup() @@ -618,6 +620,9 @@ func lexInlineTableValue(lx *lexer) stateFn { case isWhitespace(r): return lexSkip(lx, lexInlineTableValue) case isNL(r): + if lx.tomlNext { + return lexSkip(lx, lexInlineTableValue) + } return lx.errorPrevLine(errLexInlineTableNL{}) case r == '#': lx.push(lexInlineTableValue) @@ -640,6 +645,9 @@ func lexInlineTableValueEnd(lx *lexer) stateFn { case isWhitespace(r): return lexSkip(lx, lexInlineTableValueEnd) case isNL(r): + if lx.tomlNext { + return lexSkip(lx, lexInlineTableValueEnd) + } return lx.errorPrevLine(errLexInlineTableNL{}) case r == '#': lx.push(lexInlineTableValueEnd) @@ -648,6 +656,9 @@ func lexInlineTableValueEnd(lx *lexer) stateFn { lx.ignore() lx.skip(isWhitespace) if lx.peek() == '}' { + if lx.tomlNext { + return lexInlineTableValueEnd + } return lx.errorf("trailing comma not allowed in inline tables") } return lexInlineTableValue @@ -770,8 +781,8 @@ func lexRawString(lx *lexer) stateFn { } } -// lexMultilineRawString consumes a raw string. Nothing can be escaped in such -// a string. It assumes that the beginning ''' has already been consumed and +// lexMultilineRawString consumes a raw string. Nothing can be escaped in such a +// string. It assumes that the beginning triple-' has already been consumed and // ignored. func lexMultilineRawString(lx *lexer) stateFn { r := lx.next() @@ -828,6 +839,11 @@ func lexMultilineStringEscape(lx *lexer) stateFn { func lexStringEscape(lx *lexer) stateFn { r := lx.next() switch r { + case 'e': + if !lx.tomlNext { + return lx.error(errLexEscape{r}) + } + fallthrough case 'b': fallthrough case 't': @@ -846,6 +862,11 @@ func lexStringEscape(lx *lexer) stateFn { fallthrough case '\\': return lx.pop() + case 'x': + if !lx.tomlNext { + return lx.error(errLexEscape{r}) + } + return lexHexEscape case 'u': return lexShortUnicodeEscape case 'U': @@ -854,6 +875,19 @@ func lexStringEscape(lx *lexer) stateFn { return lx.error(errLexEscape{r}) } +func lexHexEscape(lx *lexer) stateFn { + var r rune + for i := 0; i < 2; i++ { + r = lx.next() + if !isHexadecimal(r) { + return lx.errorf( + `expected two hexadecimal digits after '\x', but got %q instead`, + lx.current()) + } + } + return lx.pop() +} + func lexShortUnicodeEscape(lx *lexer) stateFn { var r rune for i := 0; i < 4; i++ { @@ -1225,7 +1259,23 @@ func isOctal(r rune) bool { return r >= '0' && r <= '7' } func isHexadecimal(r rune) bool { return (r >= '0' && r <= '9') || (r >= 'a' && r <= 'f') || (r >= 'A' && r <= 'F') } -func isBareKeyChar(r rune) bool { + +func isBareKeyChar(r rune, tomlNext bool) bool { + if tomlNext { + return (r >= 'A' && r <= 'Z') || + (r >= 'a' && r <= 'z') || + (r >= '0' && r <= '9') || + r == '_' || r == '-' || + r == 0xb2 || r == 0xb3 || r == 0xb9 || (r >= 0xbc && r <= 0xbe) || + (r >= 0xc0 && r <= 0xd6) || (r >= 0xd8 && r <= 0xf6) || (r >= 0xf8 && r <= 0x037d) || + (r >= 0x037f && r <= 0x1fff) || + (r >= 0x200c && r <= 0x200d) || (r >= 0x203f && r <= 0x2040) || + (r >= 0x2070 && r <= 0x218f) || (r >= 0x2460 && r <= 0x24ff) || + (r >= 0x2c00 && r <= 0x2fef) || (r >= 0x3001 && r <= 0xd7ff) || + (r >= 0xf900 && r <= 0xfdcf) || (r >= 0xfdf0 && r <= 0xfffd) || + (r >= 0x10000 && r <= 0xeffff) + } + return (r >= 'A' && r <= 'Z') || (r >= 'a' && r <= 'z') || (r >= '0' && r <= '9') || diff --git a/vendor/github.com/BurntSushi/toml/meta.go b/vendor/github.com/BurntSushi/toml/meta.go index 71847a0415..2e78b24e95 
100644 --- a/vendor/github.com/BurntSushi/toml/meta.go +++ b/vendor/github.com/BurntSushi/toml/meta.go @@ -106,7 +106,7 @@ func (k Key) maybeQuoted(i int) string { return `""` } for _, c := range k[i] { - if !isBareKeyChar(c) { + if !isBareKeyChar(c, false) { return `"` + dblQuotedReplacer.Replace(k[i]) + `"` } } diff --git a/vendor/github.com/BurntSushi/toml/parse.go b/vendor/github.com/BurntSushi/toml/parse.go index d2542d6f92..9c19153698 100644 --- a/vendor/github.com/BurntSushi/toml/parse.go +++ b/vendor/github.com/BurntSushi/toml/parse.go @@ -2,6 +2,7 @@ package toml import ( "fmt" + "os" "strconv" "strings" "time" @@ -15,6 +16,7 @@ type parser struct { context Key // Full key for the current hash in scope. currentKey string // Base key name for everything except hashes. pos Position // Current position in the TOML file. + tomlNext bool ordered []Key // List of keys in the order that they appear in the TOML data. @@ -29,6 +31,8 @@ type keyInfo struct { } func parse(data string) (p *parser, err error) { + _, tomlNext := os.LookupEnv("BURNTSUSHI_TOML_110") + defer func() { if r := recover(); r != nil { if pErr, ok := r.(ParseError); ok { @@ -41,9 +45,12 @@ func parse(data string) (p *parser, err error) { }() // Read over BOM; do this here as the lexer calls utf8.DecodeRuneInString() - // which mangles stuff. - if strings.HasPrefix(data, "\xff\xfe") || strings.HasPrefix(data, "\xfe\xff") { + // which mangles stuff. UTF-16 BOM isn't strictly valid, but some tools add + // it anyway. + if strings.HasPrefix(data, "\xff\xfe") || strings.HasPrefix(data, "\xfe\xff") { // UTF-16 data = data[2:] + } else if strings.HasPrefix(data, "\xef\xbb\xbf") { // UTF-8 + data = data[3:] } // Examine first few bytes for NULL bytes; this probably means it's a UTF-16 @@ -65,9 +72,10 @@ func parse(data string) (p *parser, err error) { p = &parser{ keyInfo: make(map[string]keyInfo), mapping: make(map[string]interface{}), - lx: lex(data), + lx: lex(data, tomlNext), ordered: make([]Key, 0), implicits: make(map[string]struct{}), + tomlNext: tomlNext, } for { item := p.next() @@ -194,12 +202,12 @@ func (p *parser) topLevel(item item) { for i := range context { p.addImplicitContext(append(p.context, context[i:i+1]...)) } + p.ordered = append(p.ordered, p.context.add(p.currentKey)) /// Set value. vItem := p.next() val, typ := p.value(vItem, false) p.set(p.currentKey, val, typ, vItem.pos) - p.ordered = append(p.ordered, p.context.add(p.currentKey)) /// Remove the context we added (preserving any context from [tbl] lines). 
p.context = outerContext @@ -236,7 +244,7 @@ func (p *parser) value(it item, parentIsArray bool) (interface{}, tomlType) { case itemString: return p.replaceEscapes(it, it.val), p.typeOfPrimitive(it) case itemMultilineString: - return p.replaceEscapes(it, stripFirstNewline(p.stripEscapedNewlines(it.val))), p.typeOfPrimitive(it) + return p.replaceEscapes(it, p.stripEscapedNewlines(stripFirstNewline(it.val))), p.typeOfPrimitive(it) case itemRawString: return it.val, p.typeOfPrimitive(it) case itemRawMultilineString: @@ -331,11 +339,17 @@ func (p *parser) valueFloat(it item) (interface{}, tomlType) { var dtTypes = []struct { fmt string zone *time.Location + next bool }{ - {time.RFC3339Nano, time.Local}, - {"2006-01-02T15:04:05.999999999", internal.LocalDatetime}, - {"2006-01-02", internal.LocalDate}, - {"15:04:05.999999999", internal.LocalTime}, + {time.RFC3339Nano, time.Local, false}, + {"2006-01-02T15:04:05.999999999", internal.LocalDatetime, false}, + {"2006-01-02", internal.LocalDate, false}, + {"15:04:05.999999999", internal.LocalTime, false}, + + // tomlNext + {"2006-01-02T15:04Z07:00", time.Local, true}, + {"2006-01-02T15:04", internal.LocalDatetime, true}, + {"15:04", internal.LocalTime, true}, } func (p *parser) valueDatetime(it item) (interface{}, tomlType) { @@ -346,6 +360,9 @@ func (p *parser) valueDatetime(it item) (interface{}, tomlType) { err error ) for _, dt := range dtTypes { + if dt.next && !p.tomlNext { + continue + } t, err = time.ParseInLocation(dt.fmt, it.val, dt.zone) if err == nil { ok = true @@ -384,6 +401,7 @@ func (p *parser) valueArray(it item) (interface{}, tomlType) { // // Not entirely sure how to best store this; could use "key[0]", // "key[1]" notation, or maybe store it on the Array type? + _ = types } return array, tomlArray } @@ -426,11 +444,11 @@ func (p *parser) valueInlineTable(it item, parentIsArray bool) (interface{}, tom for i := range context { p.addImplicitContext(append(p.context, context[i:i+1]...)) } + p.ordered = append(p.ordered, p.context.add(p.currentKey)) /// Set the value. val, typ := p.value(p.next(), false) p.set(p.currentKey, val, typ, it.pos) - p.ordered = append(p.ordered, p.context.add(p.currentKey)) hash[p.currentKey] = val /// Restore context. @@ -551,7 +569,6 @@ func (p *parser) addContext(key Key, array bool) { func (p *parser) set(key string, val interface{}, typ tomlType, pos Position) { p.setValue(key, val) p.setType(key, typ, pos) - } // setValue sets the given key to the given value in the current context. @@ -632,14 +649,11 @@ func (p *parser) setType(key string, typ tomlType, pos Position) { // Implicit keys need to be created when tables are implied in "a.b.c.d = 1" and // "[a.b.c]" (the "a", "b", and "c" hashes are never created explicitly). 
-func (p *parser) addImplicit(key Key) { p.implicits[key.String()] = struct{}{} } -func (p *parser) removeImplicit(key Key) { delete(p.implicits, key.String()) } -func (p *parser) isImplicit(key Key) bool { _, ok := p.implicits[key.String()]; return ok } -func (p *parser) isArray(key Key) bool { return p.keyInfo[key.String()].tomlType == tomlArray } -func (p *parser) addImplicitContext(key Key) { - p.addImplicit(key) - p.addContext(key, false) -} +func (p *parser) addImplicit(key Key) { p.implicits[key.String()] = struct{}{} } +func (p *parser) removeImplicit(key Key) { delete(p.implicits, key.String()) } +func (p *parser) isImplicit(key Key) bool { _, ok := p.implicits[key.String()]; return ok } +func (p *parser) isArray(key Key) bool { return p.keyInfo[key.String()].tomlType == tomlArray } +func (p *parser) addImplicitContext(key Key) { p.addImplicit(key); p.addContext(key, false) } // current returns the full key name of the current context. func (p *parser) current() string { @@ -662,49 +676,54 @@ func stripFirstNewline(s string) string { return s } -// Remove newlines inside triple-quoted strings if a line ends with "\". +// stripEscapedNewlines removes whitespace after line-ending backslashes in +// multiline strings. +// +// A line-ending backslash is an unescaped \ followed only by whitespace until +// the next newline. After a line-ending backslash, all whitespace is removed +// until the next non-whitespace character. func (p *parser) stripEscapedNewlines(s string) string { - split := strings.Split(s, "\n") - if len(split) < 1 { - return s - } - - escNL := false // Keep track of the last non-blank line was escaped. - for i, line := range split { - line = strings.TrimRight(line, " \t\r") - - if len(line) == 0 || line[len(line)-1] != '\\' { - split[i] = strings.TrimRight(split[i], "\r") - if !escNL && i != len(split)-1 { - split[i] += "\n" - } - continue + var b strings.Builder + var i int + for { + ix := strings.Index(s[i:], `\`) + if ix < 0 { + b.WriteString(s) + return b.String() } + i += ix - escBS := true - for j := len(line) - 1; j >= 0 && line[j] == '\\'; j-- { - escBS = !escBS + if len(s) > i+1 && s[i+1] == '\\' { + // Escaped backslash. + i += 2 + continue } - if escNL { - line = strings.TrimLeft(line, " \t\r") + // Scan until the next non-whitespace. + j := i + 1 + whitespaceLoop: + for ; j < len(s); j++ { + switch s[j] { + case ' ', '\t', '\r', '\n': + default: + break whitespaceLoop + } } - escNL = !escBS - - if escBS { - split[i] += "\n" + if j == i+1 { + // Not a whitespace escape. + i++ continue } - - if i == len(split)-1 { - p.panicf("invalid escape: '\\ '") - } - - split[i] = line[:len(line)-1] // Remove \ - if len(split)-1 > i { - split[i+1] = strings.TrimLeft(split[i+1], " \t\r") + if !strings.Contains(s[i:j], "\n") { + // This is not a line-ending backslash. + // (It's a bad escape sequence, but we can let + // replaceEscapes catch it.) 
+ i++ + continue } + b.WriteString(s[:i]) + s = s[j:] + i = 0 } - return strings.Join(split, "") } func (p *parser) replaceEscapes(it item, str string) string { @@ -743,12 +762,23 @@ func (p *parser) replaceEscapes(it item, str string) string { case 'r': replaced = append(replaced, rune(0x000D)) r += 1 + case 'e': + if p.tomlNext { + replaced = append(replaced, rune(0x001B)) + r += 1 + } case '"': replaced = append(replaced, rune(0x0022)) r += 1 case '\\': replaced = append(replaced, rune(0x005C)) r += 1 + case 'x': + if p.tomlNext { + escaped := p.asciiEscapeToUnicode(it, s[r+1:r+3]) + replaced = append(replaced, escaped) + r += 3 + } case 'u': // At this point, we know we have a Unicode escape of the form // `uXXXX` at [r, r+5). (Because the lexer guarantees this diff --git a/vendor/github.com/GaijinEntertainment/go-exhaustruct/v3/LICENSE b/vendor/github.com/GaijinEntertainment/go-exhaustruct/v3/LICENSE new file mode 100644 index 0000000000..6698196c5a --- /dev/null +++ b/vendor/github.com/GaijinEntertainment/go-exhaustruct/v3/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2022 Gaijin Entertainment + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/GaijinEntertainment/go-exhaustruct/v3/analyzer/analyzer.go b/vendor/github.com/GaijinEntertainment/go-exhaustruct/v3/analyzer/analyzer.go new file mode 100644 index 0000000000..b490f1c640 --- /dev/null +++ b/vendor/github.com/GaijinEntertainment/go-exhaustruct/v3/analyzer/analyzer.go @@ -0,0 +1,291 @@ +package analyzer + +import ( + "flag" + "fmt" + "go/ast" + "go/token" + "go/types" + "sync" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" + + "github.com/GaijinEntertainment/go-exhaustruct/v3/internal/comment" + "github.com/GaijinEntertainment/go-exhaustruct/v3/internal/pattern" + "github.com/GaijinEntertainment/go-exhaustruct/v3/internal/structure" +) + +type analyzer struct { + include pattern.List `exhaustruct:"optional"` + exclude pattern.List `exhaustruct:"optional"` + + structFields structure.FieldsCache `exhaustruct:"optional"` + comments comment.Cache `exhaustruct:"optional"` + + typeProcessingNeed map[string]bool + typeProcessingNeedMu sync.RWMutex `exhaustruct:"optional"` +} + +func NewAnalyzer(include, exclude []string) (*analysis.Analyzer, error) { + a := analyzer{ + typeProcessingNeed: make(map[string]bool), + comments: comment.Cache{}, + } + + var err error + + a.include, err = pattern.NewList(include...) 
+ if err != nil { + return nil, err //nolint:wrapcheck + } + + a.exclude, err = pattern.NewList(exclude...) + if err != nil { + return nil, err //nolint:wrapcheck + } + + return &analysis.Analyzer{ //nolint:exhaustruct + Name: "exhaustruct", + Doc: "Checks if all structure fields are initialized", + Run: a.run, + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Flags: a.newFlagSet(), + }, nil +} + +func (a *analyzer) newFlagSet() flag.FlagSet { + fs := flag.NewFlagSet("", flag.PanicOnError) + + fs.Var(&a.include, "i", `Regular expression to match type names, can receive multiple flags. +Anonymous structs can be matched by '' alias. +4ex: + github.com/GaijinEntertainment/go-exhaustruct/v3/analyzer\. + github.com/GaijinEntertainment/go-exhaustruct/v3/analyzer\.TypeInfo`) + fs.Var(&a.exclude, "e", `Regular expression to exclude type names, can receive multiple flags. +Anonymous structs can be matched by '' alias. +4ex: + github.com/GaijinEntertainment/go-exhaustruct/v3/analyzer\. + github.com/GaijinEntertainment/go-exhaustruct/v3/analyzer\.TypeInfo`) + + return *fs +} + +func (a *analyzer) run(pass *analysis.Pass) (any, error) { + insp := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) //nolint:forcetypeassert + + insp.WithStack([]ast.Node{(*ast.CompositeLit)(nil)}, a.newVisitor(pass)) + + return nil, nil //nolint:nilnil +} + +// newVisitor returns visitor that only expects [ast.CompositeLit] nodes. +func (a *analyzer) newVisitor(pass *analysis.Pass) func(n ast.Node, push bool, stack []ast.Node) bool { + return func(n ast.Node, push bool, stack []ast.Node) bool { + if !push { + return true + } + + lit, ok := n.(*ast.CompositeLit) + if !ok { + // this should never happen, but better be prepared + return true + } + + structTyp, typeInfo, ok := getStructType(pass, lit) + if !ok { + return true + } + + if len(lit.Elts) == 0 { + if ret, ok := stackParentIsReturn(stack); ok { + if returnContainsNonNilError(pass, ret) { + // it is okay to return uninitialized structure in case struct's direct parent is + // a return statement containing non-nil error + // + // we're unable to check if returned error is custom, but at least we're able to + // cover str [error] type. + return true + } + } + } + + file := a.comments.Get(pass.Fset, stack[0].(*ast.File)) //nolint:forcetypeassert + rc := getCompositeLitRelatedComments(stack, file) + pos, msg := a.processStruct(pass, lit, structTyp, typeInfo, rc) + + if pos != nil { + pass.Reportf(*pos, msg) + } + + return true + } +} + +// getCompositeLitRelatedComments returns all comments that are related to checked node. We +// have to traverse the stack manually as ast do not associate comments with +// [ast.CompositeLit]. +func getCompositeLitRelatedComments(stack []ast.Node, cm ast.CommentMap) []*ast.CommentGroup { + comments := make([]*ast.CommentGroup, 0) + + for i := len(stack) - 1; i >= 0; i-- { + node := stack[i] + + switch node.(type) { + case *ast.CompositeLit, // stack[len(stack)-1] + *ast.ReturnStmt, // return ... + *ast.IndexExpr, // map[enum]...{...}[key] + *ast.CallExpr, // myfunc(map...) + *ast.UnaryExpr, // &map... + *ast.AssignStmt, // variable assignment (without var keyword) + *ast.DeclStmt, // var declaration, parent of *ast.GenDecl + *ast.GenDecl, // var declaration, parent of *ast.ValueSpec + *ast.ValueSpec: // var declaration + comments = append(comments, cm[node]...) 
+ + default: + return comments + } + } + + return comments +} + +func getStructType(pass *analysis.Pass, lit *ast.CompositeLit) (*types.Struct, *TypeInfo, bool) { + switch typ := pass.TypesInfo.TypeOf(lit).(type) { + case *types.Named: // named type + if structTyp, ok := typ.Underlying().(*types.Struct); ok { + pkg := typ.Obj().Pkg() + ti := TypeInfo{ + Name: typ.Obj().Name(), + PackageName: pkg.Name(), + PackagePath: pkg.Path(), + } + + return structTyp, &ti, true + } + + return nil, nil, false + + case *types.Struct: // anonymous struct + ti := TypeInfo{ + Name: "", + PackageName: pass.Pkg.Name(), + PackagePath: pass.Pkg.Path(), + } + + return typ, &ti, true + + default: + return nil, nil, false + } +} + +func stackParentIsReturn(stack []ast.Node) (*ast.ReturnStmt, bool) { + // it is safe to skip boundary check, since stack always has at least one element + // - whole file. + ret, ok := stack[len(stack)-2].(*ast.ReturnStmt) + + return ret, ok +} + +func returnContainsNonNilError(pass *analysis.Pass, ret *ast.ReturnStmt) bool { + // errors are mostly located at the end of return statement, so we're starting + // from the end. + for i := len(ret.Results) - 1; i >= 0; i-- { + if pass.TypesInfo.TypeOf(ret.Results[i]).String() == "error" { + return true + } + } + + return false +} + +func (a *analyzer) processStruct( + pass *analysis.Pass, + lit *ast.CompositeLit, + structTyp *types.Struct, + info *TypeInfo, + comments []*ast.CommentGroup, +) (*token.Pos, string) { + shouldProcess := a.shouldProcessType(info) + + if shouldProcess && comment.HasDirective(comments, comment.DirectiveIgnore) { + return nil, "" + } + + if !shouldProcess && !comment.HasDirective(comments, comment.DirectiveEnforce) { + return nil, "" + } + + // unnamed structures are only defined in same package, along with types that has + // prefix identical to current package name. + isSamePackage := info.PackagePath == pass.Pkg.Path() + + if f := a.litSkippedFields(lit, structTyp, !isSamePackage); len(f) > 0 { + pos := lit.Pos() + + if len(f) == 1 { + return &pos, fmt.Sprintf("%s is missing field %s", info.ShortString(), f.String()) + } + + return &pos, fmt.Sprintf("%s is missing fields %s", info.ShortString(), f.String()) + } + + return nil, "" +} + +// shouldProcessType returns true if type should be processed basing off include +// and exclude patterns, defined though constructor and\or flags. +func (a *analyzer) shouldProcessType(info *TypeInfo) bool { + if len(a.include) == 0 && len(a.exclude) == 0 { + return true + } + + name := info.String() + + a.typeProcessingNeedMu.RLock() + res, ok := a.typeProcessingNeed[name] + a.typeProcessingNeedMu.RUnlock() + + if !ok { + a.typeProcessingNeedMu.Lock() + res = true + + if a.include != nil && !a.include.MatchFullString(name) { + res = false + } + + if res && a.exclude != nil && a.exclude.MatchFullString(name) { + res = false + } + + a.typeProcessingNeed[name] = res + a.typeProcessingNeedMu.Unlock() + } + + return res +} + +func (a *analyzer) litSkippedFields( + lit *ast.CompositeLit, + typ *types.Struct, + onlyExported bool, +) structure.Fields { + return a.structFields.Get(typ).Skipped(lit, onlyExported) +} + +type TypeInfo struct { + Name string + PackageName string + PackagePath string +} + +func (t TypeInfo) String() string { + return t.PackagePath + "." + t.Name +} + +func (t TypeInfo) ShortString() string { + return t.PackageName + "." 
+ t.Name +} diff --git a/vendor/github.com/GaijinEntertainment/go-exhaustruct/v3/internal/comment/cache.go b/vendor/github.com/GaijinEntertainment/go-exhaustruct/v3/internal/comment/cache.go new file mode 100644 index 0000000000..88edef638a --- /dev/null +++ b/vendor/github.com/GaijinEntertainment/go-exhaustruct/v3/internal/comment/cache.go @@ -0,0 +1,35 @@ +package comment + +import ( + "go/ast" + "go/token" + "sync" +) + +type Cache struct { + comments map[*ast.File]ast.CommentMap + mu sync.RWMutex +} + +// Get returns a comment map for a given file. In case if a comment map is not +// found, it creates a new one. +func (c *Cache) Get(fset *token.FileSet, f *ast.File) ast.CommentMap { + c.mu.RLock() + if cm, ok := c.comments[f]; ok { + c.mu.RUnlock() + return cm + } + c.mu.RUnlock() + + c.mu.Lock() + defer c.mu.Unlock() + + if c.comments == nil { + c.comments = make(map[*ast.File]ast.CommentMap) + } + + cm := ast.NewCommentMap(fset, f, f.Comments) + c.comments[f] = cm + + return cm +} diff --git a/vendor/github.com/GaijinEntertainment/go-exhaustruct/v3/internal/comment/directive.go b/vendor/github.com/GaijinEntertainment/go-exhaustruct/v3/internal/comment/directive.go new file mode 100644 index 0000000000..a39a8076fa --- /dev/null +++ b/vendor/github.com/GaijinEntertainment/go-exhaustruct/v3/internal/comment/directive.go @@ -0,0 +1,28 @@ +package comment + +import ( + "go/ast" + "strings" +) + +type Directive string + +const ( + prefix = `//exhaustruct:` + DirectiveIgnore Directive = prefix + `ignore` + DirectiveEnforce Directive = prefix + `enforce` +) + +// HasDirective parses a directive from a given list of comments. +// If no directive is found, the second return value is `false`. +func HasDirective(comments []*ast.CommentGroup, expected Directive) bool { + for _, cg := range comments { + for _, commentLine := range cg.List { + if strings.HasPrefix(commentLine.Text, string(expected)) { + return true + } + } + } + + return false +} diff --git a/vendor/github.com/GaijinEntertainment/go-exhaustruct/v3/internal/pattern/list.go b/vendor/github.com/GaijinEntertainment/go-exhaustruct/v3/internal/pattern/list.go new file mode 100644 index 0000000000..a16e5058d2 --- /dev/null +++ b/vendor/github.com/GaijinEntertainment/go-exhaustruct/v3/internal/pattern/list.go @@ -0,0 +1,82 @@ +package pattern + +import ( + "fmt" + "regexp" + "strings" +) + +var ( + ErrEmptyPattern = fmt.Errorf("pattern can't be empty") + ErrCompilationFailed = fmt.Errorf("pattern compilation failed") +) + +// List is a list of regular expressions. +type List []*regexp.Regexp + +// NewList parses slice of strings to a slice of compiled regular expressions. +func NewList(strs ...string) (List, error) { + if len(strs) == 0 { + return nil, nil + } + + l := make(List, 0, len(strs)) + + for _, str := range strs { + re, err := strToRe(str) + if err != nil { + return nil, err + } + + l = append(l, re) + } + + return l, nil +} + +// MatchFullString matches provided string against all regexps in a slice and returns +// true if any of them matches whole string. 
+func (l List) MatchFullString(str string) bool { + for i := 0; i < len(l); i++ { + if m := l[i].FindStringSubmatch(str); len(m) > 0 && m[0] == str { + return true + } + } + + return false +} + +func (l *List) Set(value string) error { + re, err := strToRe(value) + if err != nil { + return err + } + + *l = append(*l, re) + + return nil +} + +func (l *List) String() string { + res := make([]string, 0, len(*l)) + + for _, re := range *l { + res = append(res, `"`+re.String()+`"`) + } + + return strings.Join(res, ", ") +} + +// strToRe parses string to a compiled regular expression that matches full string. +func strToRe(str string) (*regexp.Regexp, error) { + if str == "" { + return nil, ErrEmptyPattern + } + + re, err := regexp.Compile(str) + if err != nil { + return nil, fmt.Errorf("%w: %s: %w", ErrCompilationFailed, str, err) + } + + return re, nil +} diff --git a/vendor/github.com/GaijinEntertainment/go-exhaustruct/v3/internal/structure/fields-cache.go b/vendor/github.com/GaijinEntertainment/go-exhaustruct/v3/internal/structure/fields-cache.go new file mode 100644 index 0000000000..12a3796926 --- /dev/null +++ b/vendor/github.com/GaijinEntertainment/go-exhaustruct/v3/internal/structure/fields-cache.go @@ -0,0 +1,35 @@ +package structure + +import ( + "go/types" + "sync" +) + +type FieldsCache struct { + fields map[*types.Struct]Fields + mu sync.RWMutex +} + +// Get returns a struct fields for a given type. In case if a struct fields is +// not found, it creates a new one from type definition. +func (c *FieldsCache) Get(typ *types.Struct) Fields { + c.mu.RLock() + fields, ok := c.fields[typ] + c.mu.RUnlock() + + if ok { + return fields + } + + c.mu.Lock() + defer c.mu.Unlock() + + if c.fields == nil { + c.fields = make(map[*types.Struct]Fields) + } + + fields = NewFields(typ) + c.fields[typ] = fields + + return fields +} diff --git a/vendor/github.com/GaijinEntertainment/go-exhaustruct/v3/internal/structure/fields.go b/vendor/github.com/GaijinEntertainment/go-exhaustruct/v3/internal/structure/fields.go new file mode 100644 index 0000000000..b6b1a48c87 --- /dev/null +++ b/vendor/github.com/GaijinEntertainment/go-exhaustruct/v3/internal/structure/fields.go @@ -0,0 +1,127 @@ +package structure + +import ( + "go/ast" + "go/types" + "reflect" + "strings" +) + +const ( + tagName = "exhaustruct" + optionalTagValue = "optional" +) + +type Field struct { + Name string + Exported bool + Optional bool +} + +type Fields []*Field + +// NewFields creates a new [Fields] from a given struct type. +// Fields items are listed in order they appear in the struct. +func NewFields(strct *types.Struct) Fields { + sf := make(Fields, 0, strct.NumFields()) + + for i := 0; i < strct.NumFields(); i++ { + f := strct.Field(i) + + sf = append(sf, &Field{ + Name: f.Name(), + Exported: f.Exported(), + Optional: HasOptionalTag(strct.Tag(i)), + }) + } + + return sf +} + +func HasOptionalTag(tags string) bool { + return reflect.StructTag(tags).Get(tagName) == optionalTagValue +} + +// String returns a comma-separated list of field names. +func (sf Fields) String() string { + b := strings.Builder{} + + for i := 0; i < len(sf); i++ { + if b.Len() != 0 { + b.WriteString(", ") + } + + b.WriteString(sf[i].Name) + } + + return b.String() +} + +// Skipped returns a list of fields that are not present in the given +// literal, but expected to. 
+// +//revive:disable-next-line:cyclomatic +func (sf Fields) Skipped(lit *ast.CompositeLit, onlyExported bool) Fields { + if len(lit.Elts) != 0 && !isNamedLiteral(lit) { + if len(lit.Elts) == len(sf) { + return nil + } + + return sf[len(lit.Elts):] + } + + em := sf.existenceMap() + res := make(Fields, 0, len(sf)) + + for i := 0; i < len(lit.Elts); i++ { + kv, ok := lit.Elts[i].(*ast.KeyValueExpr) + if !ok { + continue + } + + k, ok := kv.Key.(*ast.Ident) + if !ok { + continue + } + + em[k.Name] = true + } + + for i := 0; i < len(sf); i++ { + if em[sf[i].Name] || (!sf[i].Exported && onlyExported) || sf[i].Optional { + continue + } + + res = append(res, sf[i]) + } + + if len(res) == 0 { + return nil + } + + return res +} + +func (sf Fields) existenceMap() map[string]bool { + m := make(map[string]bool, len(sf)) + + for i := 0; i < len(sf); i++ { + m[sf[i].Name] = false + } + + return m +} + +// isNamedLiteral returns true if the given literal is unnamed. +// +// The logic is basing on the principle that literal is named or unnamed, +// therefore is literal's first element is a [ast.KeyValueExpr], it is named. +// +// Method will panic if the given literal is empty. +func isNamedLiteral(lit *ast.CompositeLit) bool { + if _, ok := lit.Elts[0].(*ast.KeyValueExpr); !ok { + return false + } + + return true +} diff --git a/vendor/github.com/OpenPeeDeeP/depguard/README.md b/vendor/github.com/OpenPeeDeeP/depguard/README.md deleted file mode 100644 index 07e9f915df..0000000000 --- a/vendor/github.com/OpenPeeDeeP/depguard/README.md +++ /dev/null @@ -1,111 +0,0 @@ -# Depguard - -Go linter that checks package imports are in a list of acceptable packages. It -can also deny a list of packages and can do prefix or glob matching. -This allows you to allow imports from a whole organization or only -allow specific packages within a repository. It is recommended to use prefix -matching as it is faster than glob matching. The fewer glob matches the better. - -> If a pattern is matched by prefix it does not try to match via glob. - -## Install - -```bash -go get -u github.com/OpenPeeDeeP/depguard -``` - -## Config - -By default, Depguard looks for a file named `.depguard.json` in the current -current working directory. If it is somewhere else, pass in the `-c` flag with -the location of your configuration file. - -The following is an example configuration file. - -```json -{ - "type": "allowlist", - "packages": ["github.com/OpenPeeDeeP/depguard"], - "packageErrorMessages": { - "github.com/OpenPeeDeeP/depguards": "Please use \"github.com/OpenPeeDeeP/depguard\"," - }, - "inTests": ["github.com/stretchr/testify"], - "includeGoStdLib": true -} -``` - -- `type` can be either `allowlist` or `denylist`. This check is case insensitive. - If not specified the default is `denylist`. The values `whitelist` and `blacklist` - are also accepted for backwards compatibility. -- `packages` is a list of packages for the list type specified. -- `packageErrorMessages` is a mapping from packages to the error message to display -- `inTests` is a list of packages allowed/disallowed only in test files. -- Set `includeGoStdLib` (`includeGoRoot` for backwards compatibility) to true if you want to check the list against standard lib. - If not specified the default is false. - -### Ignore File Rules - -The configuration also allows us to specify rules to ignore certain files considered by the linter. This means that we need not apply package import checks across our entire code base. 
- -For example, consider the following configuration to block a test package: -```json -{ - "type": "denylist", - "packages": ["github.com/stretchr/testify"], - "inTests": ["github.com/stretchr/testify"] -} -``` - -We can use a `ignoreFileRules` field to write a configuration that only considers test files: -```json -{ - "type": "denylist", - "packages": ["github.com/stretchr/testify"], - "ignoreFileRules": ["!**/*_test.go"] -} -``` - -Or if we wanted to consider only non-test files: -```json -{ - "type": "denylist", - "packages": ["github.com/stretchr/testify"], - "ignoreFileRules": ["**/*_test.go"] -} -``` - -Like the `packages` field, the `ignoreFileRules` field can accept both string prefixes and string glob patterns. Note in the first example above, the use of the `!` character in front of the rule. This is a special character which signals that the linter should negate the rule. This allows for more precise control, but it is only available for glob patterns. - -## Gometalinter - -The binary installation of this linter can be used with -[Gometalinter](https://github.com/alecthomas/gometalinter). - -If you use a configuration file for Gometalinter then the following will need to -be added to your configuration file. - -```json -{ - "linters": { - "depguard": { - "command": "depguard -c path/to/config.json", - "pattern": "PATH:LINE:COL:MESSAGE", - "installFrom": "github.com/OpenPeeDeeP/depguard", - "isFast": true, - "partitionStrategy": "packages" - } - } -} -``` - -If you prefer the command line way the following will work for you as well. - -```bash -gometalinter --linter='depguard:depguard -c path/to/config.json:PATH:LINE:COL:MESSAGE' -``` - -## Golangci-lint - -This linter was built with -[Golangci-lint](https://github.com/golangci/golangci-lint) in mind. It is compatible -and read their docs to see how to implement all their linters, including this one. diff --git a/vendor/github.com/OpenPeeDeeP/depguard/depguard.go b/vendor/github.com/OpenPeeDeeP/depguard/depguard.go deleted file mode 100644 index d7011cd9f6..0000000000 --- a/vendor/github.com/OpenPeeDeeP/depguard/depguard.go +++ /dev/null @@ -1,313 +0,0 @@ -package depguard - -import ( - "go/build" - "go/token" - "io/ioutil" - "path" - "sort" - "strings" - "sync" - - "github.com/gobwas/glob" - "golang.org/x/tools/go/loader" -) - -// ListType states what kind of list is passed in. -type ListType int - -const ( - // LTBlacklist states the list given is a blacklist. (default) - LTBlacklist ListType = iota - // LTWhitelist states the list given is a whitelist. - LTWhitelist -) - -// StringToListType makes it easier to turn a string into a ListType. -// It assumes that the string representation is lower case. -var StringToListType = map[string]ListType{ - "allowlist": LTWhitelist, - "denylist": LTBlacklist, - "whitelist": LTWhitelist, - "blacklist": LTBlacklist, -} - -// Issue with the package with PackageName at the Position. -type Issue struct { - PackageName string - Position token.Position -} - -// Wrapper for glob patterns that allows for custom negation -type negatableGlob struct { - g glob.Glob - negate bool -} - -// Depguard checks imports to make sure they follow the given list and constraints. 
-type Depguard struct { - ListType ListType - IncludeGoRoot bool - - Packages []string - prefixPackages []string - globPackages []glob.Glob - - TestPackages []string - prefixTestPackages []string - globTestPackages []glob.Glob - - IgnoreFileRules []string - prefixIgnoreFileRules []string - globIgnoreFileRules []negatableGlob - - prefixRoot []string - - isInitialized bool - isInitializedMutex sync.Mutex -} - -// Run checks for dependencies given the program and validates them against -// Packages. -func (dg *Depguard) Run(config *loader.Config, prog *loader.Program) ([]*Issue, error) { - // Shortcut execution on an empty blacklist as that means every package is allowed - if dg.ListType == LTBlacklist && len(dg.Packages) == 0 { - return nil, nil - } - - if err := dg.initialize(config, prog); err != nil { - return nil, err - } - directImports, err := dg.createImportMap(prog) - if err != nil { - return nil, err - } - var issues []*Issue - for pkg, positions := range directImports { - for _, pos := range positions { - if ignoreFile(pos.Filename, dg.prefixIgnoreFileRules, dg.globIgnoreFileRules) { - continue - } - - prefixList, globList := dg.prefixPackages, dg.globPackages - if len(dg.TestPackages) > 0 && strings.Index(pos.Filename, "_test.go") != -1 { - prefixList, globList = dg.prefixTestPackages, dg.globTestPackages - } - - if dg.flagIt(pkg, prefixList, globList) { - issues = append(issues, &Issue{ - PackageName: pkg, - Position: pos, - }) - } - } - } - return issues, nil -} - -func (dg *Depguard) initialize(config *loader.Config, prog *loader.Program) error { - dg.isInitializedMutex.Lock() - defer dg.isInitializedMutex.Unlock() - - if dg.isInitialized { - return nil - } - - // parse ordinary guarded packages - for _, pkg := range dg.Packages { - if strings.ContainsAny(pkg, "!?*[]{}") { - g, err := glob.Compile(pkg, '/') - if err != nil { - return err - } - dg.globPackages = append(dg.globPackages, g) - } else { - dg.prefixPackages = append(dg.prefixPackages, pkg) - } - } - - // Sort the packages so we can have a faster search in the array - sort.Strings(dg.prefixPackages) - - // parse guarded tests packages - for _, pkg := range dg.TestPackages { - if strings.ContainsAny(pkg, "!?*[]{}") { - g, err := glob.Compile(pkg, '/') - if err != nil { - return err - } - dg.globTestPackages = append(dg.globTestPackages, g) - } else { - dg.prefixTestPackages = append(dg.prefixTestPackages, pkg) - } - } - - // Sort the test packages so we can have a faster search in the array - sort.Strings(dg.prefixTestPackages) - - // parse ignore file rules - for _, rule := range dg.IgnoreFileRules { - if strings.ContainsAny(rule, "!?*[]{}") { - ng := negatableGlob{} - if strings.HasPrefix(rule, "!") { - ng.negate = true - rule = rule[1:] // Strip out the leading '!' 
- } else { - ng.negate = false - } - - g, err := glob.Compile(rule, '/') - if err != nil { - return err - } - ng.g = g - - dg.globIgnoreFileRules = append(dg.globIgnoreFileRules, ng) - } else { - dg.prefixIgnoreFileRules = append(dg.prefixIgnoreFileRules, rule) - } - } - - // Sort the rules so we can have a faster search in the array - sort.Strings(dg.prefixIgnoreFileRules) - - if !dg.IncludeGoRoot { - var err error - dg.prefixRoot, err = listRootPrefixs(config.Build) - if err != nil { - return err - } - } - - dg.isInitialized = true - return nil -} - -func (dg *Depguard) createImportMap(prog *loader.Program) (map[string][]token.Position, error) { - importMap := make(map[string][]token.Position) - // For the directly imported packages - for _, imported := range prog.InitialPackages() { - // Go through their files - for _, file := range imported.Files { - // And populate a map of all direct imports and their positions - // This will filter out GoRoot depending on the Depguard.IncludeGoRoot - for _, fileImport := range file.Imports { - fileImportPath := cleanBasicLitString(fileImport.Path.Value) - if !dg.IncludeGoRoot && dg.isRoot(fileImportPath) { - continue - } - position := prog.Fset.Position(fileImport.Pos()) - positions, found := importMap[fileImportPath] - if !found { - importMap[fileImportPath] = []token.Position{ - position, - } - continue - } - importMap[fileImportPath] = append(positions, position) - } - } - } - return importMap, nil -} - -func ignoreFile(filename string, prefixList []string, negatableGlobList []negatableGlob) bool { - if strInPrefixList(filename, prefixList) { - return true - } - return strInNegatableGlobList(filename, negatableGlobList) -} - -func pkgInList(pkg string, prefixList []string, globList []glob.Glob) bool { - if strInPrefixList(pkg, prefixList) { - return true - } - return strInGlobList(pkg, globList) -} - -func strInPrefixList(str string, prefixList []string) bool { - // Idx represents where in the prefix slice the passed in string would go - // when sorted. -1 Just means that it would be at the very front of the slice. 
- idx := sort.Search(len(prefixList), func(i int) bool { - return prefixList[i] > str - }) - 1 - // This means that the string passed in has no way to be prefixed by anything - // in the prefix list as it is already smaller then everything - if idx == -1 { - return false - } - return strings.HasPrefix(str, prefixList[idx]) -} - -func strInGlobList(str string, globList []glob.Glob) bool { - for _, g := range globList { - if g.Match(str) { - return true - } - } - return false -} - -func strInNegatableGlobList(str string, negatableGlobList []negatableGlob) bool { - for _, ng := range negatableGlobList { - // Return true when: - // - Match is true and negate is off - // - Match is false and negate is on - if ng.g.Match(str) != ng.negate { - return true - } - } - return false -} - -// InList | WhiteList | BlackList -// y | | x -// n | x | -func (dg *Depguard) flagIt(pkg string, prefixList []string, globList []glob.Glob) bool { - return pkgInList(pkg, prefixList, globList) == (dg.ListType == LTBlacklist) -} - -func cleanBasicLitString(value string) string { - return strings.Trim(value, "\"\\") -} - -// We can do this as all imports that are not root are either prefixed with a domain -// or prefixed with `./` or `/` to dictate it is a local file reference -func listRootPrefixs(buildCtx *build.Context) ([]string, error) { - if buildCtx == nil { - buildCtx = &build.Default - } - root := path.Join(buildCtx.GOROOT, "src") - fs, err := ioutil.ReadDir(root) - if err != nil { - return nil, err - } - var pkgPrefix []string - for _, f := range fs { - if !f.IsDir() { - continue - } - pkgPrefix = append(pkgPrefix, f.Name()) - } - return pkgPrefix, nil -} - -func (dg *Depguard) isRoot(importPath string) bool { - // Idx represents where in the package slice the passed in package would go - // when sorted. -1 Just means that it would be at the very front of the slice. - idx := sort.Search(len(dg.prefixRoot), func(i int) bool { - return dg.prefixRoot[i] > importPath - }) - 1 - // This means that the package passed in has no way to be prefixed by anything - // in the package list as it is already smaller then everything - if idx == -1 { - return false - } - // if it is prefixed by a root prefix we need to check if it is an exact match - // or prefix with `/` as this could return false posative if the domain was - // `archive.com` for example as `archive` is a go root package. 
- if strings.HasPrefix(importPath, dg.prefixRoot[idx]) { - return strings.HasPrefix(importPath, dg.prefixRoot[idx]+"/") || importPath == dg.prefixRoot[idx] - } - return false -} diff --git a/vendor/github.com/OpenPeeDeeP/depguard/.gitignore b/vendor/github.com/OpenPeeDeeP/depguard/v2/.gitignore similarity index 93% rename from vendor/github.com/OpenPeeDeeP/depguard/.gitignore rename to vendor/github.com/OpenPeeDeeP/depguard/v2/.gitignore index 97cca67c67..e189bdb220 100644 --- a/vendor/github.com/OpenPeeDeeP/depguard/.gitignore +++ b/vendor/github.com/OpenPeeDeeP/depguard/v2/.gitignore @@ -12,3 +12,4 @@ *.out .idea +.null-ls*.go diff --git a/vendor/github.com/OpenPeeDeeP/depguard/LICENSE b/vendor/github.com/OpenPeeDeeP/depguard/v2/LICENSE similarity index 100% rename from vendor/github.com/OpenPeeDeeP/depguard/LICENSE rename to vendor/github.com/OpenPeeDeeP/depguard/v2/LICENSE diff --git a/vendor/github.com/OpenPeeDeeP/depguard/v2/README.md b/vendor/github.com/OpenPeeDeeP/depguard/v2/README.md new file mode 100644 index 0000000000..2ccfa22c59 --- /dev/null +++ b/vendor/github.com/OpenPeeDeeP/depguard/v2/README.md @@ -0,0 +1,163 @@ +# Depguard + +A Go linter that checks package imports are in a list of acceptable packages. +This allows you to allow imports from a whole organization or only +allow specific packages within a repository. + +## Install + +```bash +go install github.com/OpenPeeDeeP/depguard@latest +``` + +## Config + +The Depguard binary looks for a file named `^\.?depguard\.(yaml|yml|json|toml)$` in the current working directory. Examples include (`.depguard.yml` or `depguard.toml`). + +The following is an example configuration file. + +```json +{ + "main": { + "files": [ + "$all", + "!$test" + ], + "listMode": "Strict", + "allow": [ + "$gostd", + "github.com/OpenPeeDeeP" + ], + "deny": { + "reflect": "Who needs reflection", + } + }, + "tests": { + "files": [ + "$test" + ], + "listMode": "Lax", + "deny": { + "github.com/stretchr/testify": "Please use standard library for tests" + } + } +} +``` + +- The top level is a map of lists. The key of the map is a name that shows up in +the linter's output. +- `files` - list of file globs that will match this list of settings to compare against +- `allow` - list of allowed packages +- `deny` - map of packages that are not allowed where the value is a suggestion += `listMode` - the mode to use for package matching + +Files are matched using [Globs](https://github.com/gobwas/glob). If the files +list is empty, then all files will match that list. Prefixing a file +with an exclamation mark `!` will put that glob in a "don't match" list. A file +will match a list if it is allowed and not denied. + +> Should always prefix a file glob with `**/` as files are matched against absolute paths. + +Allow is a prefix of packages to allow. A dollar sign `$` can be used at the end +of a package to specify it must be exact match only. + +Deny is a map where the key is a prefix of the package to deny, and the value +is a suggestion on what to use instead. A dollar sign `$` can be used at the end +of a package to specify it must be exact match only. + +A Prefix List just means that a package will match a value, if the value is a +prefix of the package. Example `github.com/OpenPeeDeeP/depguard` package will match +a value of `github.com/OpenPeeDeeP` but won't match `github.com/OpenPeeDeeP/depguard/v2`. + +ListMode is used to determine the package matching priority. There are three +different modes; Original, Strict, and Lax. 
+ +Original is the original way that the package was written to use. It is not recommended +to stay with this and is only here for backwards compatibility. + +Strict, at its roots, is everything is denied unless in allowed. + +Lax, at its roots, is everything is allowed unless it is denied. + +There are cases where a package can be matched in both the allow and denied lists. +You may allow a subpackage but deny the root or vice versa. The `settings_tests.go` file +has many scenarios listed out under `TestListImportAllowed`. These tests will stay +up to date as features are added. + +### Variables + +There are variable replacements for each type of list (file or package). This is +to reduce repetition and tedious behaviors. + +#### File Variables + +> you can still use an exclamation mark `!` in front of a variable to say not to +use it. Example `!$test` will match any file that is not a go test file. + +- `$all` - matches all go files +- `$test` - matches all go test files + +#### Package Variables + +- `$gostd` - matches all of go's standard library (Pulled from GOROOT) + +### Example Configs + +Below: + +- non-test go files will match `Main` and test go files will match `Test`. +- both allow all of go standard library except for the `reflect` package which will +tell the user "Please don't use reflect package". +- go test files are also allowed to use https://github.com/stretchr/testify package +and any sub-package of it. + +```yaml +Main: + files: + - $all + - "!$test" + allow: + - $gostd + deny: + reflect: Please don't use reflect package +Test: + files: + - $test + allow: + - $gostd + - github.com/stretchr/testify + deny: + reflect: Please don't use reflect package +``` + +Below: + +- All go files will match `Main` +- Go files in internal will match both `Main` and `Internal` + +```yaml +Main: + files: + - $all +Internal: + files: + - "**/internal/**/*.go" +``` + +Below: + +- All packages are allowed except for `github.com/OpenPeeDeeP/depguard`. Though +`github.com/OpenPeeDeeP/depguard/v2` and `github.com/OpenPeeDeeP/depguard/somepackage` +would be allowed. + +```yaml +Main: + deny: + - github.com/OpenPeeDeeP/depguard$ +``` + +## Golangci-lint + +This linter was built with +[Golangci-lint](https://github.com/golangci/golangci-lint) in mind. It is compatible +and read their docs to see how to implement all their linters, including this one. diff --git a/vendor/github.com/OpenPeeDeeP/depguard/v2/depguard.go b/vendor/github.com/OpenPeeDeeP/depguard/v2/depguard.go new file mode 100644 index 0000000000..2729091e8a --- /dev/null +++ b/vendor/github.com/OpenPeeDeeP/depguard/v2/depguard.go @@ -0,0 +1,95 @@ +package depguard + +import ( + "fmt" + "go/ast" + "path/filepath" + "strings" + + "golang.org/x/tools/go/analysis" +) + +// NewAnalyzer creates a new analyzer from the settings passed in. +// This can fail if the passed in LinterSettings does not compile. +// Use NewUncompiledAnalyzer if you need control when the compile happens. +func NewAnalyzer(settings *LinterSettings) (*analysis.Analyzer, error) { + s, err := settings.compile() + if err != nil { + return nil, err + } + analyzer := newAnalyzer(s.run) + return analyzer, nil +} + +type UncompiledAnalyzer struct { + Analyzer *analysis.Analyzer + settings *LinterSettings +} + +// NewUncompiledAnalyzer creates a new analyzer from the settings passed in. +// This can never error unlike NewAnalyzer. +// It is advised to call the Compile method on the returned Analyzer before running. 
+func NewUncompiledAnalyzer(settings *LinterSettings) *UncompiledAnalyzer { + return &UncompiledAnalyzer{ + Analyzer: newAnalyzer(settings.run), + settings: settings, + } +} + +// Compile the settings ahead of time so each subsuquent run of the analyzer doesn't +// need to do this work. +func (ua *UncompiledAnalyzer) Compile() error { + s, err := ua.settings.compile() + if err != nil { + return err + } + ua.Analyzer.Run = s.run + return nil +} + +func (settings LinterSettings) run(pass *analysis.Pass) (interface{}, error) { + s, err := settings.compile() + if err != nil { + return nil, err + } + return s.run(pass) +} + +func newAnalyzer(run func(*analysis.Pass) (interface{}, error)) *analysis.Analyzer { + return &analysis.Analyzer{ + Name: "depguard", + Doc: "Go linter that checks if package imports are in a list of acceptable packages", + URL: "https://github.com/OpenPeeDeeP/depguard", + Run: run, + RunDespiteErrors: false, + } +} + +func (s linterSettings) run(pass *analysis.Pass) (interface{}, error) { + for _, file := range pass.Files { + // For Windows need to replace separator with '/' + fileName := filepath.ToSlash(pass.Fset.Position(file.Pos()).Filename) + lists := s.whichLists(fileName) + for _, imp := range file.Imports { + for _, l := range lists { + if allowed, sugg := l.importAllowed(rawBasicLit(imp.Path)); !allowed { + diag := analysis.Diagnostic{ + Pos: imp.Pos(), + End: imp.End(), + Message: fmt.Sprintf("import '%s' is not allowed from list '%s'", rawBasicLit(imp.Path), l.name), + } + if sugg != "" { + diag.Message = fmt.Sprintf("%s: %s", diag.Message, sugg) + diag.SuggestedFixes = append(diag.SuggestedFixes, analysis.SuggestedFix{Message: sugg}) + } + pass.Report(diag) + } + } + } + } + return nil, nil +} + +func rawBasicLit(lit *ast.BasicLit) string { + return strings.Trim(lit.Value, "\"") +} diff --git a/vendor/github.com/OpenPeeDeeP/depguard/v2/internal/utils/errors.go b/vendor/github.com/OpenPeeDeeP/depguard/v2/internal/utils/errors.go new file mode 100644 index 0000000000..65325f6128 --- /dev/null +++ b/vendor/github.com/OpenPeeDeeP/depguard/v2/internal/utils/errors.go @@ -0,0 +1,18 @@ +package utils + +import ( + "strings" +) + +type MultiError []error + +func (me MultiError) Error() string { + b := strings.Builder{} + for i, e := range me { + b.WriteString(e.Error()) + if i < len(me)-1 { + b.WriteByte('\n') + } + } + return b.String() +} diff --git a/vendor/github.com/OpenPeeDeeP/depguard/v2/internal/utils/variables.go b/vendor/github.com/OpenPeeDeeP/depguard/v2/internal/utils/variables.go new file mode 100644 index 0000000000..3363bd8400 --- /dev/null +++ b/vendor/github.com/OpenPeeDeeP/depguard/v2/internal/utils/variables.go @@ -0,0 +1,131 @@ +package utils + +import ( + "fmt" + "os" + "os/exec" + "path" + "path/filepath" + "runtime" + "strings" +) + +type Expander interface { + Expand() ([]string, error) +} + +type ExpanderMap map[string]Expander + +var ( + PathExpandable = ExpanderMap{ + "$all": &allExpander{}, + "$test": &testExpander{}, + } + PackageExpandable = ExpanderMap{ + "$gostd": &gostdExpander{}, + } +) + +type allExpander struct{} + +func (*allExpander) Expand() ([]string, error) { + return []string{"**/*.go"}, nil +} + +type testExpander struct{} + +func (*testExpander) Expand() ([]string, error) { + return []string{"**/*_test.go"}, nil +} + +type gostdExpander struct { + cache []string +} + +// We can do this as all imports that are not root are either prefixed with a domain +// or prefixed with `./` or `/` to dictate it is a local file reference 
+func (e *gostdExpander) Expand() ([]string, error) { + if len(e.cache) != 0 { + return e.cache, nil + } + root := path.Join(findGOROOT(), "src") + fs, err := os.ReadDir(root) + if err != nil { + return nil, fmt.Errorf("could not read GOROOT directory: %w", err) + } + var pkgPrefix []string + for _, f := range fs { + if !f.IsDir() { + continue + } + pkgPrefix = append(pkgPrefix, f.Name()) + } + e.cache = pkgPrefix + return pkgPrefix, nil +} + +func findGOROOT() string { + // code borrowed from https://github.com/golang/tools/blob/86c93e8732cce300d0270bce23117456ce92bb17/cmd/godoc/goroot.go#L15-L30 + if env := os.Getenv("GOROOT"); env != "" { + return filepath.Clean(env) + } + def := filepath.Clean(runtime.GOROOT()) + if runtime.Compiler == "gccgo" { + // gccgo has no real GOROOT, and it certainly doesn't + // depend on the executable's location. + return def + } + out, err := exec.Command("go", "env", "GOROOT").Output() + if err != nil { + return def + } + return strings.TrimSpace(string(out)) +} + +func ExpandSlice(sl []string, exp ExpanderMap) ([]string, error) { + for i, s := range sl { + f, found := exp[s] + if !found { + continue + } + e, err := f.Expand() + if err != nil { + return nil, fmt.Errorf("couldn't expand %s: %w", s, err) + } + sl = insertSlice(sl, i, e...) + } + return sl, nil +} + +func ExpandMap(m map[string]string, exp ExpanderMap) error { + for k, v := range m { + f, found := exp[k] + if !found { + continue + } + e, err := f.Expand() + if err != nil { + return fmt.Errorf("couldn't expand %s: %w", k, err) + } + for _, ex := range e { + m[ex] = v + } + delete(m, k) + } + return nil +} + +func insertSlice(a []string, k int, b ...string) []string { + n := len(a) + len(b) - 1 + if n <= cap(a) { + a2 := a[:n] + copy(a2[k+len(b):], a[k+1:]) + copy(a2[k:], b) + return a2 + } + a2 := make([]string, n) + copy(a2, a[:k]) + copy(a2[k:], b) + copy(a2[k+len(b):], a[k+1:]) + return a2 +} diff --git a/vendor/github.com/OpenPeeDeeP/depguard/v2/settings.go b/vendor/github.com/OpenPeeDeeP/depguard/v2/settings.go new file mode 100644 index 0000000000..311cacc889 --- /dev/null +++ b/vendor/github.com/OpenPeeDeeP/depguard/v2/settings.go @@ -0,0 +1,240 @@ +package depguard + +import ( + "errors" + "fmt" + "sort" + "strings" + + "github.com/OpenPeeDeeP/depguard/v2/internal/utils" + "github.com/gobwas/glob" +) + +type List struct { + ListMode string `json:"listMode" yaml:"listMode" toml:"listMode" mapstructure:"listMode"` + Files []string `json:"files" yaml:"files" toml:"files" mapstructure:"files"` + Allow []string `json:"allow" yaml:"allow" toml:"allow" mapstructure:"allow"` + Deny map[string]string `json:"deny" yaml:"deny" toml:"deny" mapstructure:"deny"` +} + +type listMode int + +const ( + listModeOriginal listMode = iota + listModeStrict + listModeLax +) + +type list struct { + listMode listMode + name string + files []glob.Glob + negFiles []glob.Glob + allow []string + deny []string + suggestions []string +} + +func (l *List) compile() (*list, error) { + if l == nil { + return nil, nil + } + li := &list{} + var errs utils.MultiError + var err error + + // Determine List Mode + switch strings.ToLower(l.ListMode) { + case "": + li.listMode = listModeOriginal + case "original": + li.listMode = listModeOriginal + case "strict": + li.listMode = listModeStrict + case "lax": + li.listMode = listModeLax + default: + errs = append(errs, fmt.Errorf("%s is not a known list mode", l.ListMode)) + } + + // Compile Files + for _, f := range l.Files { + var negate bool + if len(f) > 0 && f[0] == '!' 
{ + negate = true + f = f[1:] + } + // Expand File if needed + fs, err := utils.ExpandSlice([]string{f}, utils.PathExpandable) + if err != nil { + errs = append(errs, err) + } + for _, exp := range fs { + g, err := glob.Compile(exp, '/') + if err != nil { + errs = append(errs, fmt.Errorf("%s could not be compiled: %w", exp, err)) + continue + } + if negate { + li.negFiles = append(li.negFiles, g) + continue + } + li.files = append(li.files, g) + } + } + + if len(l.Allow) > 0 { + // Expand Allow + l.Allow, err = utils.ExpandSlice(l.Allow, utils.PackageExpandable) + if err != nil { + errs = append(errs, err) + } + + // Sort Allow + li.allow = make([]string, len(l.Allow)) + copy(li.allow, l.Allow) + sort.Strings(li.allow) + } + + if l.Deny != nil { + // Expand Deny Map (to keep suggestions) + err = utils.ExpandMap(l.Deny, utils.PackageExpandable) + if err != nil { + errs = append(errs, err) + } + + // Split Deny Into Package Slice + li.deny = make([]string, 0, len(l.Deny)) + for pkg := range l.Deny { + li.deny = append(li.deny, pkg) + } + + // Sort Deny + sort.Strings(li.deny) + + // Populate Suggestions to match the Deny order + li.suggestions = make([]string, 0, len(li.deny)) + for _, dp := range li.deny { + li.suggestions = append(li.suggestions, strings.TrimSpace(l.Deny[dp])) + } + } + + // Populate the type of this list + if len(li.allow) == 0 && len(li.deny) == 0 { + errs = append(errs, errors.New("must have an Allow and/or Deny package list")) + } + + if len(errs) > 0 { + return nil, errs + } + return li, nil +} + +func (l *list) fileMatch(fileName string) bool { + inAllowed := len(l.files) == 0 || strInGlobList(fileName, l.files) + inDenied := strInGlobList(fileName, l.negFiles) + return inAllowed && !inDenied +} + +func (l *list) importAllowed(imp string) (bool, string) { + inAllowed, aIdx := strInPrefixList(imp, l.allow) + inDenied, dIdx := strInPrefixList(imp, l.deny) + var allowed bool + switch l.listMode { + case listModeOriginal: + inAllowed = len(l.allow) == 0 || inAllowed + allowed = inAllowed && !inDenied + case listModeStrict: + allowed = inAllowed && (!inDenied || len(l.allow[aIdx]) > len(l.deny[dIdx])) + case listModeLax: + allowed = !inDenied || (inAllowed && len(l.allow[aIdx]) > len(l.deny[dIdx])) + default: + allowed = false + } + sugg := "" + if !allowed && inDenied && dIdx != -1 { + sugg = l.suggestions[dIdx] + } + return allowed, sugg +} + +type LinterSettings map[string]*List + +type linterSettings []*list + +func (l LinterSettings) compile() (linterSettings, error) { + if len(l) == 0 { + // Only allow $gostd in all files + set := &List{ + Files: []string{"$all"}, + Allow: []string{"$gostd"}, + } + li, err := set.compile() + if err != nil { + return nil, err + } + li.name = "Main" + return linterSettings{li}, nil + } + names := make([]string, 0, len(l)) + for name := range l { + names = append(names, name) + } + sort.Strings(names) + li := make(linterSettings, 0, len(l)) + var errs utils.MultiError + for _, name := range names { + c, err := l[name].compile() + if err != nil { + errs = append(errs, err) + continue + } + if c == nil { + continue + } + c.name = name + li = append(li, c) + } + if len(errs) > 0 { + return nil, errs + } + + return li, nil +} + +func (ls linterSettings) whichLists(fileName string) []*list { + var matches []*list + for _, l := range ls { + if l.fileMatch(fileName) { + matches = append(matches, l) + } + } + return matches +} + +func strInGlobList(str string, globList []glob.Glob) bool { + for _, g := range globList { + if g.Match(str) { + 
return true + } + } + return false +} + +func strInPrefixList(str string, prefixList []string) (bool, int) { + // Idx represents where in the prefix slice the passed in string would go + // when sorted. -1 Just means that it would be at the very front of the slice. + idx := sort.Search(len(prefixList), func(i int) bool { + return strings.TrimRight(prefixList[i], "$") > str + }) - 1 + // This means that the string passed in has no way to be prefixed by anything + // in the prefix list as it is already smaller then everything + if idx == -1 { + return false, idx + } + ioc := prefixList[idx] + if ioc[len(ioc)-1] == '$' { + return str == ioc[:len(ioc)-1], idx + } + return strings.HasPrefix(str, prefixList[idx]), idx +} diff --git a/vendor/github.com/alecthomas/go-check-sumtype/.goreleaser.yml b/vendor/github.com/alecthomas/go-check-sumtype/.goreleaser.yml new file mode 100644 index 0000000000..33bd03d060 --- /dev/null +++ b/vendor/github.com/alecthomas/go-check-sumtype/.goreleaser.yml @@ -0,0 +1,32 @@ +project_name: go-check-sumtype +release: + github: + owner: alecthomas + name: go-check-sumtype +env: + - CGO_ENABLED=0 +builds: +- goos: + - linux + - darwin + - windows + goarch: + - arm64 + - amd64 + - "386" + goarm: + - "6" + main: ./cmd/go-check-sumtype + binary: go-check-sumtype +archives: + - + format: tar.gz + name_template: '{{ .Binary }}-{{ .Version }}-{{ .Os }}-{{ .Arch }}{{ if .Arm }}v{{ + .Arm }}{{ end }}' + files: + - COPYING + - README* +snapshot: + name_template: SNAPSHOT-{{ .Commit }} +checksum: + name_template: '{{ .ProjectName }}-{{ .Version }}-checksums.txt' diff --git a/vendor/github.com/alecthomas/go-check-sumtype/COPYING b/vendor/github.com/alecthomas/go-check-sumtype/COPYING new file mode 100644 index 0000000000..bb9c20a094 --- /dev/null +++ b/vendor/github.com/alecthomas/go-check-sumtype/COPYING @@ -0,0 +1,3 @@ +This project is dual-licensed under the Unlicense and MIT licenses. + +You may use this code under the terms of either license. diff --git a/vendor/github.com/alecthomas/go-check-sumtype/LICENSE-MIT b/vendor/github.com/alecthomas/go-check-sumtype/LICENSE-MIT new file mode 100644 index 0000000000..3b0a5dc09c --- /dev/null +++ b/vendor/github.com/alecthomas/go-check-sumtype/LICENSE-MIT @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2015 Andrew Gallant + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
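The depguard/v2 sources vendored above expose `NewAnalyzer(*LinterSettings)` together with the `List` settings type (`Files`, `Allow`, `Deny`, `ListMode`). A minimal sketch, not part of the vendored files or of this change, of how the "Main" list from the v2 README could be built programmatically; the `singlechecker` wiring is an assumed driver for illustration only:

```go
package main

import (
	"log"

	"github.com/OpenPeeDeeP/depguard/v2"
	"golang.org/x/tools/go/analysis/singlechecker"
)

func main() {
	// Hand-built equivalent of the README's "Main" list shown above.
	settings := &depguard.LinterSettings{
		"Main": &depguard.List{
			ListMode: "Strict",
			Files:    []string{"$all", "!$test"},
			Allow:    []string{"$gostd", "github.com/OpenPeeDeeP"},
			Deny: map[string]string{
				"reflect": "Who needs reflection",
			},
		},
	}

	// NewAnalyzer compiles the lists up front and fails on bad settings,
	// e.g. an unknown listMode or a list with neither Allow nor Deny.
	analyzer, err := depguard.NewAnalyzer(settings)
	if err != nil {
		log.Fatal(err)
	}

	singlechecker.Main(analyzer)
}
```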
diff --git a/vendor/github.com/alecthomas/go-check-sumtype/README.md b/vendor/github.com/alecthomas/go-check-sumtype/README.md new file mode 100644 index 0000000000..36614ef400 --- /dev/null +++ b/vendor/github.com/alecthomas/go-check-sumtype/README.md @@ -0,0 +1,120 @@ +**Note: This is a fork of the great project [go-sumtype](https://github.com/BurntSushi/go-sumtype) by BurntSushi.** +**The original seems largely unmaintained, and the changes in this fork are backwards incompatible.** + +# go-check-sumtype [![CI](https://github.com/alecthomas/go-check-sumtype/actions/workflows/ci.yml/badge.svg)](https://github.com/alecthomas/go-check-sumtype/actions/workflows/ci.yml) +A simple utility for running exhaustiveness checks on type switch statements. +Exhaustiveness checks are only run on interfaces that are declared to be +"sum types." + +Dual-licensed under MIT or the [UNLICENSE](http://unlicense.org). + +This work was inspired by our code at +[Diffeo](https://diffeo.com). + +## Installation + +```go +$ go get github.com/alecthomas/go-check-sumtype +``` + +For usage info, just run the command: + +``` +$ go-check-sumtype +``` + +Typical usage might look like this: + +``` +$ go-check-sumtype $(go list ./... | grep -v vendor) +``` + +## Usage + +`go-check-sumtype` takes a list of Go package paths or files and looks for sum type +declarations in each package/file provided. Exhaustiveness checks are then +performed for each use of a declared sum type in a type switch statement. +Namely, `go-check-sumtype` will report an error for any type switch statement that +either lacks a `default` clause or does not account for all possible variants. + +Declarations are provided in comments like so: + +``` +//sumtype:decl +type MySumType interface { ... } +``` + +`MySumType` must be *sealed*. That is, part of its interface definition +contains an unexported method. + +`go-check-sumtype` will produce an error if any of the above is not true. + +For valid declarations, `go-check-sumtype` will look for all occurrences in which a +value of type `MySumType` participates in a type switch statement. In those +occurrences, it will attempt to detect whether the type switch is exhaustive +or not. If it's not, `go-check-sumtype` will report an error. For example, running +`go-check-sumtype` on this source file: + +```go +package main + +//sumtype:decl +type MySumType interface { + sealed() +} + +type VariantA struct{} + +func (*VariantA) sealed() {} + +type VariantB struct{} + +func (*VariantB) sealed() {} + +func main() { + switch MySumType(nil).(type) { + case *VariantA: + } +} +``` + +produces the following: + +``` +$ sumtype mysumtype.go +mysumtype.go:18:2: exhaustiveness check failed for sum type 'MySumType': missing cases for VariantB +``` + +Adding either a `default` clause or a clause to handle `*VariantB` will cause +exhaustive checks to pass. + +As a special case, if the type switch statement contains a `default` clause +that always panics, then exhaustiveness checks are still performed. + +## Details and motivation + +Sum types are otherwise known as discriminated unions. That is, a sum type is +a finite set of disjoint values. In type systems that support sum types, the +language will guarantee that if one has a sum type `T`, then its value must +be one of its variants. + +Go's type system does not support sum types. A typical proxy for representing +sum types in Go is to use an interface with an unexported method and define +each variant of the sum type in the same package to satisfy said interface. 
+This guarantees that the set of types that satisfy the interface is closed +at compile time. Performing case analysis on these types is then done with +a type switch statement, e.g., `switch x.(type) { ... }`. Each clause of the +type switch corresponds to a *variant* of the sum type. The downside of this +approach is that Go's type system is not aware of the set of variants, so it +cannot tell you whether case analysis over a sum type is complete or not. + +The `go-check-sumtype` command recognizes this pattern, but it needs a small amount +of help to recognize which interfaces should be treated as sum types, which +is why the `//sumtype:decl` annotation is required. `go-check-sumtype` will +figure out all of the variants of a sum type by finding the set of types +defined in the same package that satisfy the interface specified by the +declaration. + +The `go-check-sumtype` command will prove its worth when you need to add a variant +to an existing sum type. Running `go-check-sumtype` will tell you immediately which +case analyses need to be updated to account for the new variant. diff --git a/vendor/github.com/alecthomas/go-check-sumtype/UNLICENSE b/vendor/github.com/alecthomas/go-check-sumtype/UNLICENSE new file mode 100644 index 0000000000..68a49daad8 --- /dev/null +++ b/vendor/github.com/alecthomas/go-check-sumtype/UNLICENSE @@ -0,0 +1,24 @@ +This is free and unencumbered software released into the public domain. + +Anyone is free to copy, modify, publish, use, compile, sell, or +distribute this software, either in source code form or as a compiled +binary, for any purpose, commercial or non-commercial, and by any +means. + +In jurisdictions that recognize copyright laws, the author or authors +of this software dedicate any and all copyright interest in the +software to the public domain. We make this dedication for the benefit +of the public at large and to the detriment of our heirs and +successors. We intend this dedication to be an overt act of +relinquishment in perpetuity of all present and future rights to this +software under copyright law. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR +OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. + +For more information, please refer to diff --git a/vendor/github.com/alecthomas/go-check-sumtype/check.go b/vendor/github.com/alecthomas/go-check-sumtype/check.go new file mode 100644 index 0000000000..21d751af42 --- /dev/null +++ b/vendor/github.com/alecthomas/go-check-sumtype/check.go @@ -0,0 +1,184 @@ +package gochecksumtype + +import ( + "fmt" + "go/ast" + "go/token" + "go/types" + "sort" + "strings" + + "golang.org/x/tools/go/packages" +) + +// inexhaustiveError is returned from check for each occurrence of inexhaustive +// case analysis in a Go type switch statement. 
+type inexhaustiveError struct { + Position token.Position + Def sumTypeDef + Missing []types.Object +} + +func (e inexhaustiveError) Pos() token.Position { return e.Position } +func (e inexhaustiveError) Error() string { + return fmt.Sprintf( + "%s: exhaustiveness check failed for sum type %q (from %s): missing cases for %s", + e.Pos(), e.Def.Decl.TypeName, e.Def.Decl.Pos, strings.Join(e.Names(), ", ")) +} + +// Names returns a sorted list of names corresponding to the missing variant +// cases. +func (e inexhaustiveError) Names() []string { + var list []string + for _, o := range e.Missing { + list = append(list, o.Name()) + } + sort.Strings(list) + return list +} + +// check does exhaustiveness checking for the given sum type definitions in the +// given package. Every instance of inexhaustive case analysis is returned. +func check(pkg *packages.Package, defs []sumTypeDef) []error { + var errs []error + for _, astfile := range pkg.Syntax { + ast.Inspect(astfile, func(n ast.Node) bool { + swtch, ok := n.(*ast.TypeSwitchStmt) + if !ok { + return true + } + if err := checkSwitch(pkg, defs, swtch); err != nil { + errs = append(errs, err) + } + return true + }) + } + return errs +} + +// checkSwitch performs an exhaustiveness check on the given type switch +// statement. If the type switch is used on a sum type and does not cover +// all variants of that sum type, then an error is returned indicating which +// variants were missed. +// +// Note that if the type switch contains a non-panicing default case, then +// exhaustiveness checks are disabled. +func checkSwitch( + pkg *packages.Package, + defs []sumTypeDef, + swtch *ast.TypeSwitchStmt, +) error { + def, missing := missingVariantsInSwitch(pkg, defs, swtch) + if len(missing) > 0 { + return inexhaustiveError{ + Position: pkg.Fset.Position(swtch.Pos()), + Def: *def, + Missing: missing, + } + } + return nil +} + +// missingVariantsInSwitch returns a list of missing variants corresponding to +// the given switch statement. The corresponding sum type definition is also +// returned. (If no sum type definition could be found, then no exhaustiveness +// checks are performed, and therefore, no missing variants are returned.) +func missingVariantsInSwitch( + pkg *packages.Package, + defs []sumTypeDef, + swtch *ast.TypeSwitchStmt, +) (*sumTypeDef, []types.Object) { + asserted := findTypeAssertExpr(swtch) + ty := pkg.TypesInfo.TypeOf(asserted) + def := findDef(defs, ty) + if def == nil { + // We couldn't find a corresponding sum type, so there's + // nothing we can do to check it. + return nil, nil + } + variantExprs, hasDefault := switchVariants(swtch) + if hasDefault && !defaultClauseAlwaysPanics(swtch) { + // A catch-all case defeats all exhaustiveness checks. + return def, nil + } + var variantTypes []types.Type + for _, expr := range variantExprs { + variantTypes = append(variantTypes, pkg.TypesInfo.TypeOf(expr)) + } + return def, def.missing(variantTypes) +} + +// switchVariants returns all case expressions found in a type switch. This +// includes expressions from cases that have a list of expressions. +func switchVariants(swtch *ast.TypeSwitchStmt) (exprs []ast.Expr, hasDefault bool) { + for _, stmt := range swtch.Body.List { + clause := stmt.(*ast.CaseClause) + if clause.List == nil { + hasDefault = true + } else { + exprs = append(exprs, clause.List...) + } + } + return +} + +// defaultClauseAlwaysPanics returns true if the given switch statement has a +// default clause that always panics. 
Note that this is done on a best-effort +// basis. While there will never be any false positives, there may be false +// negatives. +// +// If the given switch statement has no default clause, then this function +// panics. +func defaultClauseAlwaysPanics(swtch *ast.TypeSwitchStmt) bool { + var clause *ast.CaseClause + for _, stmt := range swtch.Body.List { + c := stmt.(*ast.CaseClause) + if c.List == nil { + clause = c + break + } + } + if clause == nil { + panic("switch statement has no default clause") + } + if len(clause.Body) != 1 { + return false + } + exprStmt, ok := clause.Body[0].(*ast.ExprStmt) + if !ok { + return false + } + callExpr, ok := exprStmt.X.(*ast.CallExpr) + if !ok { + return false + } + fun, ok := callExpr.Fun.(*ast.Ident) + if !ok { + return false + } + return fun.Name == "panic" +} + +// findTypeAssertExpr extracts the expression that is being type asserted from a +// type swtich statement. +func findTypeAssertExpr(swtch *ast.TypeSwitchStmt) ast.Expr { + var expr ast.Expr + if assign, ok := swtch.Assign.(*ast.AssignStmt); ok { + expr = assign.Rhs[0] + } else { + expr = swtch.Assign.(*ast.ExprStmt).X + } + return expr.(*ast.TypeAssertExpr).X +} + +// findDef returns the sum type definition corresponding to the given type. If +// no such sum type definition exists, then nil is returned. +func findDef(defs []sumTypeDef, needle types.Type) *sumTypeDef { + for i := range defs { + def := &defs[i] + if types.Identical(needle.Underlying(), def.Ty) { + return def + } + } + return nil +} diff --git a/vendor/github.com/alecthomas/go-check-sumtype/decl.go b/vendor/github.com/alecthomas/go-check-sumtype/decl.go new file mode 100644 index 0000000000..9dec9eefd5 --- /dev/null +++ b/vendor/github.com/alecthomas/go-check-sumtype/decl.go @@ -0,0 +1,69 @@ +package gochecksumtype + +import ( + "go/ast" + "go/token" + "strings" + + "golang.org/x/tools/go/packages" +) + +// sumTypeDecl is a declaration of a sum type in a Go source file. +type sumTypeDecl struct { + // The package path that contains this decl. + Package *packages.Package + // The type named by this decl. + TypeName string + // Position where the declaration was found. + Pos token.Position +} + +// Location returns a short string describing where this declaration was found. +func (d sumTypeDecl) Location() string { + return d.Pos.String() +} + +// findSumTypeDecls searches every package given for sum type declarations of +// the form `sumtype:decl`. 
+func findSumTypeDecls(pkgs []*packages.Package) ([]sumTypeDecl, error) { + var decls []sumTypeDecl + var retErr error + for _, pkg := range pkgs { + for _, file := range pkg.Syntax { + ast.Inspect(file, func(node ast.Node) bool { + if node == nil { + return true + } + decl, ok := node.(*ast.GenDecl) + if !ok || decl.Doc == nil { + return true + } + var tspec *ast.TypeSpec + for _, spec := range decl.Specs { + ts, ok := spec.(*ast.TypeSpec) + if !ok { + continue + } + tspec = ts + } + for _, line := range decl.Doc.List { + if !strings.HasPrefix(line.Text, "//sumtype:decl") { + continue + } + pos := pkg.Fset.Position(decl.Pos()) + if tspec == nil { + retErr = notFoundError{Decl: sumTypeDecl{Package: pkg, Pos: pos}} + return false + } + pos = pkg.Fset.Position(tspec.Pos()) + decl := sumTypeDecl{Package: pkg, TypeName: tspec.Name.Name, Pos: pos} + debugf("found sum type decl: %s.%s", decl.Package.PkgPath, decl.TypeName) + decls = append(decls, decl) + break + } + return true + }) + } + } + return decls, retErr +} diff --git a/vendor/github.com/alecthomas/go-check-sumtype/def.go b/vendor/github.com/alecthomas/go-check-sumtype/def.go new file mode 100644 index 0000000000..24729ac01b --- /dev/null +++ b/vendor/github.com/alecthomas/go-check-sumtype/def.go @@ -0,0 +1,173 @@ +package gochecksumtype + +import ( + "flag" + "fmt" + "go/token" + "go/types" + "log" +) + +var debug = flag.Bool("debug", false, "enable debug logging") + +func debugf(format string, args ...interface{}) { + if *debug { + log.Printf(format, args...) + } +} + +// Error as returned by Run() +type Error interface { + error + Pos() token.Position +} + +// unsealedError corresponds to a declared sum type whose interface is not +// sealed. A sealed interface requires at least one unexported method. +type unsealedError struct { + Decl sumTypeDecl +} + +func (e unsealedError) Pos() token.Position { return e.Decl.Pos } +func (e unsealedError) Error() string { + return fmt.Sprintf( + "%s: interface '%s' is not sealed "+ + "(sealing requires at least one unexported method)", + e.Decl.Location(), e.Decl.TypeName) +} + +// notFoundError corresponds to a declared sum type whose type definition +// could not be found in the same Go package. +type notFoundError struct { + Decl sumTypeDecl +} + +func (e notFoundError) Pos() token.Position { return e.Decl.Pos } +func (e notFoundError) Error() string { + return fmt.Sprintf("%s: type '%s' is not defined", e.Decl.Location(), e.Decl.TypeName) +} + +// notInterfaceError corresponds to a declared sum type that does not +// correspond to an interface. +type notInterfaceError struct { + Decl sumTypeDecl +} + +func (e notInterfaceError) Pos() token.Position { return e.Decl.Pos } +func (e notInterfaceError) Error() string { + return fmt.Sprintf("%s: type '%s' is not an interface", e.Decl.Location(), e.Decl.TypeName) +} + +// sumTypeDef corresponds to the definition of a Go interface that is +// interpreted as a sum type. Its variants are determined by finding all types +// that implement said interface in the same package. +type sumTypeDef struct { + Decl sumTypeDecl + Ty *types.Interface + Variants []types.Object +} + +// findSumTypeDefs attempts to find a Go type definition for each of the given +// sum type declarations. If no such sum type definition could be found for +// any of the given declarations, then an error is returned. 
+func findSumTypeDefs(decls []sumTypeDecl) ([]sumTypeDef, []error) { + var defs []sumTypeDef + var errs []error + for _, decl := range decls { + def, err := newSumTypeDef(decl.Package.Types, decl) + if err != nil { + errs = append(errs, err) + continue + } + if def == nil { + errs = append(errs, notFoundError{decl}) + continue + } + defs = append(defs, *def) + } + return defs, errs +} + +// newSumTypeDef attempts to extract a sum type definition from a single +// package. If no such type corresponds to the given decl, then this function +// returns a nil def and a nil error. +// +// If the decl corresponds to a type that isn't an interface containing at +// least one unexported method, then this returns an error. +func newSumTypeDef(pkg *types.Package, decl sumTypeDecl) (*sumTypeDef, error) { + obj := pkg.Scope().Lookup(decl.TypeName) + if obj == nil { + return nil, nil + } + iface, ok := obj.Type().Underlying().(*types.Interface) + if !ok { + return nil, notInterfaceError{decl} + } + hasUnexported := false + for i := 0; i < iface.NumMethods(); i++ { + if !iface.Method(i).Exported() { + hasUnexported = true + break + } + } + if !hasUnexported { + return nil, unsealedError{decl} + } + def := &sumTypeDef{ + Decl: decl, + Ty: iface, + } + debugf("searching for variants of %s.%s\n", pkg.Path(), decl.TypeName) + for _, name := range pkg.Scope().Names() { + obj, ok := pkg.Scope().Lookup(name).(*types.TypeName) + if !ok { + continue + } + ty := obj.Type() + if types.Identical(ty.Underlying(), iface) { + continue + } + // Skip generic types. + if named, ok := ty.(*types.Named); ok && named.TypeParams() != nil { + continue + } + if types.Implements(ty, iface) || types.Implements(types.NewPointer(ty), iface) { + debugf(" found variant: %s.%s\n", pkg.Path(), obj.Name()) + def.Variants = append(def.Variants, obj) + } + } + return def, nil +} + +func (def *sumTypeDef) String() string { + return def.Decl.TypeName +} + +// missing returns a list of variants in this sum type that are not in the +// given list of types. +func (def *sumTypeDef) missing(tys []types.Type) []types.Object { + // TODO(ag): This is O(n^2). Fix that. /shrug + var missing []types.Object + for _, v := range def.Variants { + found := false + varty := indirect(v.Type()) + for _, ty := range tys { + ty = indirect(ty) + if types.Identical(varty, ty) { + found = true + } + } + if !found { + missing = append(missing, v) + } + } + return missing +} + +// indirect dereferences through an arbitrary number of pointer types. +func indirect(ty types.Type) types.Type { + if ty, ok := ty.(*types.Pointer); ok { + return indirect(ty.Elem()) + } + return ty +} diff --git a/vendor/github.com/alecthomas/go-check-sumtype/doc.go b/vendor/github.com/alecthomas/go-check-sumtype/doc.go new file mode 100644 index 0000000000..2b6e86764e --- /dev/null +++ b/vendor/github.com/alecthomas/go-check-sumtype/doc.go @@ -0,0 +1,53 @@ +/* +sumtype takes a list of Go package paths or files and looks for sum type +declarations in each package/file provided. Exhaustiveness checks are then +performed for each use of a declared sum type in a type switch statement. +Namely, sumtype will report an error for any type switch statement that +either lacks a default clause or does not account for all possible variants. + +Declarations are provided in comments like so: + + //sumtype:decl + type MySumType interface { ... } + +MySumType must be *sealed*. That is, part of its interface definition contains +an unexported method. 
+ +sumtype will produce an error if any of the above is not true. + +For valid declarations, sumtype will look for all occurrences in which a +value of type MySumType participates in a type switch statement. In those +occurrences, it will attempt to detect whether the type switch is exhaustive +or not. If it's not, sumtype will report an error. For example: + + $ cat mysumtype.go + package gochecksumtype + + //sumtype:decl + type MySumType interface { + sealed() + } + + type VariantA struct{} + + func (a *VariantA) sealed() {} + + type VariantB struct{} + + func (b *VariantB) sealed() {} + + func main() { + switch MySumType(nil).(type) { + case *VariantA: + } + } + $ sumtype mysumtype.go + mysumtype.go:18:2: exhaustiveness check failed for sum type 'MySumType': missing cases for VariantB + +Adding either a default clause or a clause to handle *VariantB will cause +exhaustive checks to pass. + +As a special case, if the type switch statement contains a default clause +that always panics, then exhaustiveness checks are still performed. +*/ +package gochecksumtype diff --git a/vendor/github.com/alecthomas/go-check-sumtype/run.go b/vendor/github.com/alecthomas/go-check-sumtype/run.go new file mode 100644 index 0000000000..fdcb643c5d --- /dev/null +++ b/vendor/github.com/alecthomas/go-check-sumtype/run.go @@ -0,0 +1,26 @@ +package gochecksumtype + +import "golang.org/x/tools/go/packages" + +// Run sumtype checking on the given packages. +func Run(pkgs []*packages.Package) []error { + var errs []error + + decls, err := findSumTypeDecls(pkgs) + if err != nil { + return []error{err} + } + + defs, defErrs := findSumTypeDefs(decls) + errs = append(errs, defErrs...) + if len(defs) == 0 { + return errs + } + + for _, pkg := range pkgs { + if pkgErrs := check(pkg, defs); pkgErrs != nil { + errs = append(errs, pkgErrs...) + } + } + return errs +} diff --git a/vendor/github.com/alexkohler/nakedret/v2/.gitignore b/vendor/github.com/alexkohler/nakedret/v2/.gitignore new file mode 100644 index 0000000000..b4822913a0 --- /dev/null +++ b/vendor/github.com/alexkohler/nakedret/v2/.gitignore @@ -0,0 +1,8 @@ +# editor specific +.vscode + +# binary +/nakedret + +# usage video for docs +.github/images diff --git a/vendor/github.com/alexkohler/nakedret/v2/LICENSE b/vendor/github.com/alexkohler/nakedret/v2/LICENSE new file mode 100644 index 0000000000..9310fbcffb --- /dev/null +++ b/vendor/github.com/alexkohler/nakedret/v2/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2017 Alex Kohler + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
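A minimal sketch of driving the Run entrypoint from run.go above as a standalone program, assuming the golang.org/x/tools/go/packages loader; the chosen load-mode flags and the main wrapper are illustrative assumptions rather than part of go-check-sumtype:

```go
package main

import (
	"fmt"
	"os"

	gochecksumtype "github.com/alecthomas/go-check-sumtype"
	"golang.org/x/tools/go/packages"
)

func main() {
	// Load syntax trees plus type information; the decl/def passes above read
	// pkg.Syntax, pkg.Fset and pkg.Types, and the exhaustiveness check needs types too.
	cfg := &packages.Config{
		Mode: packages.NeedName | packages.NeedFiles | packages.NeedSyntax |
			packages.NeedTypes | packages.NeedTypesInfo,
	}
	pkgs, err := packages.Load(cfg, "./...")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	// Run returns one error per finding (missing variant, unsealed or undefined sum type).
	for _, e := range gochecksumtype.Run(pkgs) {
		fmt.Fprintln(os.Stderr, e)
	}
}
```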
diff --git a/vendor/github.com/alexkohler/nakedret/v2/README.md b/vendor/github.com/alexkohler/nakedret/v2/README.md new file mode 100644 index 0000000000..e30a0cde76 --- /dev/null +++ b/vendor/github.com/alexkohler/nakedret/v2/README.md @@ -0,0 +1,125 @@ +# nakedret + +nakedret is a Go static analysis tool to find naked returns in functions greater than a specified function length. + +## Installation +Install Nakedret via go install: + +```cmd +go install github.com/alexkohler/nakedret/cmd/nakedret@latest +``` + +If you have not already added your `GOPATH/bin` directory to your `PATH` environment variable then you will need to do so. + +Windows (cmd): +```cmd +set PATH=%PATH%;C:\your\GOPATH\bin +``` + +Bash (you can verify a path has been set): +```Bash +# Check if nakedret is on PATH +which nakedret +export PATH=$PATH:/your/GOPATH/bin #to set path if it does not exist +``` + +## Usage + +Similar to other Go static anaylsis tools (such as `golint`, `go vet`), nakedret can be invoked with one or more filenames, directories, or packages named by its import path. Nakedret also supports the `...` wildcard. + + nakedret [flags] files/directories/packages + +Currently, the only flag supported is -l, which is an optional numeric flag to specify the maximum length a function can be (in terms of line length). If not specified, it defaults to 5. + +It can also be run using `go vet`: + +```shell +go vet -vettool=$(which nakedret) ./... +``` + +## Purpose + +As noted in Go's [Code Review comments](https://github.com/golang/go/wiki/CodeReviewComments#named-result-parameters): + +> Naked returns are okay if the function is a handful of lines. Once it's a medium sized function, be explicit with your return +> values. Corollary: it's not worth it to name result parameters just because it enables you to use naked returns. Clarity of docs is always more important than saving a line or two in your function. + +This tool aims to catch naked returns on non-trivial functions. + +## Example + +Let's take the `types` package in the Go source as an example: + +```Bash +$ nakedret -l 25 types/ +types/check.go:245 checkFiles naked returns on 26 line function +types/typexpr.go:443 collectParams naked returns on 53 line function +types/stmt.go:275 caseTypes naked returns on 27 line function +types/lookup.go:275 MissingMethod naked returns on 39 line function +``` + +Below is one of the not so intuitive uses of naked returns in `types/lookup.go` found by nakedret (nakedret will return the line number of the last naked return in the function): + + +```Go +func MissingMethod(V Type, T *Interface, static bool) (method *Func, wrongType bool) { + // fast path for common case + if T.Empty() { + return + } + + // TODO(gri) Consider using method sets here. Might be more efficient. + + if ityp, _ := V.Underlying().(*Interface); ityp != nil { + // TODO(gri) allMethods is sorted - can do this more efficiently + for _, m := range T.allMethods { + _, obj := lookupMethod(ityp.allMethods, m.pkg, m.name) + switch { + case obj == nil: + if static { + return m, false + } + case !Identical(obj.Type(), m.typ): + return m, true + } + } + return + } + + // A concrete type implements T if it implements all methods of T. 
+ for _, m := range T.allMethods { + obj, _, _ := lookupFieldOrMethod(V, false, m.pkg, m.name) + + f, _ := obj.(*Func) + if f == nil { + return m, false + } + + if !Identical(f.typ, m.typ) { + return m, true + } + } + + return +} +``` + +## TODO + +- Unit tests (may require some refactoring to do correctly) +- supporting toggling of `build.Context.UseAllFiles` may be useful for some. +- Configuration on whether or not to run on test files +- Vim quickfix format? + + +## Contributing + +Pull requests welcome! + + +## Other static analysis tools + +If you've enjoyed nakedret, take a look at my other static anaylsis tools! + +- [unimport](https://github.com/alexkohler/unimport) - Finds unnecessary import aliases +- [prealloc](https://github.com/alexkohler/prealloc) - Finds slice declarations that could potentially be preallocated. diff --git a/vendor/github.com/mgechev/dots/resolve.go b/vendor/github.com/alexkohler/nakedret/v2/import.go similarity index 56% rename from vendor/github.com/mgechev/dots/resolve.go rename to vendor/github.com/alexkohler/nakedret/v2/import.go index 114534be87..dea8423336 100644 --- a/vendor/github.com/mgechev/dots/resolve.go +++ b/vendor/github.com/alexkohler/nakedret/v2/import.go @@ -1,6 +1,17 @@ -package dots +package nakedret + +/* + +This file holds a direct copy of the import path matching code of +https://github.com/golang/go/blob/master/src/cmd/go/main.go. It can be +replaced when https://golang.org/issue/8768 is resolved. + +It has been updated to follow upstream changes in a few ways. + +*/ import ( + "fmt" "go/build" "log" "os" @@ -11,205 +22,13 @@ import ( "strings" ) +var buildContext = build.Default + var ( - buildContext = build.Default - goroot = filepath.Clean(runtime.GOROOT()) - gorootSrc = filepath.Join(goroot, "src") + goroot = filepath.Clean(runtime.GOROOT()) + gorootSrc = filepath.Join(goroot, "src") ) -func flatten(arr [][]string) []string { - var res []string - for _, e := range arr { - res = append(res, e...) - } - return res -} - -// Resolve accepts a slice of paths with optional "..." placeholder and a slice with paths to be skipped. -// The final result is the set of all files from the selected directories subtracted with -// the files in the skip slice. -func Resolve(includePatterns, skipPatterns []string) ([]string, error) { - skip, err := resolvePatternsIgnoringErrors(skipPatterns) - filter := newPathFilter(flatten(skip)) - if err != nil { - return nil, err - } - - pathSet := map[string]bool{} - includePackages, err := resolvePatterns(includePatterns) - include := flatten(includePackages) - if err != nil { - return nil, err - } - - var result []string - for _, i := range include { - if _, ok := pathSet[i]; !ok && !filter(i) { - pathSet[i] = true - result = append(result, i) - } - } - return result, err -} - -// ResolvePackages accepts a slice of paths with optional "..." placeholder and a slice with paths to be skipped. -// The final result is the set of all files from the selected directories subtracted with -// the files in the skip slice. The difference between `Resolve` and `ResolvePackages` -// is that `ResolvePackages` preserves the package structure in the nested slices. 
-func ResolvePackages(includePatterns, skipPatterns []string) ([][]string, error) { - skip, err := resolvePatternsIgnoringErrors(skipPatterns) - filter := newPathFilter(flatten(skip)) - if err != nil { - return nil, err - } - - pathSet := map[string]bool{} - include, err := resolvePatterns(includePatterns) - if err != nil { - return nil, err - } - - var result [][]string - for _, p := range include { - var packageFiles []string - for _, f := range p { - if _, ok := pathSet[f]; !ok && !filter(f) { - pathSet[f] = true - packageFiles = append(packageFiles, f) - } - } - result = append(result, packageFiles) - } - return result, err -} - -func isDir(filename string) bool { - fi, err := os.Stat(filename) - return err == nil && fi.IsDir() -} - -func exists(filename string) bool { - _, err := os.Stat(filename) - return err == nil -} - -func resolveDir(dirname string) ([]string, error) { - pkg, err := build.ImportDir(dirname, 0) - return resolveImportedPackage(pkg, err) -} - -func resolvePackage(pkgname string) ([]string, error) { - pkg, err := build.Import(pkgname, ".", 0) - return resolveImportedPackage(pkg, err) -} - -func resolveImportedPackage(pkg *build.Package, err error) ([]string, error) { - if err != nil { - if _, nogo := err.(*build.NoGoError); nogo { - // Don't complain if the failure is due to no Go source files. - return nil, nil - } - return nil, err - } - - var files []string - files = append(files, pkg.GoFiles...) - files = append(files, pkg.CgoFiles...) - files = append(files, pkg.TestGoFiles...) - if pkg.Dir != "." { - for i, f := range files { - files[i] = filepath.Join(pkg.Dir, f) - } - } - return files, nil -} - -func resolvePatterns(patterns []string) ([][]string, error) { - var files [][]string - for _, pattern := range patterns { - f, err := resolvePattern(pattern) - if err != nil { - return nil, err - } - files = append(files, f...) - } - return files, nil -} - -func resolvePatternsIgnoringErrors(patterns []string) ([][]string, error) { - var files [][]string - for _, pattern := range patterns { - f, err := resolvePattern(pattern) - if err != nil { - continue - } - files = append(files, f...) - } - return files, nil -} - -func resolvePattern(pattern string) ([][]string, error) { - // dirsRun, filesRun, and pkgsRun indicate whether golint is applied to - // directory, file or package targets. The distinction affects which - // checks are run. It is no valid to mix target types. 
- var dirsRun, filesRun, pkgsRun int - var matches []string - - if strings.HasSuffix(pattern, "/...") && isDir(pattern[:len(pattern)-len("/...")]) { - dirsRun = 1 - for _, dirname := range matchPackagesInFS(pattern) { - matches = append(matches, dirname) - } - } else if isDir(pattern) { - dirsRun = 1 - matches = append(matches, pattern) - } else if exists(pattern) { - filesRun = 1 - matches = append(matches, pattern) - } else { - pkgsRun = 1 - matches = append(matches, pattern) - } - - result := [][]string{} - switch { - case dirsRun == 1: - for _, dir := range matches { - res, err := resolveDir(dir) - if err != nil { - return nil, err - } - result = append(result, res) - } - case filesRun == 1: - return [][]string{matches}, nil - case pkgsRun == 1: - for _, pkg := range importPaths(matches) { - res, err := resolvePackage(pkg) - if err != nil { - return nil, err - } - result = append(result, res) - } - } - return result, nil -} - -func newPathFilter(skip []string) func(string) bool { - filter := map[string]bool{} - for _, name := range skip { - filter[name] = true - } - - return func(path string) bool { - base := filepath.Base(path) - if filter[base] || filter[path] { - return true - } - return base != "." && base != ".." && strings.ContainsAny(base[0:1], "_.") - } -} - // importPathsNoDotExpansion returns the import paths to use for the given // command line, but it does no ... expansion. func importPathsNoDotExpansion(args []string) []string { @@ -235,7 +54,7 @@ func importPathsNoDotExpansion(args []string) []string { a = path.Clean(a) } if a == "all" || a == "std" { - out = append(out, matchPackages(a)...) + out = append(out, allPackages(a)...) continue } out = append(out, a) @@ -250,9 +69,9 @@ func importPaths(args []string) []string { for _, a := range args { if strings.Contains(a, "...") { if build.IsLocalImport(a) { - out = append(out, matchPackagesInFS(a)...) + out = append(out, allPackagesInFS(a)...) } else { - out = append(out, matchPackages(a)...) + out = append(out, allPackages(a)...) } continue } @@ -309,6 +128,18 @@ func treeCanMatchPattern(pattern string) func(name string) bool { } } +// allPackages returns all the packages that can be found +// under the $GOPATH directories and $GOROOT matching pattern. +// The pattern is either "all" (all packages), "std" (standard packages) +// or a path including "...". +func allPackages(pattern string) []string { + pkgs := matchPackages(pattern) + if len(pkgs) == 0 { + fmt.Fprintf(os.Stderr, "warning: %q matched no packages\n", pattern) + } + return pkgs +} + func matchPackages(pattern string) []string { match := func(string) bool { return true } treeCanMatch := func(string) bool { return true } @@ -374,9 +205,9 @@ func matchPackages(pattern string) []string { return nil } - // Avoid .foo, _foo, and testdata directory trees. + // Avoid .foo, _foo, testdata and vendor directory trees. _, elem := filepath.Split(path) - if strings.HasPrefix(elem, ".") || strings.HasPrefix(elem, "_") || elem == "testdata" { + if strings.HasPrefix(elem, ".") || strings.HasPrefix(elem, "_") || elem == "testdata" || elem == "vendor" { return filepath.SkipDir } @@ -409,6 +240,17 @@ func matchPackages(pattern string) []string { return pkgs } +// allPackagesInFS is like allPackages but is passed a pattern +// beginning ./ or ../, meaning it should scan the tree rooted +// at the given directory. There are ... in the pattern too. 
+func allPackagesInFS(pattern string) []string { + pkgs := matchPackagesInFS(pattern) + if len(pkgs) == 0 { + fmt.Fprintf(os.Stderr, "warning: %q matched no packages\n", pattern) + } + return pkgs +} + func matchPackagesInFS(pattern string) []string { // Find directory to begin the scan. // Could be smarter but this one optimization @@ -444,10 +286,10 @@ func matchPackagesInFS(pattern string) []string { path = filepath.Clean(path) } - // Avoid .foo, _foo, and testdata directory trees, but do not avoid "." or "..". + // Avoid .foo, _foo, testdata and vendor directory trees, but do not avoid "." or "..". _, elem := filepath.Split(path) dot := strings.HasPrefix(elem, ".") && elem != "." && elem != ".." - if dot || strings.HasPrefix(elem, "_") || elem == "testdata" { + if dot || strings.HasPrefix(elem, "_") || elem == "testdata" || elem == "vendor" { return filepath.SkipDir } diff --git a/vendor/github.com/alexkohler/nakedret/v2/nakedret.go b/vendor/github.com/alexkohler/nakedret/v2/nakedret.go new file mode 100644 index 0000000000..f78bb8cb6c --- /dev/null +++ b/vendor/github.com/alexkohler/nakedret/v2/nakedret.go @@ -0,0 +1,309 @@ +package nakedret + +import ( + "bytes" + "errors" + "flag" + "fmt" + "go/ast" + "go/build" + "go/parser" + "go/printer" + "go/token" + "log" + "os" + "path/filepath" + "strings" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" +) + +const pwd = "./" + +func NakedReturnAnalyzer(defaultLines uint) *analysis.Analyzer { + nakedRet := &NakedReturnRunner{} + flags := flag.NewFlagSet("nakedret", flag.ExitOnError) + flags.UintVar(&nakedRet.MaxLength, "l", defaultLines, "maximum number of lines for a naked return function") + var analyzer = &analysis.Analyzer{ + Name: "nakedret", + Doc: "Checks that functions with naked returns are not longer than a maximum size (can be zero).", + Run: nakedRet.run, + Flags: *flags, + Requires: []*analysis.Analyzer{inspect.Analyzer}, + } + return analyzer +} + +type NakedReturnRunner struct { + MaxLength uint +} + +func (n *NakedReturnRunner) run(pass *analysis.Pass) (any, error) { + inspector := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + + nodeFilter := []ast.Node{ // filter needed nodes: visit only them + (*ast.FuncDecl)(nil), + (*ast.FuncLit)(nil), + (*ast.ReturnStmt)(nil), + } + retVis := &returnsVisitor{ + pass: pass, + f: pass.Fset, + maxLength: n.MaxLength, + } + inspector.Nodes(nodeFilter, retVis.NodesVisit) + return nil, nil +} + +type returnsVisitor struct { + pass *analysis.Pass + f *token.FileSet + maxLength uint + + // functions contains funcInfo for each nested function definition encountered while visiting the AST. 
+ functions []funcInfo +} + +type funcInfo struct { + // Details of the function we're currently dealing with + funcType *ast.FuncType + funcName string + funcLength int + reportNaked bool +} + +func checkNakedReturns(args []string, maxLength *uint, setExitStatus bool) error { + + fset := token.NewFileSet() + + files, err := parseInput(args, fset) + if err != nil { + return fmt.Errorf("could not parse input: %v", err) + } + + if maxLength == nil { + return errors.New("max length nil") + } + + analyzer := NakedReturnAnalyzer(*maxLength) + pass := &analysis.Pass{ + Analyzer: analyzer, + Fset: fset, + Files: files, + Report: func(d analysis.Diagnostic) { + log.Printf("%s:%d: %s", fset.Position(d.Pos).Filename, fset.Position(d.Pos).Line, d.Message) + }, + ResultOf: map[*analysis.Analyzer]any{}, + } + result, err := inspect.Analyzer.Run(pass) + if err != nil { + return err + } + pass.ResultOf[inspect.Analyzer] = result + + _, err = analyzer.Run(pass) + if err != nil { + return err + } + + return nil +} + +func parseInput(args []string, fset *token.FileSet) ([]*ast.File, error) { + var directoryList []string + var fileMode bool + files := make([]*ast.File, 0) + + if len(args) == 0 { + directoryList = append(directoryList, pwd) + } else { + for _, arg := range args { + if strings.HasSuffix(arg, "/...") && isDir(arg[:len(arg)-len("/...")]) { + + for _, dirname := range allPackagesInFS(arg) { + directoryList = append(directoryList, dirname) + } + + } else if isDir(arg) { + directoryList = append(directoryList, arg) + + } else if exists(arg) { + if strings.HasSuffix(arg, ".go") { + fileMode = true + f, err := parser.ParseFile(fset, arg, nil, 0) + if err != nil { + return nil, err + } + files = append(files, f) + } else { + return nil, fmt.Errorf("invalid file %v specified", arg) + } + } else { + + // TODO clean this up a bit + imPaths := importPaths([]string{arg}) + for _, importPath := range imPaths { + pkg, err := build.Import(importPath, ".", 0) + if err != nil { + return nil, err + } + var stringFiles []string + stringFiles = append(stringFiles, pkg.GoFiles...) + // files = append(files, pkg.CgoFiles...) + stringFiles = append(stringFiles, pkg.TestGoFiles...) + if pkg.Dir != "." 
{ + for i, f := range stringFiles { + stringFiles[i] = filepath.Join(pkg.Dir, f) + } + } + + fileMode = true + for _, stringFile := range stringFiles { + f, err := parser.ParseFile(fset, stringFile, nil, 0) + if err != nil { + return nil, err + } + files = append(files, f) + } + + } + } + } + } + + // if we're not in file mode, then we need to grab each and every package in each directory + // we can to grab all the files + if !fileMode { + for _, fpath := range directoryList { + pkgs, err := parser.ParseDir(fset, fpath, nil, 0) + if err != nil { + return nil, err + } + + for _, pkg := range pkgs { + for _, f := range pkg.Files { + files = append(files, f) + } + } + } + } + + return files, nil +} + +func isDir(filename string) bool { + fi, err := os.Stat(filename) + return err == nil && fi.IsDir() +} + +func exists(filename string) bool { + _, err := os.Stat(filename) + return err == nil +} + +func hasNamedReturns(funcType *ast.FuncType) bool { + if funcType == nil || funcType.Results == nil { + return false + } + for _, field := range funcType.Results.List { + for _, ident := range field.Names { + if ident != nil { + return true + } + } + } + return false +} + +func nestedFuncName(functions []funcInfo) string { + var names []string + for _, f := range functions { + names = append(names, f.funcName) + } + return strings.Join(names, ".") +} + +func nakedReturnFix(s *ast.ReturnStmt, funcType *ast.FuncType) *ast.ReturnStmt { + var nameExprs []ast.Expr + for _, result := range funcType.Results.List { + for _, ident := range result.Names { + if ident != nil { + nameExprs = append(nameExprs, ident) + } + } + } + var sFix = *s + sFix.Results = nameExprs + return &sFix +} + +func (v *returnsVisitor) NodesVisit(node ast.Node, push bool) bool { + var ( + funcType *ast.FuncType + funcName string + ) + switch s := node.(type) { + case *ast.FuncDecl: + // We've found a function + funcType = s.Type + funcName = s.Name.Name + case *ast.FuncLit: + // We've found a function literal + funcType = s.Type + file := v.f.File(s.Pos()) + funcName = fmt.Sprintf("", file.Position(s.Pos()).Line) + case *ast.ReturnStmt: + // We've found a possibly naked return statement + fun := v.functions[len(v.functions)-1] + funName := nestedFuncName(v.functions) + if fun.reportNaked && len(s.Results) == 0 && push { + sFix := nakedReturnFix(s, fun.funcType) + b := &bytes.Buffer{} + err := printer.Fprint(b, v.f, sFix) + if err != nil { + log.Printf("failed to format named return fix: %s", err) + } + v.pass.Report(analysis.Diagnostic{ + Pos: s.Pos(), + End: s.End(), + Message: fmt.Sprintf("naked return in func `%s` with %d lines of code", funName, fun.funcLength), + SuggestedFixes: []analysis.SuggestedFix{{ + Message: "explicit return statement", + TextEdits: []analysis.TextEdit{{ + Pos: s.Pos(), + End: s.End(), + NewText: b.Bytes()}}, + }}, + }) + } + } + + if !push { + if funcType == nil { + return false + } + // Pop function info + v.functions = v.functions[:len(v.functions)-1] + return false + } + + if push && funcType != nil { + // Push function info to track returns for this function + file := v.f.File(node.Pos()) + length := file.Position(node.End()).Line - file.Position(node.Pos()).Line + if length == 0 { + // consider functions that finish on the same line as they start as single line functions, not zero lines! 
+ length = 1 + } + v.functions = append(v.functions, funcInfo{ + funcType: funcType, + funcName: funcName, + funcLength: length, + reportNaked: uint(length) > v.maxLength && hasNamedReturns(funcType), + }) + } + + return true +} diff --git a/vendor/github.com/alingse/asasalint/.gitignore b/vendor/github.com/alingse/asasalint/.gitignore new file mode 100644 index 0000000000..d0fc531c8c --- /dev/null +++ b/vendor/github.com/alingse/asasalint/.gitignore @@ -0,0 +1,18 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +# vendor/ +.vscode + +asasalint diff --git a/vendor/github.com/alingse/asasalint/.goreleaser.yml b/vendor/github.com/alingse/asasalint/.goreleaser.yml new file mode 100644 index 0000000000..e45d5860d6 --- /dev/null +++ b/vendor/github.com/alingse/asasalint/.goreleaser.yml @@ -0,0 +1,72 @@ +--- +project_name: asasalint + +release: + github: + owner: alingse + name: asasalint + +builds: + - binary: asasalint + goos: + - darwin + - windows + - linux + - freebsd + goarch: + - amd64 + - arm64 + - arm + - 386 + - ppc64le + - s390x + - mips64 + - mips64le + - riscv64 + goarm: + - 6 + - 7 + gomips: + - hardfloat + env: + - CGO_ENABLED=0 + ignore: + - goos: darwin + goarch: 386 + - goos: freebsd + goarch: arm64 + main: ./cmd/asasalint/ + flags: + - -trimpath + ldflags: -s -w -X main.version={{.Version}} -X main.commit={{.ShortCommit}} -X main.date={{.Date}} + +archives: + - format: tar.gz + wrap_in_directory: true + format_overrides: + - goos: windows + format: zip + name_template: '{{ .ProjectName }}-{{ .Version }}-{{ .Os }}-{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}' + files: + - LICENSE + - README.md + +snapshot: + name_template: SNAPSHOT-{{ .Commit }} + +checksum: + name_template: '{{ .ProjectName }}-{{ .Version }}-checksums.txt' + +changelog: + sort: asc + filters: + exclude: + - '(?i)^docs?:' + - '(?i)^docs\([^:]+\):' + - '(?i)^docs\[[^:]+\]:' + - '^tests?:' + - '(?i)^dev:' + - '^build\(deps\): bump .* in /docs \(#\d+\)' + - '^build\(deps\): bump .* in /\.github/peril \(#\d+\)' + - Merge pull request + - Merge branch diff --git a/vendor/github.com/alingse/asasalint/LICENSE b/vendor/github.com/alingse/asasalint/LICENSE new file mode 100644 index 0000000000..a7c39f2e3d --- /dev/null +++ b/vendor/github.com/alingse/asasalint/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2022 alingse + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/alingse/asasalint/Makefile b/vendor/github.com/alingse/asasalint/Makefile new file mode 100644 index 0000000000..12f8ba9283 --- /dev/null +++ b/vendor/github.com/alingse/asasalint/Makefile @@ -0,0 +1,15 @@ +.PHONY: clean check test build + +default: clean check test build + +clean: + rm -rf dist/ cover.out + +test: clean + go test -v -cover ./... + +check: + golangci-lint run + +build: + go build -ldflags "-s -w" -trimpath ./cmd/asasalint/ diff --git a/vendor/github.com/alingse/asasalint/README.md b/vendor/github.com/alingse/asasalint/README.md new file mode 100644 index 0000000000..3fa7e65b34 --- /dev/null +++ b/vendor/github.com/alingse/asasalint/README.md @@ -0,0 +1,76 @@ +# asasalint +Golang linter, lint that pass any slice as any in variadic function + + +## Install + +```sh +go install github.com/alingse/asasalint/cmd/asasalint@latest +``` + +## Usage + +```sh +asasalint ./... +``` + +ignore some func that was by desgin + +```sh +asasalint -e append,Append ./... +``` + +## Why + +two kind of unexpected usage, and `go build` success + +```Go +package main + +import "fmt" + +func A(args ...any) int { + return len(args) +} + +func B(args ...any) int { + return A(args) +} + +func main() { + + // 1 + fmt.Println(B(1, 2, 3, 4)) +} +``` + + + +```Go +package main + +import "fmt" + +func errMsg(msg string, args ...any) string { + return fmt.Sprintf(msg, args...) +} + +func Err(msg string, args ...any) string { + return errMsg(msg, args) +} + +func main() { + + // p1 [hello world] p2 %!s(MISSING) + fmt.Println(Err("p1 %s p2 %s", "hello", "world")) +} +``` + + + +## TODO + +1. add to golangci-lint +2. given a SuggestEdition +3. add `append` to default exclude ? +4. 
ingore pattern `fn(a, b, []any{1,2,3})` , because `[]any{1,2,3}` is most likely by design diff --git a/vendor/github.com/alingse/asasalint/asasalint.go b/vendor/github.com/alingse/asasalint/asasalint.go new file mode 100644 index 0000000000..f34516a465 --- /dev/null +++ b/vendor/github.com/alingse/asasalint/asasalint.go @@ -0,0 +1,166 @@ +package asasalint + +import ( + "bytes" + "fmt" + "go/ast" + "go/printer" + "go/token" + "go/types" + "log" + "regexp" + "strings" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" +) + +const BuiltinExclusions = `^(fmt|log|logger|t|)\.(Print|Fprint|Sprint|Fatal|Panic|Error|Warn|Warning|Info|Debug|Log)(|f|ln)$` + +type LinterSetting struct { + Exclude []string + NoBuiltinExclusions bool + IgnoreTest bool +} + +func NewAnalyzer(setting LinterSetting) (*analysis.Analyzer, error) { + a, err := newAnalyzer(setting) + if err != nil { + return nil, err + } + + return &analysis.Analyzer{ + Name: "asasalint", + Doc: "check for pass []any as any in variadic func(...any)", + Run: a.run, + Requires: []*analysis.Analyzer{inspect.Analyzer}, + }, nil +} + +type analyzer struct { + excludes []*regexp.Regexp + setting LinterSetting +} + +func newAnalyzer(setting LinterSetting) (*analyzer, error) { + a := &analyzer{ + setting: setting, + } + + if !a.setting.NoBuiltinExclusions { + a.excludes = append(a.excludes, regexp.MustCompile(BuiltinExclusions)) + } + + for _, exclude := range a.setting.Exclude { + if exclude != "" { + exp, err := regexp.Compile(exclude) + if err != nil { + return nil, err + } + + a.excludes = append(a.excludes, exp) + } + } + + return a, nil +} + +func (a *analyzer) run(pass *analysis.Pass) (interface{}, error) { + inspectorInfo := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + nodeFilter := []ast.Node{(*ast.CallExpr)(nil)} + + inspectorInfo.Preorder(nodeFilter, a.AsCheckVisitor(pass)) + return nil, nil +} + +func (a *analyzer) AsCheckVisitor(pass *analysis.Pass) func(ast.Node) { + return func(n ast.Node) { + if a.setting.IgnoreTest { + pos := pass.Fset.Position(n.Pos()) + if strings.HasSuffix(pos.Filename, "_test.go") { + return + } + } + + caller, ok := n.(*ast.CallExpr) + if !ok { + return + } + if caller.Ellipsis != token.NoPos { + return + } + if len(caller.Args) == 0 { + return + } + + fnName, err := getFuncName(pass.Fset, caller) + if err != nil { + log.Println(err) + return + } + + for _, exclude := range a.excludes { + if exclude.MatchString(fnName) { + return + } + } + + fnType := pass.TypesInfo.TypeOf(caller.Fun) + if !isSliceAnyVariadicFuncType(fnType) { + return + } + + fnSign := fnType.(*types.Signature) + if len(caller.Args) != fnSign.Params().Len() { + return + } + + lastArg := caller.Args[len(caller.Args)-1] + argType := pass.TypesInfo.TypeOf(lastArg) + if !isSliceAnyType(argType) { + return + } + node := lastArg + + d := analysis.Diagnostic{ + Pos: node.Pos(), + End: node.End(), + Message: fmt.Sprintf("pass []any as any to func %s %s", fnName, fnType.String()), + Category: "asasalint", + } + pass.Report(d) + } +} + +func getFuncName(fset *token.FileSet, caller *ast.CallExpr) (string, error) { + buf := new(bytes.Buffer) + if err := printer.Fprint(buf, fset, caller.Fun); err != nil { + return "", fmt.Errorf("unable to print node at %s: %w", fset.Position(caller.Fun.Pos()), err) + } + + return buf.String(), nil +} + +func isSliceAnyVariadicFuncType(typ types.Type) (r bool) { + fnSign, ok := typ.(*types.Signature) + if !ok || !fnSign.Variadic() { + 
return false + } + + params := fnSign.Params() + lastParam := params.At(params.Len() - 1) + return isSliceAnyType(lastParam.Type()) +} + +func isSliceAnyType(typ types.Type) (r bool) { + sliceType, ok := typ.(*types.Slice) + if !ok { + return + } + elemType, ok := sliceType.Elem().(*types.Interface) + if !ok { + return + } + return elemType.NumMethods() == 0 +} diff --git a/vendor/github.com/ashanbrown/forbidigo/forbidigo/config_options.go b/vendor/github.com/ashanbrown/forbidigo/forbidigo/config_options.go index a39f754f06..3f0ed6682a 100644 --- a/vendor/github.com/ashanbrown/forbidigo/forbidigo/config_options.go +++ b/vendor/github.com/ashanbrown/forbidigo/forbidigo/config_options.go @@ -2,6 +2,10 @@ package forbidigo // Code generated by github.com/launchdarkly/go-options. DO NOT EDIT. +import "fmt" + +import "github.com/google/go-cmp/cmp" + type ApplyOptionFunc func(c *config) error func (f ApplyOptionFunc) apply(c *config) error { @@ -28,18 +32,98 @@ type Option interface { apply(*config) error } +type optionExcludeGodocExamplesImpl struct { + o bool +} + +func (o optionExcludeGodocExamplesImpl) apply(c *config) error { + c.ExcludeGodocExamples = o.o + return nil +} + +func (o optionExcludeGodocExamplesImpl) Equal(v optionExcludeGodocExamplesImpl) bool { + switch { + case !cmp.Equal(o.o, v.o): + return false + } + return true +} + +func (o optionExcludeGodocExamplesImpl) String() string { + name := "OptionExcludeGodocExamples" + + // hack to avoid go vet error about passing a function to Sprintf + var value interface{} = o.o + return fmt.Sprintf("%s: %+v", name, value) +} + // OptionExcludeGodocExamples don't check inside Godoc examples (see https://blog.golang.org/examples) -func OptionExcludeGodocExamples(o bool) ApplyOptionFunc { - return func(c *config) error { - c.ExcludeGodocExamples = o - return nil +func OptionExcludeGodocExamples(o bool) Option { + return optionExcludeGodocExamplesImpl{ + o: o, + } +} + +type optionIgnorePermitDirectivesImpl struct { + o bool +} + +func (o optionIgnorePermitDirectivesImpl) apply(c *config) error { + c.IgnorePermitDirectives = o.o + return nil +} + +func (o optionIgnorePermitDirectivesImpl) Equal(v optionIgnorePermitDirectivesImpl) bool { + switch { + case !cmp.Equal(o.o, v.o): + return false } + return true +} + +func (o optionIgnorePermitDirectivesImpl) String() string { + name := "OptionIgnorePermitDirectives" + + // hack to avoid go vet error about passing a function to Sprintf + var value interface{} = o.o + return fmt.Sprintf("%s: %+v", name, value) } // OptionIgnorePermitDirectives don't check for `permit` directives(for example, in favor of `nolint`) -func OptionIgnorePermitDirectives(o bool) ApplyOptionFunc { - return func(c *config) error { - c.IgnorePermitDirectives = o - return nil +func OptionIgnorePermitDirectives(o bool) Option { + return optionIgnorePermitDirectivesImpl{ + o: o, + } +} + +type optionAnalyzeTypesImpl struct { + o bool +} + +func (o optionAnalyzeTypesImpl) apply(c *config) error { + c.AnalyzeTypes = o.o + return nil +} + +func (o optionAnalyzeTypesImpl) Equal(v optionAnalyzeTypesImpl) bool { + switch { + case !cmp.Equal(o.o, v.o): + return false + } + return true +} + +func (o optionAnalyzeTypesImpl) String() string { + name := "OptionAnalyzeTypes" + + // hack to avoid go vet error about passing a function to Sprintf + var value interface{} = o.o + return fmt.Sprintf("%s: %+v", name, value) +} + +// OptionAnalyzeTypes enable to match canonical names for types and interfaces using type info +func 
OptionAnalyzeTypes(o bool) Option { + return optionAnalyzeTypesImpl{ + o: o, } } diff --git a/vendor/github.com/ashanbrown/forbidigo/forbidigo/forbidigo.go b/vendor/github.com/ashanbrown/forbidigo/forbidigo/forbidigo.go index 17740faa7c..a7a3ab591e 100644 --- a/vendor/github.com/ashanbrown/forbidigo/forbidigo/forbidigo.go +++ b/vendor/github.com/ashanbrown/forbidigo/forbidigo/forbidigo.go @@ -1,4 +1,4 @@ -// forbidigo provides a linter for forbidding the use of specific identifiers +// Package forbidigo provides a linter for forbidding the use of specific identifiers package forbidigo import ( @@ -7,15 +7,15 @@ import ( "go/ast" "go/printer" "go/token" + "go/types" "log" "regexp" "strings" - - "github.com/pkg/errors" ) type Issue interface { Details() string + Pos() token.Pos Position() token.Position String() string } @@ -23,6 +23,7 @@ type Issue interface { type UsedIssue struct { identifier string pattern string + pos token.Pos position token.Position customMsg string } @@ -39,6 +40,10 @@ func (a UsedIssue) Position() token.Position { return a.position } +func (a UsedIssue) Pos() token.Pos { + return a.pos +} + func (a UsedIssue) String() string { return toString(a) } func toString(i UsedIssue) string { @@ -59,12 +64,13 @@ type config struct { // don't check inside Godoc examples (see https://blog.golang.org/examples) ExcludeGodocExamples bool `options:",true"` IgnorePermitDirectives bool // don't check for `permit` directives(for example, in favor of `nolint`) + AnalyzeTypes bool // enable to match canonical names for types and interfaces using type info } func NewLinter(patterns []string, options ...Option) (*Linter, error) { cfg, err := newConfig(options...) if err != nil { - return nil, errors.Wrapf(err, "failed to process options") + return nil, fmt.Errorf("failed to process options: %w", err) } if len(patterns) == 0 { @@ -91,19 +97,43 @@ type visitor struct { linter *Linter comments []*ast.CommentGroup - fset *token.FileSet - issues []Issue + runConfig RunConfig + issues []Issue } +// Deprecated: Run was the original entrypoint before RunWithConfig was introduced to support +// additional match patterns that need additional information. func (l *Linter) Run(fset *token.FileSet, nodes ...ast.Node) ([]Issue, error) { - var issues []Issue //nolint:prealloc // we don't know how many there will be + return l.RunWithConfig(RunConfig{Fset: fset}, nodes...) +} + +// RunConfig provides information that the linter needs for different kinds +// of match patterns. Ideally, all fields should get set. More fields may get +// added in the future as needed. +type RunConfig struct { + // FSet is required. + Fset *token.FileSet + + // TypesInfo is needed for expanding source code expressions. + // Nil disables that step, i.e. patterns match the literal source code. + TypesInfo *types.Info + + // DebugLog is used to print debug messages. May be nil. 
+ DebugLog func(format string, args ...interface{}) +} + +func (l *Linter) RunWithConfig(config RunConfig, nodes ...ast.Node) ([]Issue, error) { + if config.DebugLog == nil { + config.DebugLog = func(format string, args ...interface{}) {} + } + var issues []Issue for _, node := range nodes { var comments []*ast.CommentGroup isTestFile := false isWholeFileExample := false if file, ok := node.(*ast.File); ok { comments = file.Comments - fileName := fset.Position(file.Pos()).Filename + fileName := config.Fset.Position(file.Pos()).Filename isTestFile = strings.HasSuffix(fileName, "_test.go") // From https://blog.golang.org/examples, a "whole file example" is: @@ -139,7 +169,7 @@ func (l *Linter) Run(fset *token.FileSet, nodes ...ast.Node) ([]Issue, error) { cfg: l.cfg, isTestFile: isTestFile, linter: l, - fset: fset, + runConfig: config, comments: comments, } ast.Walk(&visitor, node) @@ -156,41 +186,210 @@ func (v *visitor) Visit(node ast.Node) ast.Visitor { if isGodocExample && v.cfg.ExcludeGodocExamples { return nil } - return v + ast.Walk(v, node.Type) + if node.Body != nil { + ast.Walk(v, node.Body) + } + return nil + // Ignore constant and type names + case *ast.ValueSpec: + // Look at only type and values for const and variable specs, and not names + if node.Type != nil { + ast.Walk(v, node.Type) + } + if node.Values != nil { + for _, x := range node.Values { + ast.Walk(v, x) + } + } + return nil + // Ignore import alias names + case *ast.ImportSpec: + return nil + // Ignore type names + case *ast.TypeSpec: + // Look at only type parameters for type spec + if node.TypeParams != nil { + ast.Walk(v, node.TypeParams) + } + ast.Walk(v, node.Type) + return nil + // Ignore field names + case *ast.Field: + if node.Type != nil { + ast.Walk(v, node.Type) + } + return nil + // The following two are handled below. case *ast.SelectorExpr: case *ast.Ident: + // Everything else isn't. default: return v } + + // The text as it appears in the source is always used because issues + // use that. It's used for matching unless usage of type information + // is enabled. + srcText := v.textFor(node) + matchTexts, pkgText := v.expandMatchText(node, srcText) + v.runConfig.DebugLog("%s: match %v, package %q", v.runConfig.Fset.Position(node.Pos()), matchTexts, pkgText) for _, p := range v.linter.patterns { - if p.pattern.MatchString(v.textFor(node)) && !v.permit(node) { + if p.matches(matchTexts) && + (p.Package == "" || p.pkgRe.MatchString(pkgText)) && + !v.permit(node) { v.issues = append(v.issues, UsedIssue{ - identifier: v.textFor(node), - pattern: p.pattern.String(), - position: v.fset.Position(node.Pos()), - customMsg: p.msg, + identifier: srcText, // Always report the expression as it appears in the source code. + pattern: p.re.String(), + pos: node.Pos(), + position: v.runConfig.Fset.Position(node.Pos()), + customMsg: p.Msg, }) } } + + // descend into the left-side of selectors + if selector, isSelector := node.(*ast.SelectorExpr); isSelector { + if _, leftSideIsIdentifier := selector.X.(*ast.Ident); !leftSideIsIdentifier { + return v + } + } + return nil } +// textFor returns the expression as it appears in the source code (for +// example, .). 
func (v *visitor) textFor(node ast.Node) string { buf := new(bytes.Buffer) - if err := printer.Fprint(buf, v.fset, node); err != nil { - log.Fatalf("ERROR: unable to print node at %s: %s", v.fset.Position(node.Pos()), err) + if err := printer.Fprint(buf, v.runConfig.Fset, node); err != nil { + log.Fatalf("ERROR: unable to print node at %s: %s", v.runConfig.Fset.Position(node.Pos()), err) } return buf.String() } +// expandMatchText expands the selector in a selector expression to the full package +// name and (for variables) the type: +// +// - example.com/some/pkg.Function +// - example.com/some/pkg.CustomType.Method +// +// It updates the text to match against and fills the package string if possible, +// otherwise it just returns. +func (v *visitor) expandMatchText(node ast.Node, srcText string) (matchTexts []string, pkgText string) { + // The text to match against is the literal source code if we cannot + // come up with something different. + matchTexts = []string{srcText} + + if !v.cfg.AnalyzeTypes || v.runConfig.TypesInfo == nil { + return matchTexts, pkgText + } + + location := v.runConfig.Fset.Position(node.Pos()) + + switch node := node.(type) { + case *ast.Ident: + if object, ok := v.runConfig.TypesInfo.Uses[node]; !ok { + // No information about the identifier. Should + // not happen, but perhaps there were compile + // errors? + v.runConfig.DebugLog("%s: unknown identifier %q", location, srcText) + } else if pkg := object.Pkg(); pkg != nil { + pkgText = pkg.Path() + // if this is a method, don't include the package name + isMethod := false + if signature, ok := object.Type().(*types.Signature); ok && signature.Recv() != nil { + isMethod = true + } + v.runConfig.DebugLog("%s: identifier: %q -> %q in package %q", location, srcText, matchTexts, pkgText) + // match either with or without package name + if !isMethod { + matchTexts = []string{pkg.Name() + "." + srcText, srcText} + } + } else { + v.runConfig.DebugLog("%s: identifier: %q -> %q without package", location, srcText, matchTexts) + } + case *ast.SelectorExpr: + selector := node.X + field := node.Sel.Name + + // If we are lucky, the entire selector expression has a known + // type. We don't care about the value. + selectorText := v.textFor(node) + if typeAndValue, ok := v.runConfig.TypesInfo.Types[selector]; ok { + m, p, ok := typeNameWithPackage(typeAndValue.Type) + if !ok { + v.runConfig.DebugLog("%s: selector %q with supported type %T", location, selectorText, typeAndValue.Type) + } + matchTexts = []string{m + "." + field} + pkgText = p + v.runConfig.DebugLog("%s: selector %q with supported type %q: %q -> %q, package %q", location, selectorText, typeAndValue.Type.String(), srcText, matchTexts, pkgText) + } + // Some expressions need special treatment. + switch selector := selector.(type) { + case *ast.Ident: + if object, hasUses := v.runConfig.TypesInfo.Uses[selector]; hasUses { + switch object := object.(type) { + case *types.PkgName: + pkgText = object.Imported().Path() + matchTexts = []string{object.Imported().Name() + "." + field} + v.runConfig.DebugLog("%s: selector %q is package: %q -> %q, package %q", location, selectorText, srcText, matchTexts, pkgText) + case *types.Var: + if typeName, packageName, ok := typeNameWithPackage(object.Type()); ok { + matchTexts = []string{typeName + "." 
+ field} + pkgText = packageName + v.runConfig.DebugLog("%s: selector %q is variable of type %q: %q -> %q, package %q", location, selectorText, object.Type().String(), srcText, matchTexts, pkgText) + } else { + v.runConfig.DebugLog("%s: selector %q is variable with unsupported type %T", location, selectorText, object.Type()) + } + default: + // Something else? + v.runConfig.DebugLog("%s: selector %q is identifier with unsupported type %T", location, selectorText, object) + } + } else { + // No information about the identifier. Should + // not happen, but perhaps there were compile + // errors? + v.runConfig.DebugLog("%s: unknown selector identifier %q", location, selectorText) + } + default: + v.runConfig.DebugLog("%s: selector %q of unsupported type %T", location, selectorText, selector) + } + default: + v.runConfig.DebugLog("%s: unsupported type %T", location, node) + } + return matchTexts, pkgText +} + +// typeNameWithPackage tries to determine `.` and the full +// package path. This only needs to work for types of a selector in a selector +// expression. +func typeNameWithPackage(t types.Type) (typeName, packagePath string, ok bool) { + if ptr, ok := t.(*types.Pointer); ok { + t = ptr.Elem() + } + + switch t := t.(type) { + case *types.Named: + obj := t.Obj() + pkg := obj.Pkg() + if pkg == nil { + return "", "", false + } + return pkg.Name() + "." + obj.Name(), pkg.Path(), true + default: + return "", "", false + } +} + func (v *visitor) permit(node ast.Node) bool { if v.cfg.IgnorePermitDirectives { return false } - nodePos := v.fset.Position(node.Pos()) - var nolint = regexp.MustCompile(fmt.Sprintf(`^//\s?permit:%s\b`, regexp.QuoteMeta(v.textFor(node)))) + nodePos := v.runConfig.Fset.Position(node.Pos()) + nolint := regexp.MustCompile(fmt.Sprintf(`^//\s?permit:%s\b`, regexp.QuoteMeta(v.textFor(node)))) for _, c := range v.comments { - commentPos := v.fset.Position(c.Pos()) + commentPos := v.runConfig.Fset.Position(c.Pos()) if commentPos.Line == nodePos.Line && len(c.List) > 0 && nolint.MatchString(c.List[0].Text) { return true } diff --git a/vendor/github.com/ashanbrown/forbidigo/forbidigo/patterns.go b/vendor/github.com/ashanbrown/forbidigo/forbidigo/patterns.go index c236488225..2692dcd24f 100644 --- a/vendor/github.com/ashanbrown/forbidigo/forbidigo/patterns.go +++ b/vendor/github.com/ashanbrown/forbidigo/forbidigo/patterns.go @@ -5,39 +5,123 @@ import ( "regexp" "regexp/syntax" "strings" + + "gopkg.in/yaml.v2" ) +// pattern matches code that is not supposed to be used. type pattern struct { - pattern *regexp.Regexp - msg string + re, pkgRe *regexp.Regexp + + // Pattern is the regular expression string that is used for matching. + // It gets matched against the literal source code text or the expanded + // text, depending on the mode in which the analyzer runs. + Pattern string `yaml:"p"` + + // Package is a regular expression for the full package path of + // an imported item. Ignored unless the analyzer is configured to + // determine that information. + Package string `yaml:"pkg,omitempty"` + + // Msg gets printed in addition to the normal message if a match is + // found. + Msg string `yaml:"msg,omitempty"` +} + +// A yamlPattern pattern in a YAML string may be represented either by a string +// (the traditional regular expression syntax) or a struct (for more complex +// patterns). +type yamlPattern pattern + +func (p *yamlPattern) UnmarshalYAML(unmarshal func(interface{}) error) error { + // Try struct first. 
It's unlikely that a regular expression string + // is valid YAML for a struct. + var ptrn pattern + if err := unmarshal(&ptrn); err != nil { + errStr := err.Error() + // Didn't work, try plain string. + var ptrn string + if err := unmarshal(&ptrn); err != nil { + return fmt.Errorf("pattern is neither a regular expression string (%s) nor a Pattern struct (%s)", err.Error(), errStr) + } + p.Pattern = ptrn + } else { + *p = yamlPattern(ptrn) + } + return ((*pattern)(p)).validate() } +var _ yaml.Unmarshaler = &yamlPattern{} + +// parse accepts a regular expression or, if the string starts with { or contains a line break, a +// JSON or YAML representation of a Pattern. func parse(ptrn string) (*pattern, error) { - ptrnRe, err := regexp.Compile(ptrn) + pattern := &pattern{} + + if strings.HasPrefix(strings.TrimSpace(ptrn), "{") || + strings.Contains(ptrn, "\n") { + // Embedded JSON or YAML. We can decode both with the YAML decoder. + if err := yaml.UnmarshalStrict([]byte(ptrn), pattern); err != nil { + return nil, fmt.Errorf("parsing as JSON or YAML failed: %v", err) + } + } else { + pattern.Pattern = ptrn + } + + if err := pattern.validate(); err != nil { + return nil, err + } + return pattern, nil +} + +func (p *pattern) validate() error { + ptrnRe, err := regexp.Compile(p.Pattern) if err != nil { - return nil, fmt.Errorf("unable to compile pattern `%s`: %s", ptrn, err) + return fmt.Errorf("unable to compile source code pattern `%s`: %s", p.Pattern, err) } - re, err := syntax.Parse(ptrn, syntax.Perl) + re, err := syntax.Parse(p.Pattern, syntax.Perl) if err != nil { - return nil, fmt.Errorf("unable to parse pattern `%s`: %s", ptrn, err) + return fmt.Errorf("unable to parse source code pattern `%s`: %s", p.Pattern, err) } msg := extractComment(re) - return &pattern{pattern: ptrnRe, msg: msg}, nil + if msg != "" { + p.Msg = msg + } + p.re = ptrnRe + + if p.Package != "" { + pkgRe, err := regexp.Compile(p.Package) + if err != nil { + return fmt.Errorf("unable to compile package pattern `%s`: %s", p.Package, err) + } + p.pkgRe = pkgRe + } + + return nil +} + +func (p *pattern) matches(matchTexts []string) bool { + for _, text := range matchTexts { + if p.re.MatchString(text) { + return true + } + } + return false } // Traverse the leaf submatches in the regex tree and extract a comment, if any // is present. 
func extractComment(re *syntax.Regexp) string { for _, sub := range re.Sub { + subStr := sub.String() + if strings.HasPrefix(subStr, "#") { + return strings.TrimSpace(strings.TrimPrefix(sub.String(), "#")) + } if len(sub.Sub) > 0 { if comment := extractComment(sub); comment != "" { return comment } } - subStr := sub.String() - if strings.HasPrefix(subStr, "#") { - return strings.TrimSpace(strings.TrimPrefix(subStr, "#")) - } } return "" } diff --git a/vendor/github.com/bkielbasa/cyclop/pkg/analyzer/analyzer.go b/vendor/github.com/bkielbasa/cyclop/pkg/analyzer/analyzer.go index 9b2801352d..eaf408d6f3 100644 --- a/vendor/github.com/bkielbasa/cyclop/pkg/analyzer/analyzer.go +++ b/vendor/github.com/bkielbasa/cyclop/pkg/analyzer/analyzer.go @@ -9,13 +9,22 @@ import ( "golang.org/x/tools/go/analysis" ) +//nolint:gochecknoglobals +var flagSet flag.FlagSet + +//nolint:gochecknoglobals var ( - flagSet flag.FlagSet + maxComplexity int + packageAverage float64 + skipTests bool ) -var maxComplexity int -var packageAverage float64 -var skipTests bool +//nolint:gochecknoinits +func init() { + flagSet.IntVar(&maxComplexity, "maxComplexity", 10, "max complexity the function can have") + flagSet.Float64Var(&packageAverage, "packageAverage", 0, "max average complexity in package") + flagSet.BoolVar(&skipTests, "skipTests", false, "should the linter execute on test files as well") +} func NewAnalyzer() *analysis.Analyzer { return &analysis.Analyzer{ @@ -26,12 +35,6 @@ func NewAnalyzer() *analysis.Analyzer { } } -func init() { - flagSet.IntVar(&maxComplexity, "maxComplexity", 10, "max complexity the function can have") - flagSet.Float64Var(&packageAverage, "packageAverage", 0, "max avarage complexity in package") - flagSet.BoolVar(&skipTests, "skipTests", false, "should the linter execute on test files as well") -} - func run(pass *analysis.Pass) (interface{}, error) { var sum, count float64 var pkgName string @@ -70,7 +73,7 @@ func run(pass *analysis.Pass) (interface{}, error) { if packageAverage > 0 { avg := sum / count if avg > packageAverage { - pass.Reportf(pkgPos, "the avarage complexity for the package %s is %f, max is %f", pkgName, avg, packageAverage) + pass.Reportf(pkgPos, "the average complexity for the package %s is %f, max is %f", pkgName, avg, packageAverage) } } diff --git a/vendor/github.com/blizzy78/varnamelen/.editorconfig b/vendor/github.com/blizzy78/varnamelen/.editorconfig new file mode 100644 index 0000000000..7b64615455 --- /dev/null +++ b/vendor/github.com/blizzy78/varnamelen/.editorconfig @@ -0,0 +1,13 @@ +root = true + +[**] +charset = utf-8 +end_of_line = lf +insert_final_newline = true +indent_style = tab +indent_size = 4 +trim_trailing_whitespace = true + +[**/*.yml] +indent_style = space +indent_size = 2 diff --git a/vendor/github.com/blizzy78/varnamelen/.gitignore b/vendor/github.com/blizzy78/varnamelen/.gitignore new file mode 100644 index 0000000000..1a4a71669f --- /dev/null +++ b/vendor/github.com/blizzy78/varnamelen/.gitignore @@ -0,0 +1 @@ +/cmd/__debug_bin diff --git a/vendor/github.com/blizzy78/varnamelen/.golangci.yml b/vendor/github.com/blizzy78/varnamelen/.golangci.yml new file mode 100644 index 0000000000..566572363c --- /dev/null +++ b/vendor/github.com/blizzy78/varnamelen/.golangci.yml @@ -0,0 +1,70 @@ +# https://github.com/golangci/golangci-lint/issues/456#issuecomment-617470264 +issues: + exclude-use-default: false + exclude: + # errcheck: Almost all programs ignore errors on these functions and in most cases it's ok + - Error return value of 
.((os\.)?std(out|err)\..*|.*print(f|ln)?|os\.(Un)?Setenv). is not checked + # golint: False positive when tests are defined in package 'test' + - func name will be used as test\.Test.* by other packages, and that stutters; consider calling this + # gosec: Duplicated errcheck checks + - G104 + # gosec: Too many issues in popular repos + - (Expect directory permissions to be 0750 or less|Expect file permissions to be 0600 or less) + # gosec: False positive is triggered by 'src, err := ioutil.ReadFile(filename)' + - Potential file inclusion via variable + +linters: + enable: + - asciicheck + - bodyclose + - cyclop + - durationcheck + - errname + - errorlint + - exportloopref + - forcetypeassert + - gocognit + - gocritic + - goerr113 + - gofmt + - goprintffuncname + - gosec + - ifshort + - nakedret + - nestif + - nilerr + - noctx + - nolintlint + - prealloc + - predeclared + - promlinter + - revive + - rowserrcheck + - sqlclosecheck + - stylecheck + - thelper + - tparallel + - unconvert + - unparam + - varnamelen + - wastedassign + - wrapcheck + - wsl + +linters-settings: + gocognit: + min-complexity: 15 + nakedret: + max-func-lines: 0 + nolintlint: + allow-unused: false + allow-leading-space: false + require-explanation: true + require-specific: true + unused: + go: 1.16 + varnamelen: + check-return: true + ignore-type-assert-ok: true + ignore-map-index-ok: true + ignore-chan-recv-ok: true diff --git a/vendor/github.com/blizzy78/varnamelen/LICENSE b/vendor/github.com/blizzy78/varnamelen/LICENSE new file mode 100644 index 0000000000..c45156a2c3 --- /dev/null +++ b/vendor/github.com/blizzy78/varnamelen/LICENSE @@ -0,0 +1,18 @@ +Copyright 2021-2022 Maik Schreiber + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/blizzy78/varnamelen/README.md b/vendor/github.com/blizzy78/varnamelen/README.md new file mode 100644 index 0000000000..02131ff298 --- /dev/null +++ b/vendor/github.com/blizzy78/varnamelen/README.md @@ -0,0 +1,155 @@ +[![GoDoc](https://pkg.go.dev/badge/github.com/blizzy78/varnamelen)](https://pkg.go.dev/github.com/blizzy78/varnamelen) + + +varnamelen +========== + +A Go Analyzer that checks that the length of a variable's name matches its usage scope: + +Variables with short names can be hard to use if the variable is used over a longer span of lines of code. +A longer variable name may be easier to comprehend. + +The analyzer also checks method receiver names, named return values, and type parameter names. 
+ +Arbitrary declarations such as `f *foo` can be ignored, as well as idiomatic `ok` variables. +Conventional Go parameters such as `ctx context.Context` or `t *testing.T` will always be ignored. + +**Example output** + +``` +test.go:4:2: variable name 'x' is too short for the scope of its usage (varnamelen) + x := 123 + ^ +test.go:6:2: variable name 'i' is too short for the scope of its usage (varnamelen) + i := 10 + ^ +``` + + +golangci-lint Integration +------------------------- + +varnamelen is integrated into [golangci-lint] (though it may not always be the most recent version.) + +Example configuration for golangci-lint: + +```yaml +linters-settings: + varnamelen: + # The longest distance, in source lines, that is being considered a "small scope." (defaults to 5) + # Variables used in at most this many lines will be ignored. + max-distance: 5 + # The minimum length of a variable's name that is considered "long." (defaults to 3) + # Variable names that are at least this long will be ignored. + min-name-length: 3 + # Check method receiver. (defaults to false) + check-receiver: false + # Check named return values. (defaults to false) + check-return: false + # Check type parameters. (defaults to false) + check-type-param: false + # Ignore "ok" variables that hold the bool return value of a type assertion. (defaults to false) + ignore-type-assert-ok: false + # Ignore "ok" variables that hold the bool return value of a map index. (defaults to false) + ignore-map-index-ok: false + # Ignore "ok" variables that hold the bool return value of a channel receive. (defaults to false) + ignore-chan-recv-ok: false + # Optional list of variable names that should be ignored completely. (defaults to empty list) + ignore-names: + - err + # Optional list of variable declarations that should be ignored completely. (defaults to empty list) + # Entries must be in one of the following forms (see below for examples): + # - for variables, parameters, or named return values: + # - + # - * + # - for type parameters: + # - + # - for constants: + # - const + ignore-decls: + - c echo.Context + - t testing.T + - f *foo.Bar + - e error + - i int + - const C + - T any +``` + + +Standalone Usage +---------------- + +The `cmd/` folder provides a standalone command line utility. You can build it like this: + +``` +go build -o varnamelen ./cmd/ +``` + +**Usage** + +``` +varnamelen: checks that the length of a variable's name matches its scope + +Usage: varnamelen [-flag] [package] + +A variable with a short name can be hard to use if the variable is used +over a longer span of lines of code. A longer variable name may be easier +to comprehend. 
+ +Flags: + -V print version and exit + -all + no effect (deprecated) + -c int + display offending line with this many lines of context (default -1) + -checkReceiver + check method receiver names + -checkReturn + check named return values + -checkTypeParam + check type parameter names + -cpuprofile string + write CPU profile to this file + -debug string + debug flags, any subset of "fpstv" + -fix + apply all suggested fixes + -flags + print analyzer flags in JSON + -ignoreChanRecvOk + ignore 'ok' variables that hold the bool return value of a channel receive + -ignoreDecls value + comma-separated list of ignored variable declarations + -ignoreMapIndexOk + ignore 'ok' variables that hold the bool return value of a map index + -ignoreNames value + comma-separated list of ignored variable names + -ignoreTypeAssertOk + ignore 'ok' variables that hold the bool return value of a type assertion + -json + emit JSON output + -maxDistance int + maximum number of lines of variable usage scope considered 'short' (default 5) + -memprofile string + write memory profile to this file + -minNameLength int + minimum length of variable name considered 'long' (default 3) + -source + no effect (deprecated) + -tags string + no effect (deprecated) + -trace string + write trace log to this file + -v no effect (deprecated) +``` + + +License +------- + +This package is licensed under the MIT license. + + + +[golangci-lint]: https://github.com/golangci/golangci-lint diff --git a/vendor/github.com/blizzy78/varnamelen/doc.go b/vendor/github.com/blizzy78/varnamelen/doc.go new file mode 100644 index 0000000000..d63c71cfb7 --- /dev/null +++ b/vendor/github.com/blizzy78/varnamelen/doc.go @@ -0,0 +1,3 @@ +// Package varnamelen implements an analyzer checking that the length of a variable's name +// matches its usage scope. +package varnamelen diff --git a/vendor/github.com/blizzy78/varnamelen/flags.go b/vendor/github.com/blizzy78/varnamelen/flags.go new file mode 100644 index 0000000000..ee80774f96 --- /dev/null +++ b/vendor/github.com/blizzy78/varnamelen/flags.go @@ -0,0 +1,109 @@ +package varnamelen + +import "strings" + +// stringsValue is the value of a list-of-strings flag. +type stringsValue struct { + Values []string +} + +// declarationsValue is the value of a list-of-declarations flag. +type declarationsValue struct { + Values []declaration +} + +// Set implements Value. +func (sv *stringsValue) Set(values string) error { + if strings.TrimSpace(values) == "" { + sv.Values = nil + return nil + } + + parts := strings.Split(values, ",") + + sv.Values = make([]string, len(parts)) + + for i, part := range parts { + sv.Values[i] = strings.TrimSpace(part) + } + + return nil +} + +// String implements Value. +func (sv *stringsValue) String() string { + return strings.Join(sv.Values, ",") +} + +// contains returns true if sv contains s. +func (sv *stringsValue) contains(s string) bool { + for _, v := range sv.Values { + if v == s { + return true + } + } + + return false +} + +// Set implements Value. +func (dv *declarationsValue) Set(values string) error { + if strings.TrimSpace(values) == "" { + dv.Values = nil + return nil + } + + parts := strings.Split(values, ",") + + dv.Values = make([]declaration, len(parts)) + + for idx, part := range parts { + dv.Values[idx] = parseDeclaration(strings.TrimSpace(part)) + } + + return nil +} + +// String implements Value. 
+func (dv *declarationsValue) String() string { + parts := make([]string, len(dv.Values)) + + for idx, val := range dv.Values { + parts[idx] = val.name + " " + val.typ + } + + return strings.Join(parts, ",") +} + +// matchVariable returns true if vari matches any of the declarations in dv. +func (dv *declarationsValue) matchVariable(vari variable) bool { + for _, decl := range dv.Values { + if vari.match(decl) { + return true + } + } + + return false +} + +// matchParameter returns true if param matches any of the declarations in dv. +func (dv *declarationsValue) matchParameter(param parameter) bool { + for _, decl := range dv.Values { + if param.match(decl) { + return true + } + } + + return false +} + +// matchParameter returns true if param matches any of the declarations in dv. +func (dv *declarationsValue) matchTypeParameter(param typeParam) bool { + for _, decl := range dv.Values { + if param.match(decl) { + return true + } + } + + return false +} diff --git a/vendor/github.com/blizzy78/varnamelen/typeparam.go b/vendor/github.com/blizzy78/varnamelen/typeparam.go new file mode 100644 index 0000000000..a1f3de99a3 --- /dev/null +++ b/vendor/github.com/blizzy78/varnamelen/typeparam.go @@ -0,0 +1,35 @@ +//go:build go1.18 +// +build go1.18 + +package varnamelen + +import "go/ast" + +// isTypeParam returns true if field is a type parameter of any of the given funcs. +func isTypeParam(field *ast.Field, funcs []*ast.FuncDecl, funcLits []*ast.FuncLit) bool { //nolint:gocognit // it's not that complicated + for _, f := range funcs { + if f.Type.TypeParams == nil { + continue + } + + for _, p := range f.Type.TypeParams.List { + if p == field { + return true + } + } + } + + for _, f := range funcLits { + if f.Type.TypeParams == nil { + continue + } + + for _, p := range f.Type.TypeParams.List { + if p == field { + return true + } + } + } + + return false +} diff --git a/vendor/github.com/blizzy78/varnamelen/typeparam_go1.16.go b/vendor/github.com/blizzy78/varnamelen/typeparam_go1.16.go new file mode 100644 index 0000000000..7856651b90 --- /dev/null +++ b/vendor/github.com/blizzy78/varnamelen/typeparam_go1.16.go @@ -0,0 +1,11 @@ +//go:build (go1.16 && !go1.18) || (go1.17 && !go1.18) +// +build go1.16,!go1.18 go1.17,!go1.18 + +package varnamelen + +import "go/ast" + +// isTypeParam returns true if field is a type parameter of any of the given funcs. +func isTypeParam(_ *ast.Field, _ []*ast.FuncDecl, _ []*ast.FuncLit) bool { + return false +} diff --git a/vendor/github.com/blizzy78/varnamelen/varnamelen.code-workspace b/vendor/github.com/blizzy78/varnamelen/varnamelen.code-workspace new file mode 100644 index 0000000000..68c485c968 --- /dev/null +++ b/vendor/github.com/blizzy78/varnamelen/varnamelen.code-workspace @@ -0,0 +1,13 @@ +{ + "folders": [ + { + "path": "." + } + ], + "extensions": { + "recommendations": [ + "EditorConfig.EditorConfig", + "golang.go" + ] + } +} diff --git a/vendor/github.com/blizzy78/varnamelen/varnamelen.go b/vendor/github.com/blizzy78/varnamelen/varnamelen.go new file mode 100644 index 0000000000..a5b9603114 --- /dev/null +++ b/vendor/github.com/blizzy78/varnamelen/varnamelen.go @@ -0,0 +1,891 @@ +package varnamelen + +import ( + "go/ast" + "go/token" + "go/types" + "sort" + "strings" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" +) + +// varNameLen is an analyzer that checks that the length of a variable's name matches its usage scope. 
+// It will create a report for a variable's assignment if that variable has a short name, but its +// usage scope is not considered "small." +type varNameLen struct { + // maxDistance is the longest distance, in source lines, that is being considered a "small scope." + maxDistance int + + // minNameLength is the minimum length of a variable's name that is considered "long." + minNameLength int + + // ignoreNames is an optional list of variable names that should be ignored completely. + ignoreNames stringsValue + + // checkReceiver determines whether method receivers should be checked. + checkReceiver bool + + // checkReturn determines whether named return values should be checked. + checkReturn bool + + // ignoreTypeAssertOk determines whether "ok" variables that hold the bool return value of a type assertion should be ignored. + ignoreTypeAssertOk bool + + // ignoreMapIndexOk determines whether "ok" variables that hold the bool return value of a map index should be ignored. + ignoreMapIndexOk bool + + // ignoreChannelReceiveOk determines whether "ok" variables that hold the bool return value of a channel receive should be ignored. + ignoreChannelReceiveOk bool + + // ignoreDeclarations is an optional list of variable declarations that should be ignored completely. + ignoreDeclarations declarationsValue + + // checkTypeParameters determines whether type parameters should be checked. + checkTypeParameters bool +} + +// variable represents a declared variable. +type variable struct { + // name is the name of the variable. + name string + + // constant is true if the variable is actually a constant. + constant bool + + // typ is the type of the variable. + typ string + + // assign is the assign statement that declares the variable. + assign *ast.AssignStmt + + // valueSpec is the value specification that declares the variable. + valueSpec *ast.ValueSpec +} + +// parameter represents a declared function or method parameter. +type parameter struct { + // name is the name of the parameter. + name string + + // typ is the type of the parameter. + typ string + + // field is the declaration of the parameter. + field *ast.Field +} + +// typeParam represents a declared type parameter. +type typeParam struct { + // name is the name of the type parameter. + name string + + // typ is the type of the type parameter. + typ string + + // field is the field that declares the type parameter. + field *ast.Field +} + +// declaration is a variable declaration. +type declaration struct { + // name is the name of the variable. + name string + + // constant is true if the variable is actually a constant. + constant bool + + // typ is the type of the variable. Not used for constants. + typ string +} + +// importDeclaration is an import declaration. +type importDeclaration struct { + // name is the short name or alias for the imported package. This is either the package's default name, + // or the alias specified in the import statement. + // Not used if self is true. + name string + + // path is the full path to the imported package. + path string + + // self is true when this is an implicit import declaration for the current package. + self bool +} + +const ( + // defaultMaxDistance is the default value for the maximum distance between the declaration of a variable and its usage + // that is considered a "small scope." + defaultMaxDistance = 5 + + // defaultMinNameLength is the default value for the minimum length of a variable's name that is considered "long." 
+ defaultMinNameLength = 3 +) + +// conventionalDecls is a list of conventional variable declarations. +var conventionalDecls = []declaration{ + parseDeclaration("ctx context.Context"), + + parseDeclaration("b *testing.B"), + parseDeclaration("f *testing.F"), + parseDeclaration("m *testing.M"), + parseDeclaration("pb *testing.PB"), + parseDeclaration("t *testing.T"), + parseDeclaration("tb testing.TB"), +} + +// NewAnalyzer returns a new analyzer. +func NewAnalyzer() *analysis.Analyzer { + vnl := varNameLen{ + maxDistance: defaultMaxDistance, + minNameLength: defaultMinNameLength, + ignoreNames: stringsValue{}, + ignoreDeclarations: declarationsValue{}, + } + + analyzer := analysis.Analyzer{ + Name: "varnamelen", + Doc: "checks that the length of a variable's name matches its scope\n\n" + + "A variable with a short name can be hard to use if the variable is used\n" + + "over a longer span of lines of code. A longer variable name may be easier\n" + + "to comprehend.", + + Run: func(pass *analysis.Pass) (interface{}, error) { + (&vnl).run(pass) + return nil, nil + }, + + Requires: []*analysis.Analyzer{ + inspect.Analyzer, + }, + } + + analyzer.Flags.IntVar(&vnl.maxDistance, "maxDistance", defaultMaxDistance, "maximum number of lines of variable usage scope considered 'short'") + analyzer.Flags.IntVar(&vnl.minNameLength, "minNameLength", defaultMinNameLength, "minimum length of variable name considered 'long'") + analyzer.Flags.Var(&vnl.ignoreNames, "ignoreNames", "comma-separated list of ignored variable names") + analyzer.Flags.BoolVar(&vnl.checkReceiver, "checkReceiver", false, "check method receivers") + analyzer.Flags.BoolVar(&vnl.checkReturn, "checkReturn", false, "check named return values") + analyzer.Flags.BoolVar(&vnl.ignoreTypeAssertOk, "ignoreTypeAssertOk", false, "ignore 'ok' variables that hold the bool return value of a type assertion") + analyzer.Flags.BoolVar(&vnl.ignoreMapIndexOk, "ignoreMapIndexOk", false, "ignore 'ok' variables that hold the bool return value of a map index") + analyzer.Flags.BoolVar(&vnl.ignoreChannelReceiveOk, "ignoreChanRecvOk", false, "ignore 'ok' variables that hold the bool return value of a channel receive") + analyzer.Flags.Var(&vnl.ignoreDeclarations, "ignoreDecls", "comma-separated list of ignored variable declarations") + analyzer.Flags.BoolVar(&vnl.checkTypeParameters, "checkTypeParam", false, "check type parameters") + + return &analyzer +} + +// Run applies v to a package, according to pass. +func (v *varNameLen) run(pass *analysis.Pass) { + varToDist, paramToDist, returnToDist, typeParamToDist := v.distances(pass) + + v.checkVariables(pass, varToDist) + v.checkParams(pass, paramToDist) + v.checkReturns(pass, returnToDist) + v.checkTypeParams(pass, typeParamToDist) +} + +// checkVariables applies v to variables in varToDist. 
+func (v *varNameLen) checkVariables(pass *analysis.Pass, varToDist map[variable]int) { //nolint:gocognit // it's not that complicated + for variable, dist := range varToDist { + if v.ignoreNames.contains(variable.name) { + continue + } + + if v.ignoreDeclarations.matchVariable(variable) { + continue + } + + if v.checkNameAndDistance(variable.name, dist) { + continue + } + + if v.checkTypeAssertOk(variable) { + continue + } + + if v.checkMapIndexOk(variable) { + continue + } + + if v.checkChannelReceiveOk(variable) { + continue + } + + if variable.isConventional() { + continue + } + + if variable.assign != nil { + pass.Reportf(variable.assign.Pos(), "%s name '%s' is too short for the scope of its usage", variable.kindName(), variable.name) + continue + } + + pass.Reportf(variable.valueSpec.Pos(), "%s name '%s' is too short for the scope of its usage", variable.kindName(), variable.name) + } +} + +// checkParams applies v to parameters in paramToDist. +func (v *varNameLen) checkParams(pass *analysis.Pass, paramToDist map[parameter]int) { + for param, dist := range paramToDist { + if v.ignoreNames.contains(param.name) { + continue + } + + if v.ignoreDeclarations.matchParameter(param) { + continue + } + + if v.checkNameAndDistance(param.name, dist) { + continue + } + + if param.isConventional() { + continue + } + + pass.Reportf(param.field.Pos(), "parameter name '%s' is too short for the scope of its usage", param.name) + } +} + +// checkReturns applies v to named return values in returnToDist. +func (v *varNameLen) checkReturns(pass *analysis.Pass, returnToDist map[parameter]int) { + for returnValue, dist := range returnToDist { + if v.ignoreNames.contains(returnValue.name) { + continue + } + + if v.ignoreDeclarations.matchParameter(returnValue) { + continue + } + + if v.checkNameAndDistance(returnValue.name, dist) { + continue + } + + pass.Reportf(returnValue.field.Pos(), "return value name '%s' is too short for the scope of its usage", returnValue.name) + } +} + +// checkTypeParams applies v to type parameters in paramToDist. +func (v *varNameLen) checkTypeParams(pass *analysis.Pass, paramToDist map[typeParam]int) { + for param, dist := range paramToDist { + if v.ignoreNames.contains(param.name) { + continue + } + + if v.ignoreDeclarations.matchTypeParameter(param) { + continue + } + + if v.checkNameAndDistance(param.name, dist) { + continue + } + + pass.Reportf(param.field.Pos(), "type parameter name '%s' is too short for the scope of its usage", param.name) + } +} + +// checkNameAndDistance returns true if name or dist are considered "short". +func (v *varNameLen) checkNameAndDistance(name string, dist int) bool { + if len(name) >= v.minNameLength { + return true + } + + if dist <= v.maxDistance { + return true + } + + return false +} + +// checkTypeAssertOk returns true if "ok" variables that hold the bool return value of a type assertion +// should be ignored, and if vari is such a variable. +func (v *varNameLen) checkTypeAssertOk(vari variable) bool { + return v.ignoreTypeAssertOk && vari.isTypeAssertOk() +} + +// checkMapIndexOk returns true if "ok" variables that hold the bool return value of a map index +// should be ignored, and if vari is such a variable. +func (v *varNameLen) checkMapIndexOk(vari variable) bool { + return v.ignoreMapIndexOk && vari.isMapIndexOk() +} + +// checkChannelReceiveOk returns true if "ok" variables that hold the bool return value of a channel receive +// should be ignored, and if vari is such a variable. 
+func (v *varNameLen) checkChannelReceiveOk(vari variable) bool { + return v.ignoreChannelReceiveOk && vari.isChannelReceiveOk() +} + +// distances returns maps of variables, parameters, return values, and type parameters mapping to their longest usage distances. +func (v *varNameLen) distances(pass *analysis.Pass) (map[variable]int, map[parameter]int, map[parameter]int, map[typeParam]int) { + assignIdents, valueSpecIdents, paramIdents, returnIdents, typeParamIdents, imports, switches := v.identsAndImports(pass) + + varToDist := map[variable]int{} + + for _, ident := range assignIdents { + assign := ident.Obj.Decl.(*ast.AssignStmt) //nolint:forcetypeassert // check is done in identsAndImports + + var typ string + if isTypeSwitchAssign(assign, switches) { + typ = "" + } else { + typ = shortTypeName(pass.TypesInfo.TypeOf(ident), imports) + } + + variable := variable{ + name: ident.Name, + typ: typ, + assign: assign, + } + + useLine := pass.Fset.Position(ident.NamePos).Line + declLine := pass.Fset.Position(assign.Pos()).Line + varToDist[variable] = useLine - declLine + } + + for _, ident := range valueSpecIdents { + valueSpec := ident.Obj.Decl.(*ast.ValueSpec) //nolint:forcetypeassert // check is done in identsAndImports + + variable := variable{ + name: ident.Name, + constant: ident.Obj.Kind == ast.Con, + typ: shortTypeName(pass.TypesInfo.TypeOf(ident), imports), + valueSpec: valueSpec, + } + + useLine := pass.Fset.Position(ident.NamePos).Line + declLine := pass.Fset.Position(valueSpec.Pos()).Line + varToDist[variable] = useLine - declLine + } + + paramToDist := map[parameter]int{} + + for _, ident := range paramIdents { + field := ident.Obj.Decl.(*ast.Field) //nolint:forcetypeassert // check is done in identsAndImports + + param := parameter{ + name: ident.Name, + typ: shortTypeName(pass.TypesInfo.TypeOf(field.Type), imports), + field: field, + } + + useLine := pass.Fset.Position(ident.NamePos).Line + declLine := pass.Fset.Position(field.Pos()).Line + paramToDist[param] = useLine - declLine + } + + returnToDist := map[parameter]int{} + + for _, ident := range returnIdents { + field := ident.Obj.Decl.(*ast.Field) //nolint:forcetypeassert // check is done in identsAndImports + + param := parameter{ + name: ident.Name, + typ: shortTypeName(pass.TypesInfo.TypeOf(ident), imports), + field: field, + } + + useLine := pass.Fset.Position(ident.NamePos).Line + declLine := pass.Fset.Position(field.Pos()).Line + returnToDist[param] = useLine - declLine + } + + typeParamToDist := map[typeParam]int{} + + for _, ident := range typeParamIdents { + field := ident.Obj.Decl.(*ast.Field) //nolint:forcetypeassert // check is done in identsAndImports + + param := typeParam{ + name: ident.Name, + typ: shortTypeName(pass.TypesInfo.TypeOf(field.Type), imports), + field: field, + } + + useLine := pass.Fset.Position(ident.NamePos).Line + declLine := pass.Fset.Position(field.Pos()).Line + typeParamToDist[param] = useLine - declLine + } + + return varToDist, paramToDist, returnToDist, typeParamToDist +} + +// identsAndImports returns Idents referencing assign statements, value specifications, parameters, +// return values, and type parameters, respectively, as well as import declarations, and type switch statements. 
+func (v *varNameLen) identsAndImports(pass *analysis.Pass) ([]*ast.Ident, []*ast.Ident, []*ast.Ident, []*ast.Ident, //nolint:gocognit,cyclop // this is complex stuff + []*ast.Ident, []importDeclaration, []*ast.TypeSwitchStmt) { + inspector := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) //nolint:forcetypeassert // inspect.Analyzer always returns *inspector.Inspector + + filter := []ast.Node{ + (*ast.ImportSpec)(nil), + (*ast.FuncDecl)(nil), + (*ast.FuncLit)(nil), + (*ast.CompositeLit)(nil), + (*ast.TypeSwitchStmt)(nil), + (*ast.Ident)(nil), + } + + assignIdents := []*ast.Ident{} + valueSpecIdents := []*ast.Ident{} + paramIdents := []*ast.Ident{} + returnIdents := []*ast.Ident{} + typeParamIdents := []*ast.Ident{} + imports := []importDeclaration{} + switches := []*ast.TypeSwitchStmt{} + + funcs := []*ast.FuncDecl{} + methods := []*ast.FuncDecl{} + funcLits := []*ast.FuncLit{} + compositeLits := []*ast.CompositeLit{} + + inspector.Preorder(filter, func(node ast.Node) { + switch node2 := node.(type) { + case *ast.ImportSpec: + decl, ok := importSpecToDecl(node2, pass.Pkg.Imports()) + if !ok { + return + } + + imports = append(imports, decl) + + case *ast.FuncDecl: + funcs = append(funcs, node2) + + if node2.Recv == nil { + return + } + + methods = append(methods, node2) + + case *ast.FuncLit: + funcLits = append(funcLits, node2) + + case *ast.CompositeLit: + compositeLits = append(compositeLits, node2) + + case *ast.TypeSwitchStmt: + switches = append(switches, node2) + + case *ast.Ident: + if node2.Obj == nil { + return + } + + if isCompositeLitKey(node2, compositeLits) { + return + } + + switch objDecl := node2.Obj.Decl.(type) { + case *ast.AssignStmt: + assignIdents = append(assignIdents, node2) + + case *ast.ValueSpec: + valueSpecIdents = append(valueSpecIdents, node2) + + case *ast.Field: + switch { + case isReceiver(objDecl, methods): + if !v.checkReceiver { + return + } + + paramIdents = append(paramIdents, node2) + + case isReturn(objDecl, funcs, funcLits): + if !v.checkReturn { + return + } + + returnIdents = append(returnIdents, node2) + + case isTypeParam(objDecl, funcs, funcLits): + if !v.checkTypeParameters { + return + } + + typeParamIdents = append(typeParamIdents, node2) + + case isParam(objDecl, funcs, funcLits, methods): + paramIdents = append(paramIdents, node2) + } + } + } + }) + + imports = append(imports, importDeclaration{ + path: pass.Pkg.Path(), + self: true, + }) + + sort.Slice(imports, func(a, b int) bool { + // reversed: longest path first + return len(imports[a].path) > len(imports[b].path) + }) + + return assignIdents, valueSpecIdents, paramIdents, returnIdents, typeParamIdents, imports, switches +} + +func importSpecToDecl(spec *ast.ImportSpec, imports []*types.Package) (importDeclaration, bool) { + path := strings.TrimSuffix(strings.TrimPrefix(spec.Path.Value, "\""), "\"") + + if spec.Name != nil { + return importDeclaration{ + name: spec.Name.Name, + path: path, + }, true + } + + for _, imp := range imports { + if imp.Path() == path { + return importDeclaration{ + name: imp.Name(), + path: path, + }, true + } + } + + return importDeclaration{}, false +} + +// isTypeAssertOk returns true if v is an "ok" variable that holds the bool return value of a type assertion. 
+func (v variable) isTypeAssertOk() bool { + if v.name != "ok" { + return false + } + + if v.assign == nil { + return false + } + + if len(v.assign.Lhs) != 2 { + return false + } + + ident, ok := v.assign.Lhs[1].(*ast.Ident) + if !ok { + return false + } + + if ident.Name != "ok" { + return false + } + + if len(v.assign.Rhs) != 1 { + return false + } + + if _, ok := v.assign.Rhs[0].(*ast.TypeAssertExpr); !ok { + return false + } + + return true +} + +// isMapIndexOk returns true if v is an "ok" variable that holds the bool return value of a map index. +func (v variable) isMapIndexOk() bool { + if v.name != "ok" { + return false + } + + if v.assign == nil { + return false + } + + if len(v.assign.Lhs) != 2 { + return false + } + + ident, ok := v.assign.Lhs[1].(*ast.Ident) + if !ok { + return false + } + + if ident.Name != "ok" { + return false + } + + if len(v.assign.Rhs) != 1 { + return false + } + + if _, ok := v.assign.Rhs[0].(*ast.IndexExpr); !ok { + return false + } + + return true +} + +// isChannelReceiveOk returns true if v is an "ok" variable that holds the bool return value of a channel receive. +func (v variable) isChannelReceiveOk() bool { + if v.name != "ok" { + return false + } + + if v.assign == nil { + return false + } + + if len(v.assign.Lhs) != 2 { + return false + } + + ident, ok := v.assign.Lhs[1].(*ast.Ident) + if !ok { + return false + } + + if ident.Name != "ok" { + return false + } + + if len(v.assign.Rhs) != 1 { + return false + } + + unary, ok := v.assign.Rhs[0].(*ast.UnaryExpr) + if !ok { + return false + } + + if unary.Op != token.ARROW { + return false + } + + return true +} + +// isConventional returns true if v matches a conventional Go variable/parameter name and type, +// such as "ctx context.Context" or "t *testing.T". +func (v variable) isConventional() bool { + for _, decl := range conventionalDecls { + if v.match(decl) { + return true + } + } + + return false +} + +// match returns true if v matches decl. +func (v variable) match(decl declaration) bool { + if v.name != decl.name { + return false + } + + if v.constant != decl.constant { + return false + } + + if v.constant { + return true + } + + if v.typ == "" { + return false + } + + return decl.matchType(v.typ) +} + +// kindName returns "constant" if v.constant==true, else "variable". +func (v variable) kindName() string { + if v.constant { + return "constant" + } + + return "variable" +} + +// isReceiver returns true if field is a receiver parameter of any of the given methods. +func isReceiver(field *ast.Field, methods []*ast.FuncDecl) bool { + for _, m := range methods { + for _, recv := range m.Recv.List { + if recv == field { + return true + } + } + } + + return false +} + +// isReturn returns true if field is a return value of any of the given funcs. +func isReturn(field *ast.Field, funcs []*ast.FuncDecl, funcLits []*ast.FuncLit) bool { //nolint:gocognit // it's not that complicated + for _, f := range funcs { + if f.Type.Results == nil { + continue + } + + for _, r := range f.Type.Results.List { + if r == field { + return true + } + } + } + + for _, f := range funcLits { + if f.Type.Results == nil { + continue + } + + for _, r := range f.Type.Results.List { + if r == field { + return true + } + } + } + + return false +} + +// isParam returns true if field is a parameter of any of the given funcs. 
+func isParam(field *ast.Field, funcs []*ast.FuncDecl, funcLits []*ast.FuncLit, methods []*ast.FuncDecl) bool { //nolint:gocognit,cyclop // it's not that complicated + for _, f := range funcs { + if f.Type.Params == nil { + continue + } + + for _, p := range f.Type.Params.List { + if p == field { + return true + } + } + } + + for _, f := range funcLits { + if f.Type.Params == nil { + continue + } + + for _, p := range f.Type.Params.List { + if p == field { + return true + } + } + } + + for _, m := range methods { + if m.Type.Params == nil { + continue + } + + for _, p := range m.Type.Params.List { + if p == field { + return true + } + } + } + + return false +} + +// isCompositeLitKey returns true if ident is a key of any of the given composite literals. +func isCompositeLitKey(ident *ast.Ident, compositeLits []*ast.CompositeLit) bool { + for _, cl := range compositeLits { + if _, ok := cl.Type.(*ast.MapType); ok { + continue + } + + for _, kvExpr := range cl.Elts { + kv, ok := kvExpr.(*ast.KeyValueExpr) + if !ok { + continue + } + + if kv.Key == ident { + return true + } + } + } + + return false +} + +// isTypeSwitchAssign returns true if assign is an assign statement of any of the given type switch statements. +func isTypeSwitchAssign(assign *ast.AssignStmt, switches []*ast.TypeSwitchStmt) bool { + for _, s := range switches { + if s.Assign == assign { + return true + } + } + + return false +} + +// isConventional returns true if v matches a conventional Go variable/parameter name and type, +// such as "ctx context.Context" or "t *testing.T". +func (p parameter) isConventional() bool { + for _, decl := range conventionalDecls { + if p.match(decl) { + return true + } + } + + return false +} + +// match returns whether p matches decl. +func (p parameter) match(decl declaration) bool { + if p.name != decl.name { + return false + } + + return decl.matchType(p.typ) +} + +// match returns whether p matches decl. +func (p typeParam) match(decl declaration) bool { + if p.name != decl.name { + return false + } + + return decl.matchType(p.typ) +} + +// parseDeclaration parses and returns a variable declaration parsed from decl. +func parseDeclaration(decl string) declaration { + if strings.HasPrefix(decl, "const ") { + return declaration{ + name: strings.TrimPrefix(decl, "const "), + constant: true, + } + } + + parts := strings.SplitN(decl, " ", 2) + + return declaration{ + name: parts[0], + typ: parts[1], + } +} + +// matchType returns true if typ matches d.typ. +func (d declaration) matchType(typ string) bool { + return d.typ == typ +} + +// shortTypeName returns the short name of typ, with respect to imports. +// For example, if package github.com/matryer/is is imported with alias "x", +// and typ represents []*github.com/matryer/is.I, shortTypeName will return "[]*x.I". +// For imports without aliases, the package's default name will be used. +func shortTypeName(typ types.Type, imports []importDeclaration) string { + if typ == nil { + return "" + } + + typStr := typ.String() + + for _, imp := range imports { + prefix := imp.path + "." + + replace := "" + if !imp.self { + replace = imp.name + "." 
+ } + + typStr = strings.ReplaceAll(typStr, prefix, replace) + } + + return typStr +} diff --git a/vendor/github.com/bombsimon/wsl/v3/.travis.yml b/vendor/github.com/bombsimon/wsl/v3/.travis.yml deleted file mode 100644 index 5e2e26ed1c..0000000000 --- a/vendor/github.com/bombsimon/wsl/v3/.travis.yml +++ /dev/null @@ -1,25 +0,0 @@ ---- -language: go - -go: - - 1.13.x - - 1.12.x - - 1.11.x - -env: - global: - - GO111MODULE=on - -install: - - go get -v golang.org/x/tools/cmd/cover github.com/mattn/goveralls - -script: - - go test -v -covermode=count -coverprofile=coverage.out - -after_script: - - $HOME/gopath/bin/goveralls -coverprofile=coverage.out -service=travis-ci - -notifications: - email: false - -# vim: set ts=2 sw=2 et: diff --git a/vendor/github.com/bombsimon/wsl/v3/.gitignore b/vendor/github.com/bombsimon/wsl/v4/.gitignore similarity index 100% rename from vendor/github.com/bombsimon/wsl/v3/.gitignore rename to vendor/github.com/bombsimon/wsl/v4/.gitignore diff --git a/vendor/github.com/bombsimon/wsl/v4/.golangci.yml b/vendor/github.com/bombsimon/wsl/v4/.golangci.yml new file mode 100644 index 0000000000..543012008f --- /dev/null +++ b/vendor/github.com/bombsimon/wsl/v4/.golangci.yml @@ -0,0 +1,81 @@ +--- +run: + deadline: 1m + issues-exit-code: 1 + tests: true + skip-dirs: + - vendor$ + +output: + format: colored-line-number + print-issued-lines: false + +linters-settings: + gocognit: + min-complexity: 10 + + depguard: + list-type: blacklist + include-go-root: false + packages: + - github.com/davecgh/go-spew/spew + + misspell: + locale: US + + gocritic: + # Enable multiple checks by tags, run `GL_DEBUG=gocritic golangci-lint run` + # to see all tags and checks. Empty list by default. See + # https://github.com/go-critic/go-critic#usage -> section "Tags". 
+ enabled-tags: + - diagnostic + - experimental + - opinionated + - performance + - style + +linters: + enable-all: true + disable: + - cyclop + - deadcode + - depguard + - dupl + - dupword + - exhaustivestruct + - exhaustruct + - forbidigo + - funlen + - gci + - gocognit + - gocyclo + - godox + - golint + - gomnd + - ifshort + - interfacer + - lll + - maintidx + - maligned + - nakedret + - nestif + - nlreturn + - nosnakecase + - paralleltest + - prealloc + - rowserrcheck + - scopelint + - structcheck + - testpackage + - varcheck + - varnamelen + - wastedassign + fast: false + + +issues: + exclude-use-default: true + max-issues-per-linter: 0 + max-same-issues: 0 + +# vim: set sw=2 ts=2 et: diff --git a/vendor/github.com/bombsimon/wsl/v3/LICENSE b/vendor/github.com/bombsimon/wsl/v4/LICENSE similarity index 100% rename from vendor/github.com/bombsimon/wsl/v3/LICENSE rename to vendor/github.com/bombsimon/wsl/v4/LICENSE diff --git a/vendor/github.com/bombsimon/wsl/v3/README.md b/vendor/github.com/bombsimon/wsl/v4/README.md similarity index 68% rename from vendor/github.com/bombsimon/wsl/v3/README.md rename to vendor/github.com/bombsimon/wsl/v4/README.md index 9812f94a79..3534b706bb 100644 --- a/vendor/github.com/bombsimon/wsl/v3/README.md +++ b/vendor/github.com/bombsimon/wsl/v4/README.md @@ -1,88 +1,59 @@ -# WSL - Whitespace Linter +# wsl - Whitespace Linter [![forthebadge](https://forthebadge.com/images/badges/made-with-go.svg)](https://forthebadge.com) [![forthebadge](https://forthebadge.com/images/badges/built-with-love.svg)](https://forthebadge.com) -[![Build Status](https://travis-ci.org/bombsimon/wsl.svg?branch=master)](https://travis-ci.org/bombsimon/wsl) +[![GitHub Actions](https://github.com/bombsimon/wsl/actions/workflows/go.yml/badge.svg)](https://github.com/bombsimon/wsl/actions/workflows/go.yml) [![Coverage Status](https://coveralls.io/repos/github/bombsimon/wsl/badge.svg?branch=master)](https://coveralls.io/github/bombsimon/wsl?branch=master) -WSL is a linter that enforces a very **non scientific** vision of how to make +`wsl` is a linter that enforces a very **non scientific** vision of how to make code more readable by enforcing empty lines at the right places. -I think too much code out there is to cuddly and a bit too warm for it's own -good, making it harder for other people to read and understand. The linter will -warn about newlines in and around blocks, in the beginning of files and other -places in the code. - -**I know this linter is aggressive** and a lot of projects I've tested it on -have failed miserably. For this linter to be useful at all I want to be open to -new ideas, configurations and discussions! Also note that some of the warnings -might be bugs or unintentional false positives so I would love an +**This linter is aggressive** and a lot of projects I've tested it on have +failed miserably. For this linter to be useful at all I want to be open to new +ideas, configurations and discussions! Also note that some of the warnings might +be bugs or unintentional false positives so I would love an [issue](https://github.com/bombsimon/wsl/issues/new) to fix, discuss, change or make something configurable! ## Installation -### By `go get` (local installation) - -You can do that by using: - ```sh -go get -u github.com/bombsimon/wsl/cmd/... +go install github.com/bombsimon/wsl/v4/cmd...@master ``` -### By golangci-lint (CI automation) - -`wsl` is already integrated with -[golangci-lint](https://github.com/golangci/golangci-lint). 
Please refer to the -instructions there. - ## Usage -How to use depends on how you install `wsl`. - -### With local binary +> **Note**: This linter provides a fixer that can fix most issues with the +`--fix` flag. However, currently `golangci-lint` [does not support suggested +fixes](https://github.com/golangci/golangci-lint/issues/1779) so the `--fix` +flag in `golangci-lint` will **not** work. -The general command format for `wsl` is: +`wsl` uses the [analysis](https://pkg.go.dev/golang.org/x/tools/go/analysis) +package meaning it will operate on package level with the default analysis flags +and way of working. ```sh -$ wsl [flags] [files...] -$ wsl [flags] +wsl --help +wsl [flags] -# Examples - -$ wsl ./main.go -$ wsl --no-test ./main.go -$ wsl --allow-cuddle-declarations ./main.go -$ wsl --no-test --allow-cuddle-declaration ./main.go -$ wsl --no-test --allow-trailing-comment ./myProject/... +wsl --allow-cuddle-declarations --fix ./... ``` -The "..." wildcard is not used like other `go` commands but instead can only -be to a relative or absolute path. - -By default, the linter will run on `./...` which means all go files in the -current path and all subsequent paths, including test files. To disable linting -test files, use `-n` or `--no-test`. - -### By `golangci-lint` (CI automation) - -The recommended command is: +`wsl` is also integrated in [`golangci-lint`](https://golangci-lint.run) ```sh -golangci-lint run --disable-all --enable wsl +golangci-lint run --no-config --disable-all --enable wsl ``` -For more information, please refer to -[golangci-lint](https://github.com/golangci/golangci-lint)'s documentation. - ## Issues and configuration The linter suppers a few ways to configure it to satisfy more than one kind of code style. These settings could be set either with flags or with YAML configuration if used via `golangci-lint`. -The supported configuration can be found [in the documentation](doc/configuration.md). +The supported configuration can be found [in the +documentation](doc/configuration.md). Below are the available checklist for any hit from `wsl`. If you do not see any, feel free to raise an [issue](https://github.com/bombsimon/wsl/issues/new). @@ -106,14 +77,11 @@ feel free to raise an [issue](https://github.com/bombsimon/wsl/issues/new). 
* [For statements should only be cuddled with assignments used in the iteration](doc/rules.md#for-statements-should-only-be-cuddled-with-assignments-used-in-the-iteration) * [Go statements can only invoke functions assigned on line above](doc/rules.md#go-statements-can-only-invoke-functions-assigned-on-line-above) * [If statements should only be cuddled with assignments](doc/rules.md#if-statements-should-only-be-cuddled-with-assignments) -* [If statements should only be cuddled with assignments used in the if - statement - itself](doc/rules.md#if-statements-should-only-be-cuddled-with-assignments-used-in-the-if-statement-itself) +* [If statements should only be cuddled with assignments used in the if statement itself](doc/rules.md#if-statements-should-only-be-cuddled-with-assignments-used-in-the-if-statement-itself) * [If statements that check an error must be cuddled with the statement that assigned the error](doc/rules.md#if-statements-that-check-an-error-must-be-cuddled-with-the-statement-that-assigned-the-error) -* [Only cuddled expressions if assigning variable or using from line - above](doc/rules.md#only-cuddled-expressions-if-assigning-variable-or-using-from-line-above) +* [Only cuddled expressions if assigning variable or using from line above](doc/rules.md#only-cuddled-expressions-if-assigning-variable-or-using-from-line-above) * [Only one cuddle assignment allowed before defer statement](doc/rules.md#only-one-cuddle-assignment-allowed-before-defer-statement) -* [Only one cuddle assginment allowed before for statement](doc/rules.md#only-one-cuddle-assignment-allowed-before-for-statement) +* [Only one cuddle assignment allowed before for statement](doc/rules.md#only-one-cuddle-assignment-allowed-before-for-statement) * [Only one cuddle assignment allowed before go statement](doc/rules.md#only-one-cuddle-assignment-allowed-before-go-statement) * [Only one cuddle assignment allowed before if statement](doc/rules.md#only-one-cuddle-assignment-allowed-before-if-statement) * [Only one cuddle assignment allowed before range statement](doc/rules.md#only-one-cuddle-assignment-allowed-before-range-statement) diff --git a/vendor/github.com/bombsimon/wsl/v4/analyzer.go b/vendor/github.com/bombsimon/wsl/v4/analyzer.go new file mode 100644 index 0000000000..b8eac15875 --- /dev/null +++ b/vendor/github.com/bombsimon/wsl/v4/analyzer.go @@ -0,0 +1,141 @@ +package wsl + +import ( + "flag" + "strings" + + "golang.org/x/tools/go/analysis" +) + +func NewAnalyzer(config *Configuration) *analysis.Analyzer { + wa := &wslAnalyzer{config: config} + + return &analysis.Analyzer{ + Name: "wsl", + Doc: "add or remove empty lines", + Flags: wa.flags(), + Run: wa.run, + RunDespiteErrors: true, + } +} + +func defaultConfig() *Configuration { + return &Configuration{ + AllowAssignAndAnythingCuddle: false, + AllowAssignAndCallCuddle: true, + AllowCuddleDeclaration: false, + AllowMultiLineAssignCuddle: true, + AllowSeparatedLeadingComment: false, + AllowTrailingComment: false, + ForceCuddleErrCheckAndAssign: false, + ForceExclusiveShortDeclarations: false, + StrictAppend: true, + AllowCuddleWithCalls: []string{"Lock", "RLock"}, + AllowCuddleWithRHS: []string{"Unlock", "RUnlock"}, + ErrorVariableNames: []string{"err"}, + ForceCaseTrailingWhitespaceLimit: 0, + } +} + +// wslAnalyzer is a wrapper around the configuration which is used to be able to +// set the configuration when creating the analyzer and later be able to update +// flags and running method. 
+type wslAnalyzer struct { + config *Configuration +} + +func (wa *wslAnalyzer) flags() flag.FlagSet { + flags := flag.NewFlagSet("", flag.ExitOnError) + + // If we have a configuration set we're not running from the command line so + // we don't use any flags. + if wa.config != nil { + return *flags + } + + wa.config = defaultConfig() + + flags.BoolVar(&wa.config.AllowAssignAndAnythingCuddle, "allow-assign-and-anything", false, "Allow assignments and anything to be cuddled") + flags.BoolVar(&wa.config.AllowAssignAndCallCuddle, "allow-assign-and-call", true, "Allow assignments and calls to be cuddled (if using same variable/type)") + flags.BoolVar(&wa.config.AllowCuddleDeclaration, "allow-cuddle-declarations", false, "Allow declarations to be cuddled") + flags.BoolVar(&wa.config.AllowMultiLineAssignCuddle, "allow-multi-line-assign", true, "Allow cuddling with multi line assignments") + flags.BoolVar(&wa.config.AllowSeparatedLeadingComment, "allow-separated-leading-comment", false, "Allow empty newlines in leading comments") + flags.BoolVar(&wa.config.AllowTrailingComment, "allow-trailing-comment", false, "Allow blocks to end with a comment") + flags.BoolVar(&wa.config.ForceCuddleErrCheckAndAssign, "force-err-cuddling", false, "Force cuddling of error checks with error var assignment") + flags.BoolVar(&wa.config.ForceExclusiveShortDeclarations, "force-short-decl-cuddling", false, "Force short declarations to cuddle by themselves") + flags.BoolVar(&wa.config.StrictAppend, "strict-append", true, "Strict rules for append") + flags.IntVar(&wa.config.ForceCaseTrailingWhitespaceLimit, "force-case-trailing-whitespace", 0, "Force newlines for case blocks > this number.") + + flags.Var(&multiStringValue{slicePtr: &wa.config.AllowCuddleWithCalls}, "allow-cuddle-with-calls", "Comma separated list of idents that can have cuddles after") + flags.Var(&multiStringValue{slicePtr: &wa.config.AllowCuddleWithRHS}, "allow-cuddle-with-rhs", "Comma separated list of idents that can have cuddles before") + flags.Var(&multiStringValue{slicePtr: &wa.config.ErrorVariableNames}, "error-variable-names", "Comma separated list of error variable names") + + return *flags +} + +func (wa *wslAnalyzer) run(pass *analysis.Pass) (interface{}, error) { + for _, file := range pass.Files { + filename := pass.Fset.PositionFor(file.Pos(), false).Filename + if !strings.HasSuffix(filename, ".go") { + continue + } + + processor := newProcessorWithConfig(file, pass.Fset, wa.config) + processor.parseAST() + + for pos, fix := range processor.result { + textEdits := []analysis.TextEdit{} + for _, f := range fix.fixRanges { + textEdits = append(textEdits, analysis.TextEdit{ + Pos: f.fixRangeStart, + End: f.fixRangeEnd, + NewText: []byte("\n"), + }) + } + + pass.Report(analysis.Diagnostic{ + Pos: pos, + Category: "whitespace", + Message: fix.reason, + SuggestedFixes: []analysis.SuggestedFix{ + { + TextEdits: textEdits, + }, + }, + }) + } + } + + //nolint:nilnil // A pass don't need to return anything. + return nil, nil +} + +// multiStringValue is a flag that supports multiple values. It's implemented to +// contain a pointer to a string slice that will be overwritten when the flag's +// `Set` method is called. +type multiStringValue struct { + slicePtr *[]string +} + +// Set implements the flag.Value interface and will overwrite the pointer to the +// slice with a new pointer after splitting the flag by comma. 
+func (m *multiStringValue) Set(value string) error { + s := []string{} + + for _, v := range strings.Split(value, ",") { + s = append(s, strings.TrimSpace(v)) + } + + *m.slicePtr = s + + return nil +} + +// Set implements the flag.Value interface. +func (m *multiStringValue) String() string { + if m.slicePtr == nil { + return "" + } + + return strings.Join(*m.slicePtr, ", ") +} diff --git a/vendor/github.com/bombsimon/wsl/v3/wsl.go b/vendor/github.com/bombsimon/wsl/v4/wsl.go similarity index 62% rename from vendor/github.com/bombsimon/wsl/v3/wsl.go rename to vendor/github.com/bombsimon/wsl/v4/wsl.go index 313b527872..6fd33335a1 100644 --- a/vendor/github.com/bombsimon/wsl/v3/wsl.go +++ b/vendor/github.com/bombsimon/wsl/v4/wsl.go @@ -3,48 +3,47 @@ package wsl import ( "fmt" "go/ast" - "go/parser" "go/token" - "io/ioutil" "reflect" + "sort" "strings" ) -// Error reason strings +// Error reason strings. const ( - reasonMustCuddleErrCheck = "if statements that check an error must be cuddled with the statement that assigned the error" - reasonOnlyCuddleIfWithAssign = "if statements should only be cuddled with assignments" - reasonOnlyOneCuddle = "only one cuddle assignment allowed before if statement" - reasonOnlyCuddleWithUsedAssign = "if statements should only be cuddled with assignments used in the if statement itself" - reasonOnlyCuddle2LineReturn = "return statements should not be cuddled if block has more than two lines" - reasonMultiLineBranchCuddle = "branch statements should not be cuddled if block has more than two lines" + reasonAnonSwitchCuddled = "anonymous switch statements should never be cuddled" reasonAppendCuddledWithoutUse = "append only allowed to cuddle with appended value" reasonAssignsCuddleAssign = "assignments should only be cuddled with other assignments" - reasonNeverCuddleDeclare = "declarations should never be cuddled" - reasonExpressionCuddledWithDeclOrRet = "expressions should not be cuddled with declarations or returns" - reasonExpressionCuddledWithBlock = "expressions should not be cuddled with blocks" - reasonExprCuddlingNonAssignedVar = "only cuddled expressions if assigning variable or using from line above" - reasonOneCuddleBeforeRange = "only one cuddle assignment allowed before range statement" - reasonRangeCuddledWithoutUse = "ranges should only be cuddled with assignments used in the iteration" - reasonOneCuddleBeforeDefer = "only one cuddle assignment allowed before defer statement" + reasonBlockEndsWithWS = "block should not end with a whitespace (or comment)" + reasonBlockStartsWithWS = "block should not start with a whitespace" + reasonCaseBlockTooCuddly = "case block should end with newline at this size" reasonDeferCuddledWithOtherVar = "defer statements should only be cuddled with expressions on same variable" - reasonForWithoutCondition = "for statement without condition should never be cuddled" - reasonForWithMoreThanOneCuddle = "only one cuddle assignment allowed before for statement" + reasonExprCuddlingNonAssignedVar = "only cuddled expressions if assigning variable or using from line above" + reasonExpressionCuddledWithBlock = "expressions should not be cuddled with blocks" + reasonExpressionCuddledWithDeclOrRet = "expressions should not be cuddled with declarations or returns" reasonForCuddledAssignWithoutUse = "for statements should only be cuddled with assignments used in the iteration" - reasonOneCuddleBeforeGo = "only one cuddle assignment allowed before go statement" + reasonForWithoutCondition = "for statement without condition should 
never be cuddled" reasonGoFuncWithoutAssign = "go statements can only invoke functions assigned on line above" - reasonSwitchManyCuddles = "only one cuddle assignment allowed before switch statement" - reasonAnonSwitchCuddled = "anonymous switch statements should never be cuddled" + reasonMultiLineBranchCuddle = "branch statements should not be cuddled if block has more than two lines" + reasonMustCuddleErrCheck = "if statements that check an error must be cuddled with the statement that assigned the error" + reasonNeverCuddleDeclare = "declarations should never be cuddled" + reasonOnlyCuddle2LineReturn = "return statements should not be cuddled if block has more than two lines" + reasonOnlyCuddleIfWithAssign = "if statements should only be cuddled with assignments" + reasonOnlyCuddleWithUsedAssign = "if statements should only be cuddled with assignments used in the if statement itself" + reasonOnlyOneCuddleBeforeDefer = "only one cuddle assignment allowed before defer statement" + reasonOnlyOneCuddleBeforeFor = "only one cuddle assignment allowed before for statement" + reasonOnlyOneCuddleBeforeGo = "only one cuddle assignment allowed before go statement" + reasonOnlyOneCuddleBeforeIf = "only one cuddle assignment allowed before if statement" + reasonOnlyOneCuddleBeforeRange = "only one cuddle assignment allowed before range statement" + reasonOnlyOneCuddleBeforeSwitch = "only one cuddle assignment allowed before switch statement" + reasonOnlyOneCuddleBeforeTypeSwitch = "only one cuddle assignment allowed before type switch statement" + reasonRangeCuddledWithoutUse = "ranges should only be cuddled with assignments used in the iteration" + reasonShortDeclNotExclusive = "short declaration should cuddle only with other short declarations" reasonSwitchCuddledWithoutUse = "switch statements should only be cuddled with variables switched" - reasonTypeSwitchTooCuddled = "only one cuddle assignment allowed before type switch statement" reasonTypeSwitchCuddledWithoutUse = "type switch statements should only be cuddled with variables switched" - reasonBlockStartsWithWS = "block should not start with a whitespace" - reasonBlockEndsWithWS = "block should not end with a whitespace (or comment)" - reasonCaseBlockTooCuddly = "case block should end with newline at this size" - reasonShortDeclNotExclusive = "short declaration should cuddle only with other short declarations" ) -// Warning strings +// Warning strings. const ( warnTypeNotImplement = "type not implemented" warnStmtNotImplemented = "stmt type not implemented" @@ -54,6 +53,7 @@ const ( warnUnknownRHS = "UNKNOWN RHS" ) +// Configuration represents configurable settings for the linter. type Configuration struct { // StrictAppend will do strict checking when assigning from append (x = // append(x, y)). If this is set to true the append call must append either @@ -82,7 +82,7 @@ type Configuration struct { // x = AnotherAssign() AllowAssignAndCallCuddle bool - // AllowAssignAndCallCuddle allows assignments to be cuddled with anything. + // AllowAssignAndAnythingCuddle allows assignments to be cuddled with anything. 
// Example supported with this set to true: // if x == 1 { // x = 0 @@ -176,92 +176,40 @@ type Configuration struct { ForceExclusiveShortDeclarations bool } -// DefaultConfig returns default configuration -func DefaultConfig() Configuration { - return Configuration{ - StrictAppend: true, - AllowAssignAndCallCuddle: true, - AllowAssignAndAnythingCuddle: false, - AllowMultiLineAssignCuddle: true, - AllowTrailingComment: false, - AllowSeparatedLeadingComment: false, - ForceCuddleErrCheckAndAssign: false, - ForceExclusiveShortDeclarations: false, - ForceCaseTrailingWhitespaceLimit: 0, - AllowCuddleWithCalls: []string{"Lock", "RLock"}, - AllowCuddleWithRHS: []string{"Unlock", "RUnlock"}, - ErrorVariableNames: []string{"err"}, - } -} - -// Result represents the result of one error. -type Result struct { - FileName string - LineNumber int - Position token.Position - Reason string +// fix is a range to fixup. +type fix struct { + fixRangeStart token.Pos + fixRangeEnd token.Pos } -// String returns the filename, line number and reason of a Result. -func (r *Result) String() string { - return fmt.Sprintf("%s:%d: %s", r.FileName, r.LineNumber, r.Reason) +// result represents the result of one error. +type result struct { + fixRanges []fix + reason string } -type Processor struct { - config Configuration - result []Result - warnings []string - fileSet *token.FileSet +// processor is the type that keeps track of the file and fileset and holds the +// results from parsing the AST. +type processor struct { + config *Configuration file *ast.File + fileSet *token.FileSet + result map[token.Pos]result + warnings []string } -// NewProcessor will create a Processor. -func NewProcessorWithConfig(cfg Configuration) *Processor { - return &Processor{ - result: []Result{}, - config: cfg, - } -} - -// NewProcessor will create a Processor. -func NewProcessor() *Processor { - return NewProcessorWithConfig(DefaultConfig()) -} - -// ProcessFiles takes a string slice with file names (full paths) and lints -// them. -// nolint: gocritic -func (p *Processor) ProcessFiles(filenames []string) ([]Result, []string) { - for _, filename := range filenames { - data, err := ioutil.ReadFile(filename) - if err != nil { - panic(err) - } - - p.process(filename, data) +// newProcessorWithConfig will create a Processor with the passed configuration. +func newProcessorWithConfig(file *ast.File, fileSet *token.FileSet, cfg *Configuration) *processor { + return &processor{ + config: cfg, + file: file, + fileSet: fileSet, + result: make(map[token.Pos]result), } - - return p.result, p.warnings } -func (p *Processor) process(filename string, data []byte) { - fileSet := token.NewFileSet() - file, err := parser.ParseFile(fileSet, filename, data, parser.ParseComments) - - // If the file is not parsable let's add a syntax error and move on. - if err != nil { - p.result = append(p.result, Result{ - FileName: filename, - LineNumber: 0, - Reason: fmt.Sprintf("invalid syntax, file cannot be linted (%s)", err.Error()), - }) - - return - } - - p.fileSet = fileSet - p.file = file - +// parseAST will parse the AST attached to the Processor instance. +func (p *processor) parseAST() { for _, d := range p.file.Decls { switch v := d.(type) { case *ast.FuncDecl: @@ -277,7 +225,7 @@ func (p *Processor) process(filename string, data []byte) { // parseBlockBody will parse any kind of block statements such as switch cases // and if statements. A list of Result is returned. 
-func (p *Processor) parseBlockBody(ident *ast.Ident, block *ast.BlockStmt) { +func (p *processor) parseBlockBody(ident *ast.Ident, block *ast.BlockStmt) { // Nothing to do if there's no value. if reflect.ValueOf(block).IsNil() { return @@ -292,8 +240,7 @@ func (p *Processor) parseBlockBody(ident *ast.Ident, block *ast.BlockStmt) { // parseBlockStatements will parse all the statements found in the body of a // node. A list of Result is returned. -// nolint: gocognit -func (p *Processor) parseBlockStatements(statements []ast.Stmt) { +func (p *processor) parseBlockStatements(statements []ast.Stmt) { for i, stmt := range statements { // Start by checking if this statement is another block (other than if, // for and range). This could be assignment to a function, defer or go @@ -335,7 +282,7 @@ func (p *Processor) parseBlockStatements(statements []ast.Stmt) { var calledOnLineAbove []string // Check if the previous statement spans over multiple lines. - var cuddledWithMultiLineAssignment = cuddledWithLastStmt && p.nodeStart(previousStatement) != p.nodeStart(stmt)-1 + cuddledWithMultiLineAssignment := cuddledWithLastStmt && p.nodeStart(previousStatement) != p.nodeStart(stmt)-1 // Ensure previous line is not a multi line assignment and if not get // rightAndLeftHandSide assigned variables. @@ -379,14 +326,38 @@ func (p *Processor) parseBlockStatements(statements []ast.Stmt) { continue } - moreThanOneStatementAbove := func() bool { - if i < 2 { + nStatementsBefore := func(n int) bool { + if i < n { return false } - statementBeforePreviousStatement := statements[i-2] + for j := 1; j < n; j++ { + s1 := statements[i-j] + s2 := statements[i-(j+1)] + + if p.nodeStart(s1)-1 != p.nodeEnd(s2) { + return false + } + } + + return true + } + + nStatementsAfter := func(n int) bool { + if len(statements)-1 < i+n { + return false + } + + for j := 0; j < n; j++ { + s1 := statements[i+j] + s2 := statements[i+j+1] + + if p.nodeEnd(s1)+1 != p.nodeStart(s2) { + return false + } + } - return p.nodeStart(previousStatement)-1 == p.nodeEnd(statementBeforePreviousStatement) + return true } isLastStatementInBlockOfOnlyTwoLines := func() bool { @@ -397,8 +368,7 @@ func (p *Processor) parseBlockStatements(statements []ast.Stmt) { // t.X = true // return t // } - // nolint: gocritic - if i == len(statements)-1 && i == 1 { + if len(statements) == 2 && i == 1 { if p.nodeEnd(stmt)-p.nodeStart(previousStatement) <= 2 { return true } @@ -413,9 +383,30 @@ func (p *Processor) parseBlockStatements(statements []ast.Stmt) { // it was and use *that* statement's position if p.config.ForceExclusiveShortDeclarations && cuddledWithLastStmt { if p.isShortDecl(stmt) && !p.isShortDecl(previousStatement) { - p.addError(stmt.Pos(), reasonShortDeclNotExclusive) + var reportNode ast.Node = previousStatement + + cm := ast.NewCommentMap(p.fileSet, stmt, p.file.Comments) + if cg, ok := cm[stmt]; ok && len(cg) > 0 { + for _, c := range cg { + if c.Pos() > previousStatement.End() && c.End() < stmt.Pos() { + reportNode = c + } + } + } + + p.addErrorRange( + stmt.Pos(), + reportNode.End(), + reportNode.End(), + reasonShortDeclNotExclusive, + ) } else if p.isShortDecl(previousStatement) && !p.isShortDecl(stmt) { - p.addError(previousStatement.Pos(), reasonShortDeclNotExclusive) + p.addErrorRange( + previousStatement.Pos(), + stmt.Pos(), + stmt.Pos(), + reasonShortDeclNotExclusive, + ) } } @@ -428,6 +419,31 @@ func (p *Processor) parseBlockStatements(statements []ast.Stmt) { } } + reportNewlineTwoLinesAbove := func(n1, n2 ast.Node, reason string) { + if 
atLeastOneInListsMatch(rightAndLeftHandSide, assignedOnLineAbove) || + atLeastOneInListsMatch(assignedOnLineAbove, calledOrAssignedFirstInBlock) { + // If both the assignment on the line above _and_ the assignment + // two lines above is part of line or first in block, add the + // newline as if non were. + _, isAssignmentTwoLinesAbove := statements[i-2].(*ast.AssignStmt) + assignedTwoLinesAbove := p.findLHS(statements[i-2]) + + if isAssignmentTwoLinesAbove && + (atLeastOneInListsMatch(rightAndLeftHandSide, assignedTwoLinesAbove) || + atLeastOneInListsMatch(assignedTwoLinesAbove, calledOrAssignedFirstInBlock)) { + p.addWhitespaceBeforeError(n1, reason) + } else { + // If the variable on the line above is allowed to be + // cuddled, break two lines above so we keep the proper + // cuddling. + p.addErrorRange(n1.Pos(), n2.Pos(), n2.Pos(), reason) + } + } else { + // If not, break here so we separate the cuddled variable. + p.addWhitespaceBeforeError(n1, reason) + } + } + switch t := stmt.(type) { case *ast.IfStmt: checkingErrInitializedInline := func() bool { @@ -460,7 +476,12 @@ func (p *Processor) parseBlockStatements(statements []ast.Stmt) { } if atLeastOneInListsMatch(assignedOnLineAbove, p.config.ErrorVariableNames) { - p.addError(t.Pos(), reasonMustCuddleErrCheck) + p.addErrorRange( + stmt.Pos(), + previousStatement.End(), + stmt.Pos(), + reasonMustCuddleErrCheck, + ) } } @@ -468,12 +489,12 @@ func (p *Processor) parseBlockStatements(statements []ast.Stmt) { } if len(assignedOnLineAbove) == 0 { - p.addError(t.Pos(), reasonOnlyCuddleIfWithAssign) + p.addWhitespaceBeforeError(t, reasonOnlyCuddleIfWithAssign) continue } - if moreThanOneStatementAbove() { - p.addError(t.Pos(), reasonOnlyOneCuddle) + if nStatementsBefore(2) { + reportNewlineTwoLinesAbove(t, statements[i-1], reasonOnlyOneCuddleBeforeIf) continue } @@ -485,33 +506,34 @@ func (p *Processor) parseBlockStatements(statements []ast.Stmt) { continue } - p.addError(t.Pos(), reasonOnlyCuddleWithUsedAssign) + p.addWhitespaceBeforeError(t, reasonOnlyCuddleWithUsedAssign) case *ast.ReturnStmt: if isLastStatementInBlockOfOnlyTwoLines() { continue } - p.addError(t.Pos(), reasonOnlyCuddle2LineReturn) + p.addWhitespaceBeforeError(t, reasonOnlyCuddle2LineReturn) case *ast.BranchStmt: if isLastStatementInBlockOfOnlyTwoLines() { continue } - p.addError(t.Pos(), reasonMultiLineBranchCuddle) + p.addWhitespaceBeforeError(t, reasonMultiLineBranchCuddle) case *ast.AssignStmt: // append is usually an assignment but should not be allowed to be // cuddled with anything not appended. 
if len(rightHandSide) > 0 && rightHandSide[len(rightHandSide)-1] == "append" { if p.config.StrictAppend { if !atLeastOneInListsMatch(calledOrAssignedOnLineAbove, rightHandSide) { - p.addError(t.Pos(), reasonAppendCuddledWithoutUse) + p.addWhitespaceBeforeError(t, reasonAppendCuddledWithoutUse) } } continue } - if _, ok := previousStatement.(*ast.AssignStmt); ok { + switch previousStatement.(type) { + case *ast.AssignStmt, *ast.IncDecStmt: continue } @@ -535,10 +557,18 @@ func (p *Processor) parseBlockStatements(statements []ast.Stmt) { } } - p.addError(t.Pos(), reasonAssignsCuddleAssign) + p.addWhitespaceBeforeError(t, reasonAssignsCuddleAssign) + case *ast.IncDecStmt: + switch previousStatement.(type) { + case *ast.AssignStmt, *ast.IncDecStmt: + continue + } + + p.addWhitespaceBeforeError(t, reasonAssignsCuddleAssign) + case *ast.DeclStmt: if !p.config.AllowCuddleDeclaration { - p.addError(t.Pos(), reasonNeverCuddleDeclare) + p.addWhitespaceBeforeError(t, reasonNeverCuddleDeclare) } case *ast.ExprStmt: switch previousStatement.(type) { @@ -547,9 +577,9 @@ func (p *Processor) parseBlockStatements(statements []ast.Stmt) { continue } - p.addError(t.Pos(), reasonExpressionCuddledWithDeclOrRet) + p.addWhitespaceBeforeError(t, reasonExpressionCuddledWithDeclOrRet) case *ast.IfStmt, *ast.RangeStmt, *ast.SwitchStmt: - p.addError(t.Pos(), reasonExpressionCuddledWithBlock) + p.addWhitespaceBeforeError(t, reasonExpressionCuddledWithBlock) } // If the expression is called on a type or variable used or @@ -567,17 +597,17 @@ func (p *Processor) parseBlockStatements(statements []ast.Stmt) { // If we assigned variables on the line above but didn't use them in // this expression there should probably be a newline between them. if len(assignedOnLineAbove) > 0 && !atLeastOneInListsMatch(rightAndLeftHandSide, assignedOnLineAbove) { - p.addError(t.Pos(), reasonExprCuddlingNonAssignedVar) + p.addWhitespaceBeforeError(t, reasonExprCuddlingNonAssignedVar) } case *ast.RangeStmt: - if moreThanOneStatementAbove() { - p.addError(t.Pos(), reasonOneCuddleBeforeRange) + if nStatementsBefore(2) { + reportNewlineTwoLinesAbove(t, statements[i-1], reasonOnlyOneCuddleBeforeRange) continue } if !atLeastOneInListsMatch(rightAndLeftHandSide, assignedOnLineAbove) { if !atLeastOneInListsMatch(assignedOnLineAbove, calledOrAssignedFirstInBlock) { - p.addError(t.Pos(), reasonRangeCuddledWithoutUse) + p.addWhitespaceBeforeError(t, reasonRangeCuddledWithoutUse) } } case *ast.DeferStmt: @@ -586,27 +616,38 @@ func (p *Processor) parseBlockStatements(statements []ast.Stmt) { continue } - // Special treatment of deferring body closes after error checking - // according to best practices. See - // https://github.com/bombsimon/wsl/issues/31 which links to - // discussion about error handling after HTTP requests. This is hard - // coded and very specific but for now this is to be seen as a - // special case. What this does is that it *only* allows a defer - // statement with `Close` on the right hand side to be cuddled with - // an if-statement to support this: - // resp, err := client.Do(req) - // if err != nil { - // return err - // } - // defer resp.Body.Close() - if _, ok := previousStatement.(*ast.IfStmt); ok { - if atLeastOneInListsMatch(rightHandSide, []string{"Close"}) { - continue + if nStatementsBefore(2) { + // We allow cuddling defer if the defer references something + // used two lines above. + // There are several reasons to why we do this. 
+ // Originally there was a special use case only for "Close" + // + // https://github.com/bombsimon/wsl/issues/31 which links to + // resp, err := client.Do(req) + // if err != nil { + // return err + // } + // defer resp.Body.Close() + // + // After a discussion in a followup issue it makes sense to not + // only hard code `Close` but for anything that's referenced two + // statements above. + // + // https://github.com/bombsimon/wsl/issues/85 + // db, err := OpenDB() + // require.NoError(t, err) + // defer db.Close() + // + // All of this is only allowed if there's exactly three cuddled + // statements, otherwise the regular rules apply. + if !nStatementsBefore(3) && !nStatementsAfter(1) { + variablesTwoLinesAbove := append(p.findLHS(statements[i-2]), p.findRHS(statements[i-2])...) + if atLeastOneInListsMatch(rightHandSide, variablesTwoLinesAbove) { + continue + } } - } - if moreThanOneStatementAbove() { - p.addError(t.Pos(), reasonOneCuddleBeforeDefer) + reportNewlineTwoLinesAbove(t, statements[i-1], reasonOnlyOneCuddleBeforeDefer) continue } @@ -620,7 +661,7 @@ func (p *Processor) parseBlockStatements(statements []ast.Stmt) { } // Allow use to cuddled defer func literals with usages on line - // abouve. Example: + // above. Example: // b := getB() // defer func() { // makesSenseToUse(b) @@ -638,18 +679,16 @@ func (p *Processor) parseBlockStatements(statements []ast.Stmt) { } if !atLeastOneInListsMatch(rightAndLeftHandSide, assignedOnLineAbove) { - p.addError(t.Pos(), reasonDeferCuddledWithOtherVar) + p.addWhitespaceBeforeError(t, reasonDeferCuddledWithOtherVar) } case *ast.ForStmt: if len(rightAndLeftHandSide) == 0 { - p.addError(t.Pos(), reasonForWithoutCondition) - + p.addWhitespaceBeforeError(t, reasonForWithoutCondition) continue } - if moreThanOneStatementAbove() { - p.addError(t.Pos(), reasonForWithMoreThanOneCuddle) - + if nStatementsBefore(2) { + reportNewlineTwoLinesAbove(t, statements[i-1], reasonOnlyOneCuddleBeforeFor) continue } @@ -658,7 +697,7 @@ func (p *Processor) parseBlockStatements(statements []ast.Stmt) { // first line in the block for details. if !atLeastOneInListsMatch(rightAndLeftHandSide, assignedOnLineAbove) { if !atLeastOneInListsMatch(assignedOnLineAbove, calledOrAssignedFirstInBlock) { - p.addError(t.Pos(), reasonForCuddledAssignWithoutUse) + p.addWhitespaceBeforeError(t, reasonForCuddledAssignWithoutUse) } } case *ast.GoStmt: @@ -666,33 +705,46 @@ func (p *Processor) parseBlockStatements(statements []ast.Stmt) { continue } - if moreThanOneStatementAbove() { - p.addError(t.Pos(), reasonOneCuddleBeforeGo) - + if nStatementsBefore(2) { + reportNewlineTwoLinesAbove(t, statements[i-1], reasonOnlyOneCuddleBeforeGo) continue } + if c, ok := t.Call.Fun.(*ast.SelectorExpr); ok { + goCallArgs := append(p.findLHS(c.X), p.findRHS(c.X)...) + + if atLeastOneInListsMatch(calledOnLineAbove, goCallArgs) { + continue + } + } + + if c, ok := t.Call.Fun.(*ast.FuncLit); ok { + goCallArgs := append(p.findLHS(c.Body), p.findRHS(c.Body)...) 
+ + if atLeastOneInListsMatch(assignedOnLineAbove, goCallArgs) { + continue + } + } + if !atLeastOneInListsMatch(rightAndLeftHandSide, assignedOnLineAbove) { - p.addError(t.Pos(), reasonGoFuncWithoutAssign) + p.addWhitespaceBeforeError(t, reasonGoFuncWithoutAssign) } case *ast.SwitchStmt: - if moreThanOneStatementAbove() { - p.addError(t.Pos(), reasonSwitchManyCuddles) - + if nStatementsBefore(2) { + reportNewlineTwoLinesAbove(t, statements[i-1], reasonOnlyOneCuddleBeforeSwitch) continue } if !atLeastOneInListsMatch(rightAndLeftHandSide, assignedOnLineAbove) { if len(rightAndLeftHandSide) == 0 { - p.addError(t.Pos(), reasonAnonSwitchCuddled) + p.addWhitespaceBeforeError(t, reasonAnonSwitchCuddled) } else { - p.addError(t.Pos(), reasonSwitchCuddledWithoutUse) + p.addWhitespaceBeforeError(t, reasonSwitchCuddledWithoutUse) } } case *ast.TypeSwitchStmt: - if moreThanOneStatementAbove() { - p.addError(t.Pos(), reasonTypeSwitchTooCuddled) - + if nStatementsBefore(2) { + reportNewlineTwoLinesAbove(t, statements[i-1], reasonOnlyOneCuddleBeforeTypeSwitch) continue } @@ -701,11 +753,11 @@ func (p *Processor) parseBlockStatements(statements []ast.Stmt) { // Allow type assertion on variables used in the first case // immediately. if !atLeastOneInListsMatch(assignedOnLineAbove, calledOrAssignedFirstInBlock) { - p.addError(t.Pos(), reasonTypeSwitchCuddledWithoutUse) + p.addWhitespaceBeforeError(t, reasonTypeSwitchCuddledWithoutUse) } } case *ast.CaseClause, *ast.CommClause: - // Case clauses will be checked by not allowing leading ot trailing + // Case clauses will be checked by not allowing leading of trailing // whitespaces within the block. There's nothing in the case itself // that may be cuddled. default: @@ -719,7 +771,7 @@ func (p *Processor) parseBlockStatements(statements []ast.Stmt) { // directly as the first argument inside a body. // The body will then be parsed as a *ast.BlockStmt (regular block) or as a list // of []ast.Stmt (case block). -func (p *Processor) firstBodyStatement(i int, allStmt []ast.Stmt) ast.Node { +func (p *processor) firstBodyStatement(i int, allStmt []ast.Stmt) ast.Node { stmt := allStmt[i] // Start by checking if the statement has a body (probably if-statement, @@ -750,7 +802,14 @@ func (p *Processor) firstBodyStatement(i int, allStmt []ast.Stmt) ast.Node { } } - p.parseBlockBody(nil, statementBodyContent) + // If statement bodies will be parsed already when finding block bodies. + // The reason is because if/else-if/else chains is nested in the AST + // where the else bit is a part of the if statement. Since if statements + // is the only statement that can be chained like this we exclude it + // from parsing it again here. + if _, ok := stmt.(*ast.IfStmt); !ok { + p.parseBlockBody(nil, statementBodyContent) + } case []ast.Stmt: // The Body field for an *ast.CaseClause or *ast.CommClause is of type // []ast.Stmt. 
We must check leading and trailing whitespaces and then @@ -775,7 +834,7 @@ func (p *Processor) firstBodyStatement(i int, allStmt []ast.Stmt) ast.Node { return firstBodyStatement } -func (p *Processor) findLHS(node ast.Node) []string { +func (p *processor) findLHS(node ast.Node) []string { var lhs []string if node == nil { @@ -834,7 +893,7 @@ func (p *Processor) findLHS(node ast.Node) []string { return lhs } -func (p *Processor) findRHS(node ast.Node) []string { +func (p *processor) findRHS(node ast.Node) []string { var rhs []string if node == nil { @@ -845,8 +904,7 @@ func (p *Processor) findRHS(node ast.Node) []string { case *ast.BasicLit, *ast.SelectStmt, *ast.ChanType, *ast.LabeledStmt, *ast.DeclStmt, *ast.BranchStmt, *ast.TypeSpec, *ast.ArrayType, *ast.CaseClause, - *ast.CommClause, *ast.KeyValueExpr, *ast.MapType, - *ast.FuncLit: + *ast.CommClause, *ast.MapType, *ast.FuncLit: // Nothing to add to RHS case *ast.Ident: return []string{t.Name} @@ -905,6 +963,9 @@ func (p *Processor) findRHS(node ast.Node) []string { rhs = append(rhs, p.findRHS(t.X)...) rhs = append(rhs, p.findRHS(t.Low)...) rhs = append(rhs, p.findRHS(t.High)...) + case *ast.KeyValueExpr: + rhs = p.findRHS(t.Key) + rhs = append(rhs, p.findRHS(t.Value)...) default: if x, ok := maybeX(t); ok { return p.findRHS(x) @@ -916,7 +977,7 @@ func (p *Processor) findRHS(node ast.Node) []string { return rhs } -func (p *Processor) isShortDecl(node ast.Node) bool { +func (p *processor) isShortDecl(node ast.Node) bool { if t, ok := node.(*ast.AssignStmt); ok { return t.Tok == token.DEFINE } @@ -924,16 +985,22 @@ func (p *Processor) isShortDecl(node ast.Node) bool { return false } -func (p *Processor) findBlockStmt(node ast.Node) []*ast.BlockStmt { +func (p *processor) findBlockStmt(node ast.Node) []*ast.BlockStmt { var blocks []*ast.BlockStmt switch t := node.(type) { + case *ast.BlockStmt: + return []*ast.BlockStmt{t} case *ast.AssignStmt: for _, x := range t.Rhs { blocks = append(blocks, p.findBlockStmt(x)...) } case *ast.CallExpr: blocks = append(blocks, p.findBlockStmt(t.Fun)...) + + for _, x := range t.Args { + blocks = append(blocks, p.findBlockStmt(x)...) + } case *ast.FuncLit: blocks = append(blocks, t.Body) case *ast.ExprStmt: @@ -946,6 +1013,8 @@ func (p *Processor) findBlockStmt(node ast.Node) []*ast.BlockStmt { blocks = append(blocks, p.findBlockStmt(t.Call)...) case *ast.GoStmt: blocks = append(blocks, p.findBlockStmt(t.Call)...) + case *ast.IfStmt: + blocks = append([]*ast.BlockStmt{t.Body}, p.findBlockStmt(t.Else)...) } return blocks @@ -1002,16 +1071,15 @@ func atLeastOneInListsMatch(listOne, listTwo []string) bool { // findLeadingAndTrailingWhitespaces will find leading and trailing whitespaces // in a node. The method takes comments in consideration which will make the // parser more gentle. 
-// nolint: gocognit -func (p *Processor) findLeadingAndTrailingWhitespaces(ident *ast.Ident, stmt, nextStatement ast.Node) { +func (p *processor) findLeadingAndTrailingWhitespaces(ident *ast.Ident, stmt, nextStatement ast.Node) { var ( - allowedLinesBeforeFirstStatement = 1 - commentMap = ast.NewCommentMap(p.fileSet, stmt, p.file.Comments) - blockStatements []ast.Stmt - blockStartLine int - blockEndLine int - blockStartPos token.Pos - blockEndPos token.Pos + commentMap = ast.NewCommentMap(p.fileSet, stmt, p.file.Comments) + blockStatements []ast.Stmt + blockStartLine int + blockEndLine int + blockStartPos token.Pos + blockEndPos token.Pos + isCase bool ) // Depending on the block type, get the statements in the block and where @@ -1024,9 +1092,11 @@ func (p *Processor) findLeadingAndTrailingWhitespaces(ident *ast.Ident, stmt, ne case *ast.CaseClause: blockStatements = t.Body blockStartPos = t.Colon + isCase = true case *ast.CommClause: blockStatements = t.Body blockStartPos = t.Colon + isCase = true default: p.addWarning(warnWSNodeTypeNotImplemented, stmt.Pos(), stmt) @@ -1038,8 +1108,8 @@ func (p *Processor) findLeadingAndTrailingWhitespaces(ident *ast.Ident, stmt, ne return } - blockStartLine = p.fileSet.Position(blockStartPos).Line - blockEndLine = p.fileSet.Position(blockEndPos).Line + blockStartLine = p.fileSet.PositionFor(blockStartPos, false).Line + blockEndLine = p.fileSet.PositionFor(blockEndPos, false).Line // No whitespace possible if LBrace and RBrace is on the same line. if blockStartLine == blockEndLine { @@ -1047,152 +1117,238 @@ func (p *Processor) findLeadingAndTrailingWhitespaces(ident *ast.Ident, stmt, ne } var ( - firstStatement = blockStatements[0] - lastStatement = blockStatements[len(blockStatements)-1] - seenCommentGroups = 0 + firstStatement = blockStatements[0] + lastStatement = blockStatements[len(blockStatements)-1] ) - // Get the comment related to the first statement, we do allow commends in + // Get the comment related to the first statement, we do allow comments in // the beginning of a block before the first statement. - if c, ok := commentMap[firstStatement]; ok { - for _, commentGroup := range c { - // If the comment group is on the same line as the block start - // (LBrace) we should not consider it. - if p.nodeStart(commentGroup) == blockStartLine { + var ( + openingNodePos = blockStartPos + 1 + lastLeadingComment ast.Node + ) + + var ( + firstStatementCommentGroups []*ast.CommentGroup + lastStatementCommentGroups []*ast.CommentGroup + ) + + if cg, ok := commentMap[firstStatement]; ok && !isCase { + firstStatementCommentGroups = cg + } else { + // TODO: Just like with trailing whitespaces comments in a case block is + // tied to the last token of the first statement. For now we iterate over + // all comments in the stmt and grab those that's after colon and before + // first statement. + for _, cg := range commentMap { + if len(cg) < 1 { continue } - // We only care about comments before our statement from the comment - // map. As soon as we hit comments after our statement let's break - // out! - if commentGroup.Pos() > firstStatement.Pos() { - break + // If we have comments and the last comment ends before the first + // statement and the node is after the colon, this must be the node + // mapped to comments. 
+ for _, c := range cg { + if c.End() < firstStatement.Pos() && c.Pos() > blockStartPos { + firstStatementCommentGroups = append(firstStatementCommentGroups, c) + } } - // We store number of seen comment groups because we allow multiple - // groups with a newline between them; but if the first one has WS - // before it, we're not going to count it to force an error. - if p.config.AllowSeparatedLeadingComment { - cg := p.fileSet.Position(commentGroup.Pos()).Line + // And same if we have comments where the first comment is after the + // last statement but before the next statement (next case). As with + // the other things, if there is not next statement it's no next + // case and the logic will be handled when parsing the block. + if nextStatement == nil { + continue + } - if seenCommentGroups > 0 || cg == blockStartLine+1 { - seenCommentGroups++ + for _, c := range cg { + if c.Pos() > lastStatement.End() && c.End() < nextStatement.Pos() { + lastStatementCommentGroups = append(lastStatementCommentGroups, c) } - } else { - seenCommentGroups++ } + } + + // Since the comments come from a map they might not be ordered meaning + // that the last and first comment groups can be in the wrong order. We + // fix this by sorting all comments by pos after adding them all to the + // slice. + sort.Slice(firstStatementCommentGroups, func(i, j int) bool { + return firstStatementCommentGroups[i].Pos() < firstStatementCommentGroups[j].Pos() + }) + + sort.Slice(lastStatementCommentGroups, func(i, j int) bool { + return lastStatementCommentGroups[i].Pos() < lastStatementCommentGroups[j].Pos() + }) + } + + for _, commentGroup := range firstStatementCommentGroups { + // If the comment group is on the same line as the block start + // (LBrace) we should not consider it. + if p.nodeEnd(commentGroup) == blockStartLine { + openingNodePos = commentGroup.End() + continue + } + + // We only care about comments before our statement from the comment + // map. As soon as we hit comments after our statement let's break + // out! + if commentGroup.Pos() > firstStatement.Pos() { + break + } + + // We never allow leading whitespace for the first comment. + if lastLeadingComment == nil && p.nodeStart(commentGroup)-1 != blockStartLine { + p.addErrorRange( + openingNodePos, + openingNodePos, + commentGroup.Pos(), + reasonBlockStartsWithWS, + ) + } - // Support both /* multiline */ and //single line comments - for _, c := range commentGroup.List { - allowedLinesBeforeFirstStatement += len(strings.Split(c.Text, "\n")) + // If lastLeadingComment is set this is not the first comment so we + // should remove whitespace between them if we don't explicitly + // allow it. 
+ if lastLeadingComment != nil && !p.config.AllowSeparatedLeadingComment { + if p.nodeStart(commentGroup)+1 != p.nodeEnd(lastLeadingComment) { + p.addErrorRange( + openingNodePos, + lastLeadingComment.End(), + commentGroup.Pos(), + reasonBlockStartsWithWS, + ) } } + + lastLeadingComment = commentGroup } - // If we allow separated comments, allow for a space after each group - if p.config.AllowSeparatedLeadingComment { - if seenCommentGroups > 1 { - allowedLinesBeforeFirstStatement += seenCommentGroups - 1 - } else if seenCommentGroups == 1 { - allowedLinesBeforeFirstStatement += 1 - } + lastNodePos := openingNodePos + if lastLeadingComment != nil { + lastNodePos = lastLeadingComment.End() + blockStartLine = p.nodeEnd(lastLeadingComment) } - // And now if the first statement is passed the number of allowed lines, - // then we had extra WS, possibly before the first comment group. - if p.nodeStart(firstStatement) > blockStartLine+allowedLinesBeforeFirstStatement { - p.addError( - blockStartPos, + // Check if we have a whitespace between the last node which can be the + // Lbrace, a comment on the same line or the last comment if we have + // comments inside the actual block and the first statement. This is never + // allowed. + if p.nodeStart(firstStatement)-1 != blockStartLine { + p.addErrorRange( + openingNodePos, + lastNodePos, + firstStatement.Pos(), reasonBlockStartsWithWS, ) } // If the blockEndLine is not 0 we're a regular block (not case). if blockEndLine != 0 { - if p.config.AllowTrailingComment { - if lastComment, ok := commentMap[lastStatement]; ok { - var ( - lastCommentGroup = lastComment[len(lastComment)-1] - lastCommentLine = lastCommentGroup.List[len(lastCommentGroup.List)-1] - countNewlines = 0 - ) + // We don't want to reject example functions since they have to end with + // a comment. + if isExampleFunc(ident) { + return + } + + var ( + lastNode ast.Node = lastStatement + trailingComments []ast.Node + ) - countNewlines += len(strings.Split(lastCommentLine.Text, "\n")) + // Check if we have an comments _after_ the last statement and update + // the last node if so. + if c, ok := commentMap[lastStatement]; ok { + lastComment := c[len(c)-1] + if lastComment.Pos() > lastStatement.End() && lastComment.Pos() < stmt.End() { + lastNode = lastComment + } + } - // No newlines between trailing comments and end of block. - if p.nodeStart(lastCommentLine)+countNewlines != blockEndLine-1 { - return + // TODO: This should be improved. + // The trailing comments are mapped to the last statement item which can + // be anything depending on what the last statement is. + // In `fmt.Println("hello")`, trailing comments will be mapped to + // `*ast.BasicLit` for the "hello" string. + // A short term improvement can be to cache this but for now we naively + // iterate over all items when we check a block. + for _, commentGroups := range commentMap { + for _, commentGroup := range commentGroups { + if commentGroup.Pos() < lastNode.End() || commentGroup.End() > stmt.End() { + continue } + + trailingComments = append(trailingComments, commentGroup) } } - if p.nodeEnd(lastStatement) != blockEndLine-1 && !isExampleFunc(ident) { - p.addError(blockEndPos, reasonBlockEndsWithWS) + // TODO: Should this be relaxed? + // Given the old code we only allowed trailing newline if it was + // directly tied to the last statement so for backwards compatibility + // we'll do the same. This means we fail all but the last whitespace + // even when allowing trailing comments. 
+ for _, comment := range trailingComments { + if p.nodeStart(comment)-p.nodeEnd(lastNode) > 1 { + p.addErrorRange( + blockEndPos, + lastNode.End(), + comment.Pos(), + reasonBlockEndsWithWS, + ) + } + + lastNode = comment + } + + if !p.config.AllowTrailingComment && p.nodeEnd(stmt)-1 != p.nodeEnd(lastStatement) { + p.addErrorRange( + blockEndPos, + lastNode.End(), + stmt.End()-1, + reasonBlockEndsWithWS, + ) } return } + // Nothing to do if we're not looking for enforced newline. + if p.config.ForceCaseTrailingWhitespaceLimit == 0 { + return + } + // If we don't have any nextStatement the trailing whitespace will be // handled when parsing the switch. If we do have a next statement we can // see where it starts by getting it's colon position. We set the end of the // current case to the position of the next case. - switch n := nextStatement.(type) { - case *ast.CaseClause: - blockEndPos = n.Case - case *ast.CommClause: - blockEndPos = n.Case + switch nextStatement.(type) { + case *ast.CaseClause, *ast.CommClause: default: // No more cases return } - blockEndLine = p.fileSet.Position(blockEndPos).Line - 1 - - var ( - blockSize = blockEndLine - blockStartLine - caseTrailingCommentLines int - ) - - // TODO: I don't know what comments are bound to in cases. For regular - // blocks the last comment is bound to the last statement but for cases - // they are bound to the case clause expression. This will however get us all - // comments and depending on the case expression this gets tricky. - // - // To handle this I get the comment map from the current statement (the case - // itself) and iterate through all groups and all comment within all groups. - // I then get the comments after the last statement but before the next case - // clause and just map each line of comment that way. - for _, commentGroups := range commentMap { - for _, commentGroup := range commentGroups { - for _, comment := range commentGroup.List { - commentLine := p.fileSet.Position(comment.Pos()).Line - - // Ignore comments before the last statement. - if commentLine <= p.nodeStart(lastStatement) { - continue - } - - // Ignore comments after the end of this case. - if commentLine > blockEndLine { - continue - } - - // This allows /* multiline */ comments with newlines as well - // as regular (//) ones - caseTrailingCommentLines += len(strings.Split(comment.Text, "\n")) - } - } + var closingNode ast.Node = lastStatement + for _, commentGroup := range lastStatementCommentGroups { + // TODO: In future versions we might want to close the gaps between + // comments. However this is not currently reported in v3 so we + // won't add this for now. + // if p.nodeStart(commentGroup)-1 != p.nodeEnd(closingNode) {} + closingNode = commentGroup } - hasTrailingWhitespace := p.nodeEnd(lastStatement)+caseTrailingCommentLines != blockEndLine + totalRowsInCase := p.nodeEnd(closingNode) - blockStartLine + if totalRowsInCase < p.config.ForceCaseTrailingWhitespaceLimit { + return + } - // If the force trailing limit is configured and we don't end with a newline. - if p.config.ForceCaseTrailingWhitespaceLimit > 0 && !hasTrailingWhitespace { - // Check if the block size is too big to miss the newline. 
- if blockSize >= p.config.ForceCaseTrailingWhitespaceLimit { - p.addError(lastStatement.Pos(), reasonCaseBlockTooCuddly) - } + if p.nodeEnd(closingNode)+1 == p.nodeStart(nextStatement) { + p.addErrorRange( + closingNode.Pos(), + closingNode.End(), + closingNode.End(), + reasonCaseBlockTooCuddly, + ) } } @@ -1200,15 +1356,15 @@ func isExampleFunc(ident *ast.Ident) bool { return ident != nil && strings.HasPrefix(ident.Name, "Example") } -func (p *Processor) nodeStart(node ast.Node) int { - return p.fileSet.Position(node.Pos()).Line +func (p *processor) nodeStart(node ast.Node) int { + return p.fileSet.PositionFor(node.Pos(), false).Line } -func (p *Processor) nodeEnd(node ast.Node) int { - var line = p.fileSet.Position(node.End()).Line +func (p *processor) nodeEnd(node ast.Node) int { + line := p.fileSet.PositionFor(node.End(), false).Line if isEmptyLabeledStmt(node) { - return p.fileSet.Position(node.Pos()).Line + return p.fileSet.PositionFor(node.Pos(), false).Line } return line @@ -1225,21 +1381,29 @@ func isEmptyLabeledStmt(node ast.Node) bool { return empty } -// Add an error for the file and line number for the current token.Pos with the -// given reason. -func (p *Processor) addError(pos token.Pos, reason string) { - position := p.fileSet.Position(pos) +func (p *processor) addWhitespaceBeforeError(node ast.Node, reason string) { + p.addErrorRange(node.Pos(), node.Pos(), node.Pos(), reason) +} - p.result = append(p.result, Result{ - FileName: position.Filename, - LineNumber: position.Line, - Position: position, - Reason: reason, +func (p *processor) addErrorRange(reportAt, start, end token.Pos, reason string) { + report, ok := p.result[reportAt] + if !ok { + report = result{ + reason: reason, + fixRanges: []fix{}, + } + } + + report.fixRanges = append(report.fixRanges, fix{ + fixRangeStart: start, + fixRangeEnd: end, }) + + p.result[reportAt] = report } -func (p *Processor) addWarning(w string, pos token.Pos, t interface{}) { - position := p.fileSet.Position(pos) +func (p *processor) addWarning(w string, pos token.Pos, t interface{}) { + position := p.fileSet.PositionFor(pos, false) p.warnings = append(p.warnings, fmt.Sprintf("%s:%d: %s (%T)", position.Filename, position.Line, w, t), diff --git a/vendor/github.com/breml/bidichk/LICENSE b/vendor/github.com/breml/bidichk/LICENSE new file mode 100644 index 0000000000..47a8419ce9 --- /dev/null +++ b/vendor/github.com/breml/bidichk/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2021 Lucas Bremgartner + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/vendor/github.com/breml/bidichk/pkg/bidichk/bidichk.go b/vendor/github.com/breml/bidichk/pkg/bidichk/bidichk.go new file mode 100644 index 0000000000..f1bf20faba --- /dev/null +++ b/vendor/github.com/breml/bidichk/pkg/bidichk/bidichk.go @@ -0,0 +1,180 @@ +package bidichk + +import ( + "bytes" + "flag" + "fmt" + "go/token" + "os" + "sort" + "strings" + "unicode/utf8" + + "golang.org/x/tools/go/analysis" +) + +const ( + doc = "bidichk detects dangerous unicode character sequences" + disallowedDoc = `comma separated list of disallowed runes (full name or short name) + +Supported runes + +LEFT-TO-RIGHT-EMBEDDING, LRE (u+202A) +RIGHT-TO-LEFT-EMBEDDING, RLE (u+202B) +POP-DIRECTIONAL-FORMATTING, PDF (u+202C) +LEFT-TO-RIGHT-OVERRIDE, LRO (u+202D) +RIGHT-TO-LEFT-OVERRIDE, RLO (u+202E) +LEFT-TO-RIGHT-ISOLATE, LRI (u+2066) +RIGHT-TO-LEFT-ISOLATE, RLI (u+2067) +FIRST-STRONG-ISOLATE, FSI (u+2068) +POP-DIRECTIONAL-ISOLATE, PDI (u+2069) +` +) + +type disallowedRunes map[string]rune + +func (m disallowedRunes) String() string { + ss := make([]string, 0, len(m)) + for s := range m { + ss = append(ss, s) + } + sort.Strings(ss) + return strings.Join(ss, ",") +} + +func (m disallowedRunes) Set(s string) error { + ss := strings.FieldsFunc(s, func(c rune) bool { return c == ',' }) + if len(ss) == 0 { + return nil + } + + for k := range m { + delete(m, k) + } + + for _, v := range ss { + switch v { + case runeShortNameLRE, runeShortNameRLE, runeShortNamePDF, + runeShortNameLRO, runeShortNameRLO, runeShortNameLRI, + runeShortNameRLI, runeShortNameFSI, runeShortNamePDI: + v = shortNameLookup[v] + fallthrough + case runeNameLRE, runeNameRLE, runeNamePDF, + runeNameLRO, runeNameRLO, runeNameLRI, + runeNameRLI, runeNameFSI, runeNamePDI: + m[v] = runeLookup[v] + default: + return fmt.Errorf("unknown check name %q (see help for full list)", v) + } + } + return nil +} + +const ( + runeNameLRE = "LEFT-TO-RIGHT-EMBEDDING" + runeNameRLE = "RIGHT-TO-LEFT-EMBEDDING" + runeNamePDF = "POP-DIRECTIONAL-FORMATTING" + runeNameLRO = "LEFT-TO-RIGHT-OVERRIDE" + runeNameRLO = "RIGHT-TO-LEFT-OVERRIDE" + runeNameLRI = "LEFT-TO-RIGHT-ISOLATE" + runeNameRLI = "RIGHT-TO-LEFT-ISOLATE" + runeNameFSI = "FIRST-STRONG-ISOLATE" + runeNamePDI = "POP-DIRECTIONAL-ISOLATE" + + runeShortNameLRE = "LRE" // LEFT-TO-RIGHT-EMBEDDING + runeShortNameRLE = "RLE" // RIGHT-TO-LEFT-EMBEDDING + runeShortNamePDF = "PDF" // POP-DIRECTIONAL-FORMATTING + runeShortNameLRO = "LRO" // LEFT-TO-RIGHT-OVERRIDE + runeShortNameRLO = "RLO" // RIGHT-TO-LEFT-OVERRIDE + runeShortNameLRI = "LRI" // LEFT-TO-RIGHT-ISOLATE + runeShortNameRLI = "RLI" // RIGHT-TO-LEFT-ISOLATE + runeShortNameFSI = "FSI" // FIRST-STRONG-ISOLATE + runeShortNamePDI = "PDI" // POP-DIRECTIONAL-ISOLATE +) + +var runeLookup = map[string]rune{ + runeNameLRE: '\u202A', // LEFT-TO-RIGHT-EMBEDDING + runeNameRLE: '\u202B', // RIGHT-TO-LEFT-EMBEDDING + runeNamePDF: '\u202C', // POP-DIRECTIONAL-FORMATTING + runeNameLRO: '\u202D', // LEFT-TO-RIGHT-OVERRIDE + runeNameRLO: '\u202E', // RIGHT-TO-LEFT-OVERRIDE + runeNameLRI: '\u2066', // LEFT-TO-RIGHT-ISOLATE + runeNameRLI: '\u2067', // RIGHT-TO-LEFT-ISOLATE + runeNameFSI: '\u2068', // FIRST-STRONG-ISOLATE + runeNamePDI: '\u2069', // POP-DIRECTIONAL-ISOLATE +} + +var shortNameLookup = map[string]string{ + runeShortNameLRE: runeNameLRE, + runeShortNameRLE: runeNameRLE, + runeShortNamePDF: runeNamePDF, + runeShortNameLRO: runeNameLRO, + runeShortNameRLO: runeNameRLO, + runeShortNameLRI: runeNameLRI, + runeShortNameRLI: runeNameRLI, + runeShortNameFSI: 
runeNameFSI, + runeShortNamePDI: runeNamePDI, +} + +type bidichk struct { + disallowedRunes disallowedRunes +} + +// NewAnalyzer return a new bidichk analyzer. +func NewAnalyzer() *analysis.Analyzer { + bidichk := bidichk{} + bidichk.disallowedRunes = make(map[string]rune, len(runeLookup)) + for k, v := range runeLookup { + bidichk.disallowedRunes[k] = v + } + + a := &analysis.Analyzer{ + Name: "bidichk", + Doc: doc, + Run: bidichk.run, + } + + a.Flags.Init("bidichk", flag.ExitOnError) + a.Flags.Var(&bidichk.disallowedRunes, "disallowed-runes", disallowedDoc) + a.Flags.Var(versionFlag{}, "V", "print version and exit") + + return a +} + +func (b bidichk) run(pass *analysis.Pass) (interface{}, error) { + var err error + + pass.Fset.Iterate(func(f *token.File) bool { + if strings.HasPrefix(f.Name(), "$GOROOT") { + return true + } + + return b.check(f.Name(), f.Pos(0), pass) == nil + }) + + return nil, err +} + +func (b bidichk) check(filename string, pos token.Pos, pass *analysis.Pass) error { + body, err := os.ReadFile(filename) + if err != nil { + return err + } + + for name, r := range b.disallowedRunes { + start := 0 + for { + idx := bytes.IndexRune(body[start:], r) + if idx == -1 { + break + } + start += idx + + pass.Reportf(pos+token.Pos(start), "found dangerous unicode character sequence %s", name) + + start += utf8.RuneLen(r) + } + } + + return nil +} diff --git a/vendor/github.com/breml/bidichk/pkg/bidichk/version.go b/vendor/github.com/breml/bidichk/pkg/bidichk/version.go new file mode 100644 index 0000000000..4cfc57dd1e --- /dev/null +++ b/vendor/github.com/breml/bidichk/pkg/bidichk/version.go @@ -0,0 +1,19 @@ +package bidichk + +import ( + "fmt" + "os" +) + +var Version = "bidichk version dev" + +type versionFlag struct{} + +func (versionFlag) IsBoolFlag() bool { return true } +func (versionFlag) Get() interface{} { return nil } +func (versionFlag) String() string { return "" } +func (versionFlag) Set(s string) error { + fmt.Println(Version) + os.Exit(0) + return nil +} diff --git a/vendor/github.com/breml/errchkjson/.gitignore b/vendor/github.com/breml/errchkjson/.gitignore new file mode 100644 index 0000000000..0362de3016 --- /dev/null +++ b/vendor/github.com/breml/errchkjson/.gitignore @@ -0,0 +1,29 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib +/errchkjson +/cmd/errchkjson/errchkjson + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out +coverage.html + +# Log files +*.log + +# Env files +.env + +# Exclude todo +TODO.md + +# Exclude IDE settings +.idea/ +*.iml +.vscode/ diff --git a/vendor/github.com/breml/errchkjson/.goreleaser.yml b/vendor/github.com/breml/errchkjson/.goreleaser.yml new file mode 100644 index 0000000000..a05c172cb6 --- /dev/null +++ b/vendor/github.com/breml/errchkjson/.goreleaser.yml @@ -0,0 +1,34 @@ +# This is an example .goreleaser.yml file with some sane defaults. +# Make sure to check the documentation at http://goreleaser.com +before: + hooks: + # You may remove this if you don't use go modules. 
+ - go mod tidy +builds: + - main: ./cmd/errchkjson + binary: errchkjson + env: + - CGO_ENABLED=0 + goos: + - linux + - windows + - darwin +archives: + - name_template: >- + {{- .Binary }}_ + {{- .Version }}_ + {{- title .Os }}_ + {{- if eq .Arch "amd64" }}x86_64 + {{- else if eq .Arch "386" }}i386 + {{- else }}{{ .Arch }}{{ end }} + {{- if .Arm }}v{{ .Arm }}{{ end -}} +snapshot: + name_template: "{{ .Tag }}-next" +changelog: + skip: true +release: + github: + owner: breml + name: errchkjson +gomod: + proxy: true diff --git a/vendor/github.com/breml/errchkjson/LICENSE b/vendor/github.com/breml/errchkjson/LICENSE new file mode 100644 index 0000000000..08db5cb6fc --- /dev/null +++ b/vendor/github.com/breml/errchkjson/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2019 Lucas Bremgartner + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/breml/errchkjson/README.md b/vendor/github.com/breml/errchkjson/README.md new file mode 100644 index 0000000000..1979597387 --- /dev/null +++ b/vendor/github.com/breml/errchkjson/README.md @@ -0,0 +1,131 @@ +# errchkjson + +[![Test Status](https://github.com/breml/errchkjson/actions/workflows/ci.yml/badge.svg)](https://github.com/breml/errchkjson/actions/workflows/ci.yml) [![Go Report Card](https://goreportcard.com/badge/github.com/breml/errchkjson)](https://goreportcard.com/report/github.com/breml/errchkjson) [![License](https://img.shields.io/badge/license-MIT-blue.svg)](LICENSE) + +Checks types passed to the json encoding functions. Reports unsupported types and reports occurrences where the check for the returned error can be omitted. + +Consider this [http.Handler](https://pkg.go.dev/net/http#Handler): + +```Go +func JSONHelloWorld(w http.ResponseWriter, r *http.Request) { + response := struct { + Message string + Code int + }{ + Message: "Hello World", + Code: 200, + } + + body, err := json.Marshal(response) + if err != nil { + panic(err) // unreachable, because json encoding of a struct with just a string and an int will never return an error. + } + + w.Write(body) +} +``` + +Because the `panic` is not possible to happen, one might refactor the code like this: + +```Go +func JSONHelloWorld(w http.ResponseWriter, r *http.Request) { + response := struct { + Message string + Code int + }{ + Message: "Hello World", + Code: 200, + } + + body, _ := json.Marshal(response) + + w.Write(body) +} +``` + +This is ok, as long as the struct is not altered in such a way, that could potentially lead +to `json.Marshal` returning an error. 
+ +`errchkjson` allows you to lint your code such that the above error returned from `json.Marshal` +can be omitted while still staying safe, because as soon as an unsafe type is added to the +response type, the linter will warn you. + +## Installation + +Download `errchkjson` from the [releases](https://github.com/breml/errchkjson/releases) or get the latest version from source with: + +```shell +go get github.com/breml/errchkjson/cmd/errchkjson +``` + +## Usage + +### Shell + +Check everything: + +```shell +errchkjson ./... +``` + +`errchkjson` also recognizes the following command-line options: + +The `-omit-safe` flag disables checking for safe returns of errors from json.Marshal + +## Types + +### Safe + +The following types are safe to use with [json encoding functions](https://pkg.go.dev/encoding/json), that is, the encoding to JSON can not fail: + +Safe basic types: + +* `bool` +* `int`, `int8`, `int16`, `int32`, `int64`, `uint`, `uint8`, `uint16`, `uint32`, `uint64`, `uintptr` +* `string` +* Pointer type of the above listed basic types + +Composed types (struct, map, slice, array) are safe, if the type of the value is +safe. For structs, only exported fields are relevant. For maps, the key needs to be either an integer type or a string. + +### Unsafe + +The following types are unsafe to use with [json encoding functions](https://pkg.go.dev/encoding/json), that is, the encoding to JSON can fail (return an error): + +Unsafe basic types: + +* `float32`, `float64` +* `interface{}` +* Pointer type of the above listed basic types + +Any composed types (struct, map, slice, array) containing an unsafe basic type. + +If a type implements the `json.Marshaler` or `encoding.TextMarshaler` interface (e.g. `json.Number`). + +### Forbidden + +Forbidden basic types: + +* `complex64`, `complex128` +* `chan` +* `func` +* `unsafe.Pointer` + +Any composed types (struct, map, slice, array) containing a forbidden basic type. Any map +using a key with a forbidden type (`bool`, `float32`, `float64`, `struct`). + +## Accepted edge case + +For `encoding/json.MarshalIndent`, there is a (pathological) edge case, where this +function could [return an error](https://cs.opensource.google/go/go/+/refs/tags/go1.18:src/encoding/json/scanner.go;drc=refs%2Ftags%2Fgo1.18;l=181) for an otherwise safe argument, if the argument has +a nesting depth larger than [`10000`](https://cs.opensource.google/go/go/+/refs/tags/go1.18:src/encoding/json/scanner.go;drc=refs%2Ftags%2Fgo1.18;l=144) (as of Go 1.18). 
+ +## Bugs found during development + +During the development of `errcheckjson`, the following issues in package `encoding/json` of the Go standard library have been found and PR have been merged: + +* [Issue #34154: encoding/json: string option (struct tag) on string field with SetEscapeHTML(false) escapes anyway](https://github.com/golang/go/issues/34154) +* [PR #34127: encoding/json: fix and optimize marshal for quoted string](https://github.com/golang/go/pull/34127) +* [Issue #34268: encoding/json: wrong encoding for json.Number field with string option (struct tag)](https://github.com/golang/go/issues/34268) +* [PR #34269: encoding/json: make Number with the ,string option marshal with quotes](https://github.com/golang/go/pull/34269) +* [PR #34272: encoding/json: validate strings when decoding into Number](https://github.com/golang/go/pull/34272) diff --git a/vendor/github.com/breml/errchkjson/errchkjson.go b/vendor/github.com/breml/errchkjson/errchkjson.go new file mode 100644 index 0000000000..4a23929cf2 --- /dev/null +++ b/vendor/github.com/breml/errchkjson/errchkjson.go @@ -0,0 +1,348 @@ +// Package errchkjson defines an Analyzer that finds places, where it is +// safe to omit checking the error returned from json.Marshal. +package errchkjson + +import ( + "flag" + "fmt" + "go/ast" + "go/token" + "go/types" + "reflect" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/types/typeutil" +) + +type errchkjson struct { + omitSafe bool // -omit-safe flag + reportNoExported bool // -report-no-exported flag +} + +// NewAnalyzer returns a new errchkjson analyzer. +func NewAnalyzer() *analysis.Analyzer { + errchkjson := &errchkjson{} + + a := &analysis.Analyzer{ + Name: "errchkjson", + Doc: "Checks types passed to the json encoding functions. Reports unsupported types and reports occations, where the check for the returned error can be omitted.", + Run: errchkjson.run, + } + + a.Flags.Init("errchkjson", flag.ExitOnError) + a.Flags.BoolVar(&errchkjson.omitSafe, "omit-safe", false, "if omit-safe is true, checking of safe returns is omitted") + a.Flags.BoolVar(&errchkjson.reportNoExported, "report-no-exported", false, "if report-no-exported is true, encoding a struct without exported fields is reported as issue") + a.Flags.Var(versionFlag{}, "V", "print version and exit") + + return a +} + +func (e *errchkjson) run(pass *analysis.Pass) (interface{}, error) { + for _, file := range pass.Files { + ast.Inspect(file, func(n ast.Node) bool { + if n == nil { + return true + } + + // if the error is returned, it is the caller's responsibility to check + // the return value. 
+ if _, ok := n.(*ast.ReturnStmt); ok { + return false + } + + ce, ok := n.(*ast.CallExpr) + if ok { + fn, _ := typeutil.Callee(pass.TypesInfo, ce).(*types.Func) + if fn == nil { + return true + } + + switch fn.FullName() { + case "encoding/json.Marshal", "encoding/json.MarshalIndent": + e.handleJSONMarshal(pass, ce, fn.FullName(), blankIdentifier, e.omitSafe) + case "(*encoding/json.Encoder).Encode": + e.handleJSONMarshal(pass, ce, fn.FullName(), blankIdentifier, true) + default: + e.inspectArgs(pass, ce.Args) + } + return false + } + + as, ok := n.(*ast.AssignStmt) + if !ok { + return true + } + + ce, ok = as.Rhs[0].(*ast.CallExpr) + if !ok { + return true + } + + fn, _ := typeutil.Callee(pass.TypesInfo, ce).(*types.Func) + if fn == nil { + return true + } + + switch fn.FullName() { + case "encoding/json.Marshal", "encoding/json.MarshalIndent": + e.handleJSONMarshal(pass, ce, fn.FullName(), evaluateMarshalErrorTarget(as.Lhs[1]), e.omitSafe) + case "(*encoding/json.Encoder).Encode": + e.handleJSONMarshal(pass, ce, fn.FullName(), evaluateMarshalErrorTarget(as.Lhs[0]), true) + default: + return true + } + return false + }) + } + + return nil, nil +} + +func evaluateMarshalErrorTarget(n ast.Expr) marshalErrorTarget { + if errIdent, ok := n.(*ast.Ident); ok { + if errIdent.Name == "_" { + return blankIdentifier + } + } + return variableAssignment +} + +type marshalErrorTarget int + +const ( + blankIdentifier = iota // the returned error from the JSON marshal function is assigned to the blank identifier "_". + variableAssignment // the returned error from the JSON marshal function is assigned to a variable. + functionArgument // the returned error from the JSON marshal function is passed to an other function as argument. +) + +func (e *errchkjson) handleJSONMarshal(pass *analysis.Pass, ce *ast.CallExpr, fnName string, errorTarget marshalErrorTarget, omitSafe bool) { + t := pass.TypesInfo.TypeOf(ce.Args[0]) + if t == nil { + // Not sure, if this is at all possible + if errorTarget == blankIdentifier { + pass.Reportf(ce.Pos(), "Type of argument to `%s` could not be evaluated and error return value is not checked", fnName) + } + return + } + + if _, ok := t.(*types.Pointer); ok { + t = t.(*types.Pointer).Elem() + } + + err := e.jsonSafe(t, 0, map[types.Type]struct{}{}) + if err != nil { + if _, ok := err.(unsupported); ok { + pass.Reportf(ce.Pos(), "`%s` for %v", fnName, err) + return + } + if _, ok := err.(noexported); ok { + pass.Reportf(ce.Pos(), "Error argument passed to `%s` does not contain any exported field", fnName) + } + // Only care about unsafe types if they are assigned to the blank identifier. 
+ if errorTarget == blankIdentifier { + pass.Reportf(ce.Pos(), "Error return value of `%s` is not checked: %v", fnName, err) + } + } + if err == nil && errorTarget == variableAssignment && !omitSafe { + pass.Reportf(ce.Pos(), "Error return value of `%s` is checked but passed argument is safe", fnName) + } + // Report an error, if err for json.Marshal is not checked and safe types are omitted + if err == nil && errorTarget == blankIdentifier && omitSafe { + pass.Reportf(ce.Pos(), "Error return value of `%s` is not checked", fnName) + } +} + +const ( + allowedBasicTypes = types.IsBoolean | types.IsInteger | types.IsString + allowedMapKeyBasicTypes = types.IsInteger | types.IsString + unsupportedBasicTypes = types.IsComplex +) + +func (e *errchkjson) jsonSafe(t types.Type, level int, seenTypes map[types.Type]struct{}) error { + if _, ok := seenTypes[t]; ok { + return nil + } + + if types.Implements(t, textMarshalerInterface()) || types.Implements(t, jsonMarshalerInterface()) { + return fmt.Errorf("unsafe type `%s` found", t.String()) + } + + switch ut := t.Underlying().(type) { + case *types.Basic: + if ut.Info()&allowedBasicTypes > 0 { // bool, int-family, string + if ut.Info()&types.IsString > 0 && t.String() == "encoding/json.Number" { + return fmt.Errorf("unsafe type `%s` found", t.String()) + } + return nil + } + if ut.Info()&unsupportedBasicTypes > 0 { // complex64, complex128 + return newUnsupportedError(fmt.Errorf("unsupported type `%s` found", ut.String())) + } + switch ut.Kind() { + case types.UntypedNil: + return nil + case types.UnsafePointer: + return newUnsupportedError(fmt.Errorf("unsupported type `%s` found", ut.String())) + default: + // E.g. float32, float64 + return fmt.Errorf("unsafe type `%s` found", ut.String()) + } + + case *types.Array: + err := e.jsonSafe(ut.Elem(), level+1, seenTypes) + if err != nil { + return err + } + return nil + + case *types.Slice: + err := e.jsonSafe(ut.Elem(), level+1, seenTypes) + if err != nil { + return err + } + return nil + + case *types.Struct: + seenTypes[t] = struct{}{} + exported := 0 + for i := 0; i < ut.NumFields(); i++ { + if !ut.Field(i).Exported() { + // Unexported fields can be ignored + continue + } + if tag, ok := reflect.StructTag(ut.Tag(i)).Lookup("json"); ok { + if tag == "-" { + // Fields omitted in json can be ignored + continue + } + } + err := e.jsonSafe(ut.Field(i).Type(), level+1, seenTypes) + if err != nil { + return err + } + exported++ + } + if e.reportNoExported && level == 0 && exported == 0 { + return newNoexportedError(fmt.Errorf("struct does not export any field")) + } + return nil + + case *types.Pointer: + err := e.jsonSafe(ut.Elem(), level+1, seenTypes) + if err != nil { + return err + } + return nil + + case *types.Map: + err := jsonSafeMapKey(ut.Key()) + if err != nil { + return err + } + err = e.jsonSafe(ut.Elem(), level+1, seenTypes) + if err != nil { + return err + } + return nil + + case *types.Chan, *types.Signature: + // Types that are not supported for encoding to json: + return newUnsupportedError(fmt.Errorf("unsupported type `%s` found", ut.String())) + + default: + // Types that are not supported for encoding to json or are not completely safe, like: interfaces + return fmt.Errorf("unsafe type `%s` found", t.String()) + } +} + +func jsonSafeMapKey(t types.Type) error { + if types.Implements(t, textMarshalerInterface()) || types.Implements(t, jsonMarshalerInterface()) { + return fmt.Errorf("unsafe type `%s` as map key found", t.String()) + } + switch ut := t.Underlying().(type) { + case 
*types.Basic: + if ut.Info()&types.IsString > 0 && t.String() == "encoding/json.Number" { + return fmt.Errorf("unsafe type `%s` as map key found", t.String()) + } + if ut.Info()&allowedMapKeyBasicTypes > 0 { // bool, int-family, string + return nil + } + // E.g. bool, float32, float64, complex64, complex128 + return newUnsupportedError(fmt.Errorf("unsupported type `%s` as map key found", t.String())) + case *types.Interface: + return fmt.Errorf("unsafe type `%s` as map key found", t.String()) + default: + // E.g. struct composed solely of basic types, that are comparable + return newUnsupportedError(fmt.Errorf("unsupported type `%s` as map key found", t.String())) + } +} + +func (e *errchkjson) inspectArgs(pass *analysis.Pass, args []ast.Expr) { + for _, a := range args { + ast.Inspect(a, func(n ast.Node) bool { + if n == nil { + return true + } + + ce, ok := n.(*ast.CallExpr) + if !ok { + return false + } + + fn, _ := typeutil.Callee(pass.TypesInfo, ce).(*types.Func) + if fn == nil { + return true + } + + switch fn.FullName() { + case "encoding/json.Marshal", "encoding/json.MarshalIndent": + e.handleJSONMarshal(pass, ce, fn.FullName(), functionArgument, e.omitSafe) + case "(*encoding/json.Encoder).Encode": + e.handleJSONMarshal(pass, ce, fn.FullName(), functionArgument, true) + default: + e.inspectArgs(pass, ce.Args) + } + return false + }) + } +} + +// Construct *types.Interface for interface encoding.TextMarshaler +// +// type TextMarshaler interface { +// MarshalText() (text []byte, err error) +// } +func textMarshalerInterface() *types.Interface { + textMarshalerInterface := types.NewInterfaceType([]*types.Func{ + types.NewFunc(token.NoPos, nil, "MarshalText", types.NewSignatureType( + nil, nil, nil, nil, types.NewTuple( + types.NewVar(token.NoPos, nil, "text", + types.NewSlice( + types.Universe.Lookup("byte").Type())), + types.NewVar(token.NoPos, nil, "err", types.Universe.Lookup("error").Type())), + false)), + }, nil) + textMarshalerInterface.Complete() + + return textMarshalerInterface +} + +// Construct *types.Interface for interface json.Marshaler +// +// type Marshaler interface { +// MarshalJSON() ([]byte, error) +// } +func jsonMarshalerInterface() *types.Interface { + textMarshalerInterface := types.NewInterfaceType([]*types.Func{ + types.NewFunc(token.NoPos, nil, "MarshalJSON", types.NewSignatureType( + nil, nil, nil, nil, types.NewTuple( + types.NewVar(token.NoPos, nil, "", + types.NewSlice( + types.Universe.Lookup("byte").Type())), + types.NewVar(token.NoPos, nil, "", types.Universe.Lookup("error").Type())), + false)), + }, nil) + textMarshalerInterface.Complete() + + return textMarshalerInterface +} diff --git a/vendor/github.com/breml/errchkjson/noexported_error.go b/vendor/github.com/breml/errchkjson/noexported_error.go new file mode 100644 index 0000000000..07b7a07d26 --- /dev/null +++ b/vendor/github.com/breml/errchkjson/noexported_error.go @@ -0,0 +1,23 @@ +package errchkjson + +type noexported interface { + noexported() +} + +var _ noexported = noexportedError{} + +type noexportedError struct { + err error +} + +func newNoexportedError(err error) error { + return noexportedError{ + err: err, + } +} + +func (u noexportedError) noexported() {} + +func (u noexportedError) Error() string { + return u.err.Error() +} diff --git a/vendor/github.com/breml/errchkjson/unsupported_error.go b/vendor/github.com/breml/errchkjson/unsupported_error.go new file mode 100644 index 0000000000..1a38c3f532 --- /dev/null +++ b/vendor/github.com/breml/errchkjson/unsupported_error.go @@ 
-0,0 +1,23 @@ +package errchkjson + +type unsupported interface { + unsupported() +} + +var _ unsupported = unsupportedError{} + +type unsupportedError struct { + err error +} + +func newUnsupportedError(err error) error { + return unsupportedError{ + err: err, + } +} + +func (u unsupportedError) unsupported() {} + +func (u unsupportedError) Error() string { + return u.err.Error() +} diff --git a/vendor/github.com/breml/errchkjson/version.go b/vendor/github.com/breml/errchkjson/version.go new file mode 100644 index 0000000000..77d8ef8bb0 --- /dev/null +++ b/vendor/github.com/breml/errchkjson/version.go @@ -0,0 +1,19 @@ +package errchkjson + +import ( + "fmt" + "os" +) + +var Version = "errchkjson version dev" + +type versionFlag struct{} + +func (versionFlag) IsBoolFlag() bool { return true } +func (versionFlag) Get() interface{} { return nil } +func (versionFlag) String() string { return "" } +func (versionFlag) Set(s string) error { + fmt.Println(Version) + os.Exit(0) + return nil +} diff --git a/vendor/github.com/butuzov/ireturn/LICENSE b/vendor/github.com/butuzov/ireturn/LICENSE new file mode 100644 index 0000000000..a9752e9726 --- /dev/null +++ b/vendor/github.com/butuzov/ireturn/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2021 Oleg Butuzov + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/butuzov/ireturn/analyzer/analyzer.go b/vendor/github.com/butuzov/ireturn/analyzer/analyzer.go new file mode 100644 index 0000000000..f68170fb31 --- /dev/null +++ b/vendor/github.com/butuzov/ireturn/analyzer/analyzer.go @@ -0,0 +1,277 @@ +package analyzer + +import ( + "flag" + "go/ast" + gotypes "go/types" + "runtime" + "strings" + "sync" + + "github.com/butuzov/ireturn/analyzer/internal/config" + "github.com/butuzov/ireturn/analyzer/internal/types" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" +) + +const name string = "ireturn" // linter name + +type validator interface { + IsValid(types.IFace) bool +} + +type analyzer struct { + once sync.Once + mu sync.RWMutex + handler validator + err error + diabledNolint bool + + found []analysis.Diagnostic +} + +func (a *analyzer) run(pass *analysis.Pass) (interface{}, error) { + // 00. Part 1. Handling Configuration Only Once. + a.once.Do(func() { a.readConfiguration(&pass.Analyzer.Flags) }) + + // 00. Part 2. Handling Errors + if a.err != nil { + return nil, a.err + } + + ins, _ := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + + // 00. 
does file have dot-imported standard packages? + dotImportedStd := make(map[string]struct{}) + ins.Preorder([]ast.Node{(*ast.ImportSpec)(nil)}, func(node ast.Node) { + i, _ := node.(*ast.ImportSpec) + if i.Name != nil && i.Name.Name == "." { + dotImportedStd[strings.Trim(i.Path.Value, `"`)] = struct{}{} + } + }) + + // 01. Running Inspection. + ins.Preorder([]ast.Node{(*ast.FuncDecl)(nil)}, func(node ast.Node) { + // 001. Casting to funcdecl + f, _ := node.(*ast.FuncDecl) + + // 002. Does it return any results ? + if f.Type == nil || f.Type.Results == nil { + return + } + + // 003. Is it allowed to be checked? + if !a.diabledNolint && hasDisallowDirective(f.Doc) { + return + } + + seen := make(map[string]bool, 4) + + // 004. Filtering Results. + for _, issue := range filterInterfaces(pass, f.Type, dotImportedStd) { + if a.handler.IsValid(issue) { + continue + } + + issue.Enrich(f) + + key := issue.HashString() + + if ok := seen[key]; ok { + continue + } + seen[key] = true + + a.addDiagnostic(issue.ExportDiagnostic()) + } + }) + + // 02. Printing reports. + a.mu.RLock() + defer a.mu.RUnlock() + for i := range a.found { + pass.Report(a.found[i]) + } + + return nil, nil +} + +func (a *analyzer) addDiagnostic(d analysis.Diagnostic) { + a.mu.Lock() + defer a.mu.Unlock() + + a.found = append(a.found, d) +} + +func (a *analyzer) readConfiguration(fs *flag.FlagSet) { + cnf, err := config.New(fs) + if err != nil { + a.err = err + return + } + + // First: checking nonolint directive + val := fs.Lookup("nonolint") + if val != nil { + a.diabledNolint = fs.Lookup("nonolint").Value.String() == "true" + } + + // Second: validators implementation next + if validatorImpl, ok := cnf.(validator); ok { + a.handler = validatorImpl + return + } + + a.handler = config.DefaultValidatorConfig() +} + +func NewAnalyzer() *analysis.Analyzer { + a := analyzer{} //nolint: exhaustivestruct + + return &analysis.Analyzer{ + Name: name, + Doc: "Accept Interfaces, Return Concrete Types", + Run: a.run, + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Flags: flags(), + } +} + +func flags() flag.FlagSet { + set := flag.NewFlagSet("", flag.PanicOnError) + set.String("allow", "", "accept-list of the comma-separated interfaces") + set.String("reject", "", "reject-list of the comma-separated interfaces") + set.Bool("nonolint", false, "disable nolint checks") + return *set +} + +func filterInterfaces(p *analysis.Pass, ft *ast.FuncType, di map[string]struct{}) []types.IFace { + var results []types.IFace + + if ft.Results == nil { // this can't happen, but double checking. + return results + } + + for _, el := range ft.Results.List { + switch v := el.Type.(type) { + // ----- empty or anonymous interfaces + case *ast.InterfaceType: + if len(v.Methods.List) == 0 { + results = append(results, types.NewIssue("interface{}", types.EmptyInterface)) + continue + } + + results = append(results, types.NewIssue("anonymous interface", types.AnonInterface)) + + // ------ Errors and interfaces from same package + case *ast.Ident: + + t1 := p.TypesInfo.TypeOf(el.Type) + val, ok := t1.Underlying().(*gotypes.Interface) + if !ok { + continue + } + + var ( + name = t1.String() + isNamed = strings.Contains(name, ".") + isEmpty = val.Empty() + ) + + // catching any + if isEmpty && name == "any" { + results = append(results, types.NewIssue(name, types.EmptyInterface)) + continue + } + + // NOTE: FIXED! 
+ if name == "error" { + results = append(results, types.NewIssue(name, types.ErrorInterface)) + continue + } + + if !isNamed { + + typeParams := val.String() + prefix, suffix := "interface{", "}" + if strings.HasPrefix(typeParams, prefix) { // nolint: gosimple + typeParams = typeParams[len(prefix):] + } + if strings.HasSuffix(typeParams, suffix) { + typeParams = typeParams[:len(typeParams)-1] + } + + goVersion := runtime.Version() + if strings.HasPrefix(goVersion, "go1.18") || strings.HasPrefix(goVersion, "go1.19") { + typeParams = strings.ReplaceAll(typeParams, "|", " | ") + } + + results = append(results, types.IFace{ + Name: name, + Type: types.Generic, + OfType: typeParams, + }) + continue + } + + // is it dot-imported package? + // handling cases when stdlib package imported via "." dot-import + if len(di) > 0 { + pkgName := stdPkgInterface(name) + if _, ok := di[pkgName]; ok { + results = append(results, types.NewIssue(name, types.NamedStdInterface)) + + continue + } + } + + results = append(results, types.NewIssue(name, types.NamedInterface)) + + // ------- standard library and 3rd party interfaces + case *ast.SelectorExpr: + + t1 := p.TypesInfo.TypeOf(el.Type) + if !gotypes.IsInterface(t1.Underlying()) { + continue + } + + word := t1.String() + if isStdPkgInterface(word) { + results = append(results, types.NewIssue(word, types.NamedStdInterface)) + continue + } + + results = append(results, types.NewIssue(word, types.NamedInterface)) + } + } + + return results +} + +// stdPkgInterface will return package name if tis std lib package +// or empty string on fail. +func stdPkgInterface(named string) string { + // find last "." index. + idx := strings.LastIndex(named, ".") + if idx == -1 { + return "" + } + + return stdPkg(named[0:idx]) +} + +// isStdPkgInterface will run small checks against pkg to find out if named +// interface we looking on - comes from a standard library or not. +func isStdPkgInterface(namedInterface string) bool { + return stdPkgInterface(namedInterface) != "" +} + +func stdPkg(pkg string) string { + if _, ok := std[pkg]; ok { + return pkg + } + + return "" +} diff --git a/vendor/github.com/butuzov/ireturn/analyzer/disallow.go b/vendor/github.com/butuzov/ireturn/analyzer/disallow.go new file mode 100644 index 0000000000..36b6fcb4f3 --- /dev/null +++ b/vendor/github.com/butuzov/ireturn/analyzer/disallow.go @@ -0,0 +1,45 @@ +package analyzer + +import ( + "go/ast" + "strings" +) + +const nolintPrefix = "//nolint" + +func hasDisallowDirective(cg *ast.CommentGroup) bool { + if cg == nil { + return false + } + + return directiveFound(cg) +} + +func directiveFound(cg *ast.CommentGroup) bool { + for i := len(cg.List) - 1; i >= 0; i-- { + comment := cg.List[i] + if !strings.HasPrefix(comment.Text, nolintPrefix) { + continue + } + + startingIdx := len(nolintPrefix) + for { + idx := strings.Index(comment.Text[startingIdx:], name) + if idx == -1 { + break + } + + if len(comment.Text[startingIdx+idx:]) == len(name) { + return true + } + + c := comment.Text[startingIdx+idx+len(name)] + if c == '.' 
|| c == ',' || c == ' ' || c == ' ' { + return true + } + startingIdx += idx + 1 + } + } + + return false +} diff --git a/vendor/github.com/butuzov/ireturn/analyzer/internal/config/allow.go b/vendor/github.com/butuzov/ireturn/analyzer/internal/config/allow.go new file mode 100644 index 0000000000..6a294ca35f --- /dev/null +++ b/vendor/github.com/butuzov/ireturn/analyzer/internal/config/allow.go @@ -0,0 +1,17 @@ +package config + +import "github.com/butuzov/ireturn/analyzer/internal/types" + +// allowConfig specifies a list of interfaces (keywords, patters and regular expressions) +// that are allowed by ireturn as valid to return, any non listed interface are rejected. +type allowConfig struct { + *defaultConfig +} + +func allowAll(patterns []string) *allowConfig { + return &allowConfig{&defaultConfig{List: patterns}} +} + +func (ac *allowConfig) IsValid(i types.IFace) bool { + return ac.Has(i) +} diff --git a/vendor/github.com/butuzov/ireturn/analyzer/internal/config/config.go b/vendor/github.com/butuzov/ireturn/analyzer/internal/config/config.go new file mode 100644 index 0000000000..46c73170ae --- /dev/null +++ b/vendor/github.com/butuzov/ireturn/analyzer/internal/config/config.go @@ -0,0 +1,66 @@ +package config + +import ( + "regexp" + "sync" + + "github.com/butuzov/ireturn/analyzer/internal/types" +) + +// defaultConfig is core of the validation, ... +// todo(butuzov): write proper intro... + +type defaultConfig struct { + List []string + + // private fields (for search optimization look ups) + once sync.Once + quick uint8 + list []*regexp.Regexp +} + +func (config *defaultConfig) Has(i types.IFace) bool { + config.once.Do(config.compileList) + + if config.quick&uint8(i.Type) > 0 { + return true + } + + // not a named interface (because error, interface{}, anon interface has keywords.) + if i.Type&types.NamedInterface == 0 && i.Type&types.NamedStdInterface == 0 { + return false + } + + for _, re := range config.list { + if re.MatchString(i.Name) { + return true + } + } + + return false +} + +// compileList will transform text list into a bitmask for quick searches and +// slice of regular expressions for quick searches. +func (config *defaultConfig) compileList() { + for _, str := range config.List { + switch str { + case types.NameError: + config.quick |= uint8(types.ErrorInterface) + case types.NameEmpty: + config.quick |= uint8(types.EmptyInterface) + case types.NameAnon: + config.quick |= uint8(types.AnonInterface) + case types.NameStdLib: + config.quick |= uint8(types.NamedStdInterface) + case types.NameGeneric: + config.quick |= uint8(types.Generic) + } + + // allow to parse regular expressions + // todo(butuzov): how can we log error in golangci-lint? 
+ if re, err := regexp.Compile(str); err == nil { + config.list = append(config.list, re) + } + } +} diff --git a/vendor/github.com/butuzov/ireturn/analyzer/internal/config/new.go b/vendor/github.com/butuzov/ireturn/analyzer/internal/config/new.go new file mode 100644 index 0000000000..6aa04e52e8 --- /dev/null +++ b/vendor/github.com/butuzov/ireturn/analyzer/internal/config/new.go @@ -0,0 +1,74 @@ +package config + +import ( + "errors" + "flag" + "strings" + + "github.com/butuzov/ireturn/analyzer/internal/types" +) + +var ErrCollisionOfInterests = errors.New("can't have both `-accept` and `-reject` specified at same time") + +// nolint: exhaustivestruct +func DefaultValidatorConfig() *allowConfig { + return allowAll([]string{ + types.NameEmpty, // "empty": empty interfaces (interface{}) + types.NameError, // "error": for all error's + types.NameAnon, // "anon": for all empty interfaces with methods (interface {Method()}) + types.NameStdLib, // "std": for all standard library packages + }) +} + +// New is factory function that return allowConfig or rejectConfig depending +// on provided arguments. +func New(fs *flag.FlagSet) (interface{}, error) { + var ( + allowList = toSlice(getFlagVal(fs, "allow")) + rejectList = toSlice(getFlagVal(fs, "reject")) + ) + + // can't have both at same time. + if len(allowList) != 0 && len(rejectList) != 0 { + return nil, ErrCollisionOfInterests + } + + switch { + case len(allowList) > 0: + return allowAll(allowList), nil + case len(rejectList) > 0: + return rejectAll(rejectList), nil + } + + // can have none (defaults are used) at same time. + return nil, nil +} + +// both constants used to cleanup items provided in comma separated list. +const ( + SepTab string = " " + SepSpace string = " " +) + +func toSlice(s string) []string { + var results []string + + for _, pattern := range strings.Split(s, ",") { + pattern = strings.Trim(pattern, SepTab+SepSpace) + if pattern != "" { + results = append(results, pattern) + } + } + + return results +} + +func getFlagVal(fs *flag.FlagSet, name string) string { + flg := fs.Lookup(name) + + if flg == nil { + return "" + } + + return flg.Value.String() +} diff --git a/vendor/github.com/butuzov/ireturn/analyzer/internal/config/reject.go b/vendor/github.com/butuzov/ireturn/analyzer/internal/config/reject.go new file mode 100644 index 0000000000..bef6913bb8 --- /dev/null +++ b/vendor/github.com/butuzov/ireturn/analyzer/internal/config/reject.go @@ -0,0 +1,17 @@ +package config + +import "github.com/butuzov/ireturn/analyzer/internal/types" + +// rejectConfig specifies a list of interfaces (keywords, patters and regular expressions) +// that are rejected by ireturn as valid to return, any non listed interface are allowed. 
+type rejectConfig struct { + *defaultConfig +} + +func rejectAll(patterns []string) *rejectConfig { + return &rejectConfig{&defaultConfig{List: patterns}} +} + +func (rc *rejectConfig) IsValid(i types.IFace) bool { + return !rc.Has(i) +} diff --git a/vendor/github.com/butuzov/ireturn/analyzer/internal/types/iface.go b/vendor/github.com/butuzov/ireturn/analyzer/internal/types/iface.go new file mode 100644 index 0000000000..5e576374d5 --- /dev/null +++ b/vendor/github.com/butuzov/ireturn/analyzer/internal/types/iface.go @@ -0,0 +1,54 @@ +package types + +import ( + "fmt" + "go/ast" + "go/token" + + "golang.org/x/tools/go/analysis" +) + +type IFace struct { + Name string // Interface name + Type IType // Type of the interface + + Pos token.Pos // Token Position + FuncName string // + OfType string +} + +func NewIssue(name string, interfaceType IType) IFace { + return IFace{ + Name: name, + // Pos: pos, + Type: interfaceType, + } +} + +func (i *IFace) Enrich(f *ast.FuncDecl) { + i.FuncName = f.Name.Name + i.Pos = f.Pos() +} + +func (i IFace) String() string { + if i.Type != Generic { + return fmt.Sprintf("%s returns interface (%s)", i.FuncName, i.Name) + } + + if i.OfType != "" { + return fmt.Sprintf("%s returns generic interface (%s) of type param %s", i.FuncName, i.Name, i.OfType) + } + + return fmt.Sprintf("%s returns generic interface (%s)", i.FuncName, i.Name) +} + +func (i IFace) HashString() string { + return fmt.Sprintf("%v-%s", i.Pos, i.String()) +} + +func (i IFace) ExportDiagnostic() analysis.Diagnostic { + return analysis.Diagnostic{ //nolint: exhaustivestruct + Pos: i.Pos, + Message: i.String(), + } +} diff --git a/vendor/github.com/butuzov/ireturn/analyzer/internal/types/names.go b/vendor/github.com/butuzov/ireturn/analyzer/internal/types/names.go new file mode 100644 index 0000000000..1092c9667c --- /dev/null +++ b/vendor/github.com/butuzov/ireturn/analyzer/internal/types/names.go @@ -0,0 +1,9 @@ +package types + +const ( + NameEmpty = "empty" + NameAnon = "anon" + NameError = "error" + NameStdLib = "stdlib" + NameGeneric = "generic" +) diff --git a/vendor/github.com/butuzov/ireturn/analyzer/internal/types/types.go b/vendor/github.com/butuzov/ireturn/analyzer/internal/types/types.go new file mode 100644 index 0000000000..5c0bd74077 --- /dev/null +++ b/vendor/github.com/butuzov/ireturn/analyzer/internal/types/types.go @@ -0,0 +1,12 @@ +package types + +type IType uint8 + +const ( + EmptyInterface IType = 1 << iota // ref as empty + AnonInterface // ref as anon + ErrorInterface // ref as error + NamedInterface // ref as named + NamedStdInterface // ref as named stdlib + Generic // ref as generic type parameter +) diff --git a/vendor/github.com/butuzov/ireturn/analyzer/std.go b/vendor/github.com/butuzov/ireturn/analyzer/std.go new file mode 100644 index 0000000000..cac4646126 --- /dev/null +++ b/vendor/github.com/butuzov/ireturn/analyzer/std.go @@ -0,0 +1,203 @@ +// Code generated using std.sh; DO NOT EDIT. + +// We will ignore that fact that some of packages +// were removed from stdlib. 
+ +package analyzer + +var std = map[string]struct{}{ + // added in Go v1.2 in compare to v1.1 (docker image) + "archive/tar": {}, + "archive/zip": {}, + "bufio": {}, + "bytes": {}, + "cmd/cgo": {}, + "cmd/fix": {}, + "cmd/go": {}, + "cmd/gofmt": {}, + "cmd/yacc": {}, + "compress/bzip2": {}, + "compress/flate": {}, + "compress/gzip": {}, + "compress/lzw": {}, + "compress/zlib": {}, + "container/heap": {}, + "container/list": {}, + "container/ring": {}, + "crypto": {}, + "crypto/aes": {}, + "crypto/cipher": {}, + "crypto/des": {}, + "crypto/dsa": {}, + "crypto/ecdsa": {}, + "crypto/elliptic": {}, + "crypto/hmac": {}, + "crypto/md5": {}, + "crypto/rand": {}, + "crypto/rc4": {}, + "crypto/rsa": {}, + "crypto/sha1": {}, + "crypto/sha256": {}, + "crypto/sha512": {}, + "crypto/subtle": {}, + "crypto/tls": {}, + "crypto/x509": {}, + "crypto/x509/pkix": {}, + "database/sql": {}, + "database/sql/driver": {}, + "debug/dwarf": {}, + "debug/elf": {}, + "debug/gosym": {}, + "debug/macho": {}, + "debug/pe": {}, + "encoding": {}, + "encoding/ascii85": {}, + "encoding/asn1": {}, + "encoding/base32": {}, + "encoding/base64": {}, + "encoding/binary": {}, + "encoding/csv": {}, + "encoding/gob": {}, + "encoding/hex": {}, + "encoding/json": {}, + "encoding/pem": {}, + "encoding/xml": {}, + "errors": {}, + "expvar": {}, + "flag": {}, + "fmt": {}, + "go/ast": {}, + "go/build": {}, + "go/doc": {}, + "go/format": {}, + "go/parser": {}, + "go/printer": {}, + "go/scanner": {}, + "go/token": {}, + "hash": {}, + "hash/adler32": {}, + "hash/crc32": {}, + "hash/crc64": {}, + "hash/fnv": {}, + "html": {}, + "html/template": {}, + "image": {}, + "image/color": {}, + "image/color/palette": {}, + "image/draw": {}, + "image/gif": {}, + "image/jpeg": {}, + "image/png": {}, + "index/suffixarray": {}, + "io": {}, + "io/ioutil": {}, + "log": {}, + "log/syslog": {}, + "math": {}, + "math/big": {}, + "math/cmplx": {}, + "math/rand": {}, + "mime": {}, + "mime/multipart": {}, + "net": {}, + "net/http": {}, + "net/http/cgi": {}, + "net/http/cookiejar": {}, + "net/http/fcgi": {}, + "net/http/httptest": {}, + "net/http/httputil": {}, + "net/http/pprof": {}, + "net/mail": {}, + "net/rpc": {}, + "net/rpc/jsonrpc": {}, + "net/smtp": {}, + "net/textproto": {}, + "net/url": {}, + "os": {}, + "os/exec": {}, + "os/signal": {}, + "os/user": {}, + "path": {}, + "path/filepath": {}, + "reflect": {}, + "regexp": {}, + "regexp/syntax": {}, + "runtime": {}, + "runtime/cgo": {}, + "runtime/debug": {}, + "runtime/pprof": {}, + "runtime/race": {}, + "sort": {}, + "strconv": {}, + "strings": {}, + "sync": {}, + "sync/atomic": {}, + "syscall": {}, + "testing": {}, + "testing/iotest": {}, + "testing/quick": {}, + "text/scanner": {}, + "text/tabwriter": {}, + "text/template": {}, + "text/template/parse": {}, + "time": {}, + "unicode": {}, + "unicode/utf16": {}, + "unicode/utf8": {}, + "unsafe": {}, + // added in Go v1.3 in compare to v1.2 (docker image) + "cmd/addr2line": {}, + "cmd/nm": {}, + "cmd/objdump": {}, + "cmd/pack": {}, + "debug/plan9obj": {}, + // added in Go v1.4 in compare to v1.3 (docker image) + "cmd/pprof": {}, + // added in Go v1.5 in compare to v1.4 (docker image) + "go/constant": {}, + "go/importer": {}, + "go/types": {}, + "mime/quotedprintable": {}, + "runtime/trace": {}, + // added in Go v1.6 in compare to v1.5 (docker image) + // added in Go v1.7 in compare to v1.6 (docker image) + "context": {}, + "net/http/httptrace": {}, + // added in Go v1.8 in compare to v1.7 (docker image) + "plugin": {}, + // added in Go v1.9 in compare to 
v1.8 (docker image) + "math/bits": {}, + // added in Go v1.10 in compare to v1.9 (docker image) + // added in Go v1.11 in compare to v1.10 (docker image) + // added in Go v1.12 in compare to v1.11 (docker image) + // added in Go v1.13 in compare to v1.12 (docker image) + "crypto/ed25519": {}, + // added in Go v1.14 in compare to v1.13 (docker image) + "hash/maphash": {}, + // added in Go v1.15 in compare to v1.14 (docker image) + "time/tzdata": {}, + // added in Go v1.16 in compare to v1.15 (docker image) + "embed": {}, + "go/build/constraint": {}, + "io/fs": {}, + "runtime/metrics": {}, + "testing/fstest": {}, + // added in Go v1.17 in compare to v1.16 (docker image) + // added in Go v1.18 in compare to v1.17 (docker image) + "debug/buildinfo": {}, + "net/netip": {}, + // added in Go v1.19 in compare to v1.18 (docker image) + "go/doc/comment": {}, + // added in Go v1.20 in compare to v1.19 (docker image) + "crypto/ecdh": {}, + "runtime/coverage": {}, + // added in Go v1.21 in compare to v1.20 (docker image) + "cmp": {}, + "log/slog": {}, + "maps": {}, + "slices": {}, + "testing/slogtest": {}, + // added in Go v1.22 in compare to v1.21 (docker image) + "go/version": {}, + "math/rand/v2": {}, +} diff --git a/vendor/github.com/butuzov/mirror/.act b/vendor/github.com/butuzov/mirror/.act new file mode 100644 index 0000000000..8182d703ae --- /dev/null +++ b/vendor/github.com/butuzov/mirror/.act @@ -0,0 +1,2 @@ +--platform ubuntu-latest=butuzov/act-go:latest +--env DRY_RUN=1 diff --git a/vendor/github.com/butuzov/mirror/.editorconfig b/vendor/github.com/butuzov/mirror/.editorconfig new file mode 100644 index 0000000000..4d9c20d8d9 --- /dev/null +++ b/vendor/github.com/butuzov/mirror/.editorconfig @@ -0,0 +1,28 @@ +# top-most EditorConfig file +root = true + + +[*] +end_of_line = lf # Unix-style newlines +charset = utf-8 + +indent_style = space # default identation - spaces +indent_size = 4 # default identation - size + +insert_final_newline = true # new line at the end of file +trim_trailing_whitespace = true # no extra sapces at the end of lines + +[*.{go,gohtml,gotpl}] # Go +indent_style = tab +indent_size = 2 + +[{Makefile,makefile}] # CMake +indent_style = tab + +[*.md] # Markdown +trim_trailing_whitespace = true +max_line_length = 100 +insert_final_newline = true +indent_size = 2 + + diff --git a/vendor/github.com/butuzov/mirror/.gitignore b/vendor/github.com/butuzov/mirror/.gitignore new file mode 100644 index 0000000000..109f33b98e --- /dev/null +++ b/vendor/github.com/butuzov/mirror/.gitignore @@ -0,0 +1,11 @@ +# artifacts +coverage.cov +bin/* +dist/* +tmp/* +out* +sandbox* +demo* +.task* +.ipynb* +.jupyter* diff --git a/vendor/github.com/butuzov/mirror/.goreleaser.yaml b/vendor/github.com/butuzov/mirror/.goreleaser.yaml new file mode 100644 index 0000000000..11749ed2b3 --- /dev/null +++ b/vendor/github.com/butuzov/mirror/.goreleaser.yaml @@ -0,0 +1,61 @@ +--- +project_name: mirror + +builds: + - binary: mirror + env: + - CGO_ENABLED=0 + goos: + - darwin + - linux + - windows + goarch: + - amd64 + - 386 + - arm64 + - arm + goarm: + - 6 + ignore: + - goos: windows + goarm: 6 + - goos: windows + goarch: arm64 + - goos: linux + goarm: 6 + - goos: darwin + goarch: 386 + main: ./cmd/mirror/ + flags: + - -trimpath + ldflags: -s -w + +checksum: + name_template: 'checksums.txt' + +changelog: + sort: asc + filters: + exclude: + - '(?i)^docs?:' + - '(?i)^docs\([^:]+\):' + - '(?i)^docs\[[^:]+\]:' + - '^tests?:' + - '(?i)^dev:' + - Merge pull request + - Merge branch + +archives: + - name_template: 
'{{ .ProjectName }}_{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}{{ if .Mips }}_{{ .Mips }}{{ end }}' + replacements: + darwin: darwin + linux: linux + windows: windows + 386: i386 + amd64: x86_64 + format_overrides: + - goos: windows + format: zip + files: + - LICENSE + - readme.md diff --git a/vendor/github.com/butuzov/mirror/LICENSE b/vendor/github.com/butuzov/mirror/LICENSE new file mode 100644 index 0000000000..a9752e9726 --- /dev/null +++ b/vendor/github.com/butuzov/mirror/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2021 Oleg Butuzov + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/butuzov/mirror/MIRROR_FUNCS.md b/vendor/github.com/butuzov/mirror/MIRROR_FUNCS.md new file mode 100644 index 0000000000..776816e514 --- /dev/null +++ b/vendor/github.com/butuzov/mirror/MIRROR_FUNCS.md @@ -0,0 +1,150 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
| string-based function or method | `[]byte`-based (or rune-based) mirror |
| --- | --- |
| `func (b *bufio.Writer) WriteString(s string) (int, error)` | `func (b *bufio.Writer) Write(p []byte) (int, error)`<br>`func (b *bufio.Writer) WriteRune(r rune) (int, error)` |
| `func (b *bytes.Buffer) WriteString(s string) (int, error)` | `func (b *bytes.Buffer) Write(p []byte) (int, error)`<br>`func (b *bytes.Buffer) WriteRune(r rune) (int, error)` |
| `func strings.Compare(a, b string) int` | `func bytes.Compare(a, b []byte) int` |
| `func strings.Contains(s, substr string) bool` | `func bytes.Contains(b, subslice []byte) bool` |
| `func strings.ContainsAny(s, chars string) bool` | `func bytes.ContainsAny(b []byte, chars string) bool` |
| `func strings.ContainsRune(s string, r rune) bool` | `func bytes.ContainsRune(b []byte, r rune) bool` |
| `func strings.Count(s, substr string) int` | `func bytes.Count(s, sep []byte) int` |
| `func strings.EqualFold(s, t string) bool` | `func bytes.EqualFold(s, t []byte) bool` |
| `func strings.HasPrefix(s, prefix string) bool` | `func bytes.HasPrefix(s, prefix []byte) bool` |
| `func strings.HasSuffix(s, suffix string) bool` | `func bytes.HasSuffix(s, suffix []byte) bool` |
| `func strings.Index(s, substr string) int` | `func bytes.Index(s, sep []byte) int` |
| `func strings.IndexAny(s, chars string) int` | `func bytes.IndexAny(s []byte, chars string) int` |
| `func strings.IndexByte(s string, c byte) int` | `func bytes.IndexByte(b []byte, c byte) int` |
| `func strings.IndexFunc(s string, f func(rune) bool) int` | `func bytes.IndexFunc(s []byte, f func(r rune) bool) int` |
| `func strings.IndexRune(s string, r rune) int` | `func bytes.IndexRune(s []byte, r rune) int` |
| `func strings.LastIndex(s, sep string) int` | `func bytes.LastIndex(s, sep []byte) int` |
| `func strings.LastIndexAny(s, chars string) int` | `func bytes.LastIndexAny(s []byte, chars string) int` |
| `func strings.LastIndexByte(s string, c byte) int` | `func bytes.LastIndexByte(s []byte, c byte) int` |
| `func strings.LastIndexFunc(s string, f func(rune) bool) int` | `func bytes.LastIndexFunc(s []byte, f func(r rune) bool) int` |
| `func bytes.NewBufferString(s string) *bytes.Buffer` | `func bytes.NewBuffer(buf []byte) *bytes.Buffer` |
| `func (h *hash/maphash.Hash) WriteString(s string) (int, error)` | `func (h *hash/maphash.Hash) Write(b []byte) (int, error)` |
| `func (rw *net/http/httptest.ResponseRecorder) WriteString(str string) (int, error)` | `func (rw *net/http/httptest.ResponseRecorder) Write(buf []byte) (int, error)` |
| `func (f *os.File) WriteString(s string) (n int, err error)` | `func (f *os.File) Write(b []byte) (n int, err error)` |
| `func regexp.MatchString(pattern string, s string) (bool, error)` | `func regexp.Match(pattern string, b []byte) (bool, error)` |
| `func (re *regexp.Regexp) FindAllStringIndex(s string, n int) [][]int` | `func (re *regexp.Regexp) FindAllIndex(b []byte, n int) [][]int` |
| `func (re *regexp.Regexp) FindAllStringSubmatch(s string, n int) [][]string` | `func (re *regexp.Regexp) FindAllSubmatch(b []byte, n int) [][][]byte` |
| `func (re *regexp.Regexp) FindStringIndex(s string) (loc []int)` | `func (re *regexp.Regexp) FindIndex(b []byte) (loc []int)` |
| `func (re *regexp.Regexp) FindStringSubmatchIndex(s string) []int` | `func (re *regexp.Regexp) FindSubmatchIndex(b []byte) []int` |
| `func (re *regexp.Regexp) MatchString(s string) bool` | `func (re *regexp.Regexp) Match(b []byte) bool` |
| `func (b *strings.Builder) WriteString(s string) (int, error)` | `func (b *strings.Builder) Write(p []byte) (int, error)`<br>`func (b *strings.Builder) WriteRune(r rune) (int, error)` |
| `func utf8.ValidString(s string) bool` | `func utf8.Valid(p []byte) bool` |
| `func utf8.FullRuneInString(s string) bool` | `func utf8.FullRune(p []byte) bool` |
| `func utf8.RuneCountInString(s string) (n int)` | `func utf8.RuneCount(p []byte) int` |
| `func utf8.DecodeLastRuneInString(s string) (rune, int)` | `func utf8.DecodeLastRune(p []byte) (rune, int)` |
| `func utf8.DecodeRuneInString(s string) (rune, int)` | `func utf8.DecodeRune(p []byte) (rune, int)` |
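To make the pairs above concrete, here is a minimal sketch of the pattern they describe; it is not taken from the vendored sources, and the variable names are invented for illustration. A string is converted to `[]byte` only so it can be passed to the byte-based method, where the string-based mirror would avoid the conversion. The checker definitions later in this diff (for example `BytesBufferMethods` in `checkers_bytes.go`) encode exactly these caller/alt-caller pairs.

```go
package main

import (
	"bytes"
	"fmt"
)

func main() {
	var buf bytes.Buffer
	name := "gopher" // hypothetical input used only for this example

	// Redundant conversion: the string is turned into []byte solely to call
	// Write; this is the kind of call the mirror pairs above match.
	buf.Write([]byte("hello, " + name))

	// Mirrored call using the string-based method; no conversion needed.
	buf.WriteString("hello, " + name)

	fmt.Println(buf.String())
}
```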
diff --git a/vendor/github.com/butuzov/mirror/Makefile b/vendor/github.com/butuzov/mirror/Makefile new file mode 100644 index 0000000000..b4b952b012 --- /dev/null +++ b/vendor/github.com/butuzov/mirror/Makefile @@ -0,0 +1,58 @@ +# --- Required ---------------------------------------------------------------- +export PATH := $(PWD)/bin:$(PATH) # ./bin to $PATH +export SHELL := bash # Default Shell + +GOPKGS := $(shell go list ./... | grep -vE "(cmd|sandbox|testdata)" | tr -s '\n' ',' | sed 's/.\{1\}$$//' ) + + +build: + @ go build -trimpath -ldflags="-w -s" \ + -o bin/mirror ./cmd/mirror/ + +build-race: + @ go build -race -trimpath -ldflags="-w -s" \ + -o bin/mirror ./cmd/mirror/ + +tests: + go test -v -count=1 -race \ + -failfast \ + -parallel=2 \ + -timeout=1m \ + -covermode=atomic \ + -coverpkg=$(GOPKGS) -coverprofile=coverage.cov ./... + +tests-summary: + go test -v -count=1 -race \ + -failfast \ + -parallel=2 \ + -timeout=1m \ + -covermode=atomic \ + -coverpkg=$(GOPKGS) -coverprofile=coverage.cov --json ./... | tparse -all + +test-generate: + go run ./cmd/internal/generate-tests/ "$(PWD)/testdata" + +lints: + golangci-lint run --no-config ./... -D deadcode --skip-dirs "^(cmd|sandbox|testdata)" + + +cover: + go tool cover -html=coverage.cov + +install: + go install -trimpath -v -ldflags="-w -s" \ + ./cmd/mirror + +funcs: + echo "" > "out/results.txt" + go list std | grep -v "vendor" | grep -v "internal" | \ + xargs -I {} sh -c 'go doc -all {} > out/$(basename {}).txt' + +bin/goreleaser: + @curl -Ls https://github.com/goreleaser/goreleaser/releases/download/v1.17.2/goreleaser_Darwin_all.tar.gz | tar -zOxf - goreleaser > ./bin/goreleaser + chmod 0755 ./bin/goreleaser + +test-release: bin/goreleaser + goreleaser release --help + goreleaser release -f .goreleaser.yaml \ + --skip-validate --skip-publish --clean diff --git a/vendor/github.com/butuzov/mirror/Taskfile.yml b/vendor/github.com/butuzov/mirror/Taskfile.yml new file mode 100644 index 0000000000..26c9ba2571 --- /dev/null +++ b/vendor/github.com/butuzov/mirror/Taskfile.yml @@ -0,0 +1,28 @@ +version: '3' + +tasks: + default: + sources: + - "./**/*.go" + method: timestamp + cmds: + - clear + - make build + - make build-race + - task: lints + # - make test-generate + - task: tests + - cmd: go run ./cmd/mirror/ --with-tests --with-debug ./sandbox + ignore_error: true + + testcase: go test -v -failfast -count=1 -run "TestAll/{{ .Case }}" ./... 
+ + tests: + cmds: + - cmd: make tests + ignore_error: true + + lints: + cmds: + - cmd: make lints + ignore_error: true diff --git a/vendor/github.com/butuzov/mirror/analyzer.go b/vendor/github.com/butuzov/mirror/analyzer.go new file mode 100644 index 0000000000..13ded46c6d --- /dev/null +++ b/vendor/github.com/butuzov/mirror/analyzer.go @@ -0,0 +1,144 @@ +package mirror + +import ( + "flag" + "go/ast" + "strings" + + "github.com/butuzov/mirror/internal/checker" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" +) + +func NewAnalyzer() *analysis.Analyzer { + flags := flags() + + return &analysis.Analyzer{ + Name: "mirror", + Doc: "reports wrong mirror patterns of bytes/strings usage", + Run: run, + Requires: []*analysis.Analyzer{ + inspect.Analyzer, + }, + Flags: flags, + } +} + +func run(pass *analysis.Pass) (interface{}, error) { + withTests := pass.Analyzer.Flags.Lookup("with-tests").Value.String() == "true" + // --- Reporting violations via issues --------------------------------------- + for _, violation := range Run(pass, withTests) { + pass.Report(violation.Diagnostic(pass.Fset)) + } + + return nil, nil +} + +func Run(pass *analysis.Pass, withTests bool) []*checker.Violation { + violations := []*checker.Violation{} + // --- Setup ----------------------------------------------------------------- + + check := checker.New( + BytesFunctions, BytesBufferMethods, + RegexpFunctions, RegexpRegexpMethods, + StringFunctions, StringsBuilderMethods, + BufioMethods, HTTPTestMethods, + OsFileMethods, MaphashMethods, + UTF8Functions, + ) + + check.Type = checker.WrapType(pass.TypesInfo) + check.Print = checker.WrapPrint(pass.Fset) + + ins, _ := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + imports := checker.Load(pass.Fset, ins) + + // --- Preorder Checker ------------------------------------------------------ + ins.Preorder([]ast.Node{(*ast.CallExpr)(nil)}, func(n ast.Node) { + callExpr := n.(*ast.CallExpr) + fileName := pass.Fset.Position(callExpr.Pos()).Filename + + if !withTests && strings.HasSuffix(fileName, "_test.go") { + return + } + + // ------------------------------------------------------------------------- + switch expr := callExpr.Fun.(type) { + // NOTE(butuzov): Regular calls (`*ast.SelectorExpr`) like strings.HasPrefix + // or re.Match are handled by this check + case *ast.SelectorExpr: + + x, ok := expr.X.(*ast.Ident) + if !ok { + return + } + + // TODO(butuzov): Add check for the ast.ParenExpr in e.Fun so we can + // target the constructions like this (and other calls) + // ----------------------------------------------------------------------- + // Example: + // (&maphash.Hash{}).Write([]byte("foobar")) + // ----------------------------------------------------------------------- + + // Case 1: Is this is a function call? + pkgName, name := x.Name, expr.Sel.Name + if pkg, ok := imports.Lookup(fileName, pkgName); ok { + if v := check.Match(pkg, name); v != nil { + if args, found := check.Handle(v, callExpr); found { + violations = append(violations, v.With(check.Print(expr.X), callExpr, args)) + } + return + } + } + + // Case 2: Is this is a method call? 
+ tv := pass.TypesInfo.Types[expr.X] + if !tv.IsValue() || tv.Type == nil { + return + } + + pkgStruct, name := cleanAsterisk(tv.Type.String()), expr.Sel.Name + for _, v := range check.Matches(pkgStruct, name) { + if v == nil { + continue + } + + if args, found := check.Handle(v, callExpr); found { + violations = append(violations, v.With(check.Print(expr.X), callExpr, args)) + return + } + } + + case *ast.Ident: + // NOTE(butuzov): Special case of "." imported packages, only functions. + + if pkg, ok := imports.Lookup(fileName, "."); ok { + if v := check.Match(pkg, expr.Name); v != nil { + if args, found := check.Handle(v, callExpr); found { + violations = append(violations, v.With(nil, callExpr, args)) + } + return + } + } + } + }) + + return violations +} + +func flags() flag.FlagSet { + set := flag.NewFlagSet("", flag.PanicOnError) + set.Bool("with-tests", false, "do not skip tests in reports") + set.Bool("with-debug", false, "debug linter run (development only)") + return *set +} + +func cleanAsterisk(s string) string { + if strings.HasPrefix(s, "*") { + return s[1:] + } + + return s +} diff --git a/vendor/github.com/butuzov/mirror/checkers_bufio.go b/vendor/github.com/butuzov/mirror/checkers_bufio.go new file mode 100644 index 0000000000..292ed269aa --- /dev/null +++ b/vendor/github.com/butuzov/mirror/checkers_bufio.go @@ -0,0 +1,56 @@ +package mirror + +import "github.com/butuzov/mirror/internal/checker" + +var BufioMethods = []checker.Violation{ + { // (*bufio.Writer).Write + Targets: checker.Bytes, + Type: checker.Method, + Package: "bufio", + Struct: "Writer", + Caller: "Write", + Args: []int{0}, + AltCaller: "WriteString", + + Generate: &checker.Generate{ + PreCondition: `b := bufio.Writer{}`, + Pattern: `Write($0)`, + Returns: 2, + }, + }, + { // (*bufio.Writer).WriteString + Type: checker.Method, + Targets: checker.Strings, + Package: "bufio", + Struct: "Writer", + Caller: "WriteString", + Args: []int{0}, + AltCaller: "Write", + + Generate: &checker.Generate{ + PreCondition: `b := bufio.Writer{}`, + Pattern: `WriteString($0)`, + Returns: 2, + }, + }, + { // (*bufio.Writer).WriteString -> (*bufio.Writer).WriteRune + Targets: checker.Strings, + Type: checker.Method, + Package: "bufio", + Struct: "Writer", + Caller: "WriteString", + Args: []int{0}, + ArgsType: checker.Rune, + AltCaller: "WriteRune", + }, + // { // (*bufio.Writer).WriteString -> (*bufio.Writer).WriteByte + // Targets: checker.Strings, + // Type: checker.Method, + // Package: "strings", + // Struct: "Builder", + // Caller: "WriteString", + // Args: []int{0}, + // ArgsType: checker.Byte, + // AltCaller: "WriteByte", // byte + // }, +} diff --git a/vendor/github.com/butuzov/mirror/checkers_bytes.go b/vendor/github.com/butuzov/mirror/checkers_bytes.go new file mode 100644 index 0000000000..c490a3784e --- /dev/null +++ b/vendor/github.com/butuzov/mirror/checkers_bytes.go @@ -0,0 +1,326 @@ +package mirror + +import "github.com/butuzov/mirror/internal/checker" + +var ( + BytesFunctions = []checker.Violation{ + { // bytes.NewBuffer + Targets: checker.Bytes, + Type: checker.Function, + Package: "bytes", + Caller: "NewBuffer", + Args: []int{0}, + AltCaller: "NewBufferString", + + Generate: &checker.Generate{ + Pattern: `NewBuffer($0)`, + Returns: 1, + }, + }, + { // bytes.NewBufferString + Targets: checker.Strings, + Type: checker.Function, + Package: "bytes", + Caller: "NewBufferString", + Args: []int{0}, + AltCaller: "NewBuffer", + + Generate: &checker.Generate{ + Pattern: `NewBufferString($0)`, + Returns: 1, + }, + }, + 
{ // bytes.Compare: + Targets: checker.Bytes, + Type: checker.Function, + Package: "bytes", + Caller: "Compare", + Args: []int{0, 1}, + AltPackage: "strings", + AltCaller: "Compare", + + Generate: &checker.Generate{ + Pattern: `Compare($0, $1)`, + Returns: 1, + }, + }, + { // bytes.Contains: + Targets: checker.Bytes, + Type: checker.Function, + Package: "bytes", + Caller: "Contains", + Args: []int{0, 1}, + AltPackage: "strings", + AltCaller: "Contains", + + Generate: &checker.Generate{ + Pattern: `Contains($0, $1)`, + Returns: 1, + }, + }, + { // bytes.ContainsAny + Targets: checker.Bytes, + Type: checker.Function, + Package: "bytes", + Caller: "ContainsAny", + Args: []int{0}, + AltPackage: "strings", + AltCaller: "ContainsAny", + + Generate: &checker.Generate{ + Pattern: `ContainsAny($0, "f")`, + Returns: 1, + }, + }, + { // bytes.ContainsRune + Targets: checker.Bytes, + Type: checker.Function, + Package: "bytes", + Caller: "ContainsRune", + Args: []int{0}, + AltPackage: "strings", + AltCaller: "ContainsRune", + + Generate: &checker.Generate{ + Pattern: `ContainsRune($0, 'ф')`, + Returns: 1, + }, + }, + { // bytes.Count + Targets: checker.Bytes, + Type: checker.Function, + Package: "bytes", + Caller: "Count", + Args: []int{0, 1}, + AltPackage: "strings", + AltCaller: "Count", + + Generate: &checker.Generate{ + Pattern: `Count($0, $1)`, + Returns: 1, + }, + }, + { // bytes.EqualFold + Targets: checker.Bytes, + Type: checker.Function, + Package: "bytes", + Caller: "EqualFold", + Args: []int{0, 1}, + AltPackage: "strings", + AltCaller: "EqualFold", + + Generate: &checker.Generate{ + Pattern: `EqualFold($0, $1)`, + Returns: 1, + }, + }, + + { // bytes.HasPrefix + Targets: checker.Bytes, + Type: checker.Function, + Package: "bytes", + Caller: "HasPrefix", + Args: []int{0, 1}, + AltPackage: "strings", + AltCaller: "HasPrefix", + + Generate: &checker.Generate{ + Pattern: `HasPrefix($0, $1)`, + Returns: 1, + }, + }, + { // bytes.HasSuffix + Targets: checker.Bytes, + Type: checker.Function, + Package: "bytes", + Caller: "HasSuffix", + Args: []int{0, 1}, + AltPackage: "strings", + AltCaller: "HasSuffix", + + Generate: &checker.Generate{ + Pattern: `HasSuffix($0, $1)`, + Returns: 1, + }, + }, + { // bytes.Index + Targets: checker.Bytes, + Type: checker.Function, + Package: "bytes", + Caller: "Index", + Args: []int{0, 1}, + AltPackage: "strings", + AltCaller: "Index", + + Generate: &checker.Generate{ + Pattern: `Index($0, $1)`, + Returns: 1, + }, + }, + { // bytes.IndexAny + Targets: checker.Bytes, + Type: checker.Function, + Package: "bytes", + Caller: "IndexAny", + Args: []int{0}, + AltPackage: "strings", + AltCaller: "IndexAny", + + Generate: &checker.Generate{ + Pattern: `IndexAny($0, "f")`, + Returns: 1, + }, + }, + { // bytes.IndexByte + Targets: checker.Bytes, + Type: checker.Function, + Package: "bytes", + Caller: "IndexByte", + Args: []int{0}, + AltPackage: "strings", + AltCaller: "IndexByte", + + Generate: &checker.Generate{ + Pattern: `IndexByte($0, 'f')`, + Returns: 1, + }, + }, + { // bytes.IndexFunc + Targets: checker.Bytes, + Type: checker.Function, + Package: "bytes", + Caller: "IndexFunc", + Args: []int{0}, + AltPackage: "strings", + AltCaller: "IndexFunc", + + Generate: &checker.Generate{ + Pattern: `IndexFunc($0, func(rune) bool {return true })`, + Returns: 1, + }, + }, + { // bytes.IndexRune + Targets: checker.Bytes, + Type: checker.Function, + Package: "bytes", + Caller: "IndexRune", + Args: []int{0}, + AltPackage: "strings", + AltCaller: "IndexRune", + + Generate: 
&checker.Generate{ + Pattern: `IndexRune($0, rune('ф'))`, + Returns: 1, + }, + }, + { // bytes.LastIndex + Targets: checker.Bytes, + Type: checker.Function, + Package: "bytes", + Caller: "LastIndex", + Args: []int{0, 1}, + AltPackage: "strings", + AltCaller: "LastIndex", + + Generate: &checker.Generate{ + Pattern: `LastIndex($0, $1)`, + Returns: 1, + }, + }, + { // bytes.LastIndexAny + Targets: checker.Bytes, + Type: checker.Function, + Package: "bytes", + Caller: "LastIndexAny", + Args: []int{0}, + AltPackage: "strings", + AltCaller: "LastIndexAny", + + Generate: &checker.Generate{ + Pattern: `LastIndexAny($0, "ф")`, + Returns: 1, + }, + }, + { // bytes.LastIndexByte + Targets: checker.Bytes, + Type: checker.Function, + Package: "bytes", + Caller: "LastIndexByte", + Args: []int{0}, + AltPackage: "strings", + AltCaller: "LastIndexByte", + + Generate: &checker.Generate{ + Pattern: `LastIndexByte($0, 'f')`, + Returns: 1, + }, + }, + { // bytes.LastIndexFunc + Targets: checker.Bytes, + Type: checker.Function, + Package: "bytes", + Caller: "LastIndexFunc", + Args: []int{0}, + AltPackage: "strings", + AltCaller: "LastIndexFunc", + + Generate: &checker.Generate{ + Pattern: `LastIndexFunc($0, func(rune) bool {return true })`, + Returns: 1, + }, + }, + } + + BytesBufferMethods = []checker.Violation{ + { // (*bytes.Buffer).Write + Targets: checker.Bytes, + Type: checker.Method, + Package: "bytes", + Struct: "Buffer", + Caller: "Write", + Args: []int{0}, + AltCaller: "WriteString", + + Generate: &checker.Generate{ + PreCondition: `bb := bytes.Buffer{}`, + Pattern: `Write($0)`, + Returns: 2, + }, + }, + { // (*bytes.Buffer).WriteString + Targets: checker.Strings, + Type: checker.Method, + Package: "bytes", + Struct: "Buffer", + Caller: "WriteString", + Args: []int{0}, + AltCaller: "Write", + + Generate: &checker.Generate{ + PreCondition: `bb := bytes.Buffer{}`, + Pattern: `WriteString($0)`, + Returns: 2, + }, + }, + { // (*bytes.Buffer).WriteString -> (*bytes.Buffer).WriteRune + Targets: checker.Strings, + Type: checker.Method, + Package: "bytes", + Struct: "Buffer", + Caller: "WriteString", + Args: []int{0}, + ArgsType: checker.Rune, + AltCaller: "WriteRune", + }, + // { // (*bytes.Buffer).WriteString -> (*bytes.Buffer).WriteByte + // Targets: checker.Strings, + // Type: checker.Method, + // Package: "bytes", + // Struct: "Buffer", + // Caller: "WriteString", + // Args: []int{0}, + // ArgsType: checker.Byte, + // AltCaller: "WriteByte", + // }, + } +) diff --git a/vendor/github.com/butuzov/mirror/checkers_httptest.go b/vendor/github.com/butuzov/mirror/checkers_httptest.go new file mode 100644 index 0000000000..ae67509300 --- /dev/null +++ b/vendor/github.com/butuzov/mirror/checkers_httptest.go @@ -0,0 +1,36 @@ +package mirror + +import "github.com/butuzov/mirror/internal/checker" + +var HTTPTestMethods = []checker.Violation{ + { // (*net/http/httptest.ResponseRecorder).Write + Targets: checker.Bytes, + Type: checker.Method, + Package: "net/http/httptest", + Struct: "ResponseRecorder", + Caller: "Write", + Args: []int{0}, + AltCaller: "WriteString", + + Generate: &checker.Generate{ + PreCondition: `h := httptest.ResponseRecorder{}`, + Pattern: `Write($0)`, + Returns: 2, + }, + }, + { // (*net/http/httptest.ResponseRecorder).WriteString + Targets: checker.Strings, + Type: checker.Method, + Package: "net/http/httptest", + Struct: "ResponseRecorder", + Caller: "WriteString", + Args: []int{0}, + AltCaller: "Write", + + Generate: &checker.Generate{ + PreCondition: `h := httptest.ResponseRecorder{}`, + 
Pattern: `WriteString($0)`, + Returns: 2, + }, + }, +} diff --git a/vendor/github.com/butuzov/mirror/checkers_maphash.go b/vendor/github.com/butuzov/mirror/checkers_maphash.go new file mode 100644 index 0000000000..4d184d2a95 --- /dev/null +++ b/vendor/github.com/butuzov/mirror/checkers_maphash.go @@ -0,0 +1,36 @@ +package mirror + +import "github.com/butuzov/mirror/internal/checker" + +var MaphashMethods = []checker.Violation{ + { // (*hash/maphash).Write + Targets: checker.Bytes, + Type: checker.Method, + Package: "hash/maphash", + Struct: "Hash", + Caller: "Write", + Args: []int{0}, + AltCaller: "WriteString", + + Generate: &checker.Generate{ + PreCondition: `h := maphash.Hash{}`, + Pattern: `Write($0)`, + Returns: 2, + }, + }, + { // (*hash/maphash).WriteString + Targets: checker.Strings, + Type: checker.Method, + Package: "hash/maphash", + Struct: "Hash", + Caller: "WriteString", + Args: []int{0}, + AltCaller: "Write", + + Generate: &checker.Generate{ + PreCondition: `h := maphash.Hash{}`, + Pattern: `WriteString($0)`, + Returns: 2, + }, + }, +} diff --git a/vendor/github.com/butuzov/mirror/checkers_os.go b/vendor/github.com/butuzov/mirror/checkers_os.go new file mode 100644 index 0000000000..09f5a18e58 --- /dev/null +++ b/vendor/github.com/butuzov/mirror/checkers_os.go @@ -0,0 +1,36 @@ +package mirror + +import "github.com/butuzov/mirror/internal/checker" + +var OsFileMethods = []checker.Violation{ + { // (*os.File).Write + Targets: checker.Bytes, + Type: checker.Method, + Package: "os", + Struct: "File", + Caller: "Write", + Args: []int{0}, + AltCaller: "WriteString", + + Generate: &checker.Generate{ + PreCondition: `f := &os.File{}`, + Pattern: `Write($0)`, + Returns: 2, + }, + }, + { // (*os.File).WriteString + Targets: checker.Strings, + Type: checker.Method, + Package: "os", + Struct: "File", + Caller: "WriteString", + Args: []int{0}, + AltCaller: "Write", + + Generate: &checker.Generate{ + PreCondition: `f := &os.File{}`, + Pattern: `WriteString($0)`, + Returns: 2, + }, + }, +} diff --git a/vendor/github.com/butuzov/mirror/checkers_regexp.go b/vendor/github.com/butuzov/mirror/checkers_regexp.go new file mode 100644 index 0000000000..17175e0286 --- /dev/null +++ b/vendor/github.com/butuzov/mirror/checkers_regexp.go @@ -0,0 +1,187 @@ +package mirror + +import "github.com/butuzov/mirror/internal/checker" + +var ( + RegexpFunctions = []checker.Violation{ + { // regexp.Match + Targets: checker.Bytes, + Type: checker.Function, + Package: "regexp", + Caller: "Match", + Args: []int{1}, + AltCaller: "MatchString", + + Generate: &checker.Generate{ + Pattern: `Match("foo", $0)`, + Returns: 2, + }, + }, + { // regexp.MatchString + Targets: checker.Strings, + Type: checker.Function, + Package: "regexp", + Caller: "MatchString", + Args: []int{1}, + AltCaller: "Match", + + Generate: &checker.Generate{ + Pattern: `MatchString("foo", $0)`, + Returns: 2, + }, + }, + } + + RegexpRegexpMethods = []checker.Violation{ + { // (*regexp.Regexp).Match + Targets: checker.Bytes, + Type: checker.Method, + Package: "regexp", + Struct: "Regexp", + Caller: "Match", + Args: []int{0}, + AltCaller: "MatchString", + + Generate: &checker.Generate{ + PreCondition: `re := regexp.MustCompile(".*")`, + Pattern: `Match($0)`, + Returns: 1, + }, + }, + { // (*regexp.Regexp).MatchString + Targets: checker.Strings, + Type: checker.Method, + Package: "regexp", + Struct: "Regexp", + Caller: "MatchString", + Args: []int{0}, + AltCaller: "Match", + + Generate: &checker.Generate{ + PreCondition: `re := 
regexp.MustCompile(".*")`, + Pattern: `MatchString($0)`, + Returns: 1, + }, + }, + { // (*regexp.Regexp).FindAllIndex + Targets: checker.Bytes, + Type: checker.Method, + Package: "regexp", + Struct: "Regexp", + Caller: "FindAllIndex", + Args: []int{0}, + AltCaller: "FindAllStringIndex", + + Generate: &checker.Generate{ + PreCondition: `re := regexp.MustCompile(".*")`, + Pattern: `FindAllIndex($0, 1)`, + Returns: 1, + }, + }, + { // (*regexp.Regexp).FindAllStringIndex + Targets: checker.Strings, + Type: checker.Method, + Package: "regexp", + Struct: "Regexp", + Caller: "FindAllStringIndex", + Args: []int{0}, + AltCaller: "FindAllIndex", + + Generate: &checker.Generate{ + PreCondition: `re := regexp.MustCompile(".*")`, + Pattern: `FindAllStringIndex($0, 1)`, + Returns: 1, + }, + }, + { // (*regexp.Regexp).FindAllSubmatchIndex + Targets: checker.Bytes, + Type: checker.Method, + Package: "regexp", + Struct: "Regexp", + Caller: "FindAllSubmatchIndex", + Args: []int{0}, + AltCaller: "FindAllStringSubmatchIndex", + + Generate: &checker.Generate{ + PreCondition: `re := regexp.MustCompile(".*")`, + Pattern: `FindAllSubmatchIndex($0, 1)`, + Returns: 1, + }, + }, + { // (*regexp.Regexp).FindAllStringSubmatchIndex + Targets: checker.Strings, + Type: checker.Method, + Package: "regexp", + Struct: "Regexp", + Caller: "FindAllStringSubmatchIndex", + Args: []int{0}, + AltCaller: "FindAllSubmatchIndex", + + Generate: &checker.Generate{ + PreCondition: `re := regexp.MustCompile(".*")`, + Pattern: `FindAllStringSubmatchIndex($0, 1)`, + Returns: 1, + }, + }, + { // (*regexp.Regexp).FindIndex + Targets: checker.Bytes, + Type: checker.Method, + Package: "regexp", + Struct: "Regexp", + Caller: "FindIndex", + Args: []int{0}, + AltCaller: "FindStringIndex", + + Generate: &checker.Generate{ + PreCondition: `re := regexp.MustCompile(".*")`, + Pattern: `FindIndex($0)`, + Returns: 1, + }, + }, + { // (*regexp.Regexp).FindStringIndex + Targets: checker.Strings, + Type: checker.Method, + Package: "regexp", + Struct: "Regexp", + Caller: "FindStringIndex", + Args: []int{0}, + AltCaller: "FindIndex", + + Generate: &checker.Generate{ + PreCondition: `re := regexp.MustCompile(".*")`, + Pattern: `FindStringIndex($0)`, + Returns: 1, + }, + }, + { // (*regexp.Regexp).FindSubmatchIndex + Targets: checker.Bytes, + Type: checker.Method, + Package: "regexp", + Struct: "Regexp", + Caller: "FindSubmatchIndex", + Args: []int{0}, + AltCaller: "FindStringSubmatchIndex", + + Generate: &checker.Generate{ + PreCondition: `re := regexp.MustCompile(".*")`, + Pattern: `FindSubmatchIndex($0)`, + Returns: 1, + }, + }, + { // (*regexp.Regexp).FindStringSubmatchIndex + Targets: checker.Strings, + Type: checker.Method, + Package: "regexp", + Struct: "Regexp", + Caller: "FindStringSubmatchIndex", + Args: []int{0}, + AltCaller: "FindSubmatchIndex", + + Generate: &checker.Generate{ + PreCondition: `re := regexp.MustCompile(".*")`, + Pattern: `FindStringSubmatchIndex($0)`, + Returns: 1, + }, + }, + } +) diff --git a/vendor/github.com/butuzov/mirror/checkers_strings.go b/vendor/github.com/butuzov/mirror/checkers_strings.go new file mode 100644 index 0000000000..ead7e9cc7e --- /dev/null +++ b/vendor/github.com/butuzov/mirror/checkers_strings.go @@ -0,0 +1,299 @@ +package mirror + +import "github.com/butuzov/mirror/internal/checker" + +var ( + StringFunctions = []checker.Violation{ + { // strings.Compare + Targets: checker.Strings, + Type: checker.Function, + Package: "strings", + Caller: "Compare", + Args: []int{0, 1}, + AltPackage: "bytes", + 
AltCaller: "Compare", + + Generate: &checker.Generate{ + Pattern: `Compare($0,$1)`, + Returns: 1, + }, + }, + { // strings.Contains + Targets: checker.Strings, + Type: checker.Function, + Package: "strings", + Caller: "Contains", + Args: []int{0, 1}, + AltPackage: "bytes", + AltCaller: "Contains", + + Generate: &checker.Generate{ + Pattern: `Contains($0,$1)`, + Returns: 1, + }, + }, + { // strings.ContainsAny + Targets: checker.Strings, + Type: checker.Function, + Package: "strings", + Caller: "ContainsAny", + Args: []int{0}, + AltPackage: "bytes", + AltCaller: "ContainsAny", + + Generate: &checker.Generate{ + Pattern: `ContainsAny($0,"foobar")`, + Returns: 1, + }, + }, + { // strings.ContainsRune + Targets: checker.Strings, + Type: checker.Function, + Package: "strings", + Caller: "ContainsRune", + Args: []int{0}, + AltPackage: "bytes", + AltCaller: "ContainsRune", + + Generate: &checker.Generate{ + Pattern: `ContainsRune($0,'ф')`, + Returns: 1, + }, + }, + { // strings.Count + Targets: checker.Strings, + Type: checker.Function, + Package: "strings", + Caller: "Count", + Args: []int{0, 1}, + AltPackage: "bytes", + AltCaller: "Count", + + Generate: &checker.Generate{ + Pattern: `Count($0, $1)`, + Returns: 1, + }, + }, + { // strings.EqualFold + Targets: checker.Strings, + Type: checker.Function, + Package: "strings", + Caller: "EqualFold", + Args: []int{0, 1}, + AltPackage: "bytes", + AltCaller: "EqualFold", + + Generate: &checker.Generate{ + Pattern: `EqualFold($0,$1)`, + Returns: 1, + }, + }, + { // strings.HasPrefix + Targets: checker.Strings, + Type: checker.Function, + Package: "strings", + Caller: "HasPrefix", + Args: []int{0, 1}, + AltPackage: "bytes", + AltCaller: "HasPrefix", + + Generate: &checker.Generate{ + Pattern: `HasPrefix($0,$1)`, + Returns: 1, + }, + }, + { // strings.HasSuffix + Targets: checker.Strings, + Type: checker.Function, + Package: "strings", + Caller: "HasSuffix", + Args: []int{0, 1}, + AltPackage: "bytes", + AltCaller: "HasSuffix", + + Generate: &checker.Generate{ + Pattern: `HasSuffix($0,$1)`, + Returns: 1, + }, + }, + { // strings.Index + Targets: checker.Strings, + Type: checker.Function, + Package: "strings", + Caller: "Index", + Args: []int{0, 1}, + AltPackage: "bytes", + AltCaller: "Index", + + Generate: &checker.Generate{ + Pattern: `Index($0,$1)`, + Returns: 1, + }, + }, + { // strings.IndexAny + Targets: checker.Strings, + Type: checker.Function, + Package: "strings", + Caller: "IndexAny", + Args: []int{0}, + AltPackage: "bytes", + AltCaller: "IndexAny", + + Generate: &checker.Generate{ + Pattern: `IndexAny($0, "f")`, + Returns: 1, + }, + }, + { // strings.IndexByte + Targets: checker.Strings, + Type: checker.Function, + Package: "strings", + Caller: "IndexByte", + Args: []int{0}, + AltPackage: "bytes", + AltCaller: "IndexByte", + + Generate: &checker.Generate{ + Pattern: `IndexByte($0, byte('f'))`, + Returns: 1, + }, + }, + { // strings.IndexFunc + Targets: checker.Strings, + Type: checker.Function, + Package: "strings", + Caller: "IndexFunc", + Args: []int{0}, + AltPackage: "bytes", + AltCaller: "IndexFunc", + + Generate: &checker.Generate{ + Pattern: `IndexFunc($0,func(r rune) bool { return true })`, + Returns: 1, + }, + }, + { // strings.IndexRune + Targets: checker.Strings, + Type: checker.Function, + Package: "strings", + Caller: "IndexRune", + Args: []int{0}, + AltPackage: "bytes", + AltCaller: "IndexRune", + + Generate: &checker.Generate{ + Pattern: `IndexRune($0, rune('ф'))`, + Returns: 1, + }, + }, + { // strings.LastIndex + Targets: 
checker.Strings, + Type: checker.Function, + Package: "strings", + Caller: "LastIndex", + Args: []int{0, 1}, + AltPackage: "bytes", + AltCaller: "LastIndex", + + Generate: &checker.Generate{ + Pattern: `LastIndex($0,$1)`, + Returns: 1, + }, + }, + { // strings.LastIndexAny + Targets: checker.Strings, + Type: checker.Function, + Package: "strings", + Caller: "LastIndexAny", + Args: []int{0}, + AltPackage: "bytes", + AltCaller: "LastIndexAny", + + Generate: &checker.Generate{ + Pattern: `LastIndexAny($0,"f")`, + Returns: 1, + }, + }, + { // strings.LastIndexByte + Targets: checker.Strings, + Type: checker.Function, + Package: "strings", + Caller: "LastIndexByte", + Args: []int{0}, + AltPackage: "bytes", + AltCaller: "LastIndexByte", + + Generate: &checker.Generate{ + Pattern: `LastIndexByte($0, byte('f'))`, + Returns: 1, + }, + }, + { // strings.LastIndexFunc + Targets: checker.Strings, + Type: checker.Function, + Package: "strings", + Caller: "LastIndexFunc", + Args: []int{0}, + AltPackage: "bytes", + AltCaller: "LastIndexFunc", + + Generate: &checker.Generate{ + Pattern: `LastIndexFunc($0, func(r rune) bool { return true })`, + Returns: 1, + }, + }, + } + + StringsBuilderMethods = []checker.Violation{ + { // (*strings.Builder).Write + Targets: checker.Bytes, + Type: checker.Method, + Package: "strings", + Struct: "Builder", + Caller: "Write", + Args: []int{0}, + AltCaller: "WriteString", + + Generate: &checker.Generate{ + PreCondition: `builder := strings.Builder{}`, + Pattern: `Write($0)`, + Returns: 2, + }, + }, + { // (*strings.Builder).WriteString + Targets: checker.Strings, + Type: checker.Method, + Package: "strings", + Struct: "Builder", + Caller: "WriteString", + Args: []int{0}, + AltCaller: "Write", + + Generate: &checker.Generate{ + PreCondition: `builder := strings.Builder{}`, + Pattern: `WriteString($0)`, + Returns: 2, + }, + }, + { // (*strings.Builder).WriteString -> (*strings.Builder).WriteRune + Targets: checker.Strings, + Type: checker.Method, + Package: "strings", + Struct: "Builder", + Caller: "WriteString", + Args: []int{0}, + ArgsType: checker.Rune, + AltCaller: "WriteRune", + }, + // { // (*strings.Builder).WriteString -> (*strings.Builder).WriteByte + // Targets: checker.Strings, + // Type: checker.Method, + // Package: "strings", + // Struct: "Builder", + // Caller: "WriteString", + // Args: []int{0}, + // ArgsType: checker.Byte, + // AltCaller: "WriteByte", // byte + // }, + } +) diff --git a/vendor/github.com/butuzov/mirror/checkers_utf8.go b/vendor/github.com/butuzov/mirror/checkers_utf8.go new file mode 100644 index 0000000000..e7c4d5ba4d --- /dev/null +++ b/vendor/github.com/butuzov/mirror/checkers_utf8.go @@ -0,0 +1,138 @@ +package mirror + +import "github.com/butuzov/mirror/internal/checker" + +var UTF8Functions = []checker.Violation{ + { // utf8.Valid + Type: checker.Function, + Targets: checker.Bytes, + Package: "unicode/utf8", + Caller: "Valid", + Args: []int{0}, + AltCaller: "ValidString", + + Generate: &checker.Generate{ + Pattern: `Valid($0)`, + Returns: 1, + }, + }, + { // utf8.ValidString + Targets: checker.Strings, + Type: checker.Function, + Package: "unicode/utf8", + Caller: "ValidString", + Args: []int{0}, + AltCaller: "Valid", + + Generate: &checker.Generate{ + Pattern: `ValidString($0)`, + Returns: 1, + }, + }, + { // utf8.FullRune + Targets: checker.Bytes, + Type: checker.Function, + Package: "unicode/utf8", + Caller: "FullRune", + Args: []int{0}, + AltCaller: "FullRuneInString", + + Generate: &checker.Generate{ + Pattern: `FullRune($0)`, + 
Returns: 1, + }, + }, + { // utf8.FullRuneInString + Targets: checker.Strings, + Type: checker.Function, + Package: "unicode/utf8", + Caller: "FullRuneInString", + Args: []int{0}, + AltCaller: "FullRune", + + Generate: &checker.Generate{ + Pattern: `FullRuneInString($0)`, + Returns: 1, + }, + }, + + { // bytes.RuneCount + Targets: checker.Bytes, + Type: checker.Function, + Package: "unicode/utf8", + Caller: "RuneCount", + Args: []int{0}, + AltCaller: "RuneCountInString", + + Generate: &checker.Generate{ + Pattern: `RuneCount($0)`, + Returns: 1, + }, + }, + { // bytes.RuneCountInString + Targets: checker.Strings, + Type: checker.Function, + Package: "unicode/utf8", + Caller: "RuneCountInString", + Args: []int{0}, + AltCaller: "RuneCount", + + Generate: &checker.Generate{ + Pattern: `RuneCountInString($0)`, + Returns: 1, + }, + }, + + { // bytes.DecodeLastRune + Targets: checker.Bytes, + Type: checker.Function, + Package: "unicode/utf8", + Caller: "DecodeLastRune", + Args: []int{0}, + AltCaller: "DecodeLastRuneInString", + + Generate: &checker.Generate{ + Pattern: `DecodeLastRune($0)`, + Returns: 2, + }, + }, + { // utf8.DecodeLastRuneInString + Targets: checker.Strings, + Type: checker.Function, + Package: "unicode/utf8", + Caller: "DecodeLastRuneInString", + Args: []int{0}, + AltCaller: "DecodeLastRune", + + Generate: &checker.Generate{ + Pattern: `DecodeLastRuneInString($0)`, + Returns: 2, + }, + }, + { // utf8.DecodeRune + Targets: checker.Bytes, + Type: checker.Function, + Package: "unicode/utf8", + Caller: "DecodeRune", + Args: []int{0}, + AltCaller: "DecodeRuneInString", + + Generate: &checker.Generate{ + Pattern: `DecodeRune($0)`, + Returns: 2, + }, + }, + { // utf8.DecodeRuneInString + Targets: checker.Strings, + Type: checker.Function, + Package: "unicode/utf8", + Args: []int{0}, + Caller: "DecodeRuneInString", + AltCaller: "DecodeRune", + + Generate: &checker.Generate{ + Pattern: `DecodeRuneInString($0)`, + Returns: 2, + }, + }, +} diff --git a/vendor/github.com/butuzov/mirror/internal/checker/checker.go b/vendor/github.com/butuzov/mirror/internal/checker/checker.go new file mode 100644 index 0000000000..c1a9416314 --- /dev/null +++ b/vendor/github.com/butuzov/mirror/internal/checker/checker.go @@ -0,0 +1,147 @@ +package checker + +import ( + "bytes" + "go/ast" + "go/printer" + "go/token" + "go/types" + "strings" +) + +// Checker will perform standart check on package and its methods. +type Checker struct { + Violations []Violation // List of available violations + Packages map[string][]int // Storing indexes of Violations per pkg/kg.Struct + Type func(ast.Expr) string // Type Checker closure. + Print func(ast.Node) []byte // String representation of the expresion. +} + +func New(violations ...[]Violation) Checker { + c := Checker{ + Packages: make(map[string][]int), + } + + for i := range violations { + c.register(violations[i]) + } + + return c +} + +// Match will check the available violations we got from checks against +// the `name` caller from package `pkgName`. +func (c *Checker) Match(pkgName, name string) *Violation { + for _, v := range c.Matches(pkgName, name) { + return v + } + + return nil +} + +// Matches do same thing as Match but return a slice of violations +// as only things that require this are bytes.Buffer and strings.Builder +// it only be used in matching methods in analyzer. 
+func (c *Checker) Matches(pkgName, name string) []*Violation { + var matches []*Violation + checkStruct := strings.Contains(pkgName, ".") + + for _, idx := range c.Packages[pkgName] { + if c.Violations[idx].Caller == name { + if checkStruct == (len(c.Violations[idx].Struct) == 0) { + continue + } + + // copy violation + v := c.Violations[idx] + matches = append(matches, &v) + } + } + + return matches +} + +func (c *Checker) Handle(v *Violation, ce *ast.CallExpr) (map[int]ast.Expr, bool) { + m := map[int]ast.Expr{} + + // We going to check each of elements we mark for checking, in order to find, + // a call that violates our rules. + for _, i := range v.Args { + if i >= len(ce.Args) { + continue + } + + call, ok := ce.Args[i].(*ast.CallExpr) + if !ok { + continue + } + + // is it convertsion call + if !c.callConverts(call) { + continue + } + + // somehow no argument of call + if len(call.Args) == 0 { + continue + } + + // wrong argument type + if normalType(c.Type(call.Args[0])) != v.getArgType() { + continue + } + + m[i] = call.Args[0] + } + + return m, len(m) == len(v.Args) +} + +func (c *Checker) callConverts(ce *ast.CallExpr) bool { + switch ce.Fun.(type) { + case *ast.ArrayType, *ast.Ident: + res := c.Type(ce.Fun) + return res == "[]byte" || res == "string" + } + + return false +} + +// register violations. +func (c *Checker) register(violations []Violation) { + for _, v := range violations { // nolint: gocritic + c.Violations = append(c.Violations, v) + if len(v.Struct) > 0 { + c.registerIdxPer(v.Package + "." + v.Struct) + } + c.registerIdxPer(v.Package) + } +} + +// registerIdxPer will register last added violation element +// under pkg string. +func (c *Checker) registerIdxPer(pkg string) { + c.Packages[pkg] = append(c.Packages[pkg], len(c.Violations)-1) +} + +func WrapType(info *types.Info) func(node ast.Expr) string { + return func(node ast.Expr) string { + if t := info.TypeOf(node); t != nil { + return t.String() + } + + if tv, ok := info.Types[node]; ok { + return tv.Type.Underlying().String() + } + + return "" + } +} + +func WrapPrint(fSet *token.FileSet) func(ast.Node) []byte { + return func(node ast.Node) []byte { + var buf bytes.Buffer + printer.Fprint(&buf, fSet, node) + return buf.Bytes() + } +} diff --git a/vendor/github.com/butuzov/mirror/internal/checker/imports.go b/vendor/github.com/butuzov/mirror/internal/checker/imports.go new file mode 100644 index 0000000000..4015de5970 --- /dev/null +++ b/vendor/github.com/butuzov/mirror/internal/checker/imports.go @@ -0,0 +1,89 @@ +package checker + +import ( + "go/ast" + "go/token" + "path" + "sort" + "strings" + "sync" + + "golang.org/x/tools/go/ast/inspector" +) + +// Imports represents an imported package in a nice for lookup way... +// +// examples: +// import . "bytes" -> checker.Import{Pkg:"bytes", Val:"."} +// import name "bytes" -> checker.Import{Pkg:"bytes", Val:"name"} +type Import struct { + Pkg string // package name + Name string // alias +} + +type Imports map[string][]Import + +// we are going to have Imports entries to be sorted, but if it has less then +// `sortLowerLimit` elements we are skipping this step as its not going to +// be worth of effort. 
+const sortLowerLimit int = 13 + +// Package level lock is to prevent import map corruption +var lock sync.RWMutex + +func Load(fs *token.FileSet, ins *inspector.Inspector) Imports { + lock.Lock() + defer lock.Unlock() + + imports := make(Imports) + + // Populate imports map + ins.Preorder([]ast.Node{(*ast.ImportSpec)(nil)}, func(node ast.Node) { + importSpec, _ := node.(*ast.ImportSpec) + + var ( + key = fs.Position(node.Pos()).Filename + pkg = strings.Trim(importSpec.Path.Value, `"`) + name = importSpec.Name.String() + ) + + if importSpec.Name == nil { + name = path.Base(pkg) // note: we need only basename of the package + } + + imports[key] = append(imports[key], Import{ + Pkg: pkg, + Name: name, + }) + }) + + imports.sort() + + return imports +} + +// sort will sort imports for each of the checking files. +func (i *Imports) sort() { + for k := range *i { + if len((*i)[k]) < sortLowerLimit { + continue + } + + k := k + sort.Slice((*i)[k], func(left, right int) bool { + return (*i)[k][left].Name < (*i)[k][right].Name + }) + } +} + +func (i Imports) Lookup(file, pkg string) (string, bool) { + if _, ok := i[file]; ok { + for idx := range i[file] { + if i[file][idx].Name == pkg { + return i[file][idx].Pkg, true + } + } + } + + return "", false +} diff --git a/vendor/github.com/butuzov/mirror/internal/checker/violation.go b/vendor/github.com/butuzov/mirror/internal/checker/violation.go new file mode 100644 index 0000000000..375d3c8e65 --- /dev/null +++ b/vendor/github.com/butuzov/mirror/internal/checker/violation.go @@ -0,0 +1,208 @@ +package checker + +import ( + "bytes" + "fmt" + "go/ast" + "go/printer" + "go/token" + "path" + "strings" + + "golang.org/x/tools/go/analysis" +) + +// Type of violation: can be method or function +type ViolationType int + +const ( + Function ViolationType = iota + 1 + Method +) + +const ( + Strings string = "string" + Bytes string = "[]byte" + Byte string = "byte" + Rune string = "rune" + UntypedRune string = "untyped rune" +) + +// Violation describs what message we going to give to a particular code violation +type Violation struct { + Type ViolationType // + Args []int // Indexes of the arguments needs to be checked + ArgsType string + + Targets string + Package string + AltPackage string + Struct string + Caller string + AltCaller string + + // --- tests generation information + Generate *Generate + + // --- suggestions related info about violation of rules. + base []byte // receiver of the method or pkg name + callExpr *ast.CallExpr // actual call expression, to extract arguments + arguments map[int]ast.Expr // fixed arguments +} + +// Tests (generation) related struct. 
+type Generate struct { + PreCondition string // Precondition we want to be generated + Pattern string // Generate pattern (for the `want` message) + Returns int // Expected to return n elements +} + +func (v *Violation) With(base []byte, e *ast.CallExpr, args map[int]ast.Expr) *Violation { + v.base = base + v.callExpr = e + v.arguments = args + + return v +} + +func (v *Violation) getArgType() string { + if v.ArgsType != "" { + return v.ArgsType + } + + if v.Targets == Strings { + return Bytes + } + + return Strings +} + +func (v *Violation) Message() string { + if v.Type == Method { + return fmt.Sprintf("avoid allocations with (*%s.%s).%s", + path.Base(v.Package), v.Struct, v.AltCaller) + } + + pkg := v.Package + if len(v.AltPackage) > 0 { + pkg = v.AltPackage + } + + return fmt.Sprintf("avoid allocations with %s.%s", path.Base(pkg), v.AltCaller) +} + +func (v *Violation) suggest(fSet *token.FileSet) []byte { + var buf bytes.Buffer + + if len(v.base) > 0 { + buf.Write(v.base) + buf.WriteString(".") + } + + buf.WriteString(v.AltCaller) + buf.WriteByte('(') + for idx := range v.callExpr.Args { + if arg, ok := v.arguments[idx]; ok { + printer.Fprint(&buf, fSet, arg) + } else { + printer.Fprint(&buf, fSet, v.callExpr.Args[idx]) + } + + if idx != len(v.callExpr.Args)-1 { + buf.WriteString(", ") + } + } + buf.WriteByte(')') + + return buf.Bytes() +} + +func (v *Violation) Diagnostic(fSet *token.FileSet) analysis.Diagnostic { + diagnostic := analysis.Diagnostic{ + Pos: v.callExpr.Pos(), + End: v.callExpr.Pos(), + Message: v.Message(), + } + + var buf bytes.Buffer + printer.Fprint(&buf, fSet, v.callExpr) + noNl := bytes.IndexByte(buf.Bytes(), '\n') < 0 + + // Struct based fix. + if v.Type == Method && noNl { + diagnostic.SuggestedFixes = []analysis.SuggestedFix{{ + Message: "Fix Issue With", + TextEdits: []analysis.TextEdit{{ + Pos: v.callExpr.Pos(), End: v.callExpr.End(), NewText: v.suggest(fSet), + }}, + }} + } + + if v.AltPackage == "" { + v.AltPackage = v.Package + } + + // Hooray! we dont need to change package and redo imports. + if v.Type == Function && v.AltPackage == v.Package && noNl { + diagnostic.SuggestedFixes = []analysis.SuggestedFix{{ + Message: "Fix Issue With", + TextEdits: []analysis.TextEdit{{ + Pos: v.callExpr.Pos(), End: v.callExpr.End(), NewText: v.suggest(fSet), + }}, + }} + } + + // do not change + + return diagnostic +} + +type GolangIssue struct { + Start token.Position + End token.Position + Message string + InlineFix string + Original string +} + +// Issue inteanded to be used only with golangci-lint, bu you can use use it +// alongside Diagnostic if you wish. +func (v *Violation) Issue(fSet *token.FileSet) GolangIssue { + issue := GolangIssue{ + Start: fSet.Position(v.callExpr.Pos()), + End: fSet.Position(v.callExpr.End()), + Message: v.Message(), + } + + // original expression (useful for debug & requied for replace) + var buf bytes.Buffer + printer.Fprint(&buf, fSet, v.callExpr) + issue.Original = buf.String() + + noNl := strings.IndexByte(issue.Original, '\n') < 0 + + if v.Type == Method && noNl { + fix := v.suggest(fSet) + issue.InlineFix = string(fix) + } + + if v.AltPackage == "" { + v.AltPackage = v.Package + } + + // Hooray! we don't need to change package and redo imports. + if v.Type == Function && v.AltPackage == v.Package && noNl { + fix := v.suggest(fSet) + issue.InlineFix = string(fix) + } + + return issue +} + +// ofType normalize input types (mostly typed and untyped runes). 
+func normalType(s string) string { + if s == UntypedRune { + return Rune + } + return s +} diff --git a/vendor/github.com/butuzov/mirror/readme.md b/vendor/github.com/butuzov/mirror/readme.md new file mode 100644 index 0000000000..fcfd1de11a --- /dev/null +++ b/vendor/github.com/butuzov/mirror/readme.md @@ -0,0 +1,60 @@ +# `mirror` [![Code Coverage](https://coveralls.io/repos/github/butuzov/mirror/badge.svg?branch=main)](https://coveralls.io/github/butuzov/mirror?branch=main) [![build status](https://github.com/butuzov/mirror/actions/workflows/main.yaml/badge.svg?branch=main)]() + +`mirror` suggests use of alternative functions/methods in order to gain performance boosts by avoiding unnecessary `[]byte/string` conversion calls. See [MIRROR_FUNCS.md](MIRROR_FUNCS.md) list of mirror functions you can use in go's stdlib. + +## 🇺🇦 PLEASE HELP ME 🇺🇦 +Fundrise for scout drone **DJI Matrice 30T** for my squad (Ukrainian Forces). See more details at [butuzov/README.md](https://github.com/butuzov/butuzov/) + +## Linter Use Cases + +### `github.com/argoproj/argo-cd` + +```go +// Before +func IsValidHostname(hostname string, fqdn bool) bool { + if !fqdn { + return validHostNameRegexp.Match([]byte(hostname)) || validIPv6Regexp.Match([]byte(hostname)) + } else { + return validFQDNRegexp.Match([]byte(hostname)) + } +} + +// After: With alternative method (and lost `else` case) +func IsValidHostname(hostname string, fqdn bool) bool { + if !fqdn { + return validHostNameRegexp.MatchString(hostname) || validIPv6Regexp.MatchString(hostname) + } + + return validFQDNRegexp.MatchString(hostname) +} +``` + +## Install + +``` +go install github.com/butuzov/mirror/cmd/mirror@latest +``` + +## How to use + +You run `mirror` with [`go vet`](https://pkg.go.dev/cmd/vet): + +``` +go vet -vettool=$(which mirror) ./... +# github.com/jcmoraisjr/haproxy-ingress/pkg/common/net/ssl +pkg/common/net/ssl/ssl.go:64:11: avoid allocations with (*os.File).WriteString +pkg/common/net/ssl/ssl.go:161:12: avoid allocations with (*os.File).WriteString +pkg/common/net/ssl/ssl.go:166:3: avoid allocations with (*os.File).WriteString +``` + +Can be called directly: +``` +mirror ./... +# https://github.com/cosmtrek/air +/air/runner/util.go:149:6: avoid allocations with (*regexp.Regexp).MatchString +/air/runner/util.go:173:14: avoid allocations with (*os.File).WriteString +``` + +## Command line + +- You can add checks for `_test.go` files with cli option `--with-tests` diff --git a/vendor/github.com/catenacyber/perfsprint/LICENSE b/vendor/github.com/catenacyber/perfsprint/LICENSE new file mode 100644 index 0000000000..14c2b9e737 --- /dev/null +++ b/vendor/github.com/catenacyber/perfsprint/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2023 Catena cyber + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/catenacyber/perfsprint/analyzer/analyzer.go b/vendor/github.com/catenacyber/perfsprint/analyzer/analyzer.go new file mode 100644 index 0000000000..ad312ca697 --- /dev/null +++ b/vendor/github.com/catenacyber/perfsprint/analyzer/analyzer.go @@ -0,0 +1,597 @@ +package analyzer + +import ( + "bytes" + "go/ast" + "go/format" + "go/token" + "go/types" + "sort" + "strconv" + "strings" + + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" + + "golang.org/x/tools/go/analysis" +) + +type perfSprint struct { + intConv bool + errError bool + errorf bool + sprintf1 bool + fiximports bool +} + +func newPerfSprint() *perfSprint { + return &perfSprint{ + intConv: true, + errError: false, + errorf: true, + sprintf1: true, + fiximports: true, + } +} + +func New() *analysis.Analyzer { + n := newPerfSprint() + r := &analysis.Analyzer{ + Name: "perfsprint", + Doc: "Checks that fmt.Sprintf can be replaced with a faster alternative.", + Run: n.run, + Requires: []*analysis.Analyzer{inspect.Analyzer}, + } + r.Flags.BoolVar(&n.intConv, "int-conversion", true, "optimizes even if it requires an int or uint type cast") + r.Flags.BoolVar(&n.errError, "err-error", false, "optimizes into err.Error() even if it is only equivalent for non-nil errors") + r.Flags.BoolVar(&n.errorf, "errorf", true, "optimizes fmt.Errorf") + r.Flags.BoolVar(&n.sprintf1, "sprintf1", true, "optimizes fmt.Sprintf with only one argument") + r.Flags.BoolVar(&n.fiximports, "fiximports", true, "fix needed imports from other fixes") + return r +} + +// true if verb is a format string that could be replaced with concatenation. 
+func isConcatable(verb string) bool { + hasPrefix := + (strings.HasPrefix(verb, "%s") && !strings.Contains(verb, "%[1]s")) || + (strings.HasPrefix(verb, "%[1]s") && !strings.Contains(verb, "%s")) + hasSuffix := + (strings.HasSuffix(verb, "%s") && !strings.Contains(verb, "%[1]s")) || + (strings.HasSuffix(verb, "%[1]s") && !strings.Contains(verb, "%s")) + + return (hasPrefix || hasSuffix) && !(hasPrefix && hasSuffix) +} + +func (n *perfSprint) run(pass *analysis.Pass) (interface{}, error) { + var fmtSprintObj, fmtSprintfObj, fmtErrorfObj types.Object + for _, pkg := range pass.Pkg.Imports() { + if pkg.Path() == "fmt" { + fmtSprintObj = pkg.Scope().Lookup("Sprint") + fmtSprintfObj = pkg.Scope().Lookup("Sprintf") + fmtErrorfObj = pkg.Scope().Lookup("Errorf") + } + } + if fmtSprintfObj == nil && fmtSprintObj == nil && fmtErrorfObj == nil { + return nil, nil + } + removedFmtUsages := make(map[string]int) + neededPackages := make(map[string]map[string]bool) + + insp := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + nodeFilter := []ast.Node{ + (*ast.CallExpr)(nil), + } + insp.Preorder(nodeFilter, func(node ast.Node) { + call := node.(*ast.CallExpr) + called, ok := call.Fun.(*ast.SelectorExpr) + if !ok { + return + } + calledObj := pass.TypesInfo.ObjectOf(called.Sel) + + var ( + fn string + verb string + value ast.Expr + err error + ) + switch { + case calledObj == fmtErrorfObj && len(call.Args) == 1: + if n.errorf { + fn = "fmt.Errorf" + verb = "%s" + value = call.Args[0] + } else { + return + } + + case calledObj == fmtSprintObj && len(call.Args) == 1: + fn = "fmt.Sprint" + verb = "%v" + value = call.Args[0] + + case calledObj == fmtSprintfObj && len(call.Args) == 1: + if n.sprintf1 { + fn = "fmt.Sprintf" + verb = "%s" + value = call.Args[0] + } else { + return + } + + case calledObj == fmtSprintfObj && len(call.Args) == 2: + verbLit, ok := call.Args[0].(*ast.BasicLit) + if !ok { + return + } + verb, err = strconv.Unquote(verbLit.Value) + if err != nil { + // Probably unreachable. 
+ return + } + // one single explicit arg is simplified + if strings.HasPrefix(verb, "%[1]") { + verb = "%" + verb[4:] + } + + fn = "fmt.Sprintf" + value = call.Args[1] + + default: + return + } + + switch verb { + default: + if fn == "fmt.Sprintf" && isConcatable(verb) { + break + } + return + case "%d", "%v", "%x", "%t", "%s": + } + + valueType := pass.TypesInfo.TypeOf(value) + a, isArray := valueType.(*types.Array) + s, isSlice := valueType.(*types.Slice) + + var d *analysis.Diagnostic + switch { + case isBasicType(valueType, types.String) && oneOf(verb, "%v", "%s"): + fname := pass.Fset.File(call.Pos()).Name() + _, ok := neededPackages[fname] + if !ok { + neededPackages[fname] = make(map[string]bool) + } + removedFmtUsages[fname]++ + if fn == "fmt.Errorf" { + neededPackages[fname]["errors"] = true + d = &analysis.Diagnostic{ + Pos: call.Pos(), + End: call.End(), + Message: fn + " can be replaced with errors.New", + SuggestedFixes: []analysis.SuggestedFix{ + { + Message: "Use errors.New", + TextEdits: []analysis.TextEdit{{ + Pos: call.Pos(), + End: value.Pos(), + NewText: []byte("errors.New("), + }}, + }, + }, + } + } else { + d = &analysis.Diagnostic{ + Pos: call.Pos(), + End: call.End(), + Message: fn + " can be replaced with just using the string", + SuggestedFixes: []analysis.SuggestedFix{ + { + Message: "Just use string value", + TextEdits: []analysis.TextEdit{{ + Pos: call.Pos(), + End: call.End(), + NewText: []byte(formatNode(pass.Fset, value)), + }}, + }, + }, + } + } + case types.Implements(valueType, errIface) && oneOf(verb, "%v", "%s") && n.errError: + // known false positive if this error is nil + // fmt.Sprint(nil) does not panic like nil.Error() does + errMethodCall := formatNode(pass.Fset, value) + ".Error()" + fname := pass.Fset.File(call.Pos()).Name() + removedFmtUsages[fname]++ + d = &analysis.Diagnostic{ + Pos: call.Pos(), + End: call.End(), + Message: fn + " can be replaced with " + errMethodCall, + SuggestedFixes: []analysis.SuggestedFix{ + { + Message: "Use " + errMethodCall, + TextEdits: []analysis.TextEdit{{ + Pos: call.Pos(), + End: call.End(), + NewText: []byte(errMethodCall), + }}, + }, + }, + } + + case isBasicType(valueType, types.Bool) && oneOf(verb, "%v", "%t"): + fname := pass.Fset.File(call.Pos()).Name() + removedFmtUsages[fname]++ + _, ok := neededPackages[fname] + if !ok { + neededPackages[fname] = make(map[string]bool) + } + neededPackages[fname]["strconv"] = true + d = &analysis.Diagnostic{ + Pos: call.Pos(), + End: call.End(), + Message: fn + " can be replaced with faster strconv.FormatBool", + SuggestedFixes: []analysis.SuggestedFix{ + { + Message: "Use strconv.FormatBool", + TextEdits: []analysis.TextEdit{{ + Pos: call.Pos(), + End: value.Pos(), + NewText: []byte("strconv.FormatBool("), + }}, + }, + }, + } + + case isArray && isBasicType(a.Elem(), types.Uint8) && oneOf(verb, "%x"): + if _, ok := value.(*ast.Ident); !ok { + // Doesn't support array literals. 
+ return + } + + fname := pass.Fset.File(call.Pos()).Name() + removedFmtUsages[fname]++ + _, ok := neededPackages[fname] + if !ok { + neededPackages[fname] = make(map[string]bool) + } + neededPackages[fname]["encoding/hex"] = true + d = &analysis.Diagnostic{ + Pos: call.Pos(), + End: call.End(), + Message: fn + " can be replaced with faster hex.EncodeToString", + SuggestedFixes: []analysis.SuggestedFix{ + { + Message: "Use hex.EncodeToString", + TextEdits: []analysis.TextEdit{ + { + Pos: call.Pos(), + End: value.Pos(), + NewText: []byte("hex.EncodeToString("), + }, + { + Pos: value.End(), + End: value.End(), + NewText: []byte("[:]"), + }, + }, + }, + }, + } + case isSlice && isBasicType(s.Elem(), types.Uint8) && oneOf(verb, "%x"): + fname := pass.Fset.File(call.Pos()).Name() + removedFmtUsages[fname]++ + _, ok := neededPackages[fname] + if !ok { + neededPackages[fname] = make(map[string]bool) + } + neededPackages[fname]["encoding/hex"] = true + d = &analysis.Diagnostic{ + Pos: call.Pos(), + End: call.End(), + Message: fn + " can be replaced with faster hex.EncodeToString", + SuggestedFixes: []analysis.SuggestedFix{ + { + Message: "Use hex.EncodeToString", + TextEdits: []analysis.TextEdit{{ + Pos: call.Pos(), + End: value.Pos(), + NewText: []byte("hex.EncodeToString("), + }}, + }, + }, + } + + case isBasicType(valueType, types.Int8, types.Int16, types.Int32) && oneOf(verb, "%v", "%d") && n.intConv: + fname := pass.Fset.File(call.Pos()).Name() + removedFmtUsages[fname]++ + _, ok := neededPackages[fname] + if !ok { + neededPackages[fname] = make(map[string]bool) + } + neededPackages[fname]["strconv"] = true + d = &analysis.Diagnostic{ + Pos: call.Pos(), + End: call.End(), + Message: fn + " can be replaced with faster strconv.Itoa", + SuggestedFixes: []analysis.SuggestedFix{ + { + Message: "Use strconv.Itoa", + TextEdits: []analysis.TextEdit{ + { + Pos: call.Pos(), + End: value.Pos(), + NewText: []byte("strconv.Itoa(int("), + }, + { + Pos: value.End(), + End: value.End(), + NewText: []byte(")"), + }, + }, + }, + }, + } + case isBasicType(valueType, types.Int) && oneOf(verb, "%v", "%d"): + fname := pass.Fset.File(call.Pos()).Name() + removedFmtUsages[fname]++ + _, ok := neededPackages[fname] + if !ok { + neededPackages[fname] = make(map[string]bool) + } + neededPackages[fname]["strconv"] = true + d = &analysis.Diagnostic{ + Pos: call.Pos(), + End: call.End(), + Message: fn + " can be replaced with faster strconv.Itoa", + SuggestedFixes: []analysis.SuggestedFix{ + { + Message: "Use strconv.Itoa", + TextEdits: []analysis.TextEdit{{ + Pos: call.Pos(), + End: value.Pos(), + NewText: []byte("strconv.Itoa("), + }}, + }, + }, + } + case isBasicType(valueType, types.Int64) && oneOf(verb, "%v", "%d"): + fname := pass.Fset.File(call.Pos()).Name() + removedFmtUsages[fname]++ + _, ok := neededPackages[fname] + if !ok { + neededPackages[fname] = make(map[string]bool) + } + neededPackages[fname]["strconv"] = true + d = &analysis.Diagnostic{ + Pos: call.Pos(), + End: call.End(), + Message: fn + " can be replaced with faster strconv.FormatInt", + SuggestedFixes: []analysis.SuggestedFix{ + { + Message: "Use strconv.FormatInt", + TextEdits: []analysis.TextEdit{ + { + Pos: call.Pos(), + End: value.Pos(), + NewText: []byte("strconv.FormatInt("), + }, + { + Pos: value.End(), + End: value.End(), + NewText: []byte(", 10"), + }, + }, + }, + }, + } + + case isBasicType(valueType, types.Uint8, types.Uint16, types.Uint32, types.Uint) && oneOf(verb, "%v", "%d", "%x") && n.intConv: + base := []byte("), 10") + if verb == 
"%x" { + base = []byte("), 16") + } + fname := pass.Fset.File(call.Pos()).Name() + removedFmtUsages[fname]++ + _, ok := neededPackages[fname] + if !ok { + neededPackages[fname] = make(map[string]bool) + } + neededPackages[fname]["strconv"] = true + d = &analysis.Diagnostic{ + Pos: call.Pos(), + End: call.End(), + Message: fn + " can be replaced with faster strconv.FormatUint", + SuggestedFixes: []analysis.SuggestedFix{ + { + Message: "Use strconv.FormatUint", + TextEdits: []analysis.TextEdit{ + { + Pos: call.Pos(), + End: value.Pos(), + NewText: []byte("strconv.FormatUint(uint64("), + }, + { + Pos: value.End(), + End: value.End(), + NewText: base, + }, + }, + }, + }, + } + case isBasicType(valueType, types.Uint64) && oneOf(verb, "%v", "%d", "%x"): + base := []byte(", 10") + if verb == "%x" { + base = []byte(", 16") + } + fname := pass.Fset.File(call.Pos()).Name() + removedFmtUsages[fname]++ + _, ok := neededPackages[fname] + if !ok { + neededPackages[fname] = make(map[string]bool) + } + neededPackages[fname]["strconv"] = true + d = &analysis.Diagnostic{ + Pos: call.Pos(), + End: call.End(), + Message: fn + " can be replaced with faster strconv.FormatUint", + SuggestedFixes: []analysis.SuggestedFix{ + { + Message: "Use strconv.FormatUint", + TextEdits: []analysis.TextEdit{ + { + Pos: call.Pos(), + End: value.Pos(), + NewText: []byte("strconv.FormatUint("), + }, + { + Pos: value.End(), + End: value.End(), + NewText: base, + }, + }, + }, + }, + } + case isBasicType(valueType, types.String) && fn == "fmt.Sprintf" && isConcatable(verb): + var fix string + if strings.HasSuffix(verb, "%s") { + fix = strconv.Quote(verb[:len(verb)-2]) + "+" + formatNode(pass.Fset, value) + } else if strings.HasSuffix(verb, "%[1]s") { + fix = strconv.Quote(verb[:len(verb)-5]) + "+" + formatNode(pass.Fset, value) + } else if strings.HasPrefix(verb, "%s") { + fix = formatNode(pass.Fset, value) + "+" + strconv.Quote(verb[2:]) + } else { + fix = formatNode(pass.Fset, value) + "+" + strconv.Quote(verb[5:]) + } + fname := pass.Fset.File(call.Pos()).Name() + removedFmtUsages[fname]++ + d = &analysis.Diagnostic{ + Pos: call.Pos(), + End: call.End(), + Message: fn + " can be replaced with string addition", + SuggestedFixes: []analysis.SuggestedFix{ + { + Message: "Use string addition", + TextEdits: []analysis.TextEdit{{ + Pos: call.Pos(), + End: call.End(), + NewText: []byte(fix), + }}, + }, + }, + } + } + + if d != nil { + pass.Report(*d) + } + }) + + if len(removedFmtUsages) > 0 && n.fiximports { + for _, pkg := range pass.Pkg.Imports() { + if pkg.Path() == "fmt" { + insp = pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + nodeFilter = []ast.Node{ + (*ast.SelectorExpr)(nil), + } + insp.Preorder(nodeFilter, func(node ast.Node) { + selec := node.(*ast.SelectorExpr) + selecok, ok := selec.X.(*ast.Ident) + if ok { + pkgname, ok := pass.TypesInfo.ObjectOf(selecok).(*types.PkgName) + if ok && pkgname.Name() == pkg.Name() { + fname := pass.Fset.File(pkgname.Pos()).Name() + removedFmtUsages[fname]-- + } + } + }) + } else if pkg.Path() == "errors" || pkg.Path() == "strconv" || pkg.Path() == "encoding/hex" { + insp = pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + nodeFilter = []ast.Node{ + (*ast.ImportSpec)(nil), + } + insp.Preorder(nodeFilter, func(node ast.Node) { + gd := node.(*ast.ImportSpec) + if gd.Path.Value == strconv.Quote(pkg.Path()) { + fname := pass.Fset.File(gd.Pos()).Name() + _, ok := neededPackages[fname] + if ok { + delete(neededPackages[fname], pkg.Path()) + } + } + }) + } + } + insp = 
pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + nodeFilter = []ast.Node{ + (*ast.ImportSpec)(nil), + } + insp.Preorder(nodeFilter, func(node ast.Node) { + gd := node.(*ast.ImportSpec) + if gd.Path.Value == `"fmt"` { + fix := "" + fname := pass.Fset.File(gd.Pos()).Name() + if removedFmtUsages[fname] < 0 { + fix += `"fmt"` + if len(neededPackages[fname]) == 0 { + return + } + } + keys := make([]string, 0, len(neededPackages[fname])) + for k := range neededPackages[fname] { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + fix = fix + "\n\t\"" + k + `"` + } + pass.Report(analysis.Diagnostic{ + Pos: gd.Pos(), + End: gd.End(), + Message: "Fix imports", + SuggestedFixes: []analysis.SuggestedFix{ + { + Message: "Fix imports", + TextEdits: []analysis.TextEdit{{ + Pos: gd.Pos(), + End: gd.End(), + NewText: []byte(fix), + }}, + }, + }}) + } + }) + } + + return nil, nil +} + +var errIface = types.Universe.Lookup("error").Type().Underlying().(*types.Interface) + +func isBasicType(lhs types.Type, expected ...types.BasicKind) bool { + for _, rhs := range expected { + if types.Identical(lhs, types.Typ[rhs]) { + return true + } + } + return false +} + +func formatNode(fset *token.FileSet, node ast.Node) string { + buf := new(bytes.Buffer) + if err := format.Node(buf, fset, node); err != nil { + return "" + } + return buf.String() +} + +func oneOf[T comparable](v T, expected ...T) bool { + for _, rhs := range expected { + if v == rhs { + return true + } + } + return false +} diff --git a/vendor/github.com/ccojocar/zxcvbn-go/.gitignore b/vendor/github.com/ccojocar/zxcvbn-go/.gitignore new file mode 100644 index 0000000000..e032cc2fcb --- /dev/null +++ b/vendor/github.com/ccojocar/zxcvbn-go/.gitignore @@ -0,0 +1,5 @@ +zxcvbn +debug.test + +# SBOMs generated during CI +/bom.json diff --git a/vendor/github.com/ccojocar/zxcvbn-go/.golangci.yml b/vendor/github.com/ccojocar/zxcvbn-go/.golangci.yml new file mode 100644 index 0000000000..b54f70092e --- /dev/null +++ b/vendor/github.com/ccojocar/zxcvbn-go/.golangci.yml @@ -0,0 +1,39 @@ +linters: + enable: + - asciicheck + - bodyclose + - dogsled + - durationcheck + - errcheck + - errorlint + - exportloopref + - gci + - ginkgolinter + - gofmt + - gofumpt + - goimports + - gosimple + - govet + - importas + - ineffassign + - megacheck + - misspell + - nakedret + - nolintlint + - revive + - staticcheck + - typecheck + - unconvert + - unparam + - unused + - wastedassign + +linters-settings: + gci: + sections: + - standard + - default + - prefix(github.com/ccojocar) + +run: + timeout: 5m diff --git a/vendor/github.com/ccojocar/zxcvbn-go/.goreleaser.yml b/vendor/github.com/ccojocar/zxcvbn-go/.goreleaser.yml new file mode 100644 index 0000000000..2386aeee52 --- /dev/null +++ b/vendor/github.com/ccojocar/zxcvbn-go/.goreleaser.yml @@ -0,0 +1,27 @@ +--- +project_name: zxcvbn-go + +release: + extra_files: + - glob: ./bom.json + github: + owner: ccojocar + name: zxcvbn-go + +builds: + - main: ./testapp/ + binary: zxcvbn-go + goos: + - darwin + - linux + - windows + goarch: + - amd64 + - arm64 + - s390x + ldflags: -X main.Version={{.Version}} -X main.GitTag={{.Tag}} -X main.BuildDate={{.Date}} + env: + - CGO_ENABLED=0 + +gomod: + proxy: true diff --git a/vendor/github.com/nbutton23/zxcvbn-go/LICENSE.txt b/vendor/github.com/ccojocar/zxcvbn-go/LICENSE.txt similarity index 100% rename from vendor/github.com/nbutton23/zxcvbn-go/LICENSE.txt rename to vendor/github.com/ccojocar/zxcvbn-go/LICENSE.txt diff --git 
a/vendor/github.com/ccojocar/zxcvbn-go/Makefile b/vendor/github.com/ccojocar/zxcvbn-go/Makefile new file mode 100644 index 0000000000..0690f37538 --- /dev/null +++ b/vendor/github.com/ccojocar/zxcvbn-go/Makefile @@ -0,0 +1,61 @@ +GIT_TAG?= $(shell git describe --always --tags) +BIN = zxcvbn-go +FMT_CMD = $(gofmt -s -l -w $(find . -type f -name '*.go' -not -path './vendor/*') | tee /dev/stderr) +IMAGE_REPO = ccojocar +DATE_FMT=+%Y-%m-%d +ifdef SOURCE_DATE_EPOCH + BUILD_DATE ?= $(shell date -u -d "@$(SOURCE_DATE_EPOCH)" "$(DATE_FMT)" 2>/dev/null || date -u -r "$(SOURCE_DATE_EPOCH)" "$(DATE_FMT)" 2>/dev/null || date -u "$(DATE_FMT)") +else + BUILD_DATE ?= $(shell date "$(DATE_FMT)") +endif +BUILDFLAGS := "-w -s -X 'main.Version=$(GIT_TAG)' -X 'main.GitTag=$(GIT_TAG)' -X 'main.BuildDate=$(BUILD_DATE)'" +CGO_ENABLED = 0 +GO := GO111MODULE=on go +GO_NOMOD :=GO111MODULE=off go +GOPATH ?= $(shell $(GO) env GOPATH) +GOBIN ?= $(GOPATH)/bin +GO_MINOR_VERSION = $(shell $(GO) version | cut -c 14- | cut -d' ' -f1 | cut -d'.' -f2) +GOVULN_MIN_VERSION = 17 +GO_VERSION = 1.20 + +default: + $(MAKE) test + +install-govulncheck: + @if [ $(GO_MINOR_VERSION) -gt $(GOVULN_MIN_VERSION) ]; then \ + go install golang.org/x/vuln/cmd/govulncheck@latest; \ + fi + +test-all: fmt vet lint sec govulncheck test + +test: + go test -v ./... + +fmt: + @echo "FORMATTING" + @FORMATTED=`$(GO) fmt ./...` + @([ ! -z "$(FORMATTED)" ] && printf "Fixed unformatted files:\n$(FORMATTED)") || true + +vet: + @echo "VETTING" + $(GO) vet ./... + +lint: + @echo "LINTING: golangci-lint" + golangci-lint run + +sec: + @echo "SECURITY SCANNING" + gosec ./... + +govulncheck: install-govulncheck + @echo "CHECKING VULNERABILITIES" + @if [ $(GO_MINOR_VERSION) -gt $(GOVULN_MIN_VERSION) ]; then \ + govulncheck ./...; \ + fi + +clean: + rm -rf build vendor dist coverage.txt + rm -f release image $(BIN) + +.PHONY: test test-all fmt vet govulncheck clean diff --git a/vendor/github.com/nbutton23/zxcvbn-go/README.md b/vendor/github.com/ccojocar/zxcvbn-go/README.md similarity index 100% rename from vendor/github.com/nbutton23/zxcvbn-go/README.md rename to vendor/github.com/ccojocar/zxcvbn-go/README.md diff --git a/vendor/github.com/nbutton23/zxcvbn-go/adjacency/adjcmartix.go b/vendor/github.com/ccojocar/zxcvbn-go/adjacency/adjcmartix.go similarity index 82% rename from vendor/github.com/nbutton23/zxcvbn-go/adjacency/adjcmartix.go rename to vendor/github.com/ccojocar/zxcvbn-go/adjacency/adjcmartix.go index 66ad30b822..34526685cc 100644 --- a/vendor/github.com/nbutton23/zxcvbn-go/adjacency/adjcmartix.go +++ b/vendor/github.com/ccojocar/zxcvbn-go/adjacency/adjcmartix.go @@ -4,7 +4,7 @@ import ( "encoding/json" "log" - "github.com/nbutton23/zxcvbn-go/data" + "github.com/ccojocar/zxcvbn-go/data" ) // Graph holds information about different graphs @@ -25,7 +25,7 @@ func init() { GraphMap["l33t"] = BuildLeet() } -//BuildQwerty builds the Qwerty Graph +// BuildQwerty builds the Qwerty Graph func BuildQwerty() Graph { data, err := data.Asset("data/Qwerty.json") if err != nil { @@ -34,7 +34,7 @@ func BuildQwerty() Graph { return getAdjancencyGraphFromFile(data, "qwerty") } -//BuildDvorak builds the Dvorak Graph +// BuildDvorak builds the Dvorak Graph func BuildDvorak() Graph { data, err := data.Asset("data/Dvorak.json") if err != nil { @@ -43,7 +43,7 @@ func BuildDvorak() Graph { return getAdjancencyGraphFromFile(data, "dvorak") } -//BuildKeypad builds the Keypad Graph +// BuildKeypad builds the Keypad Graph func BuildKeypad() Graph { data, err := 
data.Asset("data/Keypad.json") if err != nil { @@ -52,7 +52,7 @@ func BuildKeypad() Graph { return getAdjancencyGraphFromFile(data, "keypad") } -//BuildMacKeypad builds the Mac Keypad Graph +// BuildMacKeypad builds the Mac Keypad Graph func BuildMacKeypad() Graph { data, err := data.Asset("data/MacKeypad.json") if err != nil { @@ -61,7 +61,7 @@ func BuildMacKeypad() Graph { return getAdjancencyGraphFromFile(data, "mac_keypad") } -//BuildLeet builds the L33T Graph +// BuildLeet builds the L33T Graph func BuildLeet() Graph { data, err := data.Asset("data/L33t.json") if err != nil { @@ -71,7 +71,6 @@ func BuildLeet() Graph { } func getAdjancencyGraphFromFile(data []byte, name string) Graph { - var graph Graph err := json.Unmarshal(data, &graph) if err != nil { @@ -82,9 +81,9 @@ func getAdjancencyGraphFromFile(data []byte, name string) Graph { } // CalculateAvgDegree calclates the average degree between nodes in the graph -//on qwerty, 'g' has degree 6, being adjacent to 'ftyhbv'. '\' has degree 1. -//this calculates the average over all keys. -//TODO double check that i ported this correctly scoring.coffee ln 5 +// on qwerty, 'g' has degree 6, being adjacent to 'ftyhbv'. '\' has degree 1. +// this calculates the average over all keys. +// TODO double check that i ported this correctly scoring.coffee ln 5 func (adjGrp Graph) CalculateAvgDegree() float64 { if adjGrp.averageDegree != float64(0) { return adjGrp.averageDegree @@ -92,14 +91,12 @@ func (adjGrp Graph) CalculateAvgDegree() float64 { var avg float64 var count float64 for _, value := range adjGrp.Graph { - for _, char := range value { if len(char) != 0 || char != " " { avg += float64(len(char)) count++ } } - } adjGrp.averageDegree = avg / count diff --git a/vendor/github.com/nbutton23/zxcvbn-go/data/bindata.go b/vendor/github.com/ccojocar/zxcvbn-go/data/bindata.go similarity index 99% rename from vendor/github.com/nbutton23/zxcvbn-go/data/bindata.go rename to vendor/github.com/ccojocar/zxcvbn-go/data/bindata.go index f3a0c010ca..3db0f1b100 100644 --- a/vendor/github.com/nbutton23/zxcvbn-go/data/bindata.go +++ b/vendor/github.com/ccojocar/zxcvbn-go/data/bindata.go @@ -33,7 +33,7 @@ func bindataRead(data []byte, name string) ([]byte, error) { } var buf bytes.Buffer - _, err = io.Copy(&buf, gz) + _, err = io.Copy(&buf, gz) // #nosec clErr := gz.Close() if err != nil { @@ -345,11 +345,13 @@ var _bindata = map[string]func() (*asset, error){ // directory embedded in the file by go-bindata. // For example if you run go-bindata on data/... 
and data contains the // following hierarchy: -// data/ -// foo.txt -// img/ -// a.png -// b.png +// +// data/ +// foo.txt +// img/ +// a.png +// b.png +// // then AssetDir("data") would return []string{"foo.txt", "img"} // AssetDir("data/img") would return []string{"a.png", "b.png"} // AssetDir("foo.txt") and AssetDir("notexist") would return an error diff --git a/vendor/github.com/nbutton23/zxcvbn-go/entropy/entropyCalculator.go b/vendor/github.com/ccojocar/zxcvbn-go/entropy/entropyCalculator.go similarity index 77% rename from vendor/github.com/nbutton23/zxcvbn-go/entropy/entropyCalculator.go rename to vendor/github.com/ccojocar/zxcvbn-go/entropy/entropyCalculator.go index 8f57ea0a47..80432572bd 100644 --- a/vendor/github.com/nbutton23/zxcvbn-go/entropy/entropyCalculator.go +++ b/vendor/github.com/ccojocar/zxcvbn-go/entropy/entropyCalculator.go @@ -1,12 +1,13 @@ package entropy import ( - "github.com/nbutton23/zxcvbn-go/adjacency" - "github.com/nbutton23/zxcvbn-go/match" - "github.com/nbutton23/zxcvbn-go/utils/math" "math" "regexp" "unicode" + + "github.com/ccojocar/zxcvbn-go/adjacency" + "github.com/ccojocar/zxcvbn-go/match" + zxcvbnmath "github.com/ccojocar/zxcvbn-go/utils/math" ) const ( @@ -27,7 +28,7 @@ var ( func DictionaryEntropy(match match.Match, rank float64) float64 { baseEntropy := math.Log2(rank) upperCaseEntropy := extraUpperCaseEntropy(match) - //TODO: L33t + // TODO: L33t return baseEntropy + upperCaseEntropy } @@ -46,18 +47,18 @@ func extraUpperCaseEntropy(match match.Match) float64 { return float64(0) } - //a capitalized word is the most common capitalization scheme, - //so it only doubles the search space (uncapitalized + capitalized): 1 extra bit of entropy. - //allcaps and end-capitalized are common enough too, underestimate as 1 extra bit to be safe. + // a capitalized word is the most common capitalization scheme, + // so it only doubles the search space (uncapitalized + capitalized): 1 extra bit of entropy. + // allcaps and end-capitalized are common enough too, underestimate as 1 extra bit to be safe. for _, matcher := range []*regexp.Regexp{startUpperRx, endUpperRx, allUpperRx} { if matcher.MatchString(word) { return float64(1) } } - //Otherwise calculate the number of ways to capitalize U+L uppercase+lowercase letters with U uppercase letters or - //less. Or, if there's more uppercase than lower (for e.g. PASSwORD), the number of ways to lowercase U+L letters - //with L lowercase letters or less. + // Otherwise calculate the number of ways to capitalize U+L uppercase+lowercase letters with U uppercase letters or + // less. Or, if there's more uppercase than lower (for e.g. PASSwORD), the number of ways to lowercase U+L letters + // with L lowercase letters or less. 
countUpper, countLower := float64(0), float64(0) for _, char := range word { @@ -71,21 +72,21 @@ func extraUpperCaseEntropy(match match.Match) float64 { var possibililities float64 for i := float64(0); i <= math.Min(countUpper, countLower); i++ { - possibililities += float64(zxcvbnmath.NChoseK(totalLenght, i)) + possibililities += zxcvbnmath.NChoseK(totalLenght, i) } if possibililities < 1 { return float64(1) } - return float64(math.Log2(possibililities)) + return (math.Log2(possibililities)) } // SpatialEntropy calculates the entropy for spatial matches func SpatialEntropy(match match.Match, turns int, shiftCount int) float64 { var s, d float64 if match.DictionaryName == "qwerty" || match.DictionaryName == "dvorak" { - //todo: verify qwerty and dvorak have the same length and degree + // todo: verify qwerty and dvorak have the same length and degree s = float64(len(adjacency.BuildQwerty().Graph)) d = adjacency.BuildQwerty().CalculateAvgDegree() } else { @@ -97,8 +98,8 @@ func SpatialEntropy(match match.Match, turns int, shiftCount int) float64 { length := float64(len(match.Token)) - //TODO: Should this be <= or just < ? - //Estimate the number of possible patterns w/ length L or less with t turns or less + // TODO: Should this be <= or just < ? + // Estimate the number of possible patterns w/ length L or less with t turns or less for i := float64(2); i <= length+1; i++ { possibleTurns := math.Min(float64(turns), i-1) for j := float64(1); j <= possibleTurns+1; j++ { @@ -108,8 +109,8 @@ func SpatialEntropy(match match.Match, turns int, shiftCount int) float64 { } entropy := math.Log2(possibilities) - //add extra entropu for shifted keys. ( % instead of 5 A instead of a) - //Math is similar to extra entropy for uppercase letters in dictionary matches. + // add extra entropu for shifted keys. ( % instead of 5 A instead of a) + // Math is similar to extra entropy for uppercase letters in dictionary matches. if S := float64(shiftCount); S > float64(0) { possibilities = float64(0) @@ -134,7 +135,7 @@ func RepeatEntropy(match match.Match) float64 { } // CalcBruteForceCardinality calculates the brute force cardinality -//TODO: Validate against python +// TODO: Validate against python func CalcBruteForceCardinality(password string) float64 { lower, upper, digits, symbols := float64(0), float64(0), float64(0), float64(0) @@ -157,12 +158,12 @@ func CalcBruteForceCardinality(password string) float64 { // SequenceEntropy calculates the entropy for sequences such as 4567 or cdef func SequenceEntropy(match match.Match, dictionaryLength int, ascending bool) float64 { firstChar := match.Token[0] - baseEntropy := float64(0) + var baseEntropy float64 if string(firstChar) == "a" || string(firstChar) == "1" { baseEntropy = float64(0) } else { baseEntropy = math.Log2(float64(dictionaryLength)) - //TODO: should this be just the first or any char? + // TODO: should this be just the first or any char? if unicode.IsUpper(rune(firstChar)) { baseEntropy++ } @@ -183,7 +184,7 @@ func ExtraLeetEntropy(match match.Match, password string) float64 { if string(char) != string(match.Token[index]) { subsitutions++ } else { - //TODO: Make this only true for 1337 chars that are not subs? + // TODO: Make this only true for 1337 chars that are not subs? 
unsub++ } } @@ -210,7 +211,7 @@ func DateEntropy(dateMatch match.DateMatch) float64 { } if dateMatch.Separator != "" { - entropy += 2 //add two bits for separator selection [/,-,.,etc] + entropy += 2 // add two bits for separator selection [/,-,.,etc] } return entropy } diff --git a/vendor/github.com/nbutton23/zxcvbn-go/frequency/frequency.go b/vendor/github.com/ccojocar/zxcvbn-go/frequency/frequency.go similarity index 96% rename from vendor/github.com/nbutton23/zxcvbn-go/frequency/frequency.go rename to vendor/github.com/ccojocar/zxcvbn-go/frequency/frequency.go index d056e4d4e6..4f51369e1f 100644 --- a/vendor/github.com/nbutton23/zxcvbn-go/frequency/frequency.go +++ b/vendor/github.com/ccojocar/zxcvbn-go/frequency/frequency.go @@ -4,7 +4,7 @@ import ( "encoding/json" "log" - "github.com/nbutton23/zxcvbn-go/data" + "github.com/ccojocar/zxcvbn-go/data" ) // List holds a frequency list @@ -28,8 +28,8 @@ func init() { Lists["Surname"] = getStringListFromAsset(surnameFilePath, "Surname") Lists["English"] = getStringListFromAsset(englishFilePath, "English") Lists["Passwords"] = getStringListFromAsset(passwordsFilePath, "Passwords") - } + func getAsset(name string) []byte { data, err := data.Asset(name) if err != nil { @@ -38,8 +38,8 @@ func getAsset(name string) []byte { return data } -func getStringListFromAsset(data []byte, name string) List { +func getStringListFromAsset(data []byte, name string) List { var tempList List err := json.Unmarshal(data, &tempList) if err != nil { diff --git a/vendor/github.com/nbutton23/zxcvbn-go/match/match.go b/vendor/github.com/ccojocar/zxcvbn-go/match/match.go similarity index 80% rename from vendor/github.com/nbutton23/zxcvbn-go/match/match.go rename to vendor/github.com/ccojocar/zxcvbn-go/match/match.go index dd30bea042..998dde1112 100644 --- a/vendor/github.com/nbutton23/zxcvbn-go/match/match.go +++ b/vendor/github.com/ccojocar/zxcvbn-go/match/match.go @@ -1,14 +1,16 @@ package match -//Matches is an alies for []Match used for sorting +// Matches is an alies for []Match used for sorting type Matches []Match func (s Matches) Len() int { return len(s) } + func (s Matches) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + func (s Matches) Less(i, j int) bool { if s[i].I < s[j].I { return true @@ -28,7 +30,7 @@ type Match struct { Entropy float64 } -//DateMatch is specifilly a match for type date +// DateMatch is specifilly a match for type date type DateMatch struct { Pattern string I, J int @@ -37,7 +39,7 @@ type DateMatch struct { Day, Month, Year int64 } -//Matcher are a func and ID that can be used to match different passwords +// Matcher are a func and ID that can be used to match different passwords type Matcher struct { MatchingFunc func(password string) []Match ID string diff --git a/vendor/github.com/nbutton23/zxcvbn-go/matching/dateMatchers.go b/vendor/github.com/ccojocar/zxcvbn-go/matching/dateMatchers.go similarity index 93% rename from vendor/github.com/nbutton23/zxcvbn-go/matching/dateMatchers.go rename to vendor/github.com/ccojocar/zxcvbn-go/matching/dateMatchers.go index 8dfdf2410b..fd7f383320 100644 --- a/vendor/github.com/nbutton23/zxcvbn-go/matching/dateMatchers.go +++ b/vendor/github.com/ccojocar/zxcvbn-go/matching/dateMatchers.go @@ -5,8 +5,8 @@ import ( "strconv" "strings" - "github.com/nbutton23/zxcvbn-go/entropy" - "github.com/nbutton23/zxcvbn-go/match" + "github.com/ccojocar/zxcvbn-go/entropy" + "github.com/ccojocar/zxcvbn-go/match" ) const ( @@ -20,12 +20,12 @@ var ( dateWithOutSepMatch = regexp.MustCompile(`\d{4,8}`) ) 
-//FilterDateSepMatcher can be pass to zxcvbn-go.PasswordStrength to skip that matcher +// FilterDateSepMatcher can be pass to zxcvbn-go.PasswordStrength to skip that matcher func FilterDateSepMatcher(m match.Matcher) bool { return m.ID == dateSepMatcherName } -//FilterDateWithoutSepMatcher can be pass to zxcvbn-go.PasswordStrength to skip that matcher +// FilterDateWithoutSepMatcher can be pass to zxcvbn-go.PasswordStrength to skip that matcher func FilterDateWithoutSepMatcher(m match.Matcher) bool { return m.ID == dateWithOutSepMatcherName } @@ -64,8 +64,8 @@ func dateSepMatcher(password string) []match.Match { return matches } -func dateSepMatchHelper(password string) []match.DateMatch { +func dateSepMatchHelper(password string) []match.DateMatch { var matches []match.DateMatch for _, v := range dateRxYearSuffix.FindAllString(password, len(password)) { @@ -101,7 +101,6 @@ func dateSepMatchHelper(password string) []match.DateMatch { } } return out - } type dateMatchCandidate struct { @@ -136,7 +135,7 @@ func dateWithoutSepMatch(password string) []match.Match { return matches } -//TODO Has issues with 6 digit dates +// TODO Has issues with 6 digit dates func dateWithoutSepMatchHelper(password string) (matches []match.DateMatch) { for _, v := range dateWithOutSepMatch.FindAllString(password, len(password)) { i := strings.Index(password, v) @@ -146,17 +145,17 @@ func dateWithoutSepMatchHelper(password string) (matches []match.DateMatch) { var candidatesRoundOne []dateMatchCandidate if length <= 6 { - //2-digit year prefix + // 2-digit year prefix candidatesRoundOne = append(candidatesRoundOne, buildDateMatchCandidate(v[2:], v[0:2], i, j)) - //2-digityear suffix + // 2-digityear suffix candidatesRoundOne = append(candidatesRoundOne, buildDateMatchCandidate(v[0:lastIndex-2], v[lastIndex-2:], i, j)) } if length >= 6 { - //4-digit year prefix + // 4-digit year prefix candidatesRoundOne = append(candidatesRoundOne, buildDateMatchCandidate(v[4:], v[0:4], i, j)) - //4-digit year sufix + // 4-digit year sufix candidatesRoundOne = append(candidatesRoundOne, buildDateMatchCandidate(v[0:lastIndex-3], v[lastIndex-3:], i, j)) } @@ -179,7 +178,6 @@ func dateWithoutSepMatchHelper(password string) (matches []match.DateMatch) { } intMonth, err := strconv.ParseInt(candidate.Month, 10, 16) - if err != nil { continue } @@ -204,6 +202,5 @@ func buildDateMatchCandidate(dayMonth, year string, i, j int) dateMatchCandidate } func buildDateMatchCandidateTwo(day, month string, year string, i, j int) dateMatchCandidateTwo { - return dateMatchCandidateTwo{Day: day, Month: month, Year: year, I: i, J: j} } diff --git a/vendor/github.com/nbutton23/zxcvbn-go/matching/dictionaryMatch.go b/vendor/github.com/ccojocar/zxcvbn-go/matching/dictionaryMatch.go similarity index 89% rename from vendor/github.com/nbutton23/zxcvbn-go/matching/dictionaryMatch.go rename to vendor/github.com/ccojocar/zxcvbn-go/matching/dictionaryMatch.go index 4ddb2c3b01..d0d4501880 100644 --- a/vendor/github.com/nbutton23/zxcvbn-go/matching/dictionaryMatch.go +++ b/vendor/github.com/ccojocar/zxcvbn-go/matching/dictionaryMatch.go @@ -3,8 +3,8 @@ package matching import ( "strings" - "github.com/nbutton23/zxcvbn-go/entropy" - "github.com/nbutton23/zxcvbn-go/match" + "github.com/ccojocar/zxcvbn-go/entropy" + "github.com/ccojocar/zxcvbn-go/match" ) func buildDictMatcher(dictName string, rankedDict map[string]int) func(password string) []match.Match { @@ -15,7 +15,6 @@ func buildDictMatcher(dictName string, rankedDict map[string]int) func(password } return 
matches } - } func dictionaryMatch(password string, dictionaryName string, rankedDict map[string]int) []match.Match { @@ -29,7 +28,8 @@ func dictionaryMatch(password string, dictionaryName string, rankedDict map[stri for j := i; j < length; j++ { word := pwLowerRunes[i : j+1] if val, ok := rankedDict[string(word)]; ok { - matchDic := match.Match{Pattern: "dictionary", + matchDic := match.Match{ + Pattern: "dictionary", DictionaryName: dictionaryName, I: i, J: j, @@ -46,7 +46,6 @@ func dictionaryMatch(password string, dictionaryName string, rankedDict map[stri } func buildRankedDict(unrankedList []string) map[string]int { - result := make(map[string]int) for i, v := range unrankedList { diff --git a/vendor/github.com/nbutton23/zxcvbn-go/matching/leet.go b/vendor/github.com/ccojocar/zxcvbn-go/matching/leet.go similarity index 95% rename from vendor/github.com/nbutton23/zxcvbn-go/matching/leet.go rename to vendor/github.com/ccojocar/zxcvbn-go/matching/leet.go index 610f1973fc..1f303aa6ea 100644 --- a/vendor/github.com/nbutton23/zxcvbn-go/matching/leet.go +++ b/vendor/github.com/ccojocar/zxcvbn-go/matching/leet.go @@ -3,14 +3,14 @@ package matching import ( "strings" - "github.com/nbutton23/zxcvbn-go/entropy" - "github.com/nbutton23/zxcvbn-go/match" + "github.com/ccojocar/zxcvbn-go/entropy" + "github.com/ccojocar/zxcvbn-go/match" ) // L33TMatcherName id const L33TMatcherName = "l33t" -//FilterL33tMatcher can be pass to zxcvbn-go.PasswordStrength to skip that matcher +// FilterL33tMatcher can be pass to zxcvbn-go.PasswordStrength to skip that matcher func FilterL33tMatcher(m match.Matcher) bool { return m.ID == L33TMatcherName } @@ -105,7 +105,7 @@ func createListOfMapsWithoutConflicts(table map[string][]string) []map[string][] return result } -// This function retrieves the list of values that appear for one or more keys. This is usefull to +// This function retrieves the list of values that appear for one or more keys. This is useful to // know which l33t chars can represent more than one letter. func retrieveConflictsListFromTable(table map[string][]string) []string { result := []string{} @@ -128,7 +128,7 @@ func retrieveConflictsListFromTable(table map[string][]string) []string { } // This function aims to create different maps for a given char if this char represents a conflict. -// If the specified char is not a conflit one, the same map will be returned. In scenarios which +// If the specified char is not a conflict one, the same map will be returned. In scenarios which // the provided char can not be found on map, an empty list will be returned. This function was // designed to be used on conflicts situations. func createDifferentMapsForLeetChar(table map[string][]string, leetChar string) []map[string][]string { @@ -158,7 +158,7 @@ func retrieveListOfKeysWithSpecificValueFromTable(table map[string][]string, val return result } -// This function returns a lsit of substitution map from a given table. Each map in the result will +// This function returns a list of substitution map from a given table. Each map in the result will // provide only one representation for each value. As an example, if the provided map contains the // values "@" and "4" in the possibilities to represent "a", two maps will be created where one // will contain "a" mapping to "@" and the other one will provide "a" mapping to "4". 
diff --git a/vendor/github.com/nbutton23/zxcvbn-go/matching/matching.go b/vendor/github.com/ccojocar/zxcvbn-go/matching/matching.go similarity index 87% rename from vendor/github.com/nbutton23/zxcvbn-go/matching/matching.go rename to vendor/github.com/ccojocar/zxcvbn-go/matching/matching.go index 4577db8a4f..c6948067bc 100644 --- a/vendor/github.com/nbutton23/zxcvbn-go/matching/matching.go +++ b/vendor/github.com/ccojocar/zxcvbn-go/matching/matching.go @@ -3,9 +3,9 @@ package matching import ( "sort" - "github.com/nbutton23/zxcvbn-go/adjacency" - "github.com/nbutton23/zxcvbn-go/frequency" - "github.com/nbutton23/zxcvbn-go/match" + "github.com/ccojocar/zxcvbn-go/adjacency" + "github.com/ccojocar/zxcvbn-go/frequency" + "github.com/ccojocar/zxcvbn-go/match" ) var ( @@ -23,8 +23,7 @@ func init() { // Omnimatch runs all matchers against the password func Omnimatch(password string, userInputs []string, filters ...func(match.Matcher) bool) (matches []match.Match) { - - //Can I run into the issue where nil is not equal to nil? + // Can I run into the issue where nil is not equal to nil? if dictionaryMatchers == nil || adjacencyGraphs == nil { loadFrequencyList() } @@ -51,7 +50,6 @@ func Omnimatch(password string, userInputs []string, filters ...func(match.Match } func loadFrequencyList() { - for n, list := range frequency.Lists { dictionaryMatchers = append(dictionaryMatchers, match.Matcher{MatchingFunc: buildDictMatcher(n, buildRankedDict(list.List)), ID: n}) } @@ -63,8 +61,8 @@ func loadFrequencyList() { adjacencyGraphs = append(adjacencyGraphs, adjacency.GraphMap["keypad"]) adjacencyGraphs = append(adjacencyGraphs, adjacency.GraphMap["macKeypad"]) - //l33tFilePath, _ := filepath.Abs("adjacency/L33t.json") - //L33T_TABLE = adjacency.GetAdjancencyGraphFromFile(l33tFilePath, "l33t") + // l33tFilePath, _ := filepath.Abs("adjacency/L33t.json") + // L33T_TABLE = adjacency.GetAdjancencyGraphFromFile(l33tFilePath, "l33t") sequences = make(map[string]string) sequences["lower"] = "abcdefghijklmnopqrstuvwxyz" @@ -78,5 +76,4 @@ func loadFrequencyList() { matchers = append(matchers, match.Matcher{MatchingFunc: l33tMatch, ID: L33TMatcherName}) matchers = append(matchers, match.Matcher{MatchingFunc: dateSepMatcher, ID: dateSepMatcherName}) matchers = append(matchers, match.Matcher{MatchingFunc: dateWithoutSepMatch, ID: dateWithOutSepMatcherName}) - } diff --git a/vendor/github.com/nbutton23/zxcvbn-go/matching/repeatMatch.go b/vendor/github.com/ccojocar/zxcvbn-go/matching/repeatMatch.go similarity index 74% rename from vendor/github.com/nbutton23/zxcvbn-go/matching/repeatMatch.go rename to vendor/github.com/ccojocar/zxcvbn-go/matching/repeatMatch.go index a93e459356..d52ba4254b 100644 --- a/vendor/github.com/nbutton23/zxcvbn-go/matching/repeatMatch.go +++ b/vendor/github.com/ccojocar/zxcvbn-go/matching/repeatMatch.go @@ -3,13 +3,13 @@ package matching import ( "strings" - "github.com/nbutton23/zxcvbn-go/entropy" - "github.com/nbutton23/zxcvbn-go/match" + "github.com/ccojocar/zxcvbn-go/entropy" + "github.com/ccojocar/zxcvbn-go/match" ) const repeatMatcherName = "REPEAT" -//FilterRepeatMatcher can be pass to zxcvbn-go.PasswordStrength to skip that matcher +// FilterRepeatMatcher can be pass to zxcvbn-go.PasswordStrength to skip that matcher func FilterRepeatMatcher(m match.Matcher) bool { return m.ID == repeatMatcherName } @@ -17,7 +17,7 @@ func FilterRepeatMatcher(m match.Matcher) bool { func repeatMatch(password string) []match.Match { var matches []match.Match - //Loop through password. 
if current == prev currentStreak++ else if currentStreak > 2 {buildMatch; currentStreak = 1} prev = current + // Loop through password. if current == prev currentStreak++ else if currentStreak > 2 {buildMatch; currentStreak = 1} prev = current var current, prev string currentStreak := 1 var i int @@ -29,9 +29,8 @@ func repeatMatch(password string) []match.Match { continue } - if strings.ToLower(current) == strings.ToLower(prev) { + if strings.EqualFold(current, prev) { currentStreak++ - } else if currentStreak > 2 { iPos := i - currentStreak jPos := i - 1 @@ -40,7 +39,8 @@ func repeatMatch(password string) []match.Match { I: iPos, J: jPos, Token: password[iPos : jPos+1], - DictionaryName: prev} + DictionaryName: prev, + } matchRepeat.Entropy = entropy.RepeatEntropy(matchRepeat) matches = append(matches, matchRepeat) currentStreak = 1 @@ -59,7 +59,8 @@ func repeatMatch(password string) []match.Match { I: iPos, J: jPos, Token: password[iPos : jPos+1], - DictionaryName: prev} + DictionaryName: prev, + } matchRepeat.Entropy = entropy.RepeatEntropy(matchRepeat) matches = append(matches, matchRepeat) } diff --git a/vendor/github.com/nbutton23/zxcvbn-go/matching/sequenceMatch.go b/vendor/github.com/ccojocar/zxcvbn-go/matching/sequenceMatch.go similarity index 88% rename from vendor/github.com/nbutton23/zxcvbn-go/matching/sequenceMatch.go rename to vendor/github.com/ccojocar/zxcvbn-go/matching/sequenceMatch.go index e0ed052293..6971945838 100644 --- a/vendor/github.com/nbutton23/zxcvbn-go/matching/sequenceMatch.go +++ b/vendor/github.com/ccojocar/zxcvbn-go/matching/sequenceMatch.go @@ -3,13 +3,13 @@ package matching import ( "strings" - "github.com/nbutton23/zxcvbn-go/entropy" - "github.com/nbutton23/zxcvbn-go/match" + "github.com/ccojocar/zxcvbn-go/entropy" + "github.com/ccojocar/zxcvbn-go/match" ) const sequenceMatcherName = "SEQ" -//FilterSequenceMatcher can be pass to zxcvbn-go.PasswordStrength to skip that matcher +// FilterSequenceMatcher can be pass to zxcvbn-go.PasswordStrength to skip that matcher func FilterSequenceMatcher(m match.Matcher) bool { return m.ID == sequenceMatcherName } @@ -64,10 +64,8 @@ func sequenceMatch(password string) []match.Match { matches = append(matches, matchSequence) } break - } else { - j++ } - + j++ } } i = j diff --git a/vendor/github.com/nbutton23/zxcvbn-go/matching/spatialMatch.go b/vendor/github.com/ccojocar/zxcvbn-go/matching/spatialMatch.go similarity index 59% rename from vendor/github.com/nbutton23/zxcvbn-go/matching/spatialMatch.go rename to vendor/github.com/ccojocar/zxcvbn-go/matching/spatialMatch.go index fd858f5d17..101ccea5e5 100644 --- a/vendor/github.com/nbutton23/zxcvbn-go/matching/spatialMatch.go +++ b/vendor/github.com/ccojocar/zxcvbn-go/matching/spatialMatch.go @@ -3,14 +3,14 @@ package matching import ( "strings" - "github.com/nbutton23/zxcvbn-go/adjacency" - "github.com/nbutton23/zxcvbn-go/entropy" - "github.com/nbutton23/zxcvbn-go/match" + "github.com/ccojocar/zxcvbn-go/adjacency" + "github.com/ccojocar/zxcvbn-go/entropy" + "github.com/ccojocar/zxcvbn-go/match" ) const spatialMatcherName = "SPATIAL" -//FilterSpatialMatcher can be pass to zxcvbn-go.PasswordStrength to skip that matcher +// FilterSpatialMatcher can be pass to zxcvbn-go.PasswordStrength to skip that matcher func FilterSpatialMatcher(m match.Matcher) bool { return m.ID == spatialMatcherName } @@ -25,39 +25,38 @@ func spatialMatch(password string) (matches []match.Match) { } func spatialMatchHelper(password string, graph adjacency.Graph) (matches []match.Match) { - for i := 
0; i < len(password)-1; { j := i + 1 - lastDirection := -99 //an int that it should never be! + lastDirection := -99 // an int that it should never be! turns := 0 shiftedCount := 0 for { prevChar := password[j-1] found := false - foundDirection := -1 + var foundDirection int curDirection := -1 - //My graphs seem to be wrong. . . and where the hell is qwerty + // My graphs seem to be wrong. . . and where the hell is qwerty adjacents := graph.Graph[string(prevChar)] - //Consider growing pattern by one character if j hasn't gone over the edge + // Consider growing pattern by one character if j hasn't gone over the edge if j < len(password) { curChar := password[j] for _, adj := range adjacents { curDirection++ - if strings.Index(adj, string(curChar)) != -1 { + if strings.Contains(adj, string(curChar)) { found = true foundDirection = curDirection if strings.Index(adj, string(curChar)) == 1 { - //index 1 in the adjacency means the key is shifted, 0 means unshifted: A vs a, % vs 5, etc. - //for example, 'q' is adjacent to the entry '2@'. @ is shifted w/ index 1, 2 is unshifted. + // index 1 in the adjacency means the key is shifted, 0 means unshifted: A vs a, % vs 5, etc. + // for example, 'q' is adjacent to the entry '2@'. @ is shifted w/ index 1, 2 is unshifted. shiftedCount++ } if lastDirection != foundDirection { - //adding a turn is correct even in the initial case when last_direction is null: - //every spatial pattern starts with a turn. + // adding a turn is correct even in the initial case when last_direction is null: + // every spatial pattern starts with a turn. turns++ lastDirection = foundDirection } @@ -66,12 +65,12 @@ func spatialMatchHelper(password string, graph adjacency.Graph) (matches []match } } - //if the current pattern continued, extend j and try to grow again + // if the current pattern continued, extend j and try to grow again if found { j++ } else { - //otherwise push the pattern discovered so far, if any... - //don't consider length 1 or 2 chains. + // otherwise push the pattern discovered so far, if any... + // don't consider length 1 or 2 chains. if j-i > 2 { matchSpc := match.Match{Pattern: "spatial", I: i, J: j - 1, Token: password[i:j], DictionaryName: graph.Name} matchSpc.Entropy = entropy.SpatialEntropy(matchSpc, turns, shiftedCount) diff --git a/vendor/github.com/nbutton23/zxcvbn-go/scoring/scoring.go b/vendor/github.com/ccojocar/zxcvbn-go/scoring/scoring.go similarity index 84% rename from vendor/github.com/nbutton23/zxcvbn-go/scoring/scoring.go rename to vendor/github.com/ccojocar/zxcvbn-go/scoring/scoring.go index 4f68a6dca6..dbe3318848 100644 --- a/vendor/github.com/nbutton23/zxcvbn-go/scoring/scoring.go +++ b/vendor/github.com/ccojocar/zxcvbn-go/scoring/scoring.go @@ -2,11 +2,12 @@ package scoring import ( "fmt" - "github.com/nbutton23/zxcvbn-go/entropy" - "github.com/nbutton23/zxcvbn-go/match" - "github.com/nbutton23/zxcvbn-go/utils/math" "math" "sort" + + "github.com/ccojocar/zxcvbn-go/entropy" + "github.com/ccojocar/zxcvbn-go/match" + zxcvbnmath "github.com/ccojocar/zxcvbn-go/utils/math" ) const ( @@ -15,7 +16,7 @@ const ( //adjust for your site accordingly if you use another hash function, possibly by //several orders of magnitude! 
singleGuess float64 = 0.010 - numAttackers float64 = 100 //Cores used to make guesses + numAttackers float64 = 100 // Cores used to make guesses secondsPerGuess float64 = singleGuess / numAttackers ) @@ -33,11 +34,11 @@ type MinEntropyMatch struct { /* MinimumEntropyMatchSequence returns the minimum entropy - Takes a list of overlapping matches, returns the non-overlapping sublist with - minimum entropy. O(nm) dp alg for length-n password with m candidate matches. + Takes a list of overlapping matches, returns the non-overlapping sublist with + minimum entropy. O(nm) dp alg for length-n password with m candidate matches. */ func MinimumEntropyMatchSequence(password string, matches []match.Match) MinEntropyMatch { - bruteforceCardinality := float64(entropy.CalcBruteForceCardinality(password)) + bruteforceCardinality := entropy.CalcBruteForceCardinality(password) upToK := make([]float64, len(password)) backPointers := make([]match.Match, len(password)) @@ -50,7 +51,7 @@ func MinimumEntropyMatchSequence(password string, matches []match.Match) MinEntr } i, j := match.I, match.J - //see if best entropy up to i-1 + entropy of match is less that current min at j + // see if best entropy up to i-1 + entropy of match is less that current min at j upTo := get(upToK, i-1) candidateEntropy := upTo + match.Entropy @@ -62,7 +63,7 @@ func MinimumEntropyMatchSequence(password string, matches []match.Match) MinEntr } } - //walk backwards and decode the best sequence + // walk backwards and decode the best sequence var matchSequence []match.Match passwordLen := len(password) passwordLen-- @@ -80,12 +81,13 @@ func MinimumEntropyMatchSequence(password string, matches []match.Match) MinEntr sort.Sort(match.Matches(matchSequence)) makeBruteForceMatch := func(i, j int) match.Match { - return match.Match{Pattern: "bruteforce", + return match.Match{ + Pattern: "bruteforce", I: i, J: j, Token: password[i : j+1], - Entropy: math.Log2(math.Pow(bruteforceCardinality, float64(j-i)))} - + Entropy: math.Log2(math.Pow(bruteforceCardinality, float64(j-i))), + } } k := 0 @@ -110,14 +112,16 @@ func MinimumEntropyMatchSequence(password string, matches []match.Match) MinEntr } crackTime := roundToXDigits(entropyToCrackTime(minEntropy), 3) - return MinEntropyMatch{Password: password, + return MinEntropyMatch{ + Password: password, Entropy: roundToXDigits(minEntropy, 3), MatchSequence: matchSequenceCopy, CrackTime: crackTime, CrackTimeDisplay: displayTime(crackTime), - Score: crackTimeToScore(crackTime)} - + Score: crackTimeToScore(crackTime), + } } + func get(a []float64, i int) float64 { if i < 0 || i >= len(a) { return float64(0) diff --git a/vendor/github.com/nbutton23/zxcvbn-go/utils/math/mathutils.go b/vendor/github.com/ccojocar/zxcvbn-go/utils/math/mathutils.go similarity index 100% rename from vendor/github.com/nbutton23/zxcvbn-go/utils/math/mathutils.go rename to vendor/github.com/ccojocar/zxcvbn-go/utils/math/mathutils.go diff --git a/vendor/github.com/nbutton23/zxcvbn-go/zxcvbn.go b/vendor/github.com/ccojocar/zxcvbn-go/zxcvbn.go similarity index 76% rename from vendor/github.com/nbutton23/zxcvbn-go/zxcvbn.go rename to vendor/github.com/ccojocar/zxcvbn-go/zxcvbn.go index 9c34b1c8c0..f3dc19e4c5 100644 --- a/vendor/github.com/nbutton23/zxcvbn-go/zxcvbn.go +++ b/vendor/github.com/ccojocar/zxcvbn-go/zxcvbn.go @@ -3,10 +3,10 @@ package zxcvbn import ( "time" - "github.com/nbutton23/zxcvbn-go/match" - "github.com/nbutton23/zxcvbn-go/matching" - "github.com/nbutton23/zxcvbn-go/scoring" - 
"github.com/nbutton23/zxcvbn-go/utils/math" + "github.com/ccojocar/zxcvbn-go/match" + "github.com/ccojocar/zxcvbn-go/matching" + "github.com/ccojocar/zxcvbn-go/scoring" + zxcvbnmath "github.com/ccojocar/zxcvbn-go/utils/math" ) // PasswordStrength takes a password, userInputs and optional filters and returns a MinEntropyMatch diff --git a/vendor/github.com/chavacava/garif/enums.go b/vendor/github.com/chavacava/garif/enums.go new file mode 100644 index 0000000000..dea2daf131 --- /dev/null +++ b/vendor/github.com/chavacava/garif/enums.go @@ -0,0 +1,41 @@ +package garif + +type ResultKind string + +// declare JSON values +const ( + _pass ResultKind = "pass" + _open ResultKind = "open" + _informational ResultKind = "informational" + _notApplicable ResultKind = "notApplicable" + _review ResultKind = "review" + _fail ResultKind = "fail" +) + +// create public visible constants with a namespace as enums +const ( + ResultKind_Pass ResultKind = _pass + ResultKind_Open ResultKind = _open + ResultKind_Informational ResultKind = _informational + ResultKind_NotApplicable ResultKind = _notApplicable + ResultKind_Review ResultKind = _review + ResultKind_Fail ResultKind = _fail +) + +type ResultLevel string + +// declare JSON values +const ( + _warning ResultLevel = "warning" + _error ResultLevel = "error" + _note ResultLevel = "note" + _none ResultLevel = "none" +) + +// create public visible constants with a namespace as enums +const ( + ResultLevel_Warning ResultLevel = _warning + ResultLevel_Error ResultLevel = _error + ResultLevel_Note ResultLevel = _note + ResultLevel_None ResultLevel = _none +) diff --git a/vendor/github.com/chavacava/garif/models.go b/vendor/github.com/chavacava/garif/models.go index 3668436a3c..f16a86136e 100644 --- a/vendor/github.com/chavacava/garif/models.go +++ b/vendor/github.com/chavacava/garif/models.go @@ -935,10 +935,10 @@ type Result struct { HostedViewerUri string `json:"hostedViewerUri,omitempty"` // A value that categorizes results by evaluation state. - Kind interface{} `json:"kind,omitempty"` + Kind ResultKind `json:"kind,omitempty"` // A value specifying the severity level of the result. - Level interface{} `json:"level,omitempty"` + Level ResultLevel `json:"level,omitempty"` // The set of locations where the result was detected. Specify only one location unless the problem indicated by the result can only be corrected by making a change at every specified location. 
Locations []*Location `json:"locations,omitempty"` diff --git a/vendor/github.com/curioswitch/go-reassign/.gitattributes b/vendor/github.com/curioswitch/go-reassign/.gitattributes new file mode 100644 index 0000000000..d020be8ea4 --- /dev/null +++ b/vendor/github.com/curioswitch/go-reassign/.gitattributes @@ -0,0 +1,2 @@ +*.go text eol=lf + diff --git a/vendor/github.com/curioswitch/go-reassign/.gitignore b/vendor/github.com/curioswitch/go-reassign/.gitignore new file mode 100644 index 0000000000..59fa33613b --- /dev/null +++ b/vendor/github.com/curioswitch/go-reassign/.gitignore @@ -0,0 +1,6 @@ +.idea +.VSCode +.envrc + +build +dist diff --git a/vendor/github.com/curioswitch/go-reassign/.golangci.yml b/vendor/github.com/curioswitch/go-reassign/.golangci.yml new file mode 100644 index 0000000000..e3bf79ae72 --- /dev/null +++ b/vendor/github.com/curioswitch/go-reassign/.golangci.yml @@ -0,0 +1,38 @@ +linters: + enable: + - asasalint + - bidichk + - bodyclose + - decorder + - durationcheck + - errchkjson + - errname + - errorlint + - execinquery + - exhaustive + - exportloopref + - gocritic + - goerr113 + - gofmt + - goimports + - goprintffuncname + - gosec + - importas + - misspell + - nolintlint + - nosnakecase + - prealloc + - predeclared + - promlinter + - revive + - stylecheck + - tagliatelle + - tenv + - thelper + - unconvert + - usestdlibvars +issues: + exclude-rules: + - path: magefile\.go + linters: + - deadcode diff --git a/vendor/github.com/curioswitch/go-reassign/.goreleaser.yaml b/vendor/github.com/curioswitch/go-reassign/.goreleaser.yaml new file mode 100644 index 0000000000..25f2dc0c17 --- /dev/null +++ b/vendor/github.com/curioswitch/go-reassign/.goreleaser.yaml @@ -0,0 +1,27 @@ +builds: + - main: ./cmd + env: + - CGO_ENABLED=0 + targets: + - linux_amd64 + - linux_arm64 + - darwin_amd64 + - darwin_arm64 + - windows_amd64 + - windows_arm64 +archives: + - format_overrides: + - goos: windows + format: zip +release: + mode: append +checksum: + name_template: 'checksums.txt' +snapshot: + name_template: "{{ incpatch .Version }}-next" +changelog: + sort: asc + filters: + exclude: + - '^docs:' + - '^test:' diff --git a/vendor/github.com/curioswitch/go-reassign/LICENSE b/vendor/github.com/curioswitch/go-reassign/LICENSE new file mode 100644 index 0000000000..9f18bde002 --- /dev/null +++ b/vendor/github.com/curioswitch/go-reassign/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) Choko (choko@curioswitch.org) + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/vendor/github.com/curioswitch/go-reassign/README.md b/vendor/github.com/curioswitch/go-reassign/README.md new file mode 100644 index 0000000000..ac9c131df2 --- /dev/null +++ b/vendor/github.com/curioswitch/go-reassign/README.md @@ -0,0 +1,53 @@ +# reassign + +A linter that detects when reassigning a top-level variable in another package. + +## Install + +```bash +go install github.com/curioswitch/go-reassign +``` + +## Usage + +```bash +reassign ./... +``` + +Change the pattern to match against variables being reassigned. By default, only `EOF` and `Err*` variables are checked. + +```bash +reassign -pattern ".*" ./... +``` + +## Background + +Package variables are commonly used to define sentinel errors which callers can use with `errors.Is` to determine the +type of a returned `error`. Some examples exist in the standard [os](https://pkg.go.dev/os#pkg-variables) library. + +Unfortunately, as with any variable, these are mutable, and it is possible to write this very dangerous code. + +```go +package main +import "io" +func bad() { + // breaks file reading + io.EOF = nil +} +``` + +This caused a new pattern for [constant errors](https://dave.cheney.net/2016/04/07/constant-errors) +to gain popularity, but they don't work well with improvements to the `errors` package in recent versions of Go and may +be considered to be non-idiomatic compared to normal `errors.New`. If we can catch reassignment of sentinel errors, we +gain much of the safety of constant errors. + +This linter will catch reassignment of variables in other packages. By default it intends to apply to as many codebases +as possible and only checks a restricted set of variable names, `EOF` and `ErrFoo`, to restrict to sentinel errors. +Package variable reassignment is generally confusing, though, and we recommend avoiding it for all variables, not just errors. +The `pattern` flag can be set to a regular expression to define what variables cannot be reassigned, and `.*` is +recommended if it works with your code. + +## Limitations + +If a variable shadows the name of an import, an assignment of a field in the variable will trigger the linter. Shadowing +can be confusing, so it's recommended to rename the variable. diff --git a/vendor/github.com/curioswitch/go-reassign/analyzer.go b/vendor/github.com/curioswitch/go-reassign/analyzer.go new file mode 100644 index 0000000000..48707adebe --- /dev/null +++ b/vendor/github.com/curioswitch/go-reassign/analyzer.go @@ -0,0 +1,13 @@ +package reassign + +import ( + "github.com/curioswitch/go-reassign/internal/analyzer" + "golang.org/x/tools/go/analysis" +) + +const FlagPattern = analyzer.FlagPattern + +// NewAnalyzer returns an analyzer for checking that package variables are not reassigned. 
+func NewAnalyzer() *analysis.Analyzer { + return analyzer.New() +} diff --git a/vendor/github.com/curioswitch/go-reassign/internal/analyzer/analyzer.go b/vendor/github.com/curioswitch/go-reassign/internal/analyzer/analyzer.go new file mode 100644 index 0000000000..e1b47d5b95 --- /dev/null +++ b/vendor/github.com/curioswitch/go-reassign/internal/analyzer/analyzer.go @@ -0,0 +1,84 @@ +package analyzer + +import ( + "fmt" + "go/ast" + "go/types" + "regexp" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" +) + +const FlagPattern = "pattern" + +func New() *analysis.Analyzer { + a := &analysis.Analyzer{ + Name: "reassign", + Doc: "Checks that package variables are not reassigned", + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Run: run, + } + a.Flags.String(FlagPattern, `^(Err.*|EOF)$`, "Pattern to match package variables against to prevent reassignment") + return a +} + +func run(pass *analysis.Pass) (interface{}, error) { + checkRE, err := regexp.Compile(pass.Analyzer.Flags.Lookup(FlagPattern).Value.String()) + if err != nil { + return nil, fmt.Errorf("invalid pattern: %w", err) + } + + inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + inspect.Preorder([]ast.Node{(*ast.AssignStmt)(nil), (*ast.UnaryExpr)(nil)}, func(node ast.Node) { + switch node := node.(type) { + case *ast.AssignStmt: + for _, lhs := range node.Lhs { + reportImported(pass, lhs, checkRE, "reassigning") + } + default: + // TODO(chokoswitch): Consider handling operations other than assignment on globals, for example + // taking their address. + } + }) + return nil, nil +} + +func reportImported(pass *analysis.Pass, expr ast.Expr, checkRE *regexp.Regexp, prefix string) { + switch x := expr.(type) { + case *ast.SelectorExpr: + if !checkRE.MatchString(x.Sel.Name) { + return + } + + selectIdent, ok := x.X.(*ast.Ident) + if !ok { + return + } + + if selectObj, ok := pass.TypesInfo.Uses[selectIdent]; ok { + if pkg, ok := selectObj.(*types.PkgName); !ok || pkg.Imported() == pass.Pkg { + return + } + } + + pass.Reportf(expr.Pos(), "%s variable %s in other package %s", prefix, x.Sel.Name, selectIdent.Name) + + case *ast.Ident: + use, ok := pass.TypesInfo.Uses[x].(*types.Var) + if !ok { + return + } + + if use.Pkg() == pass.Pkg { + return + } + + if !checkRE.MatchString(x.Name) { + return + } + + pass.Reportf(expr.Pos(), "%s variable %s from other package %s", prefix, x.Name, use.Pkg().Path()) + } +} diff --git a/vendor/github.com/daixiang0/gci/pkg/config/config.go b/vendor/github.com/daixiang0/gci/pkg/config/config.go new file mode 100644 index 0000000000..51f6ccf3b7 --- /dev/null +++ b/vendor/github.com/daixiang0/gci/pkg/config/config.go @@ -0,0 +1,90 @@ +package config + +import ( + "sort" + "strings" + + "gopkg.in/yaml.v3" + + "github.com/daixiang0/gci/pkg/section" +) + +var defaultOrder = map[string]int{ + section.StandardType: 0, + section.DefaultType: 1, + section.CustomType: 2, + section.BlankType: 3, + section.DotType: 4, + section.AliasType: 5, +} + +type BoolConfig struct { + NoInlineComments bool `yaml:"no-inlineComments"` + NoPrefixComments bool `yaml:"no-prefixComments"` + Debug bool `yaml:"-"` + SkipGenerated bool `yaml:"skipGenerated"` + SkipVendor bool `yaml:"skipVendor"` + CustomOrder bool `yaml:"customOrder"` +} + +type Config struct { + BoolConfig + Sections section.SectionList + SectionSeparators section.SectionList +} + +type YamlConfig struct { + Cfg BoolConfig `yaml:",inline"` + SectionStrings []string 
`yaml:"sections"` + SectionSeparatorStrings []string `yaml:"sectionseparators"` +} + +func (g YamlConfig) Parse() (*Config, error) { + var err error + + sections, err := section.Parse(g.SectionStrings) + if err != nil { + return nil, err + } + if sections == nil { + sections = section.DefaultSections() + } + + // if default order sorted sections + if !g.Cfg.CustomOrder { + sort.Slice(sections, func(i, j int) bool { + sectionI, sectionJ := sections[i].Type(), sections[j].Type() + + if strings.Compare(sectionI, sectionJ) == 0 { + return strings.Compare(sections[i].String(), sections[j].String()) < 0 + } + return defaultOrder[sectionI] < defaultOrder[sectionJ] + }) + } + + sectionSeparators, err := section.Parse(g.SectionSeparatorStrings) + if err != nil { + return nil, err + } + if sectionSeparators == nil { + sectionSeparators = section.DefaultSectionSeparators() + } + + return &Config{g.Cfg, sections, sectionSeparators}, nil +} + +func ParseConfig(in string) (*Config, error) { + config := YamlConfig{} + + err := yaml.Unmarshal([]byte(in), &config) + if err != nil { + return nil, err + } + + gciCfg, err := config.Parse() + if err != nil { + return nil, err + } + + return gciCfg, nil +} diff --git a/vendor/github.com/daixiang0/gci/pkg/format/format.go b/vendor/github.com/daixiang0/gci/pkg/format/format.go new file mode 100644 index 0000000000..062701d2e4 --- /dev/null +++ b/vendor/github.com/daixiang0/gci/pkg/format/format.go @@ -0,0 +1,46 @@ +package format + +import ( + "fmt" + + "github.com/daixiang0/gci/pkg/config" + "github.com/daixiang0/gci/pkg/log" + "github.com/daixiang0/gci/pkg/parse" + "github.com/daixiang0/gci/pkg/section" + "github.com/daixiang0/gci/pkg/specificity" +) + +type Block struct { + Start, End int +} + +type resultMap map[string][]*Block + +func Format(data []*parse.GciImports, cfg *config.Config) (resultMap, error) { + result := make(resultMap, len(cfg.Sections)) + for _, d := range data { + // determine match specificity for every available section + var bestSection section.Section + var bestSectionSpecificity specificity.MatchSpecificity = specificity.MisMatch{} + for _, section := range cfg.Sections { + sectionSpecificity := section.MatchSpecificity(d) + if sectionSpecificity.IsMoreSpecific(specificity.MisMatch{}) && sectionSpecificity.Equal(bestSectionSpecificity) { + // specificity is identical + // return nil, section.EqualSpecificityMatchError{} + return nil, nil + } + if sectionSpecificity.IsMoreSpecific(bestSectionSpecificity) { + // better match found + bestSectionSpecificity = sectionSpecificity + bestSection = section + } + } + if bestSection == nil { + return nil, section.NoMatchingSectionForImportError{Imports: d} + } + log.L().Debug(fmt.Sprintf("Matched import %v to section %s", d, bestSection)) + result[bestSection.String()] = append(result[bestSection.String()], &Block{d.Start, d.End}) + } + + return result, nil +} diff --git a/vendor/github.com/daixiang0/gci/pkg/gci/gci.go b/vendor/github.com/daixiang0/gci/pkg/gci/gci.go index 7efa576ca9..163e95a861 100644 --- a/vendor/github.com/daixiang0/gci/pkg/gci/gci.go +++ b/vendor/github.com/daixiang0/gci/pkg/gci/gci.go @@ -2,382 +2,228 @@ package gci import ( "bytes" + "errors" "fmt" - "io" - "io/ioutil" + goFormat "go/format" "os" - "os/exec" - "path/filepath" - "sort" - "strings" + "sync" + + "github.com/hexops/gotextdiff" + "github.com/hexops/gotextdiff/myers" + "github.com/hexops/gotextdiff/span" + "golang.org/x/sync/errgroup" + + "github.com/daixiang0/gci/pkg/config" + 
"github.com/daixiang0/gci/pkg/format" + "github.com/daixiang0/gci/pkg/io" + "github.com/daixiang0/gci/pkg/log" + "github.com/daixiang0/gci/pkg/parse" + "github.com/daixiang0/gci/pkg/section" + "github.com/daixiang0/gci/pkg/utils" ) -const ( - // pkg type: standard, remote, local - standard int = iota - // 3rd-party packages - remote - local - - commentFlag = "//" -) - -var ( - importStartFlag = []byte(` -import ( -`) - importEndFlag = []byte(` -) -`) -) - -type FlagSet struct { - LocalFlag []string - DoWrite, DoDiff *bool +func LocalFlagsToSections(localFlags []string) section.SectionList { + sections := section.DefaultSections() + // Add all local arguments as ImportPrefix sections + // for _, l := range localFlags { + // sections = append(sections, section.Section{l, nil, nil}) + // } + return sections } -type pkg struct { - list map[int][]string - comment map[string]string - alias map[string]string -} - -// ParseLocalFlag takes a comma-separated list of -// package-name-prefixes (as passed to the "-local" flag), and splits -// it in to a list. This is different than strings.Split in that it -// handles the empty string and empty entries in the list. -func ParseLocalFlag(str string) []string { - return strings.FieldsFunc(str, func(c rune) bool { return c == ',' }) +func PrintFormattedFiles(paths []string, cfg config.Config) error { + return processStdInAndGoFilesInPaths(paths, cfg, func(filePath string, unmodifiedFile, formattedFile []byte) error { + fmt.Print(string(formattedFile)) + return nil + }) } -func newPkg(data [][]byte, localFlag []string) *pkg { - listMap := make(map[int][]string) - commentMap := make(map[string]string) - aliasMap := make(map[string]string) - p := &pkg{ - list: listMap, - comment: commentMap, - alias: aliasMap, - } - - formatData := make([]string, 0) - // remove all empty lines - for _, v := range data { - if len(v) > 0 { - formatData = append(formatData, strings.TrimSpace(string(v))) - } - } - - n := len(formatData) - for i := n - 1; i >= 0; i-- { - line := formatData[i] - - // check commentFlag: - // 1. one line commentFlag - // 2. 
commentFlag after import path - commentIndex := strings.Index(line, commentFlag) - if commentIndex == 0 { - // comment in the last line is useless, ignore it - if i+1 >= n { - continue - } - pkg, _, _ := getPkgInfo(formatData[i+1], strings.Index(formatData[i+1], commentFlag) >= 0) - p.comment[pkg] = line - continue - } else if commentIndex > 0 { - pkg, alias, comment := getPkgInfo(line, true) - if alias != "" { - p.alias[pkg] = alias - } - - p.comment[pkg] = comment - pkgType := getPkgType(pkg, localFlag) - p.list[pkgType] = append(p.list[pkgType], pkg) - continue +func WriteFormattedFiles(paths []string, cfg config.Config) error { + return processGoFilesInPaths(paths, cfg, func(filePath string, unmodifiedFile, formattedFile []byte) error { + if bytes.Equal(unmodifiedFile, formattedFile) { + log.L().Debug(fmt.Sprintf("Skipping correctly formatted File: %s", filePath)) + return nil } - - pkg, alias, _ := getPkgInfo(line, false) - - if alias != "" { - p.alias[pkg] = alias - } - - pkgType := getPkgType(pkg, localFlag) - p.list[pkgType] = append(p.list[pkgType], pkg) - } - - return p + log.L().Info(fmt.Sprintf("Writing formatted File: %s", filePath)) + return os.WriteFile(filePath, formattedFile, 0o644) + }) } -// fmt format import pkgs as expected -func (p *pkg) fmt() []byte { - ret := make([]string, 0, 100) - - for pkgType := range []int{standard, remote, local} { - sort.Strings(p.list[pkgType]) - for _, s := range p.list[pkgType] { - if p.comment[s] != "" { - l := fmt.Sprintf("%s%s%s%s", linebreak, indent, p.comment[s], linebreak) - ret = append(ret, l) - } - - if p.alias[s] != "" { - s = fmt.Sprintf("%s%s%s%s%s", indent, p.alias[s], blank, s, linebreak) - } else { - s = fmt.Sprintf("%s%s%s", indent, s, linebreak) - } - - ret = append(ret, s) +func ListUnFormattedFiles(paths []string, cfg config.Config) error { + return processGoFilesInPaths(paths, cfg, func(filePath string, unmodifiedFile, formattedFile []byte) error { + if bytes.Equal(unmodifiedFile, formattedFile) { + return nil } - - if len(p.list[pkgType]) > 0 { - ret = append(ret, linebreak) - } - } - if len(ret) > 0 && ret[len(ret)-1] == linebreak { - ret = ret[:len(ret)-1] - } - - // remove duplicate empty lines - s1 := fmt.Sprintf("%s%s%s%s", linebreak, linebreak, linebreak, indent) - s2 := fmt.Sprintf("%s%s%s", linebreak, linebreak, indent) - return []byte(strings.ReplaceAll(strings.Join(ret, ""), s1, s2)) + fmt.Println(filePath) + return nil + }) } -// getPkgInfo assume line is a import path, and return (path, alias, comment) -func getPkgInfo(line string, comment bool) (string, string, string) { - if comment { - s := strings.Split(line, commentFlag) - pkgArray := strings.Split(s[0], blank) - if len(pkgArray) > 1 { - return pkgArray[1], pkgArray[0], fmt.Sprintf("%s%s%s", commentFlag, blank, strings.TrimSpace(s[1])) - } else { - return strings.TrimSpace(pkgArray[0]), "", fmt.Sprintf("%s%s%s", commentFlag, blank, strings.TrimSpace(s[1])) - } - } else { - pkgArray := strings.Split(line, blank) - if len(pkgArray) > 1 { - return pkgArray[1], pkgArray[0], "" - } else { - return pkgArray[0], "", "" - } - } +func DiffFormattedFiles(paths []string, cfg config.Config) error { + return processStdInAndGoFilesInPaths(paths, cfg, func(filePath string, unmodifiedFile, formattedFile []byte) error { + fileURI := span.URIFromPath(filePath) + edits := myers.ComputeEdits(fileURI, string(unmodifiedFile), string(formattedFile)) + unifiedEdits := gotextdiff.ToUnified(filePath, filePath, string(unmodifiedFile), edits) + fmt.Printf("%v", unifiedEdits) + 
return nil + }) +} + +func DiffFormattedFilesToArray(paths []string, cfg config.Config, diffs *[]string, lock *sync.Mutex) error { + log.InitLogger() + defer log.L().Sync() + return processStdInAndGoFilesInPaths(paths, cfg, func(filePath string, unmodifiedFile, formattedFile []byte) error { + fileURI := span.URIFromPath(filePath) + edits := myers.ComputeEdits(fileURI, string(unmodifiedFile), string(formattedFile)) + unifiedEdits := gotextdiff.ToUnified(filePath, filePath, string(unmodifiedFile), edits) + lock.Lock() + *diffs = append(*diffs, fmt.Sprint(unifiedEdits)) + lock.Unlock() + return nil + }) } -func getPkgType(line string, localFlag []string) int { - pkgName := strings.Trim(line, "\"\\`") - - for _, localPkg := range localFlag { - if strings.HasPrefix(pkgName, localPkg) { - return local - } - } - - if isStandardPackage(pkgName) { - return standard - } +type fileFormattingFunc func(filePath string, unmodifiedFile, formattedFile []byte) error - return remote +func processStdInAndGoFilesInPaths(paths []string, cfg config.Config, fileFunc fileFormattingFunc) error { + return ProcessFiles(io.StdInGenerator.Combine(io.GoFilesInPathsGenerator(paths, cfg.SkipVendor)), cfg, fileFunc) } -const ( - blank = " " - indent = "\t" - linebreak = "\n" -) - -func diff(b1, b2 []byte, filename string) (data []byte, err error) { - f1, err := writeTempFile("", "gci", b1) - if err != nil { - return - } - defer os.Remove(f1) - - f2, err := writeTempFile("", "gci", b2) - if err != nil { - return - } - defer os.Remove(f2) - - cmd := "diff" - - data, err = exec.Command(cmd, "-u", f1, f2).CombinedOutput() - if len(data) > 0 { - // diff exits with a non-zero status when the files don't match. - // Ignore that failure as long as we get output. - return replaceTempFilename(data, filename) - } - return +func processGoFilesInPaths(paths []string, cfg config.Config, fileFunc fileFormattingFunc) error { + return ProcessFiles(io.GoFilesInPathsGenerator(paths, cfg.SkipVendor), cfg, fileFunc) } -func writeTempFile(dir, prefix string, data []byte) (string, error) { - file, err := ioutil.TempFile(dir, prefix) +func ProcessFiles(fileGenerator io.FileGeneratorFunc, cfg config.Config, fileFunc fileFormattingFunc) error { + var taskGroup errgroup.Group + files, err := fileGenerator() if err != nil { - return "", err - } - _, err = file.Write(data) - if err1 := file.Close(); err == nil { - err = err1 - } - if err != nil { - os.Remove(file.Name()) - return "", err - } - return file.Name(), nil -} - -// replaceTempFilename replaces temporary filenames in diff with actual one. -// -// --- /tmp/gofmt316145376 2017-02-03 19:13:00.280468375 -0500 -// +++ /tmp/gofmt617882815 2017-02-03 19:13:00.280468375 -0500 -// ... -// -> -// --- path/to/file.go.orig 2017-02-03 19:13:00.280468375 -0500 -// +++ path/to/file.go 2017-02-03 19:13:00.280468375 -0500 -// ... -func replaceTempFilename(diff []byte, filename string) ([]byte, error) { - bs := bytes.SplitN(diff, []byte{'\n'}, 3) - if len(bs) < 3 { - return nil, fmt.Errorf("got unexpected diff for %s", filename) - } - // Preserve timestamps. - var t0, t1 []byte - if i := bytes.LastIndexByte(bs[0], '\t'); i != -1 { - t0 = bs[0][i:] + return err } - if i := bytes.LastIndexByte(bs[1], '\t'); i != -1 { - t1 = bs[1][i:] + for _, file := range files { + // run file processing in parallel + taskGroup.Go(processingFunc(file, cfg, fileFunc)) } - // Always print filepath with slash separator. 
- f := filepath.ToSlash(filename) - bs[0] = []byte(fmt.Sprintf("--- %s%s", f+".orig", t0)) - bs[1] = []byte(fmt.Sprintf("+++ %s%s", f, t1)) - return bytes.Join(bs, []byte{'\n'}), nil + return taskGroup.Wait() } -func visitFile(set *FlagSet) filepath.WalkFunc { - return func(path string, f os.FileInfo, err error) error { - if err == nil && isGoFile(f) { - err = processFile(path, os.Stdout, set) +func processingFunc(file io.FileObj, cfg config.Config, formattingFunc fileFormattingFunc) func() error { + return func() error { + unmodifiedFile, formattedFile, err := LoadFormatGoFile(file, cfg) + if err != nil { + // if errors.Is(err, FileParsingError{}) { + // // do not process files that are improperly formatted + // return nil + // } + return err } - return err + return formattingFunc(file.Path(), unmodifiedFile, formattedFile) } } -func WalkDir(path string, set *FlagSet) error { - return filepath.Walk(path, visitFile(set)) -} - -func isGoFile(f os.FileInfo) bool { - // ignore non-Go files - name := f.Name() - return !f.IsDir() && !strings.HasPrefix(name, ".") && strings.HasSuffix(name, ".go") -} +func LoadFormatGoFile(file io.FileObj, cfg config.Config) (src, dist []byte, err error) { + src, err = file.Load() + log.L().Debug(fmt.Sprintf("Loaded File: %s", file.Path())) + if err != nil { + return nil, nil, err + } -func ProcessFile(filename string, out io.Writer, set *FlagSet) error { - return processFile(filename, out, set) + return LoadFormat(src, file.Path(), cfg) } -func processFile(filename string, out io.Writer, set *FlagSet) error { - var err error +func LoadFormat(in []byte, path string, cfg config.Config) (src, dist []byte, err error) { + src = in - f, err := os.Open(filename) - if err != nil { - return err + if cfg.SkipGenerated && parse.IsGeneratedFileByComment(string(src)) { + return src, src, nil } - defer f.Close() - src, err := ioutil.ReadAll(f) + imports, headEnd, tailStart, cStart, cEnd, err := parse.ParseFile(src, path) if err != nil { - return err + if errors.Is(err, parse.NoImportError{}) { + return src, src, nil + } + return nil, nil, err } - ori := make([]byte, len(src)) - copy(ori, src) - start := bytes.Index(src, importStartFlag) - // in case no importStartFlag or importStartFlag exist in the commentFlag - if start < 0 { - fmt.Printf("skip file %s since no import\n", filename) - return nil + // do not do format if only one import + if len(imports) <= 1 { + return src, src, nil } - end := bytes.Index(src[start:], importEndFlag) + start - ret := bytes.Split(src[start+len(importStartFlag):end], []byte(linebreak)) + result, err := format.Format(imports, &cfg) + if err != nil { + return nil, nil, err + } - p := newPkg(ret, set.LocalFlag) + firstWithIndex := true - res := append(src[:start+len(importStartFlag)], append(p.fmt(), src[end+1:]...)...) + var body []byte - if !bytes.Equal(ori, res) { - if *set.DoWrite { - // On Windows, we need to re-set the permissions from the file. See golang/go#38225. - var perms os.FileMode - if fi, err := os.Stat(filename); err == nil { - perms = fi.Mode() & os.ModePerm + // order by section list + for _, s := range cfg.Sections { + if len(result[s.String()]) > 0 { + if len(body) > 0 { + body = append(body, utils.Linebreak) } - err = ioutil.WriteFile(filename, res, perms) - if err != nil { - return err + for _, d := range result[s.String()] { + AddIndent(&body, &firstWithIndex) + body = append(body, src[d.Start:d.End]...) 
} } - if *set.DoDiff { - data, err := diff(ori, res, filename) - if err != nil { - return fmt.Errorf("failed to diff: %v", err) - } - fmt.Printf("diff -u %s %s\n", filepath.ToSlash(filename+".orig"), filepath.ToSlash(filename)) - if _, err := out.Write(data); err != nil { - return fmt.Errorf("failed to write: %v", err) - } - } - } - if !*set.DoWrite && !*set.DoDiff { - if _, err = out.Write(res); err != nil { - return fmt.Errorf("failed to write: %v", err) - } } - return err -} - -// Run return source and result in []byte if succeed -func Run(filename string, set *FlagSet) ([]byte, []byte, error) { - var err error + head := make([]byte, headEnd) + copy(head, src[:headEnd]) + tail := make([]byte, len(src)-tailStart) + copy(tail, src[tailStart:]) - f, err := os.Open(filename) - if err != nil { - return nil, nil, err + // ensure C + if cStart != 0 { + head = append(head, src[cStart:cEnd]...) + head = append(head, utils.Linebreak) } - defer f.Close() - src, err := ioutil.ReadAll(f) - if err != nil { - return nil, nil, err + // add beginning of import block + head = append(head, `import (`...) + head = append(head, utils.Linebreak) + // add end of import block + body = append(body, []byte{utils.RightParenthesis, utils.Linebreak}...) + + log.L().Debug(fmt.Sprintf("head:\n%s", head)) + log.L().Debug(fmt.Sprintf("body:\n%s", body)) + if len(tail) > 20 { + log.L().Debug(fmt.Sprintf("tail:\n%s", tail[:20])) + } else { + log.L().Debug(fmt.Sprintf("tail:\n%s", tail)) } - ori := make([]byte, len(src)) - copy(ori, src) - start := bytes.Index(src, importStartFlag) - // in case no importStartFlag or importStartFlag exist in the commentFlag - if start < 0 { - return nil, nil, nil + var totalLen int + slices := [][]byte{head, body, tail} + for _, s := range slices { + totalLen += len(s) } - end := bytes.Index(src[start:], importEndFlag) + start - - // in case import flags are part of a codegen template, or otherwise "wrong" - if start+len(importStartFlag) > end { - return nil, nil, nil + dist = make([]byte, totalLen) + var i int + for _, s := range slices { + i += copy(dist[i:], s) } - ret := bytes.Split(src[start+len(importStartFlag):end], []byte(linebreak)) + // remove ^M(\r\n) from Win to Unix + dist = bytes.ReplaceAll(dist, []byte{utils.WinLinebreak}, []byte{utils.Linebreak}) - p := newPkg(ret, set.LocalFlag) + log.L().Debug(fmt.Sprintf("raw:\n%s", dist)) + dist, err = goFormat.Source(dist) + if err != nil { + return nil, nil, err + } - res := append(src[:start+len(importStartFlag)], append(p.fmt(), src[end+1:]...)...) 
+ return src, dist, nil +} - if bytes.Equal(ori, res) { - return ori, nil, nil +func AddIndent(in *[]byte, first *bool) { + if *first { + *first = false + return } - - return ori, res, nil + *in = append(*in, utils.Indent) } diff --git a/vendor/github.com/daixiang0/gci/pkg/gci/testdata.go b/vendor/github.com/daixiang0/gci/pkg/gci/testdata.go new file mode 100644 index 0000000000..77c06dc630 --- /dev/null +++ b/vendor/github.com/daixiang0/gci/pkg/gci/testdata.go @@ -0,0 +1,1278 @@ +package gci + +type Cases struct { + name, config, in, out string +} + +var commonConfig = `sections: + - Standard + - Default + - Prefix(github.com/daixiang0) +` + +var testCases = []Cases{ + { + "already-good", + + commonConfig, + + `package main + +import ( + "fmt" + + g "github.com/golang" + + "github.com/daixiang0/gci" +) +`, + `package main + +import ( + "fmt" + + g "github.com/golang" + + "github.com/daixiang0/gci" +) +`, + }, + { + "blank-format", + + commonConfig, + + `package main +import ( + "fmt" + + // comment + g "github.com/golang" // comment + + "github.com/daixiang0/gci" +) +`, + `package main + +import ( + "fmt" + + // comment + g "github.com/golang" // comment + + "github.com/daixiang0/gci" +) +`, + }, + { + "cgo-block", + + commonConfig, + + `package main + +import ( + /* + #include "types.h" + */ + "C" +) +`, + `package main + +import ( + /* + #include "types.h" + */ + "C" +) +`, + }, + { + "cgo-block-after-import", + + commonConfig, + + `package main + +import ( + "fmt" + + "github.com/daixiang0/gci" + g "github.com/golang" +) + +// #cgo CFLAGS: -DPNG_DEBUG=1 +// #cgo amd64 386 CFLAGS: -DX86=1 +// #cgo LDFLAGS: -lpng +// #include +import "C" +`, + `package main + +// #cgo CFLAGS: -DPNG_DEBUG=1 +// #cgo amd64 386 CFLAGS: -DX86=1 +// #cgo LDFLAGS: -lpng +// #include +import "C" + +import ( + "fmt" + + g "github.com/golang" + + "github.com/daixiang0/gci" +) +`, + }, + { + "cgo-block-before-import", + + commonConfig, + + `package main + +// #cgo CFLAGS: -DPNG_DEBUG=1 +// #cgo amd64 386 CFLAGS: -DX86=1 +// #cgo LDFLAGS: -lpng +// #include +import "C" + +import ( + "fmt" + + "github.com/daixiang0/gci" + + g "github.com/golang" +) +`, + `package main + +// #cgo CFLAGS: -DPNG_DEBUG=1 +// #cgo amd64 386 CFLAGS: -DX86=1 +// #cgo LDFLAGS: -lpng +// #include +import "C" + +import ( + "fmt" + + g "github.com/golang" + + "github.com/daixiang0/gci" +) +`, + }, + { + "cgo-block-mixed", + + commonConfig, + + `package main + +import ( + /* #include "types.h" + */"C" +) +`, + `package main + +import ( + /* #include "types.h" + */"C" +) +`, + }, + { + "cgo-block-mixed-with-content", + + commonConfig, + + `package main + +import ( + /* #include "types.h" + #include "other.h" */"C" +) +`, + `package main + +import ( + /* #include "types.h" + #include "other.h" */"C" +) +`, + }, + { + "cgo-block-prefix", + + commonConfig, + + `package main + +import ( + /* #include "types.h" */ "C" +) +`, + `package main + +import ( + /* #include "types.h" */ "C" +) +`, + }, + { + "cgo-block-single-line", + + commonConfig, + + `package main + +import ( + /* #include "types.h" */ + "C" +) +`, + `package main + +import ( + /* #include "types.h" */ + "C" +) +`, + }, + { + "cgo-line", + + commonConfig, + + `package main + +import ( + // #include "types.h" + "C" +) +`, + `package main + +import ( + // #include "types.h" + "C" +) +`, + }, + { + "cgo-multiline", + + commonConfig, + + `package main + +import ( + // #include "types.h" + // #include "other.h" + "C" +) +`, + `package main + +import ( + // #include "types.h" + // #include 
"other.h" + "C" +) +`, + }, + { + "cgo-single", + + commonConfig, + + `package main + +import ( + "fmt" + + "github.com/daixiang0/gci" +) + +import "C" + +import "github.com/golang" + +import ( + "github.com/daixiang0/gci" +) +`, + `package main + +import "C" + +import ( + "fmt" + + "github.com/golang" + + "github.com/daixiang0/gci" +) +`, + }, + { + "comment", + + commonConfig, + + `package main +import ( + //Do not forget to run Gci + "fmt" +) +`, + `package main +import ( + //Do not forget to run Gci + "fmt" +) +`, + }, + { + "comment-before-import", + + commonConfig, + + `package main + +// comment +import ( + "fmt" + "os" + + "github.com/daixiang0/gci" +) +`, + `package main + +// comment +import ( + "fmt" + "os" + + "github.com/daixiang0/gci" +) +`, + }, + { + "comment-in-the-tail", + + `sections: + - Standard + - Default + - Prefix(github.com/daixiang0) +`, + `package main + +import ( + "fmt" + + g "github.com/golang" + + "github.com/daixiang0/gci" +) + +type test int + +// test +`, + `package main + +import ( + "fmt" + + g "github.com/golang" + + "github.com/daixiang0/gci" +) + +type test int + +// test +`, + }, + { + "comment-top", + + commonConfig, + + `package main + +import ( + "os" // https://pkg.go.dev/os + // https://pkg.go.dev/fmt + "fmt" +) +`, + `package main + +import ( + // https://pkg.go.dev/fmt + "fmt" + "os" // https://pkg.go.dev/os +) +`, + }, + { + "comment-whithout-whitespace", + + commonConfig, + + `package proc + +import ( + "context"// no separating whitespace here //nolint:confusion +) +`, + `package proc + +import ( + "context"// no separating whitespace here //nolint:confusion +) +`, + }, + { + "comment-with-slashslash", + + commonConfig, + + `package main + +import ( + "fmt" // https://pkg.go.dev/fmt +) +`, + `package main + +import ( + "fmt" // https://pkg.go.dev/fmt +) +`, + }, + { + "custom-order", + + `customOrder: true +sections: + - Prefix(github.com/daixiang0) + - Default + - Standard +`, + `package main + +import ( + "fmt" + + g "github.com/golang" + + "github.com/daixiang0/a" +) +`, + `package main + +import ( + "github.com/daixiang0/a" + + g "github.com/golang" + + "fmt" +) +`, + }, + { + "default-order", + + `sections: + - Standard + - Prefix(github.com/daixiang0) + - Default +`, + `package main + +import ( + "fmt" + + g "github.com/golang" + + "github.com/daixiang0/a" +) +`, + `package main + +import ( + "fmt" + + g "github.com/golang" + + "github.com/daixiang0/a" +) +`, + }, + { + "dot-and-blank", + + `sections: + - Standard + - Default + - Prefix(github.com/daixiang0) + - Blank + - Dot +`, + `package main + +import ( + "fmt" + + g "github.com/golang" + . "github.com/golang/dot" + _ "github.com/golang/blank" + + "github.com/daixiang0/a" + "github.com/daixiang0/gci" + "github.com/daixiang0/gci/subtest" + . "github.com/daixiang0/gci/dot" + _ "github.com/daixiang0/gci/blank" +) +`, + `package main + +import ( + "fmt" + + g "github.com/golang" + + "github.com/daixiang0/a" + "github.com/daixiang0/gci" + "github.com/daixiang0/gci/subtest" + + _ "github.com/daixiang0/gci/blank" + _ "github.com/golang/blank" + + . "github.com/daixiang0/gci/dot" + . 
"github.com/golang/dot" +) +`, + }, + { + "duplicate-imports", + + `sections: + - Standard + - Default + - Prefix(github.com/daixiang0) +`, + `package main + +import ( + "fmt" + + g "github.com/golang" + + a "github.com/daixiang0/gci" + "github.com/daixiang0/gci" +) +`, + `package main + +import ( + "fmt" + + g "github.com/golang" + + "github.com/daixiang0/gci" + a "github.com/daixiang0/gci" +) +`, + }, + { + "grouped-multiple-custom", + + `sections: + - Standard + - Default + - Prefix(github.com/daixiang0,gitlab.com/daixiang0,daixiang0) +`, + `package main + +import ( + "daixiang0/lib1" + "fmt" + "github.com/daixiang0/gci" + "gitlab.com/daixiang0/gci" + g "github.com/golang" + "github.com/daixiang0/gci/subtest" +) +`, + `package main + +import ( + "fmt" + + g "github.com/golang" + + "daixiang0/lib1" + "github.com/daixiang0/gci" + "github.com/daixiang0/gci/subtest" + "gitlab.com/daixiang0/gci" +) +`, + }, + { + "leading-comment", + + commonConfig, + + `package main + +import ( + // foo + "fmt" +) +`, + `package main + +import ( + // foo + "fmt" +) +`, + }, + { + "linebreak", + + `sections: + - Standard + - Default + - Prefix(github.com/daixiang0) +`, + `package main + +import ( + g "github.com/golang" + + "fmt" + + "github.com/daixiang0/gci" + +) +`, + `package main + +import ( + "fmt" + + g "github.com/golang" + + "github.com/daixiang0/gci" +) +`, + }, + { + "linebreak-no-custom", + + `sections: + - Standard + - Default + - Prefix(github.com/daixiang0) +`, + `package main + +import ( + g "github.com/golang" + + "fmt" + +) +`, + `package main + +import ( + "fmt" + + g "github.com/golang" +) +`, + }, + { + "mismatch-section", + + `sections: + - Standard + - Default + - Prefix(github.com/daixiang0) + - Prefix(github.com/daixiang0/gci) +`, + `package main + +import ( + "fmt" + + g "github.com/golang" + + "github.com/daixiang0/gci" +) +`, + `package main + +import ( + "fmt" + + g "github.com/golang" + + "github.com/daixiang0/gci" +) +`, + }, + { + "multiple-custom", + + `sections: + - Standard + - Default + - Prefix(github.com/daixiang0) + - Prefix(github.com/daixiang0/gci) + - Prefix(github.com/daixiang0/gci/subtest) +`, + `package main + +import ( + "fmt" + + g "github.com/golang" + + "github.com/daixiang0/a" + "github.com/daixiang0/gci" + "github.com/daixiang0/gci/subtest" +) +`, + `package main + +import ( + "fmt" + + g "github.com/golang" + + "github.com/daixiang0/a" + + "github.com/daixiang0/gci" + + "github.com/daixiang0/gci/subtest" +) +`, + }, + { + "multiple-imports", + + commonConfig, + + `package main + +import "fmt" + +import "context" + +import ( + "os" + + "github.com/daixiang0/test" +) + +import "math" + + +// main +func main() { +} +`, + `package main + +import ( + "context" + "fmt" + "math" + "os" + + "github.com/daixiang0/test" +) + +// main +func main() { +} +`, + }, + { + "multiple-line-comment", + + commonConfig, + + `package proc + +import ( + "context" // in-line comment + "fmt" + "os" + + //nolint:depguard // A multi-line comment explaining why in + // this one case it's OK to use os/exec even though depguard + // is configured to force us to use dlib/exec instead. + "os/exec" + + "golang.org/x/sys/unix" + "github.com/local/dlib/dexec" +) +`, + `package proc + +import ( + "context" // in-line comment + "fmt" + "os" + //nolint:depguard // A multi-line comment explaining why in + // this one case it's OK to use os/exec even though depguard + // is configured to force us to use dlib/exec instead. 
+ "os/exec" + + "github.com/local/dlib/dexec" + "golang.org/x/sys/unix" +) +`, + }, + { + "nochar-after-import", + + commonConfig, + + `package main + +import ( + "fmt" +) +`, + `package main + +import ( + "fmt" +) +`, + }, + { + "no-format", + + commonConfig, + + `package main + +import( +"fmt" + +g "github.com/golang" + +"github.com/daixiang0/gci" +) +`, + `package main + +import ( + "fmt" + + g "github.com/golang" + + "github.com/daixiang0/gci" +) +`, + }, + { + "nolint", + + commonConfig, + + `package main + +import ( + "fmt" + + "github.com/forbidden/pkg" //nolint:depguard + + _ "github.com/daixiang0/gci" //nolint:depguard +) +`, + `package main + +import ( + "fmt" + + "github.com/forbidden/pkg" //nolint:depguard + + _ "github.com/daixiang0/gci" //nolint:depguard +) +`, + }, + { + "number-in-alias", + + commonConfig, + + `package main + +import ( + "fmt" + + go_V1 "github.com/golang" + + "github.com/daixiang0/gci" +) +`, + `package main + +import ( + "fmt" + + go_V1 "github.com/golang" + + "github.com/daixiang0/gci" +) +`, + }, + { + "one-import", + + commonConfig, + + `package main +import ( + "fmt" +) + +func main() { +} +`, + `package main +import ( + "fmt" +) + +func main() { +} +`, + }, + { + "one-import-one-line", + + commonConfig, + + `package main + +import "fmt" + +func main() { +} +`, + `package main + +import "fmt" + +func main() { +} +`, + }, + { + "one-line-import-after-import", + + `sections: + - Standard + - Default + - Prefix(github.com/daixiang0) +`, + `package main + +import ( + "fmt" + "os" + + "github.com/daixiang0/test" +) + +import "context" +`, + `package main + +import ( + "context" + "fmt" + "os" + + "github.com/daixiang0/test" +) +`, + }, + { + "same-prefix-custom", + + `sections: + - Standard + - Default + - Prefix(github.com/daixiang0/gci) + - Prefix(github.com/daixiang0/gci/subtest) +`, + `package main + +import ( + "fmt" + + g "github.com/golang" + + "github.com/daixiang0/gci" + "github.com/daixiang0/gci/subtest" +) +`, + `package main + +import ( + "fmt" + + g "github.com/golang" + + "github.com/daixiang0/gci" + + "github.com/daixiang0/gci/subtest" +) +`, + }, + { + "simple-case", + + commonConfig, + + `package main + +import ( + "golang.org/x/tools" + + "fmt" + + "github.com/daixiang0/gci" +) +`, + `package main + +import ( + "fmt" + + "golang.org/x/tools" + + "github.com/daixiang0/gci" +) +`, + }, + { + "whitespace-test", + + commonConfig, + + `package main + +import ( + "fmt" + "github.com/golang" // golang + alias "github.com/daixiang0/gci" +) +`, + `package main + +import ( + "fmt" + + "github.com/golang" // golang + + alias "github.com/daixiang0/gci" +) +`, + }, + { + "with-above-comment-and-alias", + + commonConfig, + + `package main + +import ( + "fmt" + // golang + _ "github.com/golang" + "github.com/daixiang0/gci" +) +`, + `package main + +import ( + "fmt" + + // golang + _ "github.com/golang" + + "github.com/daixiang0/gci" +) +`, + }, + { + "with-comment-and-alias", + + commonConfig, + + `package main + +import ( + "fmt" + _ "github.com/golang" // golang + "github.com/daixiang0/gci" +) +`, + `package main + +import ( + "fmt" + + _ "github.com/golang" // golang + + "github.com/daixiang0/gci" +) +`, + }, + { + "same-prefix-custom", + + `sections: + - Standard + - Default + - Prefix(github.com/daixiang0/gci) + - Prefix(github.com/daixiang0/gci/subtest) +`, + `package main + +import ( + "fmt" + + g "github.com/golang" + + "github.com/daixiang0/gci" + "github.com/daixiang0/gci/subtest" +) +`, + `package main + +import ( + "fmt" + + g 
"github.com/golang" + + "github.com/daixiang0/gci" + + "github.com/daixiang0/gci/subtest" +) +`, + }, + { + "same-prefix-custom", + + `sections: + - Standard + - Default + - Prefix(github.com/daixiang0/gci) + - Prefix(github.com/daixiang0/gci/subtest) +`, + `package main + +import ( + "fmt" + + g "github.com/golang" + + "github.com/daixiang0/gci" + "github.com/daixiang0/gci/subtest" +) +`, + `package main + +import ( + "fmt" + + g "github.com/golang" + + "github.com/daixiang0/gci" + + "github.com/daixiang0/gci/subtest" +) +`, + }, + { + "blank-in-config", + + `sections: + - Standard + - Default + - Prefix( github.com/daixiang0/gci, github.com/daixiang0/gci/subtest ) +`, + `package main + +import ( + "fmt" + + g "github.com/golang" + + "github.com/daixiang0/gci" + "github.com/daixiang0/gci/subtest" +) +`, + `package main + +import ( + "fmt" + + g "github.com/golang" + + "github.com/daixiang0/gci" + "github.com/daixiang0/gci/subtest" +) +`, + }, + { + "alias", + + `sections: + - Standard + - Default + - Alias +`, + `package main + +import ( + testing "github.com/daixiang0/test" + "fmt" + + g "github.com/golang" + + "github.com/daixiang0/gci" + "github.com/daixiang0/gci/subtest" +) +`, + `package main + +import ( + "fmt" + + "github.com/daixiang0/gci" + "github.com/daixiang0/gci/subtest" + + testing "github.com/daixiang0/test" + g "github.com/golang" +) +`, + }, +} diff --git a/vendor/github.com/daixiang0/gci/pkg/io/file.go b/vendor/github.com/daixiang0/gci/pkg/io/file.go new file mode 100644 index 0000000000..79950792ca --- /dev/null +++ b/vendor/github.com/daixiang0/gci/pkg/io/file.go @@ -0,0 +1,64 @@ +package io + +import "io/ioutil" + +// FileObj allows mocking the access to files +type FileObj interface { + Load() ([]byte, error) + Path() string +} + +// File represents a file that can be loaded from the file system +type File struct { + FilePath string +} + +func (f File) Path() string { + return f.FilePath +} + +func (f File) Load() ([]byte, error) { + return ioutil.ReadFile(f.FilePath) +} + +// FileGeneratorFunc returns a list of files that can be loaded and processed +type FileGeneratorFunc func() ([]FileObj, error) + +func (a FileGeneratorFunc) Combine(b FileGeneratorFunc) FileGeneratorFunc { + return func() ([]FileObj, error) { + files, err := a() + if err != nil { + return nil, err + } + additionalFiles, err := b() + if err != nil { + return nil, err + } + files = append(files, additionalFiles...) 
+ return files, err + } +} + +func GoFilesInPathsGenerator(paths []string, skipVendor bool) FileGeneratorFunc { + checkFunc := isGoFile + if skipVendor { + checkFunc = checkChains(isGoFile, isOutsideVendorDir) + } + + return FilesInPathsGenerator(paths, checkFunc) +} + +func FilesInPathsGenerator(paths []string, fileCheckFun fileCheckFunction) FileGeneratorFunc { + return func() (foundFiles []FileObj, err error) { + for _, path := range paths { + files, err := FindFilesForPath(path, fileCheckFun) + if err != nil { + return nil, err + } + for _, filePath := range files { + foundFiles = append(foundFiles, File{filePath}) + } + } + return foundFiles, nil + } +} diff --git a/vendor/github.com/daixiang0/gci/pkg/io/search.go b/vendor/github.com/daixiang0/gci/pkg/io/search.go new file mode 100644 index 0000000000..cd821582e7 --- /dev/null +++ b/vendor/github.com/daixiang0/gci/pkg/io/search.go @@ -0,0 +1,77 @@ +package io + +import ( + "io/fs" + "os" + "path/filepath" +) + +type fileCheckFunction func(path string, file os.FileInfo) bool + +func FindFilesForPath(path string, fileCheckFun fileCheckFunction) ([]string, error) { + switch entry, err := os.Stat(path); { + case err != nil: + return nil, err + case entry.IsDir(): + return findFilesForDirectory(path, fileCheckFun) + case fileCheckFun(path, entry): + return []string{filepath.Clean(path)}, nil + default: + return []string{}, nil + } +} + +func findFilesForDirectory(dirPath string, fileCheckFun fileCheckFunction) ([]string, error) { + var filePaths []string + err := filepath.WalkDir(dirPath, func(path string, entry fs.DirEntry, err error) error { + if err != nil { + return err + } + file, err := entry.Info() + if err != nil { + return err + } + if !entry.IsDir() && fileCheckFun(path, file) { + filePaths = append(filePaths, filepath.Clean(path)) + } + return nil + }) + if err != nil { + return nil, err + } + return filePaths, nil +} + +func isGoFile(_ string, file os.FileInfo) bool { + return !file.IsDir() && filepath.Ext(file.Name()) == ".go" +} + +func isOutsideVendorDir(path string, _ os.FileInfo) bool { + for { + base := filepath.Base(path) + if base == "vendor" { + return false + } + + prevPath := path + path = filepath.Dir(path) + + if prevPath == path { + break + } + } + + return true +} + +func checkChains(funcs ...fileCheckFunction) fileCheckFunction { + return func(path string, file os.FileInfo) bool { + for _, checkFunc := range funcs { + if !checkFunc(path, file) { + return false + } + } + + return true + } +} diff --git a/vendor/github.com/daixiang0/gci/pkg/io/stdin.go b/vendor/github.com/daixiang0/gci/pkg/io/stdin.go new file mode 100644 index 0000000000..ccab2844f4 --- /dev/null +++ b/vendor/github.com/daixiang0/gci/pkg/io/stdin.go @@ -0,0 +1,27 @@ +package io + +import ( + "io/ioutil" + "os" +) + +type stdInFile struct{} + +func (s stdInFile) Load() ([]byte, error) { + return ioutil.ReadAll(os.Stdin) +} + +func (s stdInFile) Path() string { + return "StdIn" +} + +var StdInGenerator FileGeneratorFunc = func() ([]FileObj, error) { + stat, err := os.Stdin.Stat() + if err != nil { + return nil, err + } + if (stat.Mode() & os.ModeCharDevice) == 0 { + return []FileObj{stdInFile{}}, nil + } + return []FileObj{}, nil +} diff --git a/vendor/github.com/daixiang0/gci/pkg/log/log.go b/vendor/github.com/daixiang0/gci/pkg/log/log.go new file mode 100644 index 0000000000..ab33739ca3 --- /dev/null +++ b/vendor/github.com/daixiang0/gci/pkg/log/log.go @@ -0,0 +1,50 @@ +package log + +import ( + "sync" + + "go.uber.org/zap" + 
"go.uber.org/zap/zapcore" +) + +// Use L to log with Zap +var logger *zap.Logger + +// Keep the config to reference the atomicLevel for changing levels +var logConfig zap.Config + +var doOnce sync.Once + +// InitLogger sets up the logger +func InitLogger() { + doOnce.Do(func() { + logConfig = zap.NewDevelopmentConfig() + + logConfig.EncoderConfig.TimeKey = "timestamp" + logConfig.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder + logConfig.Level.SetLevel(zapcore.InfoLevel) + logConfig.OutputPaths = []string{"stderr"} + + var err error + logger, err = logConfig.Build() + if err != nil { + panic(err) + } + }) +} + +// SetLevel allows you to set the level of the default gci logger. +// This will not work if you replace the logger +func SetLevel(level zapcore.Level) { + logConfig.Level.SetLevel(level) +} + +// L returns the logger +func L() *zap.Logger { + return logger +} + +// SetLogger allows you to set the logger to whatever you want +func SetLogger(l *zap.Logger) { + logger = l +} diff --git a/vendor/github.com/daixiang0/gci/pkg/parse/parse.go b/vendor/github.com/daixiang0/gci/pkg/parse/parse.go new file mode 100644 index 0000000000..33d6e1705d --- /dev/null +++ b/vendor/github.com/daixiang0/gci/pkg/parse/parse.go @@ -0,0 +1,197 @@ +package parse + +import ( + "go/ast" + "go/parser" + "go/token" + "sort" + "strings" +) + +const C = "\"C\"" + +type GciImports struct { + // original index of import group, include doc, name, path and comment + Start, End int + Name, Path string +} +type ImportList []*GciImports + +func (l ImportList) Len() int { + return len(l) +} + +func (l ImportList) Less(i, j int) bool { + if strings.Compare(l[i].Path, l[j].Path) == 0 { + return strings.Compare(l[i].Name, l[j].Name) < 0 + } + + return strings.Compare(l[i].Path, l[j].Path) < 0 +} + +func (l ImportList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } + +/* + * AST considers a import block as below: + * ``` + * Doc + * Name Path Comment + * ``` + * An example is like below: + * ``` + * // test + * test "fmt" // test + * ``` + * getImports return a import block with name, start and end index + */ +func getImports(imp *ast.ImportSpec) (start, end int, name string) { + if imp.Doc != nil { + // doc poc need minus one to get the first index of comment + start = int(imp.Doc.Pos()) - 1 + } else { + if imp.Name != nil { + // name pos need minus one too + start = int(imp.Name.Pos()) - 1 + } else { + // path pos start without quote, need minus one for it + start = int(imp.Path.Pos()) - 1 + } + } + + if imp.Name != nil { + name = imp.Name.Name + } + + if imp.Comment != nil { + end = int(imp.Comment.End()) + } else { + end = int(imp.Path.End()) + } + return +} + +func ParseFile(src []byte, filename string) (ImportList, int, int, int, int, error) { + fileSet := token.NewFileSet() + f, err := parser.ParseFile(fileSet, filename, src, parser.ParseComments) + if err != nil { + return nil, 0, 0, 0, 0, err + } + + if len(f.Imports) == 0 { + return nil, 0, 0, 0, 0, NoImportError{} + } + + var ( + // headEnd means the start of import block + headEnd int + // tailStart means the end + 1 of import block + tailStart int + // cStart means the start of C import block + cStart int + // cEnd means the end of C import block + cEnd int + data ImportList + ) + + for index, decl := range f.Decls { + switch decl.(type) { + // skip BadDecl and FuncDecl + case *ast.GenDecl: + genDecl := decl.(*ast.GenDecl) + + if genDecl.Tok == token.IMPORT { + // there are two cases, both end with linebreak: + // 1. + // import ( + // "xxxx" + // ) + // 2. 
+ // import "xxx" + if headEnd == 0 { + headEnd = int(decl.Pos()) - 1 + } + tailStart = int(decl.End()) + + for _, spec := range genDecl.Specs { + imp := spec.(*ast.ImportSpec) + // there are only one C import block + // ensure C import block is the first import block + if imp.Path.Value == C { + /* + common case: + + // #include + import "C" + + notice that decl.Pos() == genDecl.Pos() > genDecl.Doc.Pos() + */ + if genDecl.Doc != nil { + cStart = int(genDecl.Doc.Pos()) - 1 + // if C import block is the first, update headEnd + if index == 0 { + headEnd = cStart + } + } else { + /* + special case: + + import "C" + */ + cStart = int(decl.Pos()) - 1 + } + + cEnd = int(decl.End()) + + continue + } + + start, end, name := getImports(imp) + + data = append(data, &GciImports{ + Start: start, + End: end, + Name: name, + Path: strings.Trim(imp.Path.Value, `"`), + }) + } + } + } + } + + sort.Sort(data) + return data, headEnd, tailStart, cStart, cEnd, nil +} + +// IsGeneratedFileByComment reports whether the source file is generated code. +// Using a bit laxer rules than https://golang.org/s/generatedcode to +// match more generated code. +// Taken from https://github.com/golangci/golangci-lint. +func IsGeneratedFileByComment(in string) bool { + const ( + genCodeGenerated = "code generated" + genDoNotEdit = "do not edit" + genAutoFile = "autogenerated file" // easyjson + genAutoGenerated = "automatically generated" // genny + ) + + markers := []string{genCodeGenerated, genDoNotEdit, genAutoFile, genAutoGenerated} + in = strings.ToLower(in) + for _, marker := range markers { + if strings.Contains(in, marker) { + return true + } + } + + return false +} + +type NoImportError struct{} + +func (n NoImportError) Error() string { + return "No imports" +} + +func (i NoImportError) Is(err error) bool { + _, ok := err.(NoImportError) + return ok +} diff --git a/vendor/github.com/daixiang0/gci/pkg/section/alias.go b/vendor/github.com/daixiang0/gci/pkg/section/alias.go new file mode 100644 index 0000000000..423e96acf0 --- /dev/null +++ b/vendor/github.com/daixiang0/gci/pkg/section/alias.go @@ -0,0 +1,25 @@ +package section + +import ( + "github.com/daixiang0/gci/pkg/parse" + "github.com/daixiang0/gci/pkg/specificity" +) + +type Alias struct{} + +const AliasType = "alias" + +func (b Alias) MatchSpecificity(spec *parse.GciImports) specificity.MatchSpecificity { + if spec.Name != "." 
&& spec.Name != "_" && spec.Name != "" { + return specificity.NameMatch{} + } + return specificity.MisMatch{} +} + +func (b Alias) String() string { + return AliasType +} + +func (b Alias) Type() string { + return AliasType +} diff --git a/vendor/github.com/daixiang0/gci/pkg/section/blank.go b/vendor/github.com/daixiang0/gci/pkg/section/blank.go new file mode 100644 index 0000000000..4a2741773d --- /dev/null +++ b/vendor/github.com/daixiang0/gci/pkg/section/blank.go @@ -0,0 +1,25 @@ +package section + +import ( + "github.com/daixiang0/gci/pkg/parse" + "github.com/daixiang0/gci/pkg/specificity" +) + +type Blank struct{} + +const BlankType = "blank" + +func (b Blank) MatchSpecificity(spec *parse.GciImports) specificity.MatchSpecificity { + if spec.Name == "_" { + return specificity.NameMatch{} + } + return specificity.MisMatch{} +} + +func (b Blank) String() string { + return BlankType +} + +func (b Blank) Type() string { + return BlankType +} diff --git a/vendor/github.com/daixiang0/gci/pkg/section/commentline.go b/vendor/github.com/daixiang0/gci/pkg/section/commentline.go new file mode 100644 index 0000000000..c3ddd08249 --- /dev/null +++ b/vendor/github.com/daixiang0/gci/pkg/section/commentline.go @@ -0,0 +1,24 @@ +package section + +import ( + "fmt" + + "github.com/daixiang0/gci/pkg/parse" + "github.com/daixiang0/gci/pkg/specificity" +) + +type CommentLine struct { + Comment string +} + +func (c CommentLine) MatchSpecificity(spec *parse.GciImports) specificity.MatchSpecificity { + return specificity.MisMatch{} +} + +func (c CommentLine) String() string { + return fmt.Sprintf("commentline(%s)", c.Comment) +} + +func (c CommentLine) Type() string { + return "commentline" +} diff --git a/vendor/github.com/daixiang0/gci/pkg/section/default.go b/vendor/github.com/daixiang0/gci/pkg/section/default.go new file mode 100644 index 0000000000..3af07a0927 --- /dev/null +++ b/vendor/github.com/daixiang0/gci/pkg/section/default.go @@ -0,0 +1,22 @@ +package section + +import ( + "github.com/daixiang0/gci/pkg/parse" + "github.com/daixiang0/gci/pkg/specificity" +) + +const DefaultType = "default" + +type Default struct{} + +func (d Default) MatchSpecificity(spec *parse.GciImports) specificity.MatchSpecificity { + return specificity.Default{} +} + +func (d Default) String() string { + return DefaultType +} + +func (d Default) Type() string { + return DefaultType +} diff --git a/vendor/github.com/daixiang0/gci/pkg/section/dot.go b/vendor/github.com/daixiang0/gci/pkg/section/dot.go new file mode 100644 index 0000000000..8112eeb1dc --- /dev/null +++ b/vendor/github.com/daixiang0/gci/pkg/section/dot.go @@ -0,0 +1,25 @@ +package section + +import ( + "github.com/daixiang0/gci/pkg/parse" + "github.com/daixiang0/gci/pkg/specificity" +) + +type Dot struct{} + +const DotType = "dot" + +func (d Dot) MatchSpecificity(spec *parse.GciImports) specificity.MatchSpecificity { + if spec.Name == "." 
{ + return specificity.NameMatch{} + } + return specificity.MisMatch{} +} + +func (d Dot) String() string { + return DotType +} + +func (d Dot) Type() string { + return DotType +} diff --git a/vendor/github.com/daixiang0/gci/pkg/section/errors.go b/vendor/github.com/daixiang0/gci/pkg/section/errors.go new file mode 100644 index 0000000000..0a12091356 --- /dev/null +++ b/vendor/github.com/daixiang0/gci/pkg/section/errors.go @@ -0,0 +1,107 @@ +package section + +import ( + "errors" + "fmt" + + "github.com/daixiang0/gci/pkg/parse" + "github.com/daixiang0/gci/pkg/utils" +) + +type SectionParsingError struct { + error +} + +func (s SectionParsingError) Unwrap() error { + return s.error +} + +func (s SectionParsingError) Wrap(sectionStr string) error { + return fmt.Errorf("failed to parse section %q: %w", sectionStr, s) +} + +func (s SectionParsingError) Is(err error) bool { + _, ok := err.(SectionParsingError) + return ok +} + +var MissingParameterClosingBracketsError = fmt.Errorf("section parameter is missing closing %q", utils.RightParenthesis) + +var MoreThanOneOpeningQuotesError = fmt.Errorf("found more than one %q parameter start sequences", utils.RightParenthesis) + +var SectionTypeDoesNotAcceptParametersError = errors.New("section type does not accept a parameter") + +var SectionTypeDoesNotAcceptPrefixError = errors.New("section may not contain a Prefix") + +var SectionTypeDoesNotAcceptSuffixError = errors.New("section may not contain a Suffix") + +type EqualSpecificityMatchError struct { + Imports *parse.GciImports + SectionA, SectionB Section +} + +func (e EqualSpecificityMatchError) Error() string { + return fmt.Sprintf("Import %v matched section %s and %s equally", e.Imports, e.SectionA, e.SectionB) +} + +func (e EqualSpecificityMatchError) Is(err error) bool { + _, ok := err.(EqualSpecificityMatchError) + return ok +} + +type NoMatchingSectionForImportError struct { + Imports *parse.GciImports +} + +func (n NoMatchingSectionForImportError) Error() string { + return fmt.Sprintf("No section found for Import: %v", n.Imports) +} + +func (n NoMatchingSectionForImportError) Is(err error) bool { + _, ok := err.(NoMatchingSectionForImportError) + return ok +} + +type InvalidImportSplitError struct { + segments []string +} + +func (i InvalidImportSplitError) Error() string { + return fmt.Sprintf("separating the inline comment from the import yielded an invalid number of segments: %v", i.segments) +} + +func (i InvalidImportSplitError) Is(err error) bool { + _, ok := err.(InvalidImportSplitError) + return ok +} + +type InvalidAliasSplitError struct { + segments []string +} + +func (i InvalidAliasSplitError) Error() string { + return fmt.Sprintf("separating the alias from the path yielded an invalid number of segments: %v", i.segments) +} + +func (i InvalidAliasSplitError) Is(err error) bool { + _, ok := err.(InvalidAliasSplitError) + return ok +} + +var ( + MissingImportStatementError = FileParsingError{errors.New("no import statement present in File")} + ImportStatementNotClosedError = FileParsingError{errors.New("import statement not closed")} +) + +type FileParsingError struct { + error +} + +func (f FileParsingError) Unwrap() error { + return f.error +} + +func (f FileParsingError) Is(err error) bool { + _, ok := err.(FileParsingError) + return ok +} diff --git a/vendor/github.com/daixiang0/gci/pkg/section/newline.go b/vendor/github.com/daixiang0/gci/pkg/section/newline.go new file mode 100644 index 0000000000..4bff91b9d4 --- /dev/null +++ 
b/vendor/github.com/daixiang0/gci/pkg/section/newline.go @@ -0,0 +1,22 @@ +package section + +import ( + "github.com/daixiang0/gci/pkg/parse" + "github.com/daixiang0/gci/pkg/specificity" +) + +const newLineName = "newline" + +type NewLine struct{} + +func (n NewLine) MatchSpecificity(spec *parse.GciImports) specificity.MatchSpecificity { + return specificity.MisMatch{} +} + +func (n NewLine) String() string { + return newLineName +} + +func (n NewLine) Type() string { + return newLineName +} diff --git a/vendor/github.com/daixiang0/gci/pkg/section/parser.go b/vendor/github.com/daixiang0/gci/pkg/section/parser.go new file mode 100644 index 0000000000..38435f540e --- /dev/null +++ b/vendor/github.com/daixiang0/gci/pkg/section/parser.go @@ -0,0 +1,46 @@ +package section + +import ( + "errors" + "fmt" + "strings" +) + +func Parse(data []string) (SectionList, error) { + if len(data) == 0 { + return nil, nil + } + + var list SectionList + var errString string + for _, d := range data { + s := strings.ToLower(d) + if len(s) == 0 { + return nil, nil + } + + if s == "default" { + list = append(list, Default{}) + } else if s == "standard" { + list = append(list, Standard{}) + } else if s == "newline" { + list = append(list, NewLine{}) + } else if strings.HasPrefix(s, "prefix(") && len(d) > 8 { + list = append(list, Custom{d[7 : len(d)-1]}) + } else if strings.HasPrefix(s, "commentline(") && len(d) > 13 { + list = append(list, Custom{d[12 : len(d)-1]}) + } else if s == "dot" { + list = append(list, Dot{}) + } else if s == "blank" { + list = append(list, Blank{}) + } else if s == "alias" { + list = append(list, Alias{}) + } else { + errString += fmt.Sprintf(" %s", s) + } + } + if errString != "" { + return nil, errors.New(fmt.Sprintf("invalid params:%s", errString)) + } + return list, nil +} diff --git a/vendor/github.com/daixiang0/gci/pkg/section/prefix.go b/vendor/github.com/daixiang0/gci/pkg/section/prefix.go new file mode 100644 index 0000000000..30bdd8f4ea --- /dev/null +++ b/vendor/github.com/daixiang0/gci/pkg/section/prefix.go @@ -0,0 +1,38 @@ +package section + +import ( + "fmt" + "strings" + + "github.com/daixiang0/gci/pkg/parse" + "github.com/daixiang0/gci/pkg/specificity" +) + +type Custom struct { + Prefix string +} + +// CustomSeparator allows you to group multiple custom prefix together in the same section +// gci diff -s standard -s default -s prefix(github.com/company,gitlab.com/company,companysuffix) +const CustomSeparator = "," + +const CustomType = "custom" + +func (c Custom) MatchSpecificity(spec *parse.GciImports) specificity.MatchSpecificity { + for _, prefix := range strings.Split(c.Prefix, CustomSeparator) { + prefix = strings.TrimSpace(prefix) + if strings.HasPrefix(spec.Path, prefix) { + return specificity.Match{Length: len(prefix)} + } + } + + return specificity.MisMatch{} +} + +func (c Custom) String() string { + return fmt.Sprintf("prefix(%s)", c.Prefix) +} + +func (c Custom) Type() string { + return CustomType +} diff --git a/vendor/github.com/daixiang0/gci/pkg/section/section.go b/vendor/github.com/daixiang0/gci/pkg/section/section.go new file mode 100644 index 0000000000..cc0a43f2f1 --- /dev/null +++ b/vendor/github.com/daixiang0/gci/pkg/section/section.go @@ -0,0 +1,36 @@ +package section + +import ( + "github.com/daixiang0/gci/pkg/parse" + "github.com/daixiang0/gci/pkg/specificity" +) + +// Section defines a part of the formatted output. 
+type Section interface { + // MatchSpecificity returns how well an Import matches to this Section + MatchSpecificity(spec *parse.GciImports) specificity.MatchSpecificity + + // String Implements the stringer interface + String() string + + // return section type + Type() string +} + +type SectionList []Section + +func (list SectionList) String() []string { + var output []string + for _, section := range list { + output = append(output, section.String()) + } + return output +} + +func DefaultSections() SectionList { + return SectionList{Standard{}, Default{}} +} + +func DefaultSectionSeparators() SectionList { + return SectionList{NewLine{}} +} diff --git a/vendor/github.com/daixiang0/gci/pkg/section/standard.go b/vendor/github.com/daixiang0/gci/pkg/section/standard.go new file mode 100644 index 0000000000..26c7e9dc7d --- /dev/null +++ b/vendor/github.com/daixiang0/gci/pkg/section/standard.go @@ -0,0 +1,30 @@ +package section + +import ( + "github.com/daixiang0/gci/pkg/parse" + "github.com/daixiang0/gci/pkg/specificity" +) + +const StandardType = "standard" + +type Standard struct{} + +func (s Standard) MatchSpecificity(spec *parse.GciImports) specificity.MatchSpecificity { + if isStandard(spec.Path) { + return specificity.StandardMatch{} + } + return specificity.MisMatch{} +} + +func (s Standard) String() string { + return StandardType +} + +func (s Standard) Type() string { + return StandardType +} + +func isStandard(pkg string) bool { + _, ok := standardPackages[pkg] + return ok +} diff --git a/vendor/github.com/daixiang0/gci/pkg/gci/std.go b/vendor/github.com/daixiang0/gci/pkg/section/standard_list.go similarity index 91% rename from vendor/github.com/daixiang0/gci/pkg/gci/std.go rename to vendor/github.com/daixiang0/gci/pkg/section/standard_list.go index ac96b55ab1..05e7999396 100644 --- a/vendor/github.com/daixiang0/gci/pkg/gci/std.go +++ b/vendor/github.com/daixiang0/gci/pkg/section/standard_list.go @@ -1,12 +1,14 @@ -package gci +package section -// Code generated based on go1.16beta1. DO NOT EDIT. +// Code generated based on go1.21.0 X:arenas. DO NOT EDIT. 
var standardPackages = map[string]struct{}{ "archive/tar": {}, "archive/zip": {}, + "arena": {}, "bufio": {}, "bytes": {}, + "cmp": {}, "compress/bzip2": {}, "compress/flate": {}, "compress/gzip": {}, @@ -21,6 +23,7 @@ var standardPackages = map[string]struct{}{ "crypto/cipher": {}, "crypto/des": {}, "crypto/dsa": {}, + "crypto/ecdh": {}, "crypto/ecdsa": {}, "crypto/ed25519": {}, "crypto/elliptic": {}, @@ -38,6 +41,7 @@ var standardPackages = map[string]struct{}{ "crypto/x509/pkix": {}, "database/sql": {}, "database/sql/driver": {}, + "debug/buildinfo": {}, "debug/dwarf": {}, "debug/elf": {}, "debug/gosym": {}, @@ -63,8 +67,10 @@ var standardPackages = map[string]struct{}{ "fmt": {}, "go/ast": {}, "go/build": {}, + "go/build/constraint": {}, "go/constant": {}, "go/doc": {}, + "go/doc/comment": {}, "go/format": {}, "go/importer": {}, "go/parser": {}, @@ -92,7 +98,9 @@ var standardPackages = map[string]struct{}{ "io/fs": {}, "io/ioutil": {}, "log": {}, + "log/slog": {}, "log/syslog": {}, + "maps": {}, "math": {}, "math/big": {}, "math/bits": {}, @@ -111,6 +119,7 @@ var standardPackages = map[string]struct{}{ "net/http/httputil": {}, "net/http/pprof": {}, "net/mail": {}, + "net/netip": {}, "net/rpc": {}, "net/rpc/jsonrpc": {}, "net/smtp": {}, @@ -128,11 +137,13 @@ var standardPackages = map[string]struct{}{ "regexp/syntax": {}, "runtime": {}, "runtime/cgo": {}, + "runtime/coverage": {}, "runtime/debug": {}, "runtime/metrics": {}, "runtime/pprof": {}, "runtime/race": {}, "runtime/trace": {}, + "slices": {}, "sort": {}, "strconv": {}, "strings": {}, @@ -143,6 +154,7 @@ var standardPackages = map[string]struct{}{ "testing/fstest": {}, "testing/iotest": {}, "testing/quick": {}, + "testing/slogtest": {}, "text/scanner": {}, "text/tabwriter": {}, "text/template": {}, @@ -154,8 +166,3 @@ var standardPackages = map[string]struct{}{ "unicode/utf8": {}, "unsafe": {}, } - -func isStandardPackage(pkg string) bool { - _, ok := standardPackages[pkg] - return ok -} diff --git a/vendor/github.com/daixiang0/gci/pkg/specificity/default.go b/vendor/github.com/daixiang0/gci/pkg/specificity/default.go new file mode 100644 index 0000000000..f7ae4b87bc --- /dev/null +++ b/vendor/github.com/daixiang0/gci/pkg/specificity/default.go @@ -0,0 +1,19 @@ +package specificity + +type Default struct{} + +func (d Default) IsMoreSpecific(than MatchSpecificity) bool { + return isMoreSpecific(d, than) +} + +func (d Default) Equal(to MatchSpecificity) bool { + return equalSpecificity(d, to) +} + +func (d Default) class() specificityClass { + return DefaultClass +} + +func (d Default) String() string { + return "Default" +} diff --git a/vendor/github.com/daixiang0/gci/pkg/specificity/match.go b/vendor/github.com/daixiang0/gci/pkg/specificity/match.go new file mode 100644 index 0000000000..f08d2b66bb --- /dev/null +++ b/vendor/github.com/daixiang0/gci/pkg/specificity/match.go @@ -0,0 +1,24 @@ +package specificity + +import "fmt" + +type Match struct { + Length int +} + +func (m Match) IsMoreSpecific(than MatchSpecificity) bool { + otherMatch, isMatch := than.(Match) + return isMoreSpecific(m, than) || (isMatch && m.Length > otherMatch.Length) +} + +func (m Match) Equal(to MatchSpecificity) bool { + return equalSpecificity(m, to) +} + +func (m Match) class() specificityClass { + return MatchClass +} + +func (m Match) String() string { + return fmt.Sprintf("Match(length: %d)", m.Length) +} diff --git a/vendor/github.com/daixiang0/gci/pkg/specificity/mismatch.go b/vendor/github.com/daixiang0/gci/pkg/specificity/mismatch.go new file 
mode 100644 index 0000000000..8e87111461 --- /dev/null +++ b/vendor/github.com/daixiang0/gci/pkg/specificity/mismatch.go @@ -0,0 +1,19 @@ +package specificity + +type MisMatch struct{} + +func (m MisMatch) IsMoreSpecific(than MatchSpecificity) bool { + return isMoreSpecific(m, than) +} + +func (m MisMatch) Equal(to MatchSpecificity) bool { + return equalSpecificity(m, to) +} + +func (m MisMatch) class() specificityClass { + return MisMatchClass +} + +func (m MisMatch) String() string { + return "Mismatch" +} diff --git a/vendor/github.com/daixiang0/gci/pkg/specificity/name.go b/vendor/github.com/daixiang0/gci/pkg/specificity/name.go new file mode 100644 index 0000000000..1900a0ac5d --- /dev/null +++ b/vendor/github.com/daixiang0/gci/pkg/specificity/name.go @@ -0,0 +1,19 @@ +package specificity + +type NameMatch struct{} + +func (n NameMatch) IsMoreSpecific(than MatchSpecificity) bool { + return isMoreSpecific(n, than) +} + +func (n NameMatch) Equal(to MatchSpecificity) bool { + return equalSpecificity(n, to) +} + +func (n NameMatch) class() specificityClass { + return NameClass +} + +func (n NameMatch) String() string { + return "Name" +} diff --git a/vendor/github.com/daixiang0/gci/pkg/specificity/specificity.go b/vendor/github.com/daixiang0/gci/pkg/specificity/specificity.go new file mode 100644 index 0000000000..842da18579 --- /dev/null +++ b/vendor/github.com/daixiang0/gci/pkg/specificity/specificity.go @@ -0,0 +1,27 @@ +package specificity + +type specificityClass int + +const ( + MisMatchClass = 0 + DefaultClass = 10 + StandardClass = 20 + MatchClass = 30 + NameClass = 40 +) + +// MatchSpecificity is used to determine which section matches an import best +type MatchSpecificity interface { + IsMoreSpecific(than MatchSpecificity) bool + Equal(to MatchSpecificity) bool + class() specificityClass +} + +func isMoreSpecific(this, than MatchSpecificity) bool { + return this.class() > than.class() +} + +func equalSpecificity(base, to MatchSpecificity) bool { + // m.class() == to.class() would not work for Match + return !base.IsMoreSpecific(to) && !to.IsMoreSpecific(base) +} diff --git a/vendor/github.com/daixiang0/gci/pkg/specificity/standard.go b/vendor/github.com/daixiang0/gci/pkg/specificity/standard.go new file mode 100644 index 0000000000..72ccaf7e1e --- /dev/null +++ b/vendor/github.com/daixiang0/gci/pkg/specificity/standard.go @@ -0,0 +1,19 @@ +package specificity + +type StandardMatch struct{} + +func (s StandardMatch) IsMoreSpecific(than MatchSpecificity) bool { + return isMoreSpecific(s, than) +} + +func (s StandardMatch) Equal(to MatchSpecificity) bool { + return equalSpecificity(s, to) +} + +func (s StandardMatch) class() specificityClass { + return StandardClass +} + +func (s StandardMatch) String() string { + return "Standard" +} diff --git a/vendor/github.com/daixiang0/gci/pkg/utils/constants.go b/vendor/github.com/daixiang0/gci/pkg/utils/constants.go new file mode 100644 index 0000000000..2fafbc32cc --- /dev/null +++ b/vendor/github.com/daixiang0/gci/pkg/utils/constants.go @@ -0,0 +1,12 @@ +package utils + +const ( + Indent = '\t' + Linebreak = '\n' + WinLinebreak = '\r' + + Colon = ":" + + LeftParenthesis = '(' + RightParenthesis = ')' +) diff --git a/vendor/github.com/denis-tingajkin/go-header/.gitignore b/vendor/github.com/denis-tingaikin/go-header/.gitignore similarity index 100% rename from vendor/github.com/denis-tingajkin/go-header/.gitignore rename to vendor/github.com/denis-tingaikin/go-header/.gitignore diff --git 
a/vendor/github.com/denis-tingajkin/go-header/.go-header.yml b/vendor/github.com/denis-tingaikin/go-header/.go-header.yml similarity index 99% rename from vendor/github.com/denis-tingajkin/go-header/.go-header.yml rename to vendor/github.com/denis-tingaikin/go-header/.go-header.yml index 446d7317ea..3f87c8798d 100644 --- a/vendor/github.com/denis-tingajkin/go-header/.go-header.yml +++ b/vendor/github.com/denis-tingaikin/go-header/.go-header.yml @@ -1,6 +1,6 @@ values: regexp: - copyright-holder: Copyright \(c\) {{year-range}} Denis Tingajkin + copyright-holder: Copyright \(c\) {{year-range}} Denis Tingaikin template: | {{copyright-holder}} diff --git a/vendor/github.com/denis-tingajkin/go-header/LICENSE b/vendor/github.com/denis-tingaikin/go-header/LICENSE similarity index 100% rename from vendor/github.com/denis-tingajkin/go-header/LICENSE rename to vendor/github.com/denis-tingaikin/go-header/LICENSE diff --git a/vendor/github.com/denis-tingajkin/go-header/README.md b/vendor/github.com/denis-tingaikin/go-header/README.md similarity index 87% rename from vendor/github.com/denis-tingajkin/go-header/README.md rename to vendor/github.com/denis-tingaikin/go-header/README.md index 1a2a3d9a6d..c2044ec96e 100644 --- a/vendor/github.com/denis-tingajkin/go-header/README.md +++ b/vendor/github.com/denis-tingaikin/go-header/README.md @@ -1,5 +1,5 @@ # go-header -[![Actions Status](https://github.com/denis-tingajkin/go-header/workflows/ci/badge.svg)](https://github.com/denis-tingajkin/go-header/actions) +[![ci](https://github.com/denis-tingaikin/go-header/actions/workflows/ci.yml/badge.svg?branch=main)](https://github.com/denis-tingaikin/go-header/actions/workflows/ci.yml) Go source code linter providing checks for license headers. @@ -8,7 +8,7 @@ Go source code linter providing checks for license headers. For installation you can simply use `go get`. ```bash -go get github.com/denis-tingajkin/go-header/cmd/go-header +go get github.com/denis-tingaikin/go-header/cmd/go-header ``` ## Configuration @@ -17,8 +17,8 @@ To configuring `.go-header.yml` linter you simply need to fill the next fields: ```yaml --- -temaplte: # expects header template string. -tempalte-path: # expects path to file with license header string. +template: # expects header template string. +template-path: # expects path to file with license header string. values: # expects `const` or `regexp` node with values where values is a map string to string. const: key1: value1 # const value just checks equality. Note `key1` should be used in template string as {{ key1 }} or {{ KEY1 }}. 
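
> Editorial note (not part of the patch): the `.go-header.yml` and README hunks above describe how go-header expands `{{copyright-holder}}` from the configured regexp value, with `{{year-range}}` accepting either the current year alone or a `20xx-<current year>` range. As an illustrative sketch only — the package name below is a placeholder, and the full template body lies outside this hunk — a file header like the one already used by the renamed vendored sources would satisfy that check, assuming the lint run happens in the range's final year:

```go
// Copyright (c) 2020-2022 Denis Tingaikin
//
// SPDX-License-Identifier: Apache-2.0

// Package goheader is a hypothetical placeholder; any Go file whose
// leading comment matches the configured template passes the linter.
package goheader
```
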
diff --git a/vendor/github.com/denis-tingajkin/go-header/analyzer.go b/vendor/github.com/denis-tingaikin/go-header/analyzer.go similarity index 98% rename from vendor/github.com/denis-tingajkin/go-header/analyzer.go rename to vendor/github.com/denis-tingaikin/go-header/analyzer.go index 5707890b02..785a02e79d 100644 --- a/vendor/github.com/denis-tingajkin/go-header/analyzer.go +++ b/vendor/github.com/denis-tingaikin/go-header/analyzer.go @@ -1,4 +1,4 @@ -// Copyright (c) 2020 Denis Tingajkin +// Copyright (c) 2020-2022 Denis Tingaikin // // SPDX-License-Identifier: Apache-2.0 // diff --git a/vendor/github.com/denis-tingajkin/go-header/config.go b/vendor/github.com/denis-tingaikin/go-header/config.go similarity index 94% rename from vendor/github.com/denis-tingajkin/go-header/config.go rename to vendor/github.com/denis-tingaikin/go-header/config.go index fa8b23c2d8..9576b949fd 100644 --- a/vendor/github.com/denis-tingajkin/go-header/config.go +++ b/vendor/github.com/denis-tingaikin/go-header/config.go @@ -1,4 +1,4 @@ -// Copyright (c) 2020 Denis Tingajkin +// Copyright (c) 2020-2022 Denis Tingaikin // // SPDX-License-Identifier: Apache-2.0 // @@ -23,7 +23,7 @@ import ( "strings" "time" - "gopkg.in/yaml.v2" + "gopkg.in/yaml.v3" ) // Configuration represents go-header linter setup parameters @@ -40,7 +40,7 @@ func (c *Configuration) builtInValues() map[string]Value { var result = make(map[string]Value) year := fmt.Sprint(time.Now().Year()) result["year-range"] = &RegexpValue{ - RawValue: strings.ReplaceAll(`(20\d\d\-YEAR)|(YEAR)`, "YEAR", year), + RawValue: strings.ReplaceAll(`((20\d\d\-YEAR)|(YEAR))`, "YEAR", year), } result["year"] = &ConstValue{ RawValue: year, diff --git a/vendor/github.com/denis-tingajkin/go-header/issue.go b/vendor/github.com/denis-tingaikin/go-header/issue.go similarity index 96% rename from vendor/github.com/denis-tingajkin/go-header/issue.go rename to vendor/github.com/denis-tingaikin/go-header/issue.go index 2ff7bfd3cc..0ada0d62cf 100644 --- a/vendor/github.com/denis-tingajkin/go-header/issue.go +++ b/vendor/github.com/denis-tingaikin/go-header/issue.go @@ -1,4 +1,4 @@ -// Copyright (c) 2020 Denis Tingajkin +// Copyright (c) 2020-2022 Denis Tingaikin // // SPDX-License-Identifier: Apache-2.0 // diff --git a/vendor/github.com/denis-tingajkin/go-header/location.go b/vendor/github.com/denis-tingaikin/go-header/location.go similarity index 95% rename from vendor/github.com/denis-tingajkin/go-header/location.go rename to vendor/github.com/denis-tingaikin/go-header/location.go index ba4d1907b1..9f18394855 100644 --- a/vendor/github.com/denis-tingajkin/go-header/location.go +++ b/vendor/github.com/denis-tingaikin/go-header/location.go @@ -1,4 +1,4 @@ -// Copyright (c) 2020 Denis Tingajkin +// Copyright (c) 2020-2022 Denis Tingaikin // // SPDX-License-Identifier: Apache-2.0 // diff --git a/vendor/github.com/denis-tingajkin/go-header/option.go b/vendor/github.com/denis-tingaikin/go-header/option.go similarity index 96% rename from vendor/github.com/denis-tingajkin/go-header/option.go rename to vendor/github.com/denis-tingaikin/go-header/option.go index afbcb62e15..a9689e811e 100644 --- a/vendor/github.com/denis-tingajkin/go-header/option.go +++ b/vendor/github.com/denis-tingaikin/go-header/option.go @@ -1,4 +1,4 @@ -// Copyright (c) 2020 Denis Tingajkin +// Copyright (c) 2020-2022 Denis Tingaikin // // SPDX-License-Identifier: Apache-2.0 // diff --git a/vendor/github.com/denis-tingajkin/go-header/reader.go b/vendor/github.com/denis-tingaikin/go-header/reader.go similarity 
index 98% rename from vendor/github.com/denis-tingajkin/go-header/reader.go rename to vendor/github.com/denis-tingaikin/go-header/reader.go index 2393c94882..9c9e88a177 100644 --- a/vendor/github.com/denis-tingajkin/go-header/reader.go +++ b/vendor/github.com/denis-tingaikin/go-header/reader.go @@ -1,5 +1,5 @@ /* -Copyright (c) 2020 Denis Tingajkin +Copyright (c) 2020-2022 Denis Tingaikin SPDX-License-Identifier: Apache-2.0 diff --git a/vendor/github.com/denis-tingajkin/go-header/value.go b/vendor/github.com/denis-tingaikin/go-header/value.go similarity index 98% rename from vendor/github.com/denis-tingajkin/go-header/value.go rename to vendor/github.com/denis-tingaikin/go-header/value.go index 2a3adcdce2..dcb206d353 100644 --- a/vendor/github.com/denis-tingajkin/go-header/value.go +++ b/vendor/github.com/denis-tingaikin/go-header/value.go @@ -1,4 +1,4 @@ -// Copyright (c) 2020 Denis Tingajkin +// Copyright (c) 2020-2022 Denis Tingaikin // // SPDX-License-Identifier: Apache-2.0 // diff --git a/vendor/github.com/ettle/strcase/.golangci.yml b/vendor/github.com/ettle/strcase/.golangci.yml index 4d31fcc5b4..b7ce85d424 100644 --- a/vendor/github.com/ettle/strcase/.golangci.yml +++ b/vendor/github.com/ettle/strcase/.golangci.yml @@ -14,8 +14,6 @@ linters-settings: - ifElseChain - whyNoLint - wrapperFunc - golint: - min-confidence: 0.5 govet: check-shadowing: true lll: @@ -37,7 +35,6 @@ linters: disable-all: true enable: - bodyclose - - deadcode - depguard - dogsled - dupl @@ -47,26 +44,23 @@ linters: - gocyclo - gofmt - goimports - - golint - goprintffuncname - gosec - gosimple - govet - ineffassign - - interfacer - lll - misspell - nakedret - nolintlint + - revive - rowserrcheck - staticcheck - - structcheck - stylecheck - typecheck - unconvert - unparam - unused - - varcheck - whitespace # don't enable: diff --git a/vendor/github.com/ettle/strcase/.readme.tmpl b/vendor/github.com/ettle/strcase/.readme.tmpl index 135765c40a..4d7a894f0e 100644 --- a/vendor/github.com/ettle/strcase/.readme.tmpl +++ b/vendor/github.com/ettle/strcase/.readme.tmpl @@ -16,10 +16,10 @@ Convert strings to `snake_case`, `camelCase`, `PascalCase`, `kebab-case` and mor ## Index{{if .Consts}} * [Constants](#pkg-constants){{end}}{{if .Vars}} * [Variables](#pkg-variables){{end}}{{- range .Funcs -}}{{$name_html := html .Name}} -* [{{node_html $ .Decl false | sanitize}}](#{{$name_html}}){{- end}}{{- range .Types}}{{$tname_html := html .Name}} -* [type {{$tname_html}}](#{{$tname_html}}){{- range .Funcs}}{{$name_html := html .Name}} - * [{{node_html $ .Decl false | sanitize}}](#{{$name_html}}){{- end}}{{- range .Methods}}{{$name_html := html .Name}} - * [{{node_html $ .Decl false | sanitize}}](#{{$tname_html}}.{{$name_html}}){{- end}}{{- end}}{{- if $.Notes}}{{- range $marker, $item := $.Notes}} +* [{{node_html $ .Decl false | sanitize}}](#func-{{$name_html}}){{- end}}{{- range .Types}}{{$tname_html := html .Name}} +* [type {{$tname_html}}](#type-{{$tname_html}}){{- range .Funcs}}{{$name_html := html .Name}} + * [{{node_html $ .Decl false | sanitize}}](#func-{{$name_html}}){{- end}}{{- range .Methods}}{{$name_html := html .Name}} + * [{{node_html $ .Decl false | sanitize}}](#type-{{$tname_html}}.{{$name_html}}){{- end}}{{- end}}{{- if $.Notes}}{{- range $marker, $item := $.Notes}} * [{{noteTitle $marker | html}}s](#pkg-note-{{$marker}}){{end}}{{end}} {{if $.Examples}} #### Examples{{- range $.Examples}} diff --git a/vendor/github.com/ettle/strcase/Makefile b/vendor/github.com/ettle/strcase/Makefile index 462f8b473a..ac98b4aa54 
100644 --- a/vendor/github.com/ettle/strcase/Makefile +++ b/vendor/github.com/ettle/strcase/Makefile @@ -1,16 +1,19 @@ .PHONY: benchmark docs lint test docs: - which godoc2ghmd || ( go get github.com/DevotedHealth/godoc2ghmd && go mod tidy ) + which godoc2ghmd || go get github.com/DevotedHealth/godoc2ghmd godoc2ghmd -template .readme.tmpl github.com/ettle/strcase > README.md + go mod tidy test: go test -cover ./... lint: - which golangci-lint || ( go get github.com/golangci/golangci-lint/cmd/golangci-lint@v1.27.0 && go mod tidy ) + which golangci-lint || go get github.com/golangci/golangci-lint/cmd/golangci-lint@v1.50.1 golangci-lint run golangci-lint run benchmark/*.go + go mod tidy benchmark: - cd benchmark && go test -bench=. -test.benchmem && go mod tidy + cd benchmark && go test -bench=. -test.benchmem + go mod tidy diff --git a/vendor/github.com/ettle/strcase/README.md b/vendor/github.com/ettle/strcase/README.md index ee165e3e5e..a984da80da 100644 --- a/vendor/github.com/ettle/strcase/README.md +++ b/vendor/github.com/ettle/strcase/README.md @@ -32,21 +32,24 @@ Example usage strcase.ToSnake("FOOBar") // foo_bar // Support Go initialisms - strcase.ToGoCamel("http_response") // HTTPResponse + strcase.ToGoPascal("http_response") // HTTPResponse // Specify case and delimiter strcase.ToCase("HelloWorld", strcase.UpperCase, '.') // HELLO.WORLD -### Why this package +## Why this package + String strcase is pretty straight forward and there are a number of methods to do it. This package is fully featured, more customizable, better tested, and -faster* than other packages and what you would probably whip up yourself. +faster than other packages and what you would probably whip up yourself. ### Unicode support + We work for with unicode strings and pay very little performance penalty for it as we optimized for the common use case of ASCII only strings. ### Customization + You can create a custom caser that changes the behavior to what you want. This customization also reduces the pressure for us to change the default behavior which means that things are more stable for everyone involved. The goal is to @@ -71,19 +74,22 @@ make the common path easy and fast, while making the uncommon path possible. assert.Equal(t, "http_200", c.ToSnake("http200")) ### Initialism support + By default, we use the golint intialisms list. You can customize and override the initialisms if you wish to add additional ones, such as "SSL" or "CMS" or domain specific ones to your industry. - ToGoCamel("http_response") // HTTPResponse + ToGoPascal("http_response") // HTTPResponse ToGoSnake("http_response") // HTTP_response ### Test coverage + We have a wide ranging test suite to make sure that we understand our behavior. Test coverage isn't everything, but we aim for 100% coverage. ### Fast + Optimized to reduce memory allocations with Builder. Benchmarked and optimized around common cases. @@ -96,56 +102,57 @@ Hopefully I was fair to each library and happy to rerun benchmarks differently or reword my commentary based on suggestions or updates. 
- // This package - // Go intialisms and custom casers are slower - BenchmarkToTitle-4 992491 1559 ns/op 32 B/op 1 allocs/op - BenchmarkToSnake-4 1000000 1475 ns/op 32 B/op 1 allocs/op - BenchmarkToSNAKE-4 1000000 1609 ns/op 32 B/op 1 allocs/op - BenchmarkToGoSnake-4 275010 3697 ns/op 44 B/op 4 allocs/op - BenchmarkToCustomCaser-4 342704 4191 ns/op 56 B/op 4 allocs/op + // This package - faster then almost all libraries + // Initialisms are more complicated and slightly slower, but still fast + BenchmarkToTitle-96 9617142 125.7 ns/op 16 B/op 1 allocs/op + BenchmarkToSnake-96 10659919 120.7 ns/op 16 B/op 1 allocs/op + BenchmarkToSNAKE-96 9018282 126.4 ns/op 16 B/op 1 allocs/op + BenchmarkToGoSnake-96 4903687 254.5 ns/op 26 B/op 4 allocs/op + BenchmarkToCustomCaser-96 4434489 265.0 ns/op 28 B/op 4 allocs/op // Segment has very fast snake case and camel case libraries // No features or customization, but very very fast - BenchmarkSegment-4 1303809 938 ns/op 16 B/op 1 allocs/op + BenchmarkSegment-96 33625734 35.54 ns/op 16 B/op 1 allocs/op - // Stdlib strings.Title for comparison, even though it only splits on spaces - BenchmarkToTitleStrings-4 1213467 1164 ns/op 16 B/op 1 allocs/op + // Iancoleman has gotten some performance improvements, but remains + // without unicode support and lacks fine-grained customization + BenchmarkToSnakeIan-96 13141522 92.99 ns/op 16 B/op 1 allocs/op + + // Stdlib strings.Title is deprecated; using golang.org/x.text + BenchmarkGolangOrgXTextCases-96 4665676 262.5 ns/op 272 B/op 2 allocs/op // Other libraries or code snippets // - Most are slower, by up to an order of magnitude - // - None support initialisms or customization + // - No support for initialisms or customization // - Some generate only camelCase or snake_case // - Many lack unicode support - BenchmarkToSnakeStoewer-4 973200 2075 ns/op 64 B/op 2 allocs/op + BenchmarkToSnakeStoewer-96 8095468 148.9 ns/op 64 B/op 2 allocs/op // Copying small rune arrays is slow - BenchmarkToSnakeSiongui-4 264315 4229 ns/op 48 B/op 10 allocs/op - BenchmarkGoValidator-4 206811 5152 ns/op 184 B/op 9 allocs/op + BenchmarkToSnakeSiongui-96 2912593 401.7 ns/op 112 B/op 19 allocs/op + BenchmarkGoValidator-96 3493800 342.6 ns/op 184 B/op 9 allocs/op // String alloction is slow - BenchmarkToSnakeFatih-4 82675 12280 ns/op 392 B/op 26 allocs/op - BenchmarkToSnakeIanColeman-4 83276 13903 ns/op 145 B/op 13 allocs/op + BenchmarkToSnakeFatih-96 1282648 945.1 ns/op 616 B/op 26 allocs/op // Regexp is slow - BenchmarkToSnakeGolangPrograms-4 74448 18586 ns/op 176 B/op 11 allocs/op + BenchmarkToSnakeGolangPrograms-96 778674 1495 ns/op 227 B/op 11 allocs/op // These results aren't a surprise - my initial version of this library was // painfully slow. I think most of us, without spending some time with // profilers and benchmarks, would write also something on the slower side. -### Why not this package +### Zero dependencies + +That's right - zero. We only import the Go standard library. No hassles with +dependencies, licensing, security alerts. + +## Why not this package + If every nanosecond matters and this is used in a tight loop, use segment.io's libraries (https://github.com/segmentio/go-snakecase and https://github.com/segmentio/go-camelcase). They lack features, but make up for -it by being blazing fast. Alternatively, if you need your code to work slightly -differently, fork them and tailor it for your use case. - -If you don't like having external imports, I get it. 
This package only imports -packages for testing, otherwise it only uses the standard library. If that's -not enough, you can use this repo as the foundation for your own. MIT Licensed. +it by being blazing fast. -This package is still relatively new and while I've used it for a while -personally, it doesn't have the miles that other packages do. I've tested this -code agains't their test cases to make sure that there aren't any surprises. +## Migrating from other packages -### Migrating from other packages If you are migrating from from another package, you may find slight differences in output. To reduce the delta, you may find it helpful to use the following custom casers to mimic the behavior of the other package. @@ -161,32 +168,32 @@ custom casers to mimic the behavior of the other package. ## Index -* [func ToCamel(s string) string](#ToCamel) -* [func ToCase(s string, wordCase WordCase, delimiter rune) string](#ToCase) -* [func ToGoCamel(s string) string](#ToGoCamel) -* [func ToGoCase(s string, wordCase WordCase, delimiter rune) string](#ToGoCase) -* [func ToGoKebab(s string) string](#ToGoKebab) -* [func ToGoPascal(s string) string](#ToGoPascal) -* [func ToGoSnake(s string) string](#ToGoSnake) -* [func ToKEBAB(s string) string](#ToKEBAB) -* [func ToKebab(s string) string](#ToKebab) -* [func ToPascal(s string) string](#ToPascal) -* [func ToSNAKE(s string) string](#ToSNAKE) -* [func ToSnake(s string) string](#ToSnake) -* [type Caser](#Caser) - * [func NewCaser(goInitialisms bool, initialismOverrides map[string]bool, splitFn SplitFn) *Caser](#NewCaser) - * [func (c *Caser) ToCamel(s string) string](#Caser.ToCamel) - * [func (c *Caser) ToCase(s string, wordCase WordCase, delimiter rune) string](#Caser.ToCase) - * [func (c *Caser) ToKEBAB(s string) string](#Caser.ToKEBAB) - * [func (c *Caser) ToKebab(s string) string](#Caser.ToKebab) - * [func (c *Caser) ToPascal(s string) string](#Caser.ToPascal) - * [func (c *Caser) ToSNAKE(s string) string](#Caser.ToSNAKE) - * [func (c *Caser) ToSnake(s string) string](#Caser.ToSnake) -* [type SplitAction](#SplitAction) -* [type SplitFn](#SplitFn) - * [func NewSplitFn(delimiters []rune, splitOptions ...SplitOption) SplitFn](#NewSplitFn) -* [type SplitOption](#SplitOption) -* [type WordCase](#WordCase) +* [func ToCamel(s string) string](#func-ToCamel) +* [func ToCase(s string, wordCase WordCase, delimiter rune) string](#func-ToCase) +* [func ToGoCamel(s string) string](#func-ToGoCamel) +* [func ToGoCase(s string, wordCase WordCase, delimiter rune) string](#func-ToGoCase) +* [func ToGoKebab(s string) string](#func-ToGoKebab) +* [func ToGoPascal(s string) string](#func-ToGoPascal) +* [func ToGoSnake(s string) string](#func-ToGoSnake) +* [func ToKEBAB(s string) string](#func-ToKEBAB) +* [func ToKebab(s string) string](#func-ToKebab) +* [func ToPascal(s string) string](#func-ToPascal) +* [func ToSNAKE(s string) string](#func-ToSNAKE) +* [func ToSnake(s string) string](#func-ToSnake) +* [type Caser](#type-Caser) + * [func NewCaser(goInitialisms bool, initialismOverrides map[string]bool, splitFn SplitFn) *Caser](#func-NewCaser) + * [func (c *Caser) ToCamel(s string) string](#type-Caser.ToCamel) + * [func (c *Caser) ToCase(s string, wordCase WordCase, delimiter rune) string](#type-Caser.ToCase) + * [func (c *Caser) ToKEBAB(s string) string](#type-Caser.ToKEBAB) + * [func (c *Caser) ToKebab(s string) string](#type-Caser.ToKebab) + * [func (c *Caser) ToPascal(s string) string](#type-Caser.ToPascal) + * [func (c *Caser) ToSNAKE(s string) 
string](#type-Caser.ToSNAKE) + * [func (c *Caser) ToSnake(s string) string](#type-Caser.ToSnake) +* [type SplitAction](#type-SplitAction) +* [type SplitFn](#type-SplitFn) + * [func NewSplitFn(delimiters []rune, splitOptions ...SplitOption) SplitFn](#func-NewSplitFn) +* [type SplitOption](#type-SplitOption) +* [type WordCase](#type-WordCase) @@ -201,7 +208,7 @@ Also known as lowerCamelCase or mixedCase. -## func [ToCase](./strcase.go#L70) +## func [ToCase](./strcase.go#L72) ``` go func ToCase(s string, wordCase WordCase, delimiter rune) string ``` @@ -209,18 +216,20 @@ ToCase returns words in given case and delimiter. -## func [ToGoCamel](./strcase.go#L65) +## func [ToGoCamel](./strcase.go#L67) ``` go func ToGoCamel(s string) string ``` ToGoCamel returns words in camelCase (capitalized words concatenated together, with first word lower case). Also known as lowerCamelCase or mixedCase. -Respects Go's common initialisms (e.g. httpResponse -> HTTPResponse). +Respects Go's common initialisms, but first word remains lowercased which is +important for code generator use cases (e.g. toJson -> toJSON, httpResponse +-> httpResponse). -## func [ToGoCase](./strcase.go#L77) +## func [ToGoCase](./strcase.go#L79) ``` go func ToGoCase(s string, wordCase WordCase, delimiter rune) string ``` @@ -415,7 +424,7 @@ ToSnake returns words in snake_case (lower case words with underscores). -## type [SplitAction](./split.go#L110) +## type [SplitAction](./split.go#L111) ``` go type SplitAction int ``` @@ -457,7 +466,7 @@ SplitFn defines how to split a string into words -### func [NewSplitFn](./split.go#L14-L17) +### func [NewSplitFn](./split.go#L15-L18) ``` go func NewSplitFn( delimiters []rune, @@ -469,13 +478,12 @@ NewSplitFn returns a SplitFn based on the options provided. NewSplitFn covers the majority of common options that other strcase libraries provide and should allow you to simply create a custom caser. For more complicated use cases, feel free to write your own SplitFn -nolint:gocyclo -## type [SplitOption](./split.go#L93) +## type [SplitOption](./split.go#L94) ``` go type SplitOption int ``` @@ -524,6 +532,9 @@ const ( // TitleCase - Only first letter upper cased (Example) TitleCase // CamelCase - TitleCase except lower case first word (exampleText) + // Notably, even if the first word is an initialism, it will be lower + // cased. This is important for code generators where capital letters + // mean exported functions. i.e. 
jsonString(), not JSONString() CamelCase ) ``` diff --git a/vendor/github.com/ettle/strcase/assert.go b/vendor/github.com/ettle/strcase/assert.go new file mode 100644 index 0000000000..09344e40f2 --- /dev/null +++ b/vendor/github.com/ettle/strcase/assert.go @@ -0,0 +1,24 @@ +package strcase + +// We use a lightweight replacement for testify/assert to reduce dependencies + +// testingT interface allows us to test our assert functions +type testingT interface { + Logf(format string, args ...interface{}) + Fail() +} + +// assertTrue will fail if the value is not true +func assertTrue(t testingT, value bool) { + if !value { + t.Fail() + } +} + +// assertEqual will fail if the two strings are not equal +func assertEqual(t testingT, expected, actual string) { + if expected != actual { + t.Logf("Expected: %s Actual: %s", expected, actual) + t.Fail() + } +} diff --git a/vendor/github.com/ettle/strcase/caser.go b/vendor/github.com/ettle/strcase/caser.go index 891a671897..2e7eb955ba 100644 --- a/vendor/github.com/ettle/strcase/caser.go +++ b/vendor/github.com/ettle/strcase/caser.go @@ -10,17 +10,17 @@ type Caser struct { // // A Caser should be created when you want fine grained control over how the words are split. // -// Notes on function arguments +// Notes on function arguments // -// goInitialisms: Whether to use Golint's intialisms +// goInitialisms: Whether to use Golint's intialisms // -// initialismOverrides: A mapping of extra initialisms -// Keys must be in ALL CAPS. Merged with Golint's if goInitialisms is set. -// Setting a key to false will override Golint's. +// initialismOverrides: A mapping of extra initialisms +// Keys must be in ALL CAPS. Merged with Golint's if goInitialisms is set. +// Setting a key to false will override Golint's. // -// splitFn: How to separate words -// Override the default split function. Consider using NewSplitFn to -// configure one instead of writing your own. +// splitFn: How to separate words +// Override the default split function. Consider using NewSplitFn to +// configure one instead of writing your own. 
func NewCaser(goInitialisms bool, initialismOverrides map[string]bool, splitFn SplitFn) *Caser { c := &Caser{ initialisms: golintInitialisms, diff --git a/vendor/github.com/ettle/strcase/convert.go b/vendor/github.com/ettle/strcase/convert.go index 70fedb1449..cb901d079d 100644 --- a/vendor/github.com/ettle/strcase/convert.go +++ b/vendor/github.com/ettle/strcase/convert.go @@ -29,6 +29,7 @@ const ( // Case 2: UpperCase words, which don't need to support initialisms since everything is in upper case // convertWithoutInitialims only works for to UpperCase and LowerCase +// //nolint:gocyclo func convertWithoutInitialisms(input string, delimiter rune, wordCase WordCase) string { input = strings.TrimSpace(input) @@ -38,7 +39,7 @@ func convertWithoutInitialisms(input string, delimiter rune, wordCase WordCase) } var b strings.Builder - b.Grow(len(input) * 2) // In case we need to write delimiters where they weren't before + b.Grow(len(input) + 4) // In case we need to write delimiters where they weren't before var prev, curr rune next := runes[0] // 0 length will have already returned so safe to index @@ -90,13 +91,14 @@ func convertWithoutInitialisms(input string, delimiter rune, wordCase WordCase) // Must be original case b.WriteRune(curr) } - inWord = inWord || true + inWord = true } return b.String() } // convertWithGoInitialisms changes a input string to a certain case with a // delimiter, respecting go initialisms but not skip runes +// //nolint:gocyclo func convertWithGoInitialisms(input string, delimiter rune, wordCase WordCase) string { input = strings.TrimSpace(input) @@ -106,7 +108,7 @@ func convertWithGoInitialisms(input string, delimiter rune, wordCase WordCase) s } var b strings.Builder - b.Grow(len(input) * 2) // In case we need to write delimiters where they weren't before + b.Grow(len(input) + 4) // In case we need to write delimiters where they weren't before firstWord := true @@ -122,10 +124,15 @@ func convertWithGoInitialisms(input string, delimiter rune, wordCase WordCase) s // Don't bother with initialisms if the word is longer than 5 // A quick proxy to avoid the extra memory allocations if end-start <= 5 { - key := strings.ToUpper(string(runes[start:end])) - if golintInitialisms[key] { + var word strings.Builder + word.Grow(end - start) + for i := start; i < end; i++ { + word.WriteRune(toUpper(runes[i])) + } + w := word.String() + if golintInitialisms[w] { if !firstWord || wordCase != CamelCase { - b.WriteString(key) + b.WriteString(w) firstWord = false return } @@ -188,6 +195,7 @@ func convertWithGoInitialisms(input string, delimiter rune, wordCase WordCase) s // convert changes a input string to a certain case with a delimiter, // respecting arbitrary initialisms and skip characters +// //nolint:gocyclo func convert(input string, fn SplitFn, delimiter rune, wordCase WordCase, initialisms map[string]bool) string { @@ -198,7 +206,7 @@ func convert(input string, fn SplitFn, delimiter rune, wordCase WordCase, } var b strings.Builder - b.Grow(len(input) * 2) // In case we need to write delimiters where they weren't before + b.Grow(len(input) + 4) // In case we need to write delimiters where they weren't before firstWord := true var skipIndexes []int @@ -221,13 +229,14 @@ func convert(input string, fn SplitFn, delimiter rune, wordCase WordCase, // I'm open to it if there is a use case if initialisms != nil { var word strings.Builder + word.Grow(end - start) for i := start; i < end; i++ { word.WriteRune(toUpper(runes[i])) } - key := word.String() - if initialisms[key] { + w := 
word.String() + if initialisms[w] { if !firstWord || wordCase != CamelCase { - b.WriteString(key) + b.WriteString(w) firstWord = false return } diff --git a/vendor/github.com/ettle/strcase/doc.go b/vendor/github.com/ettle/strcase/doc.go index b898a4e45f..c3bf14a8f5 100644 --- a/vendor/github.com/ettle/strcase/doc.go +++ b/vendor/github.com/ettle/strcase/doc.go @@ -2,78 +2,78 @@ Package strcase is a package for converting strings into various word cases (e.g. snake_case, camelCase) - go get -u github.com/ettle/strcase + go get -u github.com/ettle/strcase Example usage - strcase.ToSnake("Hello World") // hello_world - strcase.ToSNAKE("Hello World") // HELLO_WORLD + strcase.ToSnake("Hello World") // hello_world + strcase.ToSNAKE("Hello World") // HELLO_WORLD - strcase.ToKebab("helloWorld") // hello-world - strcase.ToKEBAB("helloWorld") // HELLO-WORLD + strcase.ToKebab("helloWorld") // hello-world + strcase.ToKEBAB("helloWorld") // HELLO-WORLD - strcase.ToPascal("hello-world") // HelloWorld - strcase.ToCamel("hello-world") // helloWorld + strcase.ToPascal("hello-world") // HelloWorld + strcase.ToCamel("hello-world") // helloWorld - // Handle odd cases - strcase.ToSnake("FOOBar") // foo_bar + // Handle odd cases + strcase.ToSnake("FOOBar") // foo_bar - // Support Go initialisms - strcase.ToGoPascal("http_response") // HTTPResponse + // Support Go initialisms + strcase.ToGoPascal("http_response") // HTTPResponse - // Specify case and delimiter - strcase.ToCase("HelloWorld", strcase.UpperCase, '.') // HELLO.WORLD + // Specify case and delimiter + strcase.ToCase("HelloWorld", strcase.UpperCase, '.') // HELLO.WORLD -Why this package +## Why this package String strcase is pretty straight forward and there are a number of methods to do it. This package is fully featured, more customizable, better tested, and -faster* than other packages and what you would probably whip up yourself. +faster than other packages and what you would probably whip up yourself. -Unicode support +### Unicode support We work for with unicode strings and pay very little performance penalty for it as we optimized for the common use case of ASCII only strings. -Customization +### Customization You can create a custom caser that changes the behavior to what you want. This customization also reduces the pressure for us to change the default behavior which means that things are more stable for everyone involved. The goal is to make the common path easy and fast, while making the uncommon path possible. - c := NewCaser( - // Use Go's default initialisms e.g. ID, HTML - true, - // Override initialisms (e.g. don't initialize HTML but initialize SSL - map[string]bool{"SSL": true, "HTML": false}, - // Write your own custom SplitFn - // - NewSplitFn( - []rune{'*', '.', ','}, - SplitCase, - SplitAcronym, - PreserveNumberFormatting, - SplitBeforeNumber, - SplitAfterNumber, - )) - assert.Equal(t, "http_200", c.ToSnake("http200")) - -Initialism support + c := NewCaser( + // Use Go's default initialisms e.g. ID, HTML + true, + // Override initialisms (e.g. don't initialize HTML but initialize SSL + map[string]bool{"SSL": true, "HTML": false}, + // Write your own custom SplitFn + // + NewSplitFn( + []rune{'*', '.', ','}, + SplitCase, + SplitAcronym, + PreserveNumberFormatting, + SplitBeforeNumber, + SplitAfterNumber, + )) + assert.Equal(t, "http_200", c.ToSnake("http200")) + +### Initialism support By default, we use the golint intialisms list. 
You can customize and override the initialisms if you wish to add additional ones, such as "SSL" or "CMS" or domain specific ones to your industry. - ToGoPascal("http_response") // HTTPResponse - ToGoSnake("http_response") // HTTP_response + ToGoPascal("http_response") // HTTPResponse + ToGoSnake("http_response") // HTTP_response -Test coverage +### Test coverage We have a wide ranging test suite to make sure that we understand our behavior. Test coverage isn't everything, but we aim for 100% coverage. -Fast +### Fast Optimized to reduce memory allocations with Builder. Benchmarked and optimized around common cases. @@ -86,70 +86,65 @@ common cases have a large performance impact. Hopefully I was fair to each library and happy to rerun benchmarks differently or reword my commentary based on suggestions or updates. - // This package - faster then almost all libraries - // Initialisms are more complicated and slightly slower, but still faster then other libraries that do less - BenchmarkToTitle-4 7821166 221 ns/op 32 B/op 1 allocs/op - BenchmarkToSnake-4 9378589 202 ns/op 32 B/op 1 allocs/op - BenchmarkToSNAKE-4 6174453 223 ns/op 32 B/op 1 allocs/op - BenchmarkToGoSnake-4 3114266 434 ns/op 44 B/op 4 allocs/op - BenchmarkToCustomCaser-4 2973855 448 ns/op 56 B/op 4 allocs/op - - // Segment has very fast snake case and camel case libraries - // No features or customization, but very very fast - BenchmarkSegment-4 24003495 64.9 ns/op 16 B/op 1 allocs/op - - // Stdlib strings.Title for comparison, even though it only splits on spaces - BenchmarkToTitleStrings-4 11259376 161 ns/op 16 B/op 1 allocs/op - - // Other libraries or code snippets - // - Most are slower, by up to an order of magnitude - // - None support initialisms or customization - // - Some generate only camelCase or snake_case - // - Many lack unicode support - BenchmarkToSnakeStoewer-4 7103268 297 ns/op 64 B/op 2 allocs/op - // Copying small rune arrays is slow - BenchmarkToSnakeSiongui-4 3710768 413 ns/op 48 B/op 10 allocs/op - BenchmarkGoValidator-4 2416479 1049 ns/op 184 B/op 9 allocs/op - // String alloction is slow - BenchmarkToSnakeFatih-4 1000000 2407 ns/op 624 B/op 26 allocs/op - BenchmarkToSnakeIanColeman-4 1005766 1426 ns/op 160 B/op 13 allocs/op - // Regexp is slow - BenchmarkToSnakeGolangPrograms-4 614689 2237 ns/op 225 B/op 11 allocs/op - - - - // These results aren't a surprise - my initial version of this library was - // painfully slow. I think most of us, without spending some time with - // profilers and benchmarks, would write also something on the slower side. 
- - -Why not this package + // This package - faster then almost all libraries + // Initialisms are more complicated and slightly slower, but still fast + BenchmarkToTitle-96 9617142 125.7 ns/op 16 B/op 1 allocs/op + BenchmarkToSnake-96 10659919 120.7 ns/op 16 B/op 1 allocs/op + BenchmarkToSNAKE-96 9018282 126.4 ns/op 16 B/op 1 allocs/op + BenchmarkToGoSnake-96 4903687 254.5 ns/op 26 B/op 4 allocs/op + BenchmarkToCustomCaser-96 4434489 265.0 ns/op 28 B/op 4 allocs/op + + // Segment has very fast snake case and camel case libraries + // No features or customization, but very very fast + BenchmarkSegment-96 33625734 35.54 ns/op 16 B/op 1 allocs/op + + // Iancoleman has gotten some performance improvements, but remains + // without unicode support and lacks fine-grained customization + BenchmarkToSnakeIan-96 13141522 92.99 ns/op 16 B/op 1 allocs/op + + // Stdlib strings.Title is deprecated; using golang.org/x.text + BenchmarkGolangOrgXTextCases-96 4665676 262.5 ns/op 272 B/op 2 allocs/op + + // Other libraries or code snippets + // - Most are slower, by up to an order of magnitude + // - No support for initialisms or customization + // - Some generate only camelCase or snake_case + // - Many lack unicode support + BenchmarkToSnakeStoewer-96 8095468 148.9 ns/op 64 B/op 2 allocs/op + // Copying small rune arrays is slow + BenchmarkToSnakeSiongui-96 2912593 401.7 ns/op 112 B/op 19 allocs/op + BenchmarkGoValidator-96 3493800 342.6 ns/op 184 B/op 9 allocs/op + // String alloction is slow + BenchmarkToSnakeFatih-96 1282648 945.1 ns/op 616 B/op 26 allocs/op + // Regexp is slow + BenchmarkToSnakeGolangPrograms-96 778674 1495 ns/op 227 B/op 11 allocs/op + + // These results aren't a surprise - my initial version of this library was + // painfully slow. I think most of us, without spending some time with + // profilers and benchmarks, would write also something on the slower side. + +### Zero dependencies + +That's right - zero. We only import the Go standard library. No hassles with +dependencies, licensing, security alerts. + +## Why not this package If every nanosecond matters and this is used in a tight loop, use segment.io's libraries (https://github.com/segmentio/go-snakecase and https://github.com/segmentio/go-camelcase). They lack features, but make up for -it by being blazing fast. Alternatively, if you need your code to work slightly -differently, fork them and tailor it for your use case. - -If you don't like having external imports, I get it. This package only imports -packages for testing, otherwise it only uses the standard library. If that's -not enough, you can use this repo as the foundation for your own. MIT Licensed. +it by being blazing fast. -This package is still relatively new and while I've used it for a while -personally, it doesn't have the miles that other packages do. I've tested this -code agains't their test cases to make sure that there aren't any surprises. - -Migrating from other packages +## Migrating from other packages If you are migrating from from another package, you may find slight differences in output. To reduce the delta, you may find it helpful to use the following custom casers to mimic the behavior of the other package. 
- // From https://github.com/iancoleman/strcase - var c = NewCaser(false, nil, NewSplitFn([]rune{'_', '-', '.'}, SplitCase, SplitAcronym, SplitBeforeNumber)) - - // From https://github.com/stoewer/go-strcase - var c = NewCaser(false, nil, NewSplitFn([]rune{'_', '-'}, SplitCase), SplitAcronym) + // From https://github.com/iancoleman/strcase + var c = NewCaser(false, nil, NewSplitFn([]rune{'_', '-', '.'}, SplitCase, SplitAcronym, SplitBeforeNumber)) + // From https://github.com/stoewer/go-strcase + var c = NewCaser(false, nil, NewSplitFn([]rune{'_', '-'}, SplitCase), SplitAcronym) */ package strcase diff --git a/vendor/github.com/ettle/strcase/split.go b/vendor/github.com/ettle/strcase/split.go index 84381106bc..32bc29759a 100644 --- a/vendor/github.com/ettle/strcase/split.go +++ b/vendor/github.com/ettle/strcase/split.go @@ -10,6 +10,7 @@ type SplitFn func(prev, curr, next rune) SplitAction // NewSplitFn covers the majority of common options that other strcase // libraries provide and should allow you to simply create a custom caser. // For more complicated use cases, feel free to write your own SplitFn +// //nolint:gocyclo func NewSplitFn( delimiters []rune, diff --git a/vendor/github.com/fatih/color/README.md b/vendor/github.com/fatih/color/README.md index 5152bf59bf..be82827cac 100644 --- a/vendor/github.com/fatih/color/README.md +++ b/vendor/github.com/fatih/color/README.md @@ -7,7 +7,6 @@ suits you. ![Color](https://user-images.githubusercontent.com/438920/96832689-03b3e000-13f4-11eb-9803-46f4c4de3406.jpg) - ## Install ```bash @@ -124,17 +123,17 @@ fmt.Println("All text will now be bold magenta.") ``` ### Disable/Enable color - + There might be a case where you want to explicitly disable/enable color output. the `go-isatty` package will automatically disable color output for non-tty output streams (for example if the output were piped directly to `less`). The `color` package also disables color output if the [`NO_COLOR`](https://no-color.org) environment -variable is set (regardless of its value). +variable is set to a non-empty string. -`Color` has support to disable/enable colors programatically both globally and +`Color` has support to disable/enable colors programmatically both globally and for single color definitions. For example suppose you have a CLI app and a -`--no-color` bool flag. You can easily disable the color output with: +`-no-color` bool flag. You can easily disable the color output with: ```go var flagNoColor = flag.Bool("no-color", false, "Disable color output") @@ -167,11 +166,10 @@ To output color in GitHub Actions (or other CI systems that support ANSI colors) * Save/Return previous values * Evaluate fmt.Formatter interface - ## Credits - * [Fatih Arslan](https://github.com/fatih) - * Windows support via @mattn: [colorable](https://github.com/mattn/go-colorable) +* [Fatih Arslan](https://github.com/fatih) +* Windows support via @mattn: [colorable](https://github.com/mattn/go-colorable) ## License diff --git a/vendor/github.com/fatih/color/color.go b/vendor/github.com/fatih/color/color.go index 98a60f3c88..c4234287dc 100644 --- a/vendor/github.com/fatih/color/color.go +++ b/vendor/github.com/fatih/color/color.go @@ -19,10 +19,10 @@ var ( // set (regardless of its value). This is a global option and affects all // colors. For more control over each color block use the methods // DisableColor() individually. 
- NoColor = noColorExists() || os.Getenv("TERM") == "dumb" || + NoColor = noColorIsSet() || os.Getenv("TERM") == "dumb" || (!isatty.IsTerminal(os.Stdout.Fd()) && !isatty.IsCygwinTerminal(os.Stdout.Fd())) - // Output defines the standard output of the print functions. By default + // Output defines the standard output of the print functions. By default, // os.Stdout is used. Output = colorable.NewColorableStdout() @@ -35,10 +35,9 @@ var ( colorsCacheMu sync.Mutex // protects colorsCache ) -// noColorExists returns true if the environment variable NO_COLOR exists. -func noColorExists() bool { - _, exists := os.LookupEnv("NO_COLOR") - return exists +// noColorIsSet returns true if the environment variable NO_COLOR is set to a non-empty string. +func noColorIsSet() bool { + return os.Getenv("NO_COLOR") != "" } // Color defines a custom color object which is defined by SGR parameters. @@ -66,6 +65,29 @@ const ( CrossedOut ) +const ( + ResetBold Attribute = iota + 22 + ResetItalic + ResetUnderline + ResetBlinking + _ + ResetReversed + ResetConcealed + ResetCrossedOut +) + +var mapResetAttributes map[Attribute]Attribute = map[Attribute]Attribute{ + Bold: ResetBold, + Faint: ResetBold, + Italic: ResetItalic, + Underline: ResetUnderline, + BlinkSlow: ResetBlinking, + BlinkRapid: ResetBlinking, + ReverseVideo: ResetReversed, + Concealed: ResetConcealed, + CrossedOut: ResetCrossedOut, +} + // Foreground text colors const ( FgBlack Attribute = iota + 30 @@ -120,7 +142,7 @@ func New(value ...Attribute) *Color { params: make([]Attribute, 0), } - if noColorExists() { + if noColorIsSet() { c.noColor = boolPtr(true) } @@ -152,7 +174,7 @@ func (c *Color) Set() *Color { return c } - fmt.Fprintf(Output, c.format()) + fmt.Fprint(Output, c.format()) return c } @@ -164,16 +186,21 @@ func (c *Color) unset() { Unset() } -func (c *Color) setWriter(w io.Writer) *Color { +// SetWriter is used to set the SGR sequence with the given io.Writer. This is +// a low-level function, and users should use the higher-level functions, such +// as color.Fprint, color.Print, etc. +func (c *Color) SetWriter(w io.Writer) *Color { if c.isNoColorSet() { return c } - fmt.Fprintf(w, c.format()) + fmt.Fprint(w, c.format()) return c } -func (c *Color) unsetWriter(w io.Writer) { +// UnsetWriter resets all escape attributes and clears the output with the give +// io.Writer. Usually should be called after SetWriter(). +func (c *Color) UnsetWriter(w io.Writer) { if c.isNoColorSet() { return } @@ -192,20 +219,14 @@ func (c *Color) Add(value ...Attribute) *Color { return c } -func (c *Color) prepend(value Attribute) { - c.params = append(c.params, 0) - copy(c.params[1:], c.params[0:]) - c.params[0] = value -} - // Fprint formats using the default formats for its operands and writes to w. // Spaces are added between operands when neither is a string. // It returns the number of bytes written and any write error encountered. // On Windows, users should wrap w with colorable.NewColorable() if w is of // type *os.File. func (c *Color) Fprint(w io.Writer, a ...interface{}) (n int, err error) { - c.setWriter(w) - defer c.unsetWriter(w) + c.SetWriter(w) + defer c.UnsetWriter(w) return fmt.Fprint(w, a...) } @@ -227,8 +248,8 @@ func (c *Color) Print(a ...interface{}) (n int, err error) { // On Windows, users should wrap w with colorable.NewColorable() if w is of // type *os.File. 
func (c *Color) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { - c.setWriter(w) - defer c.unsetWriter(w) + c.SetWriter(w) + defer c.UnsetWriter(w) return fmt.Fprintf(w, format, a...) } @@ -248,10 +269,7 @@ func (c *Color) Printf(format string, a ...interface{}) (n int, err error) { // On Windows, users should wrap w with colorable.NewColorable() if w is of // type *os.File. func (c *Color) Fprintln(w io.Writer, a ...interface{}) (n int, err error) { - c.setWriter(w) - defer c.unsetWriter(w) - - return fmt.Fprintln(w, a...) + return fmt.Fprintln(w, c.wrap(fmt.Sprint(a...))) } // Println formats using the default formats for its operands and writes to @@ -260,10 +278,7 @@ func (c *Color) Fprintln(w io.Writer, a ...interface{}) (n int, err error) { // encountered. This is the standard fmt.Print() method wrapped with the given // color. func (c *Color) Println(a ...interface{}) (n int, err error) { - c.Set() - defer c.unset() - - return fmt.Fprintln(Output, a...) + return fmt.Fprintln(Output, c.wrap(fmt.Sprint(a...))) } // Sprint is just like Print, but returns a string instead of printing it. @@ -273,7 +288,7 @@ func (c *Color) Sprint(a ...interface{}) string { // Sprintln is just like Println, but returns a string instead of printing it. func (c *Color) Sprintln(a ...interface{}) string { - return c.wrap(fmt.Sprintln(a...)) + return fmt.Sprintln(c.Sprint(a...)) } // Sprintf is just like Printf, but returns a string instead of printing it. @@ -355,7 +370,7 @@ func (c *Color) SprintfFunc() func(format string, a ...interface{}) string { // string. Windows users should use this in conjunction with color.Output. func (c *Color) SprintlnFunc() func(a ...interface{}) string { return func(a ...interface{}) string { - return c.wrap(fmt.Sprintln(a...)) + return fmt.Sprintln(c.Sprint(a...)) } } @@ -385,7 +400,18 @@ func (c *Color) format() string { } func (c *Color) unformat() string { - return fmt.Sprintf("%s[%dm", escape, Reset) + //return fmt.Sprintf("%s[%dm", escape, Reset) + //for each element in sequence let's use the speficic reset escape, ou the generic one if not found + format := make([]string, len(c.params)) + for i, v := range c.params { + format[i] = strconv.Itoa(int(Reset)) + ra, ok := mapResetAttributes[v] + if ok { + format[i] = strconv.Itoa(int(ra)) + } + } + + return fmt.Sprintf("%s[%sm", escape, strings.Join(format, ";")) } // DisableColor disables the color output. Useful to not change any existing @@ -396,7 +422,7 @@ func (c *Color) DisableColor() { } // EnableColor enables the color output. Use it in conjunction with -// DisableColor(). Otherwise this method has no side effects. +// DisableColor(). Otherwise, this method has no side effects. func (c *Color) EnableColor() { c.noColor = boolPtr(false) } @@ -413,6 +439,12 @@ func (c *Color) isNoColorSet() bool { // Equals returns a boolean value indicating whether two colors are equal. func (c *Color) Equals(c2 *Color) bool { + if c == nil && c2 == nil { + return true + } + if c == nil || c2 == nil { + return false + } if len(c.params) != len(c2.params) { return false } diff --git a/vendor/github.com/fatih/color/color_windows.go b/vendor/github.com/fatih/color/color_windows.go new file mode 100644 index 0000000000..be01c558e5 --- /dev/null +++ b/vendor/github.com/fatih/color/color_windows.go @@ -0,0 +1,19 @@ +package color + +import ( + "os" + + "golang.org/x/sys/windows" +) + +func init() { + // Opt-in for ansi color support for current process. 
+ // https://learn.microsoft.com/en-us/windows/console/console-virtual-terminal-sequences#output-sequences + var outMode uint32 + out := windows.Handle(os.Stdout.Fd()) + if err := windows.GetConsoleMode(out, &outMode); err != nil { + return + } + outMode |= windows.ENABLE_PROCESSED_OUTPUT | windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING + _ = windows.SetConsoleMode(out, outMode) +} diff --git a/vendor/github.com/fatih/color/doc.go b/vendor/github.com/fatih/color/doc.go index 04541de786..9491ad5413 100644 --- a/vendor/github.com/fatih/color/doc.go +++ b/vendor/github.com/fatih/color/doc.go @@ -5,106 +5,105 @@ that suits you. Use simple and default helper functions with predefined foreground colors: - color.Cyan("Prints text in cyan.") + color.Cyan("Prints text in cyan.") - // a newline will be appended automatically - color.Blue("Prints %s in blue.", "text") + // a newline will be appended automatically + color.Blue("Prints %s in blue.", "text") - // More default foreground colors.. - color.Red("We have red") - color.Yellow("Yellow color too!") - color.Magenta("And many others ..") + // More default foreground colors.. + color.Red("We have red") + color.Yellow("Yellow color too!") + color.Magenta("And many others ..") - // Hi-intensity colors - color.HiGreen("Bright green color.") - color.HiBlack("Bright black means gray..") - color.HiWhite("Shiny white color!") + // Hi-intensity colors + color.HiGreen("Bright green color.") + color.HiBlack("Bright black means gray..") + color.HiWhite("Shiny white color!") -However there are times where custom color mixes are required. Below are some +However, there are times when custom color mixes are required. Below are some examples to create custom color objects and use the print functions of each separate color object. - // Create a new color object - c := color.New(color.FgCyan).Add(color.Underline) - c.Println("Prints cyan text with an underline.") + // Create a new color object + c := color.New(color.FgCyan).Add(color.Underline) + c.Println("Prints cyan text with an underline.") - // Or just add them to New() - d := color.New(color.FgCyan, color.Bold) - d.Printf("This prints bold cyan %s\n", "too!.") + // Or just add them to New() + d := color.New(color.FgCyan, color.Bold) + d.Printf("This prints bold cyan %s\n", "too!.") - // Mix up foreground and background colors, create new mixes! - red := color.New(color.FgRed) + // Mix up foreground and background colors, create new mixes! 
+ red := color.New(color.FgRed) - boldRed := red.Add(color.Bold) - boldRed.Println("This will print text in bold red.") + boldRed := red.Add(color.Bold) + boldRed.Println("This will print text in bold red.") - whiteBackground := red.Add(color.BgWhite) - whiteBackground.Println("Red text with White background.") + whiteBackground := red.Add(color.BgWhite) + whiteBackground.Println("Red text with White background.") - // Use your own io.Writer output - color.New(color.FgBlue).Fprintln(myWriter, "blue color!") + // Use your own io.Writer output + color.New(color.FgBlue).Fprintln(myWriter, "blue color!") - blue := color.New(color.FgBlue) - blue.Fprint(myWriter, "This will print text in blue.") + blue := color.New(color.FgBlue) + blue.Fprint(myWriter, "This will print text in blue.") You can create PrintXxx functions to simplify even more: - // Create a custom print function for convenient - red := color.New(color.FgRed).PrintfFunc() - red("warning") - red("error: %s", err) + // Create a custom print function for convenient + red := color.New(color.FgRed).PrintfFunc() + red("warning") + red("error: %s", err) - // Mix up multiple attributes - notice := color.New(color.Bold, color.FgGreen).PrintlnFunc() - notice("don't forget this...") + // Mix up multiple attributes + notice := color.New(color.Bold, color.FgGreen).PrintlnFunc() + notice("don't forget this...") You can also FprintXxx functions to pass your own io.Writer: - blue := color.New(FgBlue).FprintfFunc() - blue(myWriter, "important notice: %s", stars) - - // Mix up with multiple attributes - success := color.New(color.Bold, color.FgGreen).FprintlnFunc() - success(myWriter, don't forget this...") + blue := color.New(FgBlue).FprintfFunc() + blue(myWriter, "important notice: %s", stars) + // Mix up with multiple attributes + success := color.New(color.Bold, color.FgGreen).FprintlnFunc() + success(myWriter, don't forget this...") Or create SprintXxx functions to mix strings with other non-colorized strings: - yellow := New(FgYellow).SprintFunc() - red := New(FgRed).SprintFunc() + yellow := New(FgYellow).SprintFunc() + red := New(FgRed).SprintFunc() - fmt.Printf("this is a %s and this is %s.\n", yellow("warning"), red("error")) + fmt.Printf("this is a %s and this is %s.\n", yellow("warning"), red("error")) - info := New(FgWhite, BgGreen).SprintFunc() - fmt.Printf("this %s rocks!\n", info("package")) + info := New(FgWhite, BgGreen).SprintFunc() + fmt.Printf("this %s rocks!\n", info("package")) Windows support is enabled by default. All Print functions work as intended. -However only for color.SprintXXX functions, user should use fmt.FprintXXX and +However, only for color.SprintXXX functions, user should use fmt.FprintXXX and set the output to color.Output: - fmt.Fprintf(color.Output, "Windows support: %s", color.GreenString("PASS")) + fmt.Fprintf(color.Output, "Windows support: %s", color.GreenString("PASS")) - info := New(FgWhite, BgGreen).SprintFunc() - fmt.Fprintf(color.Output, "this %s rocks!\n", info("package")) + info := New(FgWhite, BgGreen).SprintFunc() + fmt.Fprintf(color.Output, "this %s rocks!\n", info("package")) Using with existing code is possible. Just use the Set() method to set the standard output to the given parameters. That way a rewrite of an existing code is not required. - // Use handy standard colors. - color.Set(color.FgYellow) + // Use handy standard colors. 
+ color.Set(color.FgYellow) - fmt.Println("Existing text will be now in Yellow") - fmt.Printf("This one %s\n", "too") + fmt.Println("Existing text will be now in Yellow") + fmt.Printf("This one %s\n", "too") - color.Unset() // don't forget to unset + color.Unset() // don't forget to unset - // You can mix up parameters - color.Set(color.FgMagenta, color.Bold) - defer color.Unset() // use it in your function + // You can mix up parameters + color.Set(color.FgMagenta, color.Bold) + defer color.Unset() // use it in your function - fmt.Println("All text will be now bold magenta.") + fmt.Println("All text will be now bold magenta.") There might be a case where you want to disable color output (for example to pipe the standard output of your app to somewhere else). `Color` has support to @@ -112,24 +111,24 @@ disable colors both globally and for single color definition. For example suppose you have a CLI app and a `--no-color` bool flag. You can easily disable the color output with: - var flagNoColor = flag.Bool("no-color", false, "Disable color output") + var flagNoColor = flag.Bool("no-color", false, "Disable color output") - if *flagNoColor { - color.NoColor = true // disables colorized output - } + if *flagNoColor { + color.NoColor = true // disables colorized output + } You can also disable the color by setting the NO_COLOR environment variable to any value. It also has support for single color definitions (local). You can disable/enable color output on the fly: - c := color.New(color.FgCyan) - c.Println("Prints cyan text") + c := color.New(color.FgCyan) + c.Println("Prints cyan text") - c.DisableColor() - c.Println("This is printed without any color") + c.DisableColor() + c.Println("This is printed without any color") - c.EnableColor() - c.Println("This prints again cyan...") + c.EnableColor() + c.Println("This prints again cyan...") */ package color diff --git a/vendor/github.com/firefart/nonamedreturns/LICENSE b/vendor/github.com/firefart/nonamedreturns/LICENSE new file mode 100644 index 0000000000..f288702d2f --- /dev/null +++ b/vendor/github.com/firefart/nonamedreturns/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. 
Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. 
Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. 
You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. 
+ + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. 
In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. 
If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). 
To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. 
+ + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. 
+ + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + <one line to give the program's name and a brief idea of what it does.> + Copyright (C) <year> <name of author> + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <https://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + <program> Copyright (C) <year> <name of author> + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +<https://www.gnu.org/licenses/>. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/vendor/github.com/firefart/nonamedreturns/analyzer/analyzer.go b/vendor/github.com/firefart/nonamedreturns/analyzer/analyzer.go new file mode 100644 index 0000000000..6ad97ab491 --- /dev/null +++ b/vendor/github.com/firefart/nonamedreturns/analyzer/analyzer.go @@ -0,0 +1,134 @@ +package analyzer + +import ( + "flag" + "go/ast" + "go/types" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" +) + +const FlagReportErrorInDefer = "report-error-in-defer" + +var Analyzer = &analysis.Analyzer{ + Name: "nonamedreturns", + Doc: "Reports all named returns", + Flags: flags(), + Run: run, + Requires: []*analysis.Analyzer{inspect.Analyzer}, +} + +func flags() flag.FlagSet { + fs := flag.FlagSet{} + fs.Bool(FlagReportErrorInDefer, false, "report named error if it is assigned inside defer") + return fs +} + +func run(pass *analysis.Pass) (interface{}, error) { + reportErrorInDefer := pass.Analyzer.Flags.Lookup(FlagReportErrorInDefer).Value.String() == "true" + errorType := types.Universe.Lookup("error").Type() + + inspector := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + + // only filter function defintions + nodeFilter := []ast.Node{ + (*ast.FuncDecl)(nil), + (*ast.FuncLit)(nil), + } + + inspector.Preorder(nodeFilter, func(node ast.Node) { + var funcResults *ast.FieldList + var funcBody *ast.BlockStmt + + switch n := node.(type) { + case *ast.FuncLit: + funcResults = n.Type.Results + funcBody = n.Body + case *ast.FuncDecl: + funcResults = n.Type.Results + funcBody = n.Body + default: + return + } + + // no return values + if funcResults == nil { + return + } + + resultsList := funcResults.List + + for _, p := range resultsList { + if len(p.Names) == 0 { + // all good, the parameter is not named + continue + } + + for _, n := range p.Names { + if n.Name == "_" { + continue + } + + if !reportErrorInDefer && + types.Identical(pass.TypesInfo.TypeOf(p.Type), errorType) && + findDeferWithVariableAssignment(funcBody, pass.TypesInfo, pass.TypesInfo.ObjectOf(n)) { + continue + } + + pass.Reportf(node.Pos(), "named return %q with type %q found", n.Name, types.ExprString(p.Type)) + } + } + }) + + return nil, nil +} + +func findDeferWithVariableAssignment(body *ast.BlockStmt, info *types.Info, variable types.Object) bool { + found := false + + ast.Inspect(body, func(node ast.Node) bool { + if found { + return false // stop inspection + } + + if d, ok := node.(*ast.DeferStmt); ok { + if fn, ok2 := d.Call.Fun.(*ast.FuncLit); ok2 { + if findVariableAssignment(fn.Body, info, variable) { + found = true + return false + } + } + } + + return true + }) + + return found +} + +func findVariableAssignment(body *ast.BlockStmt, info *types.Info, variable types.Object) bool { + found := false + + ast.Inspect(body, func(node ast.Node) bool { + if found { + return false // stop inspection + } + + if a, ok := node.(*ast.AssignStmt); ok { + for _, lh := range a.Lhs { + if i, ok2 := lh.(*ast.Ident); ok2 { + if info.ObjectOf(i) == variable { + found = true + return false + } + } + } + } + + return true + }) + + return found +} diff --git a/vendor/github.com/ghostiam/protogetter/.goreleaser.yaml b/vendor/github.com/ghostiam/protogetter/.goreleaser.yaml new file mode 100644 index 0000000000..a70d0fb006 --- /dev/null +++ b/vendor/github.com/ghostiam/protogetter/.goreleaser.yaml @@ -0,0 +1,24 @@ +before: + hooks: + - go mod tidy +builds: + - id: protogetter + main: ./cmd/protogetter + binary: protogetter + env: + - CGO_ENABLED=0 + goos: + 
- linux + - windows + - darwin +checksum: + name_template: 'checksums.txt' +snapshot: + name_template: "{{ incpatch .Version }}-next" +changelog: + sort: asc + filters: + exclude: + - '^docs:' + - '^test:' + - '^ci:' \ No newline at end of file diff --git a/vendor/github.com/ghostiam/protogetter/LICENSE b/vendor/github.com/ghostiam/protogetter/LICENSE new file mode 100644 index 0000000000..b4449661b7 --- /dev/null +++ b/vendor/github.com/ghostiam/protogetter/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2023 Vladislav Fursov (GhostIAm) + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/ghostiam/protogetter/Makefile b/vendor/github.com/ghostiam/protogetter/Makefile new file mode 100644 index 0000000000..4c2a62af18 --- /dev/null +++ b/vendor/github.com/ghostiam/protogetter/Makefile @@ -0,0 +1,9 @@ +.PHONY: test +test: + $(MAKE) -C testdata vendor + go test -v ./... + +.PHONY: install +install: + go install ./cmd/protogetter + @echo "Installed in $(shell which protogetter)" diff --git a/vendor/github.com/ghostiam/protogetter/README.md b/vendor/github.com/ghostiam/protogetter/README.md new file mode 100644 index 0000000000..c033e9597f --- /dev/null +++ b/vendor/github.com/ghostiam/protogetter/README.md @@ -0,0 +1,73 @@ +# Protogetter +Welcome to the Protogetter project! + +## Overview +Protogetter is a linter developed specifically for Go programmers working with nested `protobuf` types.\ +It's designed to aid developers in preventing `invalid memory address or nil pointer dereference` errors arising from direct access of nested `protobuf` fields. + +When working with `protobuf`, it's quite common to have complex structures where a message field is contained within another message, which itself can be part of another message, and so on. +If these fields are accessed directly and some field in the call chain will not be initialized, it can result in application panic. + +Protogetter addresses this issue by suggesting use of getter methods for field access. + +## How does it work? 
+Protogetter analyzes your Go code and helps detect direct `protobuf` field accesses that could give rise to panic.\ +The linter suggests using getters: +```go +m.GetFoo().GetBar().GetBaz() +``` +instead of direct field access: +```go +m.Foo.Bar.Baz +``` + +And you will then only need to perform a nil check after the final call: +```go +if m.GetFoo().GetBar().GetBaz() != nil { + // Do something with m.GetFoo().GetBar().GetBaz() +} +``` +instead of: +```go +if m.Foo != nil { + if m.Foo.Bar != nil { + if m.Foo.Bar.Baz != nil { + // Do something with m.Foo.Bar.Baz + } + } +} +``` + +or use zero values: + +```go +// If one of the methods returns `nil` we will receive 0 instead of panic. +v := m.GetFoo().GetBar().GetBaz().GetInt() +``` + +instead of panic: + +```go +// If at least one structure in the chains is not initialized, we will get a panic. +v := m.Foo.Bar.Baz.Int +``` + +which simplifies the code and makes it more reliable. + +## Installation + +```bash +go install github.com/ghostiam/protogetter/cmd/protogetter@latest +``` + +## Usage + +To run the linter: +```bash +protogetter ./... +``` + +Or to apply suggested fixes directly: +```bash +protogetter --fix ./... +``` diff --git a/vendor/github.com/ghostiam/protogetter/posfilter.go b/vendor/github.com/ghostiam/protogetter/posfilter.go new file mode 100644 index 0000000000..82075ccb16 --- /dev/null +++ b/vendor/github.com/ghostiam/protogetter/posfilter.go @@ -0,0 +1,65 @@ +package protogetter + +import ( + "go/token" +) + +type PosFilter struct { + positions map[token.Pos]struct{} + alreadyReplaced map[string]map[int][2]int // map[filename][line][start, end] +} + +func NewPosFilter() *PosFilter { + return &PosFilter{ + positions: make(map[token.Pos]struct{}), + alreadyReplaced: make(map[string]map[int][2]int), + } +} + +func (f *PosFilter) IsFiltered(pos token.Pos) bool { + _, ok := f.positions[pos] + return ok +} + +func (f *PosFilter) AddPos(pos token.Pos) { + f.positions[pos] = struct{}{} +} + +func (f *PosFilter) IsAlreadyReplaced(fset *token.FileSet, pos, end token.Pos) bool { + filePos := fset.Position(pos) + fileEnd := fset.Position(end) + + lines, ok := f.alreadyReplaced[filePos.Filename] + if !ok { + return false + } + + lineRange, ok := lines[filePos.Line] + if !ok { + return false + } + + if lineRange[0] <= filePos.Offset && fileEnd.Offset <= lineRange[1] { + return true + } + + return false +} + +func (f *PosFilter) AddAlreadyReplaced(fset *token.FileSet, pos, end token.Pos) { + filePos := fset.Position(pos) + fileEnd := fset.Position(end) + + lines, ok := f.alreadyReplaced[filePos.Filename] + if !ok { + lines = make(map[int][2]int) + f.alreadyReplaced[filePos.Filename] = lines + } + + lineRange, ok := lines[filePos.Line] + if ok && lineRange[0] <= filePos.Offset && fileEnd.Offset <= lineRange[1] { + return + } + + lines[filePos.Line] = [2]int{filePos.Offset, fileEnd.Offset} +} diff --git a/vendor/github.com/ghostiam/protogetter/processor.go b/vendor/github.com/ghostiam/protogetter/processor.go new file mode 100644 index 0000000000..d65199dd2e --- /dev/null +++ b/vendor/github.com/ghostiam/protogetter/processor.go @@ -0,0 +1,353 @@ +package protogetter + +import ( + "fmt" + "go/ast" + "go/token" + "go/types" + "reflect" + "strings" +) + +type processor struct { + info *types.Info + filter *PosFilter + cfg *Config + + to strings.Builder + from strings.Builder + err error +} + +func Process(info *types.Info, filter *PosFilter, n ast.Node, cfg *Config) (*Result, error) { + p := &processor{ + info: info, + filter: filter, + cfg: 
cfg, + } + + return p.process(n) +} + +func (c *processor) process(n ast.Node) (*Result, error) { + switch x := n.(type) { + case *ast.AssignStmt: + // Skip any assignment to the field. + for _, s := range x.Lhs { + c.filter.AddPos(s.Pos()) + + if se, ok := s.(*ast.StarExpr); ok { + c.filter.AddPos(se.X.Pos()) + } + } + + case *ast.IncDecStmt: + // Skip any increment/decrement to the field. + c.filter.AddPos(x.X.Pos()) + + case *ast.UnaryExpr: + if x.Op == token.AND { + // Skip all expressions when the field is used as a pointer. + // Because this is not direct reading, but most likely writing by pointer (for example like sql.Scan). + c.filter.AddPos(x.X.Pos()) + } + + case *ast.CallExpr: + if !c.cfg.ReplaceFirstArgInAppend && len(x.Args) > 0 { + if v, ok := x.Fun.(*ast.Ident); ok && v.Name == "append" { + // Skip first argument of append function. + c.filter.AddPos(x.Args[0].Pos()) + break + } + } + + f, ok := x.Fun.(*ast.SelectorExpr) + if !ok { + return &Result{}, nil + } + + if !isProtoMessage(c.info, f.X) { + return &Result{}, nil + } + + c.processInner(x) + + case *ast.SelectorExpr: + if !isProtoMessage(c.info, x.X) { + // If the selector is not on a proto message, skip it. + return &Result{}, nil + } + + c.processInner(x) + + case *ast.StarExpr: + f, ok := x.X.(*ast.SelectorExpr) + if !ok { + return &Result{}, nil + } + + if !isProtoMessage(c.info, f.X) { + return &Result{}, nil + } + + // proto2 generates fields as pointers. Hence, the indirection + // must be removed when generating the fix for the case. + // The `*` is retained in `c.from`, but excluded from the fix + // present in the `c.to`. + c.writeFrom("*") + c.processInner(x.X) + + case *ast.BinaryExpr: + // Check if the expression is a comparison. + if x.Op != token.EQL && x.Op != token.NEQ { + return &Result{}, nil + } + + // Check if one of the operands is nil. + + xIdent, xOk := x.X.(*ast.Ident) + yIdent, yOk := x.Y.(*ast.Ident) + + xIsNil := xOk && xIdent.Name == "nil" + yIsNil := yOk && yIdent.Name == "nil" + + if !xIsNil && !yIsNil { + return &Result{}, nil + } + + // Extract the non-nil operand for further checks + + var expr ast.Expr + if xIsNil { + expr = x.Y + } else { + expr = x.X + } + + se, ok := expr.(*ast.SelectorExpr) + if !ok { + return &Result{}, nil + } + + if !isProtoMessage(c.info, se.X) { + return &Result{}, nil + } + + // Check if the Getter function of the protobuf message returns a pointer. + hasPointer, ok := getterResultHasPointer(c.info, se.X, se.Sel.Name) + if !ok || hasPointer { + return &Result{}, nil + } + + c.filter.AddPos(x.X.Pos()) + + default: + return nil, fmt.Errorf("not implemented for type: %s (%s)", reflect.TypeOf(x), formatNode(n)) + } + + if c.err != nil { + return nil, c.err + } + + return &Result{ + From: c.from.String(), + To: c.to.String(), + }, nil +} + +func (c *processor) processInner(expr ast.Expr) { + switch x := expr.(type) { + case *ast.Ident: + c.write(x.Name) + + case *ast.BasicLit: + c.write(x.Value) + + case *ast.UnaryExpr: + if x.Op == token.AND { + c.write(formatNode(x)) + return + } + + c.write(x.Op.String()) + c.processInner(x.X) + + case *ast.SelectorExpr: + c.processInner(x.X) + c.write(".") + + // If getter exists, use it. + if methodIsExists(c.info, x.X, "Get"+x.Sel.Name) { + c.writeFrom(x.Sel.Name) + c.writeTo("Get" + x.Sel.Name + "()") + return + } + + // If the selector is not a proto-message or the method has already been called, we leave it unchanged. + // This approach is significantly more efficient than verifying the presence of methods in all cases. 
+ c.write(x.Sel.Name) + + case *ast.CallExpr: + c.processInner(x.Fun) + c.write("(") + for i, arg := range x.Args { + if i > 0 { + c.write(",") + } + c.processInner(arg) + } + c.write(")") + + case *ast.IndexExpr: + c.processInner(x.X) + c.write("[") + c.processInner(x.Index) + c.write("]") + + case *ast.BinaryExpr: + c.processInner(x.X) + c.write(x.Op.String()) + c.processInner(x.Y) + + case *ast.ParenExpr: + c.write("(") + c.processInner(x.X) + c.write(")") + + case *ast.StarExpr: + c.write("*") + c.processInner(x.X) + + case *ast.CompositeLit: + c.write(formatNode(x)) + + case *ast.TypeAssertExpr: + c.write(formatNode(x)) + + default: + c.err = fmt.Errorf("processInner: not implemented for type: %s", reflect.TypeOf(x)) + } +} + +func (c *processor) write(s string) { + c.writeTo(s) + c.writeFrom(s) +} + +func (c *processor) writeTo(s string) { + c.to.WriteString(s) +} + +func (c *processor) writeFrom(s string) { + c.from.WriteString(s) +} + +// Result contains source code (from) and suggested change (to) +type Result struct { + From string + To string +} + +func (r *Result) Skipped() bool { + // If from and to are the same, skip it. + return r.From == r.To +} + +func isProtoMessage(info *types.Info, expr ast.Expr) bool { + // First, we are checking for the presence of the ProtoReflect method which is currently being generated + // and corresponds to v2 version. + // https://pkg.go.dev/google.golang.org/protobuf@v1.31.0/proto#Message + const protoV2Method = "ProtoReflect" + ok := methodIsExists(info, expr, protoV2Method) + if ok { + return true + } + + // Afterwards, we are checking the ProtoMessage method. All the structures that implement the proto.Message interface + // have a ProtoMessage method and are proto-structures. This interface has been generated since version 1.0.0 and + // continues to exist for compatibility. + // https://pkg.go.dev/github.com/golang/protobuf/proto?utm_source=godoc#Message + const protoV1Method = "ProtoMessage" + ok = methodIsExists(info, expr, protoV1Method) + if ok { + // Since there is a protoc-gen-gogo generator that implements the proto.Message interface, but may not generate + // getters or generate from without checking for nil, so even if getters exist, we skip them. 
+ const protocGenGoGoMethod = "MarshalToSizedBuffer" + return !methodIsExists(info, expr, protocGenGoGoMethod) + } + + return false +} + +func typesNamed(info *types.Info, x ast.Expr) (*types.Named, bool) { + if info == nil { + return nil, false + } + + t := info.TypeOf(x) + if t == nil { + return nil, false + } + + ptr, ok := t.Underlying().(*types.Pointer) + if ok { + t = ptr.Elem() + } + + named, ok := t.(*types.Named) + if !ok { + return nil, false + } + + return named, true +} + +func methodIsExists(info *types.Info, x ast.Expr, name string) bool { + named, ok := typesNamed(info, x) + if !ok { + return false + } + + for i := 0; i < named.NumMethods(); i++ { + if named.Method(i).Name() == name { + return true + } + } + + return false +} + +func getterResultHasPointer(info *types.Info, x ast.Expr, name string) (hasPointer, ok bool) { + named, ok := typesNamed(info, x) + if !ok { + return false, false + } + + for i := 0; i < named.NumMethods(); i++ { + method := named.Method(i) + if method.Name() != "Get"+name { + continue + } + + var sig *types.Signature + sig, ok = method.Type().(*types.Signature) + if !ok { + return false, false + } + + results := sig.Results() + if results.Len() == 0 { + return false, false + } + + firstType := results.At(0) + _, ok = firstType.Type().(*types.Pointer) + if !ok { + return false, true + } + + return true, true + } + + return false, false +} diff --git a/vendor/github.com/ghostiam/protogetter/protogetter.go b/vendor/github.com/ghostiam/protogetter/protogetter.go new file mode 100644 index 0000000000..31eee8572a --- /dev/null +++ b/vendor/github.com/ghostiam/protogetter/protogetter.go @@ -0,0 +1,279 @@ +package protogetter + +import ( + "bytes" + "flag" + "fmt" + "go/ast" + "go/format" + "go/token" + "log" + "path/filepath" + "strings" + + "github.com/gobwas/glob" + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/ast/inspector" +) + +type Mode int + +const ( + StandaloneMode Mode = iota + GolangciLintMode +) + +const msgFormat = "avoid direct access to proto field %s, use %s instead" + +func NewAnalyzer(cfg *Config) *analysis.Analyzer { + if cfg == nil { + cfg = &Config{} + } + + return &analysis.Analyzer{ + Name: "protogetter", + Doc: "Reports direct reads from proto message fields when getters should be used", + Flags: flags(cfg), + Run: func(pass *analysis.Pass) (any, error) { + _, err := Run(pass, cfg) + return nil, err + }, + } +} + +func flags(opts *Config) flag.FlagSet { + fs := flag.NewFlagSet("protogetter", flag.ContinueOnError) + + fs.Func("skip-generated-by", "skip files generated with the given prefixes", func(s string) error { + for _, prefix := range strings.Split(s, ",") { + opts.SkipGeneratedBy = append(opts.SkipGeneratedBy, prefix) + } + return nil + }) + fs.Func("skip-files", "skip files with the given glob patterns", func(s string) error { + for _, pattern := range strings.Split(s, ",") { + opts.SkipFiles = append(opts.SkipFiles, pattern) + } + return nil + }) + fs.BoolVar(&opts.SkipAnyGenerated, "skip-any-generated", false, "skip any generated files") + + return *fs +} + +type Config struct { + Mode Mode // Zero value is StandaloneMode. + SkipGeneratedBy []string + SkipFiles []string + SkipAnyGenerated bool + ReplaceFirstArgInAppend bool +} + +func Run(pass *analysis.Pass, cfg *Config) ([]Issue, error) { + skipGeneratedBy := make([]string, 0, len(cfg.SkipGeneratedBy)+3) + // Always skip files generated by protoc-gen-go, protoc-gen-go-grpc and protoc-gen-grpc-gateway. 
+ skipGeneratedBy = append(skipGeneratedBy, "protoc-gen-go", "protoc-gen-go-grpc", "protoc-gen-grpc-gateway") + for _, s := range cfg.SkipGeneratedBy { + s = strings.TrimSpace(s) + if s == "" { + continue + } + skipGeneratedBy = append(skipGeneratedBy, s) + } + + skipFilesGlobPatterns := make([]glob.Glob, 0, len(cfg.SkipFiles)) + for _, s := range cfg.SkipFiles { + s = strings.TrimSpace(s) + if s == "" { + continue + } + + compile, err := glob.Compile(s) + if err != nil { + return nil, fmt.Errorf("invalid glob pattern: %w", err) + } + + skipFilesGlobPatterns = append(skipFilesGlobPatterns, compile) + } + + nodeTypes := []ast.Node{ + (*ast.AssignStmt)(nil), + (*ast.BinaryExpr)(nil), + (*ast.CallExpr)(nil), + (*ast.SelectorExpr)(nil), + (*ast.StarExpr)(nil), + (*ast.IncDecStmt)(nil), + (*ast.UnaryExpr)(nil), + } + + // Skip filtered files. + var files []*ast.File + for _, f := range pass.Files { + if skipGeneratedFile(f, skipGeneratedBy, cfg.SkipAnyGenerated) { + continue + } + + if skipFilesByGlob(pass.Fset.File(f.Pos()).Name(), skipFilesGlobPatterns) { + continue + } + + files = append(files, f) + + // ast.Print(pass.Fset, f) + } + + ins := inspector.New(files) + + var issues []Issue + + filter := NewPosFilter() + ins.Preorder(nodeTypes, func(node ast.Node) { + report := analyse(pass, filter, node, cfg) + if report == nil { + return + } + + switch cfg.Mode { + case StandaloneMode: + pass.Report(report.ToDiagReport()) + case GolangciLintMode: + issues = append(issues, report.ToIssue(pass.Fset)) + } + }) + + return issues, nil +} + +func analyse(pass *analysis.Pass, filter *PosFilter, n ast.Node, cfg *Config) *Report { + // fmt.Printf("\n>>> check: %s\n", formatNode(n)) + // ast.Print(pass.Fset, n) + if filter.IsFiltered(n.Pos()) { + // fmt.Printf(">>> filtered\n") + return nil + } + + result, err := Process(pass.TypesInfo, filter, n, cfg) + if err != nil { + pass.Report(analysis.Diagnostic{ + Pos: n.Pos(), + End: n.End(), + Message: fmt.Sprintf("error: %v", err), + }) + + return nil + } + + // If existing in filter, skip it. + if filter.IsFiltered(n.Pos()) { + return nil + } + + if result.Skipped() { + return nil + } + + // If the expression has already been replaced, skip it. + if filter.IsAlreadyReplaced(pass.Fset, n.Pos(), n.End()) { + return nil + } + // Add the expression to the filter. + filter.AddAlreadyReplaced(pass.Fset, n.Pos(), n.End()) + + return &Report{ + node: n, + result: result, + } +} + +// Issue is used to integrate with golangci-lint's inline auto fix. 
+type Issue struct { + Pos token.Position + Message string + InlineFix InlineFix +} + +type InlineFix struct { + StartCol int // zero-based + Length int + NewString string +} + +type Report struct { + node ast.Node + result *Result +} + +func (r *Report) ToDiagReport() analysis.Diagnostic { + msg := fmt.Sprintf(msgFormat, r.result.From, r.result.To) + + return analysis.Diagnostic{ + Pos: r.node.Pos(), + End: r.node.End(), + Message: msg, + SuggestedFixes: []analysis.SuggestedFix{ + { + Message: msg, + TextEdits: []analysis.TextEdit{ + { + Pos: r.node.Pos(), + End: r.node.End(), + NewText: []byte(r.result.To), + }, + }, + }, + }, + } +} + +func (r *Report) ToIssue(fset *token.FileSet) Issue { + msg := fmt.Sprintf(msgFormat, r.result.From, r.result.To) + return Issue{ + Pos: fset.Position(r.node.Pos()), + Message: msg, + InlineFix: InlineFix{ + StartCol: fset.Position(r.node.Pos()).Column - 1, + Length: len(r.result.From), + NewString: r.result.To, + }, + } +} + +func skipGeneratedFile(f *ast.File, prefixes []string, skipAny bool) bool { + if len(f.Comments) == 0 { + return false + } + firstComment := f.Comments[0].Text() + + // https://golang.org/s/generatedcode + if skipAny && strings.HasPrefix(firstComment, "Code generated") { + return true + } + + for _, prefix := range prefixes { + if strings.HasPrefix(firstComment, "Code generated by "+prefix) { + return true + } + } + + return false +} + +func skipFilesByGlob(filename string, patterns []glob.Glob) bool { + for _, pattern := range patterns { + if pattern.Match(filename) || pattern.Match(filepath.Base(filename)) { + return true + } + } + + return false +} + +func formatNode(node ast.Node) string { + buf := new(bytes.Buffer) + if err := format.Node(buf, token.NewFileSet(), node); err != nil { + log.Printf("Error formatting expression: %v", err) + return "" + } + + return buf.String() +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/appendAssign_checker.go b/vendor/github.com/go-critic/go-critic/checkers/appendAssign_checker.go index a9324dd02e..2a67dccec8 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/appendAssign_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/appendAssign_checker.go @@ -6,7 +6,8 @@ import ( "go/types" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" + "github.com/go-toolsmith/astequal" "github.com/go-toolsmith/astp" "golang.org/x/tools/go/ast/astutil" @@ -15,7 +16,7 @@ import ( func init() { var info linter.CheckerInfo info.Name = "appendAssign" - info.Tags = []string{"diagnostic"} + info.Tags = []string{linter.DiagnosticTag} info.Summary = "Detects suspicious append result assignments" info.Before = ` p.positives = append(p.negatives, x) diff --git a/vendor/github.com/go-critic/go-critic/checkers/appendCombine_checker.go b/vendor/github.com/go-critic/go-critic/checkers/appendCombine_checker.go index 3c81449e9c..81a7aa30b3 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/appendCombine_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/appendCombine_checker.go @@ -5,7 +5,8 @@ import ( "go/token" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" + "github.com/go-toolsmith/astcast" "github.com/go-toolsmith/astequal" ) @@ -13,7 +14,7 @@ import ( func init() { var info linter.CheckerInfo info.Name = "appendCombine" - info.Tags = []string{"performance"} + 
info.Tags = []string{linter.PerformanceTag} info.Summary = "Detects `append` chains to the same slice that can be done in a single `append` call" info.Before = ` xs = append(xs, 1) diff --git a/vendor/github.com/go-critic/go-critic/checkers/badCond_checker.go b/vendor/github.com/go-critic/go-critic/checkers/badCond_checker.go index 149f0ac88a..9be45ccc78 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/badCond_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/badCond_checker.go @@ -7,7 +7,8 @@ import ( "github.com/go-critic/go-critic/checkers/internal/astwalk" "github.com/go-critic/go-critic/checkers/internal/lintutil" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" + "github.com/go-toolsmith/astcast" "github.com/go-toolsmith/astcopy" "github.com/go-toolsmith/astequal" @@ -18,7 +19,7 @@ import ( func init() { var info linter.CheckerInfo info.Name = "badCond" - info.Tags = []string{"diagnostic"} + info.Tags = []string{linter.DiagnosticTag} info.Summary = "Detects suspicious condition expressions" info.Before = ` for i := 0; i > n; i++ { @@ -114,30 +115,43 @@ func (c *badCondChecker) checkForStmt(stmt *ast.ForStmt) { iter := astcast.ToIdent(init.Lhs[0]) cond := astcast.ToBinaryExpr(stmt.Cond) - if cond.Op != token.GTR || !astequal.Expr(iter, cond.X) { + + var i, n ast.Expr + var op token.Token + switch { + case cond.Op == token.GTR && astequal.Expr(iter, cond.X): + i = cond.X + n = cond.Y + op = token.LSS + case cond.Op == token.LSS && astequal.Expr(iter, cond.Y): + i = cond.Y + n = cond.X + op = token.GTR + default: return } - if !typep.SideEffectFree(c.ctx.TypesInfo, cond.Y) { + + if !typep.SideEffectFree(c.ctx.TypesInfo, n) { return } post := astcast.ToIncDecStmt(stmt.Post) - if post.Tok != token.INC || !astequal.Expr(iter, post.X) { + if post.Tok != token.INC || !astequal.Expr(iter, i) { return } - mutated := lintutil.CouldBeMutated(c.ctx.TypesInfo, stmt.Body, cond.Y) || + mutated := lintutil.CouldBeMutated(c.ctx.TypesInfo, stmt.Body, n) || lintutil.CouldBeMutated(c.ctx.TypesInfo, stmt.Body, iter) if mutated { return } - c.warnForStmt(stmt, cond) + c.warnForStmt(stmt, op, cond) } -func (c *badCondChecker) warnForStmt(cause ast.Node, cond *ast.BinaryExpr) { +func (c *badCondChecker) warnForStmt(cause ast.Node, op token.Token, cond *ast.BinaryExpr) { suggest := astcopy.BinaryExpr(cond) - suggest.Op = token.LSS + suggest.Op = op c.ctx.Warn(cause, "`%s` in loop; probably meant `%s`?", cond, suggest) } diff --git a/vendor/github.com/go-critic/go-critic/checkers/badRegexp_checker.go b/vendor/github.com/go-critic/go-critic/checkers/badRegexp_checker.go index e0d4b7487f..6c6845053d 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/badRegexp_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/badRegexp_checker.go @@ -9,14 +9,15 @@ import ( "unicode/utf8" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" + "github.com/quasilyte/regex/syntax" ) func init() { var info linter.CheckerInfo info.Name = "badRegexp" - info.Tags = []string{"diagnostic", "experimental"} + info.Tags = []string{linter.DiagnosticTag, linter.ExperimentalTag} info.Summary = "Detects suspicious regexp patterns" info.Before = "regexp.MustCompile(`(?:^aa|bb|cc)foo[aba]`)" info.After = "regexp.MustCompile(`^(?:aa|bb|cc)foo[ab]`)" @@ -365,7 +366,7 @@ func (c *badRegexpChecker) checkCharClassDups(cc syntax.Expr) { } // 2. 
Sort ranges, O(nlogn). - sort.Slice(ranges, func(i, j int) bool { + sort.SliceStable(ranges, func(i, j int) bool { return ranges[i].low < ranges[j].low }) diff --git a/vendor/github.com/go-critic/go-critic/checkers/boolExprSimplify_checker.go b/vendor/github.com/go-critic/go-critic/checkers/boolExprSimplify_checker.go index b4000a8ce7..a1c69cb7ab 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/boolExprSimplify_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/boolExprSimplify_checker.go @@ -5,22 +5,22 @@ import ( "go/token" "strconv" + "github.com/go-critic/go-critic/checkers/internal/astwalk" + "github.com/go-critic/go-critic/checkers/internal/lintutil" + "github.com/go-critic/go-critic/linter" + "github.com/go-toolsmith/astcast" "github.com/go-toolsmith/astcopy" "github.com/go-toolsmith/astequal" "github.com/go-toolsmith/astp" "github.com/go-toolsmith/typep" "golang.org/x/tools/go/ast/astutil" - - "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/checkers/internal/lintutil" - "github.com/go-critic/go-critic/framework/linter" ) func init() { var info linter.CheckerInfo info.Name = "boolExprSimplify" - info.Tags = []string{"style", "experimental"} + info.Tags = []string{linter.StyleTag, linter.ExperimentalTag} info.Summary = "Detects bool expressions that can be simplified" info.Before = ` a := !(elapsed >= expectElapsedMin) diff --git a/vendor/github.com/go-critic/go-critic/checkers/builtinShadowDecl_checker.go b/vendor/github.com/go-critic/go-critic/checkers/builtinShadowDecl_checker.go index 94d51a996a..d8be10ce9c 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/builtinShadowDecl_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/builtinShadowDecl_checker.go @@ -4,13 +4,13 @@ import ( "go/ast" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" ) func init() { var info linter.CheckerInfo info.Name = "builtinShadowDecl" - info.Tags = []string{"diagnostic", "experimental"} + info.Tags = []string{linter.DiagnosticTag, linter.ExperimentalTag} info.Summary = "Detects top-level declarations that shadow the predeclared identifiers" info.Before = `type int struct {}` info.After = `type myInt struct {}` diff --git a/vendor/github.com/go-critic/go-critic/checkers/builtinShadow_checker.go b/vendor/github.com/go-critic/go-critic/checkers/builtinShadow_checker.go index 1e1661deb2..0b4b7bafb8 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/builtinShadow_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/builtinShadow_checker.go @@ -4,13 +4,13 @@ import ( "go/ast" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" ) func init() { var info linter.CheckerInfo info.Name = "builtinShadow" - info.Tags = []string{"style", "opinionated"} + info.Tags = []string{linter.StyleTag, linter.OpinionatedTag} info.Summary = "Detects when predeclared identifiers are shadowed in assignments" info.Before = `len := 10` info.After = `length := 10` diff --git a/vendor/github.com/go-critic/go-critic/checkers/captLocal_checker.go b/vendor/github.com/go-critic/go-critic/checkers/captLocal_checker.go index d9b4b7e75d..b31a6f7fd3 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/captLocal_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/captLocal_checker.go @@ -4,13 +4,13 @@ import ( 
"go/ast" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" ) func init() { var info linter.CheckerInfo info.Name = "captLocal" - info.Tags = []string{"style"} + info.Tags = []string{linter.StyleTag} info.Params = linter.CheckerParams{ "paramsOnly": { Value: true, diff --git a/vendor/github.com/go-critic/go-critic/checkers/caseOrder_checker.go b/vendor/github.com/go-critic/go-critic/checkers/caseOrder_checker.go index 047ea4fee0..306756834b 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/caseOrder_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/caseOrder_checker.go @@ -5,13 +5,13 @@ import ( "go/types" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" ) func init() { var info linter.CheckerInfo info.Name = "caseOrder" - info.Tags = []string{"diagnostic"} + info.Tags = []string{linter.DiagnosticTag} info.Summary = "Detects erroneous case order inside switch statements" info.Before = ` switch x.(type) { diff --git a/vendor/github.com/go-critic/go-critic/checkers/checkers.go b/vendor/github.com/go-critic/go-critic/checkers/checkers.go index 0c2ebc00ca..5797dafdf4 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/checkers.go +++ b/vendor/github.com/go-critic/go-critic/checkers/checkers.go @@ -4,7 +4,7 @@ package checkers import ( "os" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" ) var collection = &linter.CheckerCollection{ diff --git a/vendor/github.com/go-critic/go-critic/checkers/codegenComment_checker.go b/vendor/github.com/go-critic/go-critic/checkers/codegenComment_checker.go index 52a72d28c8..6eeb0bb5db 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/codegenComment_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/codegenComment_checker.go @@ -6,13 +6,13 @@ import ( "strings" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" ) func init() { var info linter.CheckerInfo info.Name = "codegenComment" - info.Tags = []string{"diagnostic"} + info.Tags = []string{linter.DiagnosticTag} info.Summary = "Detects malformed 'code generated' file comments" info.Before = `// This file was automatically generated by foogen` info.After = `// Code generated by foogen. 
DO NOT EDIT.` diff --git a/vendor/github.com/go-critic/go-critic/checkers/commentFormatting_checker.go b/vendor/github.com/go-critic/go-critic/checkers/commentFormatting_checker.go index f330b723a0..5a9564a0f9 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/commentFormatting_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/commentFormatting_checker.go @@ -8,13 +8,13 @@ import ( "unicode/utf8" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" ) func init() { var info linter.CheckerInfo info.Name = "commentFormatting" - info.Tags = []string{"style"} + info.Tags = []string{linter.StyleTag} info.Summary = "Detects comments with non-idiomatic formatting" info.Before = `//This is a comment` info.After = `// This is a comment` diff --git a/vendor/github.com/go-critic/go-critic/checkers/commentedOutCode_checker.go b/vendor/github.com/go-critic/go-critic/checkers/commentedOutCode_checker.go index 554e0621fd..8595b79515 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/commentedOutCode_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/commentedOutCode_checker.go @@ -6,17 +6,25 @@ import ( "go/token" "regexp" "strings" + "unicode/utf8" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" + "github.com/go-toolsmith/strparse" ) func init() { var info linter.CheckerInfo info.Name = "commentedOutCode" - info.Tags = []string{"diagnostic", "experimental"} + info.Tags = []string{linter.DiagnosticTag, linter.ExperimentalTag} info.Summary = "Detects commented-out code inside function bodies" + info.Params = linter.CheckerParams{ + "minLength": { + Value: 15, + Usage: "min length of the comment that triggers a warning", + }, + } info.Before = ` // fmt.Println("Debugging hard") foo(1, 2)` @@ -26,6 +34,7 @@ foo(1, 2)` return astwalk.WalkerForLocalComment(&commentedOutCodeChecker{ ctx: ctx, notQuiteFuncCall: regexp.MustCompile(`\w+\s+\([^)]*\)\s*$`), + minLength: info.Params.Int("minLength"), }), nil }) } @@ -36,6 +45,7 @@ type commentedOutCodeChecker struct { fn *ast.FuncDecl notQuiteFuncCall *regexp.Regexp + minLength int } func (c *commentedOutCodeChecker) EnterFunc(fn *ast.FuncDecl) bool { @@ -68,7 +78,7 @@ func (c *commentedOutCodeChecker) VisitLocalComment(cg *ast.CommentGroup) { // Some very short comment that can be skipped. // Usually triggering on these results in false positive. // Unless there is a very popular call like print/println. 
- cond := len(s) < len("quite too short") && + cond := utf8.RuneCountInString(s) < c.minLength && !strings.Contains(s, "print") && !strings.Contains(s, "fmt.") && !strings.Contains(s, "log.") diff --git a/vendor/github.com/go-critic/go-critic/checkers/commentedOutImport_checker.go b/vendor/github.com/go-critic/go-critic/checkers/commentedOutImport_checker.go index 3c086569b1..e0855da812 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/commentedOutImport_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/commentedOutImport_checker.go @@ -6,13 +6,13 @@ import ( "regexp" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" ) func init() { var info linter.CheckerInfo info.Name = "commentedOutImport" - info.Tags = []string{"style", "experimental"} + info.Tags = []string{linter.StyleTag, linter.ExperimentalTag} info.Summary = "Detects commented-out imports" info.Before = ` import ( diff --git a/vendor/github.com/go-critic/go-critic/checkers/defaultCaseOrder_checker.go b/vendor/github.com/go-critic/go-critic/checkers/defaultCaseOrder_checker.go index e06944d624..cdebaef987 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/defaultCaseOrder_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/defaultCaseOrder_checker.go @@ -4,13 +4,13 @@ import ( "go/ast" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" ) func init() { var info linter.CheckerInfo info.Name = "defaultCaseOrder" - info.Tags = []string{"style"} + info.Tags = []string{linter.StyleTag} info.Summary = "Detects when default case in switch isn't on 1st or last position" info.Before = ` switch { diff --git a/vendor/github.com/go-critic/go-critic/checkers/deferInLoop_checker.go b/vendor/github.com/go-critic/go-critic/checkers/deferInLoop_checker.go index da90fe67a2..37c80c864a 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/deferInLoop_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/deferInLoop_checker.go @@ -4,13 +4,13 @@ import ( "go/ast" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" ) func init() { var info linter.CheckerInfo info.Name = "deferInLoop" - info.Tags = []string{"diagnostic", "experimental"} + info.Tags = []string{linter.DiagnosticTag, linter.ExperimentalTag} info.Summary = "Detects loops inside functions that use defer" info.Before = ` for _, filename := range []string{"foo", "bar"} { diff --git a/vendor/github.com/go-critic/go-critic/checkers/deprecatedComment_checker.go b/vendor/github.com/go-critic/go-critic/checkers/deprecatedComment_checker.go index 0eb5072375..c61d773da6 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/deprecatedComment_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/deprecatedComment_checker.go @@ -5,13 +5,13 @@ import ( "strings" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" ) func init() { var info linter.CheckerInfo info.Name = "deprecatedComment" - info.Tags = []string{"diagnostic"} + info.Tags = []string{linter.DiagnosticTag} info.Summary = "Detects malformed 'deprecated' doc-comments" info.Before = ` // deprecated, use FuncNew instead diff --git 
a/vendor/github.com/go-critic/go-critic/checkers/docStub_checker.go b/vendor/github.com/go-critic/go-critic/checkers/docStub_checker.go index d8aaaf7437..aa23de42c4 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/docStub_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/docStub_checker.go @@ -7,13 +7,13 @@ import ( "strings" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" ) func init() { var info linter.CheckerInfo info.Name = "docStub" - info.Tags = []string{"style", "experimental"} + info.Tags = []string{linter.StyleTag, linter.ExperimentalTag} info.Summary = "Detects comments that silence go lint complaints about doc-comment" info.Before = ` // Foo ... diff --git a/vendor/github.com/go-critic/go-critic/checkers/dupBranchBody_checker.go b/vendor/github.com/go-critic/go-critic/checkers/dupBranchBody_checker.go index 83de505280..c4f0183878 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/dupBranchBody_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/dupBranchBody_checker.go @@ -4,14 +4,15 @@ import ( "go/ast" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" + "github.com/go-toolsmith/astequal" ) func init() { var info linter.CheckerInfo info.Name = "dupBranchBody" - info.Tags = []string{"diagnostic"} + info.Tags = []string{linter.DiagnosticTag} info.Summary = "Detects duplicated branch bodies inside conditional statements" info.Before = ` if cond { @@ -54,5 +55,5 @@ func (c *dupBranchBodyChecker) checkIf(stmt *ast.IfStmt) { } func (c *dupBranchBodyChecker) warnIf(cause ast.Node) { - c.ctx.Warn(cause, "both branches in if statement has same body") + c.ctx.Warn(cause, "both branches in if statement have same body") } diff --git a/vendor/github.com/go-critic/go-critic/checkers/dupCase_checker.go b/vendor/github.com/go-critic/go-critic/checkers/dupCase_checker.go index a565007601..381bad68b8 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/dupCase_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/dupCase_checker.go @@ -5,13 +5,13 @@ import ( "github.com/go-critic/go-critic/checkers/internal/astwalk" "github.com/go-critic/go-critic/checkers/internal/lintutil" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" ) func init() { var info linter.CheckerInfo info.Name = "dupCase" - info.Tags = []string{"diagnostic"} + info.Tags = []string{linter.DiagnosticTag} info.Summary = "Detects duplicated case clauses inside switch or select statements" info.Before = ` switch x { diff --git a/vendor/github.com/go-critic/go-critic/checkers/dupImports_checker.go b/vendor/github.com/go-critic/go-critic/checkers/dupImports_checker.go index 54658eb9f4..ed674eb85c 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/dupImports_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/dupImports_checker.go @@ -4,18 +4,18 @@ import ( "fmt" "go/ast" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" ) func init() { var info linter.CheckerInfo info.Name = "dupImport" - info.Tags = []string{"style", "experimental"} + info.Tags = []string{linter.StyleTag, linter.ExperimentalTag} info.Summary = "Detects multiple imports of the same package under different aliases" info.Before = ` import ( "fmt" - priting "fmt" // Imported the second time + 
printing "fmt" // Imported the second time )` info.After = ` import( diff --git a/vendor/github.com/go-critic/go-critic/checkers/dupSubExpr_checker.go b/vendor/github.com/go-critic/go-critic/checkers/dupSubExpr_checker.go index 00f8fd0eb5..9ab75945cd 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/dupSubExpr_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/dupSubExpr_checker.go @@ -6,7 +6,8 @@ import ( "go/types" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" + "github.com/go-toolsmith/astequal" "github.com/go-toolsmith/typep" ) @@ -14,7 +15,7 @@ import ( func init() { var info linter.CheckerInfo info.Name = "dupSubExpr" - info.Tags = []string{"diagnostic"} + info.Tags = []string{linter.DiagnosticTag} info.Summary = "Detects suspicious duplicated sub-expressions" info.Before = ` sort.Slice(xs, func(i, j int) bool { diff --git a/vendor/github.com/go-critic/go-critic/checkers/elseif_checker.go b/vendor/github.com/go-critic/go-critic/checkers/elseif_checker.go index dcc964846f..857d09fa0e 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/elseif_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/elseif_checker.go @@ -4,14 +4,15 @@ import ( "go/ast" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" + "github.com/go-toolsmith/astp" ) func init() { var info linter.CheckerInfo info.Name = "elseif" - info.Tags = []string{"style"} + info.Tags = []string{linter.StyleTag} info.Params = linter.CheckerParams{ "skipBalanced": { Value: true, diff --git a/vendor/github.com/go-critic/go-critic/checkers/embedded_rules.go b/vendor/github.com/go-critic/go-critic/checkers/embedded_rules.go index b17178e09a..ad507425e6 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/embedded_rules.go +++ b/vendor/github.com/go-critic/go-critic/checkers/embedded_rules.go @@ -7,10 +7,10 @@ import ( "go/token" "os" - "github.com/quasilyte/go-ruleguard/ruleguard" - "github.com/go-critic/go-critic/checkers/rulesdata" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" + + "github.com/quasilyte/go-ruleguard/ruleguard" ) //go:generate go run ./rules/precompile.go -rules ./rules/rules.go -o ./rulesdata/rulesdata.go @@ -101,6 +101,7 @@ func (c *embeddedRuleguardChecker) WalkFile(f *ast.File) { Pkg: c.ctx.Pkg, Types: c.ctx.TypesInfo, Sizes: c.ctx.SizesInfo, + GoVersion: ruleguard.GoVersion(c.ctx.GoVersion), Fset: c.ctx.FileSet, TruncateLen: 100, }) diff --git a/vendor/github.com/go-critic/go-critic/checkers/emptyFallthrough_checker.go b/vendor/github.com/go-critic/go-critic/checkers/emptyFallthrough_checker.go index ebb8dad455..a008c61870 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/emptyFallthrough_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/emptyFallthrough_checker.go @@ -5,13 +5,13 @@ import ( "go/token" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" ) func init() { var info linter.CheckerInfo info.Name = "emptyFallthrough" - info.Tags = []string{"style", "experimental"} + info.Tags = []string{linter.StyleTag, linter.ExperimentalTag} info.Summary = "Detects fallthrough that can be avoided by using multi case values" info.Before = `switch kind { case reflect.Int: diff --git 
a/vendor/github.com/go-critic/go-critic/checkers/evalOrder_checker.go b/vendor/github.com/go-critic/go-critic/checkers/evalOrder_checker.go index 6ba07fe869..f8c5ae5423 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/evalOrder_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/evalOrder_checker.go @@ -7,7 +7,8 @@ import ( "github.com/go-critic/go-critic/checkers/internal/astwalk" "github.com/go-critic/go-critic/checkers/internal/lintutil" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" + "github.com/go-toolsmith/astcast" "github.com/go-toolsmith/astequal" "github.com/go-toolsmith/typep" @@ -16,7 +17,7 @@ import ( func init() { var info linter.CheckerInfo info.Name = "evalOrder" - info.Tags = []string{"diagnostic", "experimental"} + info.Tags = []string{linter.DiagnosticTag, linter.ExperimentalTag} info.Summary = "Detects unwanted dependencies on the evaluation order" info.Before = `return x, f(&x)` info.After = ` diff --git a/vendor/github.com/go-critic/go-critic/checkers/exitAfterDefer_checker.go b/vendor/github.com/go-critic/go-critic/checkers/exitAfterDefer_checker.go index 63e0049f2c..9889f48e8e 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/exitAfterDefer_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/exitAfterDefer_checker.go @@ -4,7 +4,8 @@ import ( "go/ast" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" + "github.com/go-toolsmith/astfmt" "github.com/go-toolsmith/astp" "golang.org/x/tools/go/ast/astutil" @@ -13,7 +14,7 @@ import ( func init() { var info linter.CheckerInfo info.Name = "exitAfterDefer" - info.Tags = []string{"diagnostic"} + info.Tags = []string{linter.DiagnosticTag} info.Summary = "Detects calls to exit/fatal inside functions that use defer" info.Before = ` defer os.Remove(filename) diff --git a/vendor/github.com/go-critic/go-critic/checkers/filepathJoin_checker.go b/vendor/github.com/go-critic/go-critic/checkers/filepathJoin_checker.go index 698f5366d6..17ab0ea83f 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/filepathJoin_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/filepathJoin_checker.go @@ -5,14 +5,15 @@ import ( "strings" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" + "github.com/go-toolsmith/astcast" ) func init() { var info linter.CheckerInfo info.Name = "filepathJoin" - info.Tags = []string{"diagnostic", "experimental"} + info.Tags = []string{linter.DiagnosticTag, linter.ExperimentalTag} info.Summary = "Detects problems in filepath.Join() function calls" info.Before = `filepath.Join("dir/", filename)` info.After = `filepath.Join("dir", filename)` diff --git a/vendor/github.com/go-critic/go-critic/checkers/flagName_checker.go b/vendor/github.com/go-critic/go-critic/checkers/flagName_checker.go index 7f6ce3c01f..7010668608 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/flagName_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/flagName_checker.go @@ -7,14 +7,15 @@ import ( "strings" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" + "github.com/go-toolsmith/astcast" ) func init() { var info linter.CheckerInfo info.Name = "flagName" - info.Tags = []string{"diagnostic"} + info.Tags = 
[]string{linter.DiagnosticTag} info.Summary = "Detects suspicious flag names" info.Before = `b := flag.Bool(" foo ", false, "description")` info.After = `b := flag.Bool("foo", false, "description")` @@ -63,7 +64,7 @@ func (c *flagNameChecker) checkFlagName(call *ast.CallExpr, arg ast.Expr) { case name == "": c.warnEmpty(call) case strings.HasPrefix(name, "-"): - c.warnHypenPrefix(call, name) + c.warnHyphenPrefix(call, name) case strings.Contains(name, "="): c.warnEq(call, name) case strings.Contains(name, " "): @@ -75,8 +76,8 @@ func (c *flagNameChecker) warnEmpty(cause ast.Node) { c.ctx.Warn(cause, "empty flag name") } -func (c *flagNameChecker) warnHypenPrefix(cause ast.Node, name string) { - c.ctx.Warn(cause, "flag name %q should not start with a hypen", name) +func (c *flagNameChecker) warnHyphenPrefix(cause ast.Node, name string) { + c.ctx.Warn(cause, "flag name %q should not start with a hyphen", name) } func (c *flagNameChecker) warnEq(cause ast.Node, name string) { diff --git a/vendor/github.com/go-critic/go-critic/checkers/hexLiteral_checker.go b/vendor/github.com/go-critic/go-critic/checkers/hexLiteral_checker.go index ae61a1125e..7301bd325a 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/hexLiteral_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/hexLiteral_checker.go @@ -6,14 +6,15 @@ import ( "strings" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" + "github.com/go-toolsmith/astcast" ) func init() { var info linter.CheckerInfo info.Name = "hexLiteral" - info.Tags = []string{"style", "experimental"} + info.Tags = []string{linter.StyleTag, linter.ExperimentalTag} info.Summary = "Detects hex literals that have mixed case letter digits" info.Before = ` x := 0X12 diff --git a/vendor/github.com/go-critic/go-critic/checkers/hugeParam_checker.go b/vendor/github.com/go-critic/go-critic/checkers/hugeParam_checker.go index 910be180b2..7b7a3c538b 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/hugeParam_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/hugeParam_checker.go @@ -4,14 +4,15 @@ import ( "go/ast" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" - "golang.org/x/exp/typeparams" + "github.com/go-critic/go-critic/linter" + + "github.com/go-toolsmith/astcast" ) func init() { var info linter.CheckerInfo info.Name = "hugeParam" - info.Tags = []string{"performance"} + info.Tags = []string{linter.PerformanceTag} info.Params = linter.CheckerParams{ "sizeThreshold": { Value: 80, @@ -40,19 +41,34 @@ type hugeParamChecker struct { func (c *hugeParamChecker) VisitFuncDecl(decl *ast.FuncDecl) { // TODO(quasilyte): maybe it's worthwhile to permit skipping // test files for this checker? + if c.isImplementStringer(decl) { + return + } + if decl.Recv != nil { c.checkParams(decl.Recv.List) } c.checkParams(decl.Type.Params.List) } +// isImplementStringer check method signature is: String() string. 
+func (*hugeParamChecker) isImplementStringer(decl *ast.FuncDecl) bool { + if decl.Recv != nil && + decl.Name.Name == "String" && + decl.Type != nil && + len(decl.Type.Params.List) == 0 && + len(decl.Type.Results.List) == 1 && + astcast.ToIdent(decl.Type.Results.List[0].Type).Name == "string" { + return true + } + + return false +} + func (c *hugeParamChecker) checkParams(params []*ast.Field) { for _, p := range params { for _, id := range p.Names { typ := c.ctx.TypeOf(id) - if _, ok := typ.(*typeparams.TypeParam); ok { - continue - } size, ok := c.ctx.SizeOf(typ) if ok && size >= c.sizeThreshold { c.warn(id, size) diff --git a/vendor/github.com/go-critic/go-critic/checkers/ifElseChain_checker.go b/vendor/github.com/go-critic/go-critic/checkers/ifElseChain_checker.go index b1fcf41472..e73c609d5c 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/ifElseChain_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/ifElseChain_checker.go @@ -4,13 +4,19 @@ import ( "go/ast" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" ) func init() { var info linter.CheckerInfo info.Name = "ifElseChain" - info.Tags = []string{"style"} + info.Tags = []string{linter.StyleTag} + info.Params = linter.CheckerParams{ + "minThreshold": { + Value: 2, + Usage: "min number of if-else blocks that makes the warning trigger", + }, + } info.Summary = "Detects repeated if-else statements and suggests to replace them with switch statement" info.Before = ` if cond1 { @@ -35,7 +41,10 @@ will trigger suggestion to use switch statement. See [EffectiveGo#switch](https://golang.org/doc/effective_go.html#switch).` collection.AddChecker(&info, func(ctx *linter.CheckerContext) (linter.FileWalker, error) { - return astwalk.WalkerForStmt(&ifElseChainChecker{ctx: ctx}), nil + return astwalk.WalkerForStmt(&ifElseChainChecker{ + ctx: ctx, + minThreshold: info.Params.Int("minThreshold"), + }), nil }) } @@ -45,6 +54,8 @@ type ifElseChainChecker struct { cause *ast.IfStmt visited map[*ast.IfStmt]bool + + minThreshold int } func (c *ifElseChainChecker) EnterFunc(fn *ast.FuncDecl) bool { @@ -66,8 +77,7 @@ func (c *ifElseChainChecker) VisitStmt(stmt ast.Stmt) { } func (c *ifElseChainChecker) checkIfStmt(stmt *ast.IfStmt) { - const minThreshold = 2 - if c.countIfelseLen(stmt) >= minThreshold { + if c.countIfelseLen(stmt) >= c.minThreshold { c.warn() } } @@ -75,11 +85,12 @@ func (c *ifElseChainChecker) checkIfStmt(stmt *ast.IfStmt) { func (c *ifElseChainChecker) countIfelseLen(stmt *ast.IfStmt) int { count := 0 for { + if stmt.Init != nil { + return 0 // Give up + } + switch e := stmt.Else.(type) { case *ast.IfStmt: - if e.Init != nil { - return 0 // Give up - } // Else if. 
stmt = e count++ diff --git a/vendor/github.com/go-critic/go-critic/checkers/importShadow_checker.go b/vendor/github.com/go-critic/go-critic/checkers/importShadow_checker.go index 5ac711fc1e..b690487b7b 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/importShadow_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/importShadow_checker.go @@ -5,13 +5,13 @@ import ( "go/types" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" ) func init() { var info linter.CheckerInfo info.Name = "importShadow" - info.Tags = []string{"style", "opinionated"} + info.Tags = []string{linter.StyleTag, linter.OpinionatedTag} info.Summary = "Detects when imported package names shadowed in the assignments" info.Before = ` // "path/filepath" is imported. diff --git a/vendor/github.com/go-critic/go-critic/checkers/initClause_checker.go b/vendor/github.com/go-critic/go-critic/checkers/initClause_checker.go index a1b6b2a8a8..8612717b27 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/initClause_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/initClause_checker.go @@ -4,14 +4,15 @@ import ( "go/ast" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" + "github.com/go-toolsmith/astp" ) func init() { var info linter.CheckerInfo info.Name = "initClause" - info.Tags = []string{"style", "opinionated", "experimental"} + info.Tags = []string{linter.StyleTag, linter.OpinionatedTag, linter.ExperimentalTag} info.Summary = "Detects non-assignment statements inside if/switch init clause" info.Before = `if sideEffect(); cond { }` diff --git a/vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/local_def_visitor.go b/vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/local_def_visitor.go index 47de589a46..0c9c14955e 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/local_def_visitor.go +++ b/vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/local_def_visitor.go @@ -7,9 +7,9 @@ import ( // LocalDefVisitor visits every name definitions inside a function. // // Next elements are considered as name definitions: -// - Function parameters (input, output, receiver) -// - Every LHS of ":=" assignment that defines a new name -// - Every local var/const declaration. +// - Function parameters (input, output, receiver) +// - Every LHS of ":=" assignment that defines a new name +// - Every local var/const declaration. // // NOTE: this visitor is experimental. // This is also why it lives in a separate file. @@ -18,20 +18,18 @@ type LocalDefVisitor interface { VisitLocalDef(Name, ast.Expr) } -type ( - // NameKind describes what kind of name Name object holds. - NameKind int +// NameKind describes what kind of name Name object holds. +type NameKind int - // Name holds ver/const/param definition symbol info. - Name struct { - ID *ast.Ident - Kind NameKind +// Name holds ver/const/param definition symbol info. +type Name struct { + ID *ast.Ident + Kind NameKind - // Index is NameVar-specific field that is used to - // specify nth tuple element being assigned to the name. - Index int - } -) + // Index is NameVar-specific field that is used to + // specify nth tuple element being assigned to the name. + Index int +} // NOTE: set of name kinds is not stable and may change over time. 
// diff --git a/vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/visitor.go b/vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/visitor.go index e5031a909f..3486a8e622 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/visitor.go +++ b/vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/visitor.go @@ -4,67 +4,64 @@ import ( "go/ast" ) -// Visitor interfaces. -type ( - // DocCommentVisitor visits every doc-comment. - // Does not visit doc-comments for function-local definitions (types, etc). - // Also does not visit package doc-comment (file-level doc-comments). - DocCommentVisitor interface { - VisitDocComment(*ast.CommentGroup) - } +// DocCommentVisitor visits every doc-comment. +// Does not visit doc-comments for function-local definitions (types, etc). +// Also does not visit package doc-comment (file-level doc-comments). +type DocCommentVisitor interface { + VisitDocComment(*ast.CommentGroup) +} - // FuncDeclVisitor visits every top-level function declaration. - FuncDeclVisitor interface { - walkerEvents - VisitFuncDecl(*ast.FuncDecl) - } +// FuncDeclVisitor visits every top-level function declaration. +type FuncDeclVisitor interface { + walkerEvents + VisitFuncDecl(*ast.FuncDecl) +} - // ExprVisitor visits every expression inside AST file. - ExprVisitor interface { - walkerEvents - VisitExpr(ast.Expr) - } +// ExprVisitor visits every expression inside AST file. +type ExprVisitor interface { + walkerEvents + VisitExpr(ast.Expr) +} - // LocalExprVisitor visits every expression inside function body. - LocalExprVisitor interface { - walkerEvents - VisitLocalExpr(ast.Expr) - } +// LocalExprVisitor visits every expression inside function body. +type LocalExprVisitor interface { + walkerEvents + VisitLocalExpr(ast.Expr) +} - // StmtListVisitor visits every statement list inside function body. - // This includes block statement bodies as well as implicit blocks - // introduced by case clauses and alike. - StmtListVisitor interface { - walkerEvents - VisitStmtList(ast.Node, []ast.Stmt) - } +// StmtListVisitor visits every statement list inside function body. +// This includes block statement bodies as well as implicit blocks +// introduced by case clauses and alike. +type StmtListVisitor interface { + walkerEvents + VisitStmtList(ast.Node, []ast.Stmt) +} - // StmtVisitor visits every statement inside function body. - StmtVisitor interface { - walkerEvents - VisitStmt(ast.Stmt) - } +// StmtVisitor visits every statement inside function body. +type StmtVisitor interface { + walkerEvents + VisitStmt(ast.Stmt) +} - // TypeExprVisitor visits every type describing expression. - // It also traverses struct types and interface types to run - // checker over their fields/method signatures. - TypeExprVisitor interface { - walkerEvents - VisitTypeExpr(ast.Expr) - } +// TypeExprVisitor visits every type describing expression. +// It also traverses struct types and interface types to run +// checker over their fields/method signatures. +type TypeExprVisitor interface { + walkerEvents + VisitTypeExpr(ast.Expr) +} - // LocalCommentVisitor visits every comment inside function body. - LocalCommentVisitor interface { - walkerEvents - VisitLocalComment(*ast.CommentGroup) - } +// LocalCommentVisitor visits every comment inside function body. +type LocalCommentVisitor interface { + walkerEvents + VisitLocalComment(*ast.CommentGroup) +} - // CommentVisitor visits every comment. 
- CommentVisitor interface { - walkerEvents - VisitComment(*ast.CommentGroup) - } -) +// CommentVisitor visits every comment. +type CommentVisitor interface { + walkerEvents + VisitComment(*ast.CommentGroup) +} // walkerEvents describes common hooks available for most visitor types. type walkerEvents interface { diff --git a/vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/walker.go b/vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/walker.go index cd5e1c9793..f838a64c15 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/walker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/internal/astwalk/walker.go @@ -3,7 +3,7 @@ package astwalk import ( "go/types" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" ) // WalkerForFuncDecl returns file walker implementation for FuncDeclVisitor. diff --git a/vendor/github.com/go-critic/go-critic/checkers/internal/lintutil/astflow.go b/vendor/github.com/go-critic/go-critic/checkers/internal/lintutil/astflow.go index 63d181e5eb..f64907d69b 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/internal/lintutil/astflow.go +++ b/vendor/github.com/go-critic/go-critic/checkers/internal/lintutil/astflow.go @@ -18,7 +18,7 @@ import ( // // If proven really useful, can be moved to go-toolsmith library. -// IsImmutable reports whether n can be midified through any operation. +// IsImmutable reports whether n can be modified through any operation. func IsImmutable(info *types.Info, n ast.Expr) bool { if astp.IsBasicLit(n) { return true diff --git a/vendor/github.com/go-critic/go-critic/checkers/mapKey_checker.go b/vendor/github.com/go-critic/go-critic/checkers/mapKey_checker.go index 64c2821dd1..2885dc7254 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/mapKey_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/mapKey_checker.go @@ -7,7 +7,8 @@ import ( "github.com/go-critic/go-critic/checkers/internal/astwalk" "github.com/go-critic/go-critic/checkers/internal/lintutil" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" + "github.com/go-toolsmith/astcast" "github.com/go-toolsmith/astp" "github.com/go-toolsmith/typep" @@ -16,7 +17,7 @@ import ( func init() { var info linter.CheckerInfo info.Name = "mapKey" - info.Tags = []string{"diagnostic"} + info.Tags = []string{linter.DiagnosticTag} info.Summary = "Detects suspicious map literal keys" info.Before = ` _ = map[string]int{ @@ -116,7 +117,7 @@ func (c *mapKeyChecker) checkWhitespace(lit *ast.CompositeLit) { } func (c *mapKeyChecker) warnWhitespace(key ast.Node) { - c.ctx.Warn(key, "suspucious whitespace in %s key", key) + c.ctx.Warn(key, "suspicious whitespace in %s key", key) } func (c *mapKeyChecker) warnDupKey(key ast.Node) { diff --git a/vendor/github.com/go-critic/go-critic/checkers/methodExprCall_checker.go b/vendor/github.com/go-critic/go-critic/checkers/methodExprCall_checker.go index 2553def14f..755d3b4722 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/methodExprCall_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/methodExprCall_checker.go @@ -5,7 +5,8 @@ import ( "go/token" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" + "github.com/go-toolsmith/astcast" "github.com/go-toolsmith/astcopy" "github.com/go-toolsmith/typep" @@ -14,7 +15,7 @@ import ( func init() { var info linter.CheckerInfo info.Name 
= "methodExprCall" - info.Tags = []string{"style", "experimental"} + info.Tags = []string{linter.StyleTag, linter.ExperimentalTag} info.Summary = "Detects method expression call that can be replaced with a method call" info.Before = `f := foo{} foo.bar(f)` diff --git a/vendor/github.com/go-critic/go-critic/checkers/nestingReduce_checker.go b/vendor/github.com/go-critic/go-critic/checkers/nestingReduce_checker.go index a68acecca5..dfe73018c8 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/nestingReduce_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/nestingReduce_checker.go @@ -4,13 +4,13 @@ import ( "go/ast" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" ) func init() { var info linter.CheckerInfo info.Name = "nestingReduce" - info.Tags = []string{"style", "opinionated", "experimental"} + info.Tags = []string{linter.StyleTag, linter.OpinionatedTag, linter.ExperimentalTag} info.Params = linter.CheckerParams{ "bodyWidth": { Value: 5, diff --git a/vendor/github.com/go-critic/go-critic/checkers/newDeref_checker.go b/vendor/github.com/go-critic/go-critic/checkers/newDeref_checker.go index 7e564b70f9..1a1b05e0df 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/newDeref_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/newDeref_checker.go @@ -2,10 +2,12 @@ package checkers import ( "go/ast" + "go/types" "github.com/go-critic/go-critic/checkers/internal/astwalk" "github.com/go-critic/go-critic/checkers/internal/lintutil" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" + "github.com/go-toolsmith/astcast" "golang.org/x/tools/go/ast/astutil" ) @@ -13,7 +15,7 @@ import ( func init() { var info linter.CheckerInfo info.Name = "newDeref" - info.Tags = []string{"style"} + info.Tags = []string{linter.StyleTag} info.Summary = "Detects immediate dereferencing of `new` expressions" info.Before = `x := *new(bool)` info.After = `x := false` @@ -33,6 +35,10 @@ func (c *newDerefChecker) VisitExpr(expr ast.Expr) { call := astcast.ToCallExpr(deref.X) if astcast.ToIdent(call.Fun).Name == "new" { typ := c.ctx.TypeOf(call.Args[0]) + // allow *new(T) if T is a type parameter, see #1272 for details + if _, ok := typ.(*types.TypeParam); ok { + return + } zv := lintutil.ZeroValueOf(astutil.Unparen(call.Args[0]), typ) if zv != nil { c.warn(expr, zv) diff --git a/vendor/github.com/go-critic/go-critic/checkers/nilValReturn_checker.go b/vendor/github.com/go-critic/go-critic/checkers/nilValReturn_checker.go index 0a8e793eea..9a1213f5c2 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/nilValReturn_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/nilValReturn_checker.go @@ -5,7 +5,8 @@ import ( "go/token" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" + "github.com/go-toolsmith/astequal" "github.com/go-toolsmith/typep" ) @@ -13,7 +14,7 @@ import ( func init() { var info linter.CheckerInfo info.Name = "nilValReturn" - info.Tags = []string{"diagnostic", "experimental"} + info.Tags = []string{linter.DiagnosticTag, linter.ExperimentalTag} info.Summary = "Detects return statements those results evaluate to nil" info.Before = ` if err == nil { diff --git a/vendor/github.com/go-critic/go-critic/checkers/octalLiteral_checker.go b/vendor/github.com/go-critic/go-critic/checkers/octalLiteral_checker.go index 
bed227ac3d..a25fac85cc 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/octalLiteral_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/octalLiteral_checker.go @@ -7,14 +7,15 @@ import ( "unicode" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" + "github.com/go-toolsmith/astcast" ) func init() { var info linter.CheckerInfo info.Name = "octalLiteral" - info.Tags = []string{"style", "experimental", "opinionated"} + info.Tags = []string{linter.StyleTag, linter.ExperimentalTag, linter.OpinionatedTag} info.Summary = "Detects old-style octal literals" info.Before = `foo(02)` info.After = `foo(0o2)` @@ -30,6 +31,9 @@ type octalLiteralChecker struct { } func (c *octalLiteralChecker) VisitExpr(expr ast.Expr) { + if !c.ctx.GoVersion.GreaterOrEqual(linter.GoVersion{Major: 1, Minor: 13}) { + return + } lit := astcast.ToBasicLit(expr) if lit.Kind != token.INT { return diff --git a/vendor/github.com/go-critic/go-critic/checkers/paramTypeCombine_checker.go b/vendor/github.com/go-critic/go-critic/checkers/paramTypeCombine_checker.go index c80e6f8bcd..c777fec9e6 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/paramTypeCombine_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/paramTypeCombine_checker.go @@ -4,7 +4,8 @@ import ( "go/ast" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" + "github.com/go-toolsmith/astcopy" "github.com/go-toolsmith/astequal" ) @@ -12,7 +13,7 @@ import ( func init() { var info linter.CheckerInfo info.Name = "paramTypeCombine" - info.Tags = []string{"style", "opinionated"} + info.Tags = []string{linter.StyleTag, linter.OpinionatedTag} info.Summary = "Detects if function parameters could be combined by type and suggest the way to do it" info.Before = `func foo(a, b int, c, d int, e, f int, g int) {}` info.After = `func foo(a, b, c, d, e, f, g int) {}` @@ -46,6 +47,7 @@ func (c *paramTypeCombineChecker) optimizeFuncType(f *ast.FuncType) *ast.FuncTyp return optimizedParamFunc } + func (c *paramTypeCombineChecker) optimizeParams(params *ast.FieldList) *ast.FieldList { // To avoid false positives, skip unnamed param lists. // @@ -71,8 +73,7 @@ func (c *paramTypeCombineChecker) optimizeParams(params *ast.FieldList) *ast.Fie names = make([]*ast.Ident, len(p.Names)) copy(names, p.Names) if astequal.Expr(p.Type, params.List[i].Type) { - list[len(list)-1].Names = - append(list[len(list)-1].Names, names...) + list[len(list)-1].Names = append(list[len(list)-1].Names, names...) 
} else { list = append(list, &ast.Field{ Names: names, diff --git a/vendor/github.com/go-critic/go-critic/checkers/ptrToRefParam_checker.go b/vendor/github.com/go-critic/go-critic/checkers/ptrToRefParam_checker.go index 88c8f4cb36..172a4acb58 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/ptrToRefParam_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/ptrToRefParam_checker.go @@ -5,13 +5,13 @@ import ( "go/types" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" ) func init() { var info linter.CheckerInfo info.Name = "ptrToRefParam" - info.Tags = []string{"style", "opinionated", "experimental"} + info.Tags = []string{linter.StyleTag, linter.OpinionatedTag, linter.ExperimentalTag} info.Summary = "Detects input and output parameters that have a type of pointer to referential type" info.Before = `func f(m *map[string]int) (*chan *int)` info.After = `func f(m map[string]int) (chan *int)` diff --git a/vendor/github.com/go-critic/go-critic/checkers/rangeExprCopy_checker.go b/vendor/github.com/go-critic/go-critic/checkers/rangeExprCopy_checker.go index 813fff36a4..3f61ee0bda 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/rangeExprCopy_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/rangeExprCopy_checker.go @@ -5,13 +5,13 @@ import ( "go/types" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" ) func init() { var info linter.CheckerInfo info.Name = "rangeExprCopy" - info.Tags = []string{"performance"} + info.Tags = []string{linter.PerformanceTag} info.Params = linter.CheckerParams{ "sizeThreshold": { Value: 512, diff --git a/vendor/github.com/go-critic/go-critic/checkers/rangeValCopy_checker.go b/vendor/github.com/go-critic/go-critic/checkers/rangeValCopy_checker.go index eafc549d60..6d15c30cd1 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/rangeValCopy_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/rangeValCopy_checker.go @@ -4,14 +4,13 @@ import ( "go/ast" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" - "golang.org/x/exp/typeparams" + "github.com/go-critic/go-critic/linter" ) func init() { var info linter.CheckerInfo info.Name = "rangeValCopy" - info.Tags = []string{"performance"} + info.Tags = []string{linter.PerformanceTag} info.Params = linter.CheckerParams{ "sizeThreshold": { Value: 128, @@ -66,10 +65,8 @@ func (c *rangeValCopyChecker) VisitStmt(stmt ast.Stmt) { if typ == nil { return } - if _, ok := typ.(*typeparams.TypeParam); ok { - return - } - if size, ok := c.ctx.SizeOf(typ); ok && size >= c.sizeThreshold { + size, ok := c.ctx.SizeOf(typ) + if ok && size >= c.sizeThreshold { c.warn(rng, size) } } diff --git a/vendor/github.com/go-critic/go-critic/checkers/regexpPattern_checker.go b/vendor/github.com/go-critic/go-critic/checkers/regexpPattern_checker.go index 31dc4aad3e..45aba261ba 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/regexpPattern_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/regexpPattern_checker.go @@ -7,13 +7,13 @@ import ( "strings" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" ) func init() { var info linter.CheckerInfo info.Name = "regexpPattern" - info.Tags = []string{"diagnostic", 
"experimental"} + info.Tags = []string{linter.DiagnosticTag, linter.ExperimentalTag} info.Summary = "Detects suspicious regexp patterns" info.Before = "regexp.MustCompile(`google.com|yandex.ru`)" info.After = "regexp.MustCompile(`google\\.com|yandex\\.ru`)" diff --git a/vendor/github.com/go-critic/go-critic/checkers/regexpSimplify_checker.go b/vendor/github.com/go-critic/go-critic/checkers/regexpSimplify_checker.go index 5b15e05ed2..f500f43500 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/regexpSimplify_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/regexpSimplify_checker.go @@ -8,16 +8,16 @@ import ( "strings" "unicode/utf8" - "github.com/quasilyte/regex/syntax" - "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" + + "github.com/quasilyte/regex/syntax" ) func init() { var info linter.CheckerInfo info.Name = "regexpSimplify" - info.Tags = []string{"style", "experimental", "opinionated"} + info.Tags = []string{linter.StyleTag, linter.ExperimentalTag, linter.OpinionatedTag} info.Summary = "Detects regexp patterns that can be simplified" info.Before = "regexp.MustCompile(`(?:a|b|c) [a-z][a-z]*`)" info.After = "regexp.MustCompile(`[abc] {3}[a-z]+`)" diff --git a/vendor/github.com/go-critic/go-critic/checkers/ruleguard_checker.go b/vendor/github.com/go-critic/go-critic/checkers/ruleguard_checker.go index 35c6a64490..29723a69a9 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/ruleguard_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/ruleguard_checker.go @@ -12,15 +12,15 @@ import ( "sort" "strings" - "github.com/quasilyte/go-ruleguard/ruleguard" + "github.com/go-critic/go-critic/linter" - "github.com/go-critic/go-critic/framework/linter" + "github.com/quasilyte/go-ruleguard/ruleguard" ) func init() { var info linter.CheckerInfo info.Name = "ruleguard" - info.Tags = []string{"style", "experimental"} + info.Tags = []string{linter.StyleTag, linter.ExperimentalTag} info.Params = linter.CheckerParams{ "rules": { Value: "", @@ -84,7 +84,7 @@ func newErrorHandler(failOnErrorFlag string) (*parseErrorHandler, error) { h := parseErrorHandler{ failureConditions: make(map[string]func(err error) bool), } - var failOnErrorPredicates = map[string]func(error) bool{ + failOnErrorPredicates := map[string]func(error) bool{ "dsl": func(err error) bool { var e *ruleguard.ImportError; return !errors.As(err, &e) }, "import": func(err error) bool { var e *ruleguard.ImportError; return errors.As(err, &e) }, "all": func(err error) bool { return true }, @@ -160,8 +160,8 @@ func newRuleguardChecker(info *linter.CheckerInfo, ctx *linter.CheckerContext) ( } } - if !enabledTags["experimental"] { - disabledTags["experimental"] = true + if !enabledTags[linter.ExperimentalTag] { + disabledTags[linter.ExperimentalTag] = true } ruleguardDebug := os.Getenv("GOCRITIC_RULEGUARD_DEBUG") != "" @@ -274,7 +274,7 @@ func (c *ruleguardChecker) WalkFile(f *ast.File) { func runRuleguardEngine(ctx *linter.CheckerContext, f *ast.File, e *ruleguard.Engine, runCtx *ruleguard.RunContext) { type ruleguardReport struct { - node ast.Node + pos token.Pos message string fix linter.QuickFix } @@ -284,7 +284,7 @@ func runRuleguardEngine(ctx *linter.CheckerContext, f *ast.File, e *ruleguard.En // TODO(quasilyte): investigate whether we should add a rule name as // a message prefix here. 
r := ruleguardReport{ - node: data.Node, + pos: data.Node.Pos(), message: data.Message, } fix := data.Suggestion @@ -310,9 +310,9 @@ func runRuleguardEngine(ctx *linter.CheckerContext, f *ast.File, e *ruleguard.En }) for _, report := range reports { if report.fix.Replacement != nil { - ctx.WarnFixable(report.node, report.fix, "%s", report.message) + ctx.WarnFixableWithPos(report.pos, report.fix, "%s", report.message) } else { - ctx.Warn(report.node, "%s", report.message) + ctx.WarnWithPos(report.pos, "%s", report.message) } } } diff --git a/vendor/github.com/go-critic/go-critic/checkers/rulesdata/rulesdata.go b/vendor/github.com/go-critic/go-critic/checkers/rulesdata/rulesdata.go index b5dc58236c..c568df3998 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/rulesdata/rulesdata.go +++ b/vendor/github.com/go-critic/go-critic/checkers/rulesdata/rulesdata.go @@ -61,37 +61,20 @@ var PrecompiledRules = &ir.File{ {Line: 17, Value: "fmt.Sprintf(\"%s\", $x)"}, {Line: 17, Value: "fmt.Sprintf(\"%v\", $x)"}, }, - ReportTemplate: "use $x.Error() instead", - SuggestTemplate: "$x.Error()", - WhereExpr: ir.FilterExpr{ - Line: 18, - Op: ir.FilterVarTypeImplementsOp, - Src: "m[\"x\"].Type.Implements(`error`)", - Value: "x", - Args: []ir.FilterExpr{{Line: 18, Op: ir.FilterStringOp, Src: "`error`", Value: "error"}}, - }, - }, - { - Line: 22, - SyntaxPatterns: []ir.PatternString{ - {Line: 22, Value: "fmt.Sprint($x)"}, - {Line: 22, Value: "fmt.Sprintf(\"%s\", $x)"}, - {Line: 22, Value: "fmt.Sprintf(\"%v\", $x)"}, - }, ReportTemplate: "$x is already string", SuggestTemplate: "$x", WhereExpr: ir.FilterExpr{ - Line: 23, + Line: 18, Op: ir.FilterVarTypeIsOp, Src: "m[\"x\"].Type.Is(`string`)", Value: "x", - Args: []ir.FilterExpr{{Line: 23, Op: ir.FilterStringOp, Src: "`string`", Value: "string"}}, + Args: []ir.FilterExpr{{Line: 18, Op: ir.FilterStringOp, Src: "`string`", Value: "string"}}, }, }, }, }, { - Line: 32, + Line: 27, Name: "deferUnlambda", MatcherName: "m", DocTags: []string{"style", "experimental"}, @@ -100,55 +83,55 @@ var PrecompiledRules = &ir.File{ DocAfter: "defer f()", Rules: []ir.Rule{ { - Line: 33, - SyntaxPatterns: []ir.PatternString{{Line: 33, Value: "defer func() { $f($*args) }()"}}, + Line: 28, + SyntaxPatterns: []ir.PatternString{{Line: 28, Value: "defer func() { $f($*args) }()"}}, ReportTemplate: "can rewrite as `defer $f($args)`", WhereExpr: ir.FilterExpr{ - Line: 34, + Line: 29, Op: ir.FilterAndOp, Src: "m[\"f\"].Node.Is(`Ident`) && m[\"f\"].Text != \"panic\" && m[\"f\"].Text != \"recover\" && m[\"args\"].Const", Args: []ir.FilterExpr{ { - Line: 34, + Line: 29, Op: ir.FilterAndOp, Src: "m[\"f\"].Node.Is(`Ident`) && m[\"f\"].Text != \"panic\" && m[\"f\"].Text != \"recover\"", Args: []ir.FilterExpr{ { - Line: 34, + Line: 29, Op: ir.FilterAndOp, Src: "m[\"f\"].Node.Is(`Ident`) && m[\"f\"].Text != \"panic\"", Args: []ir.FilterExpr{ { - Line: 34, + Line: 29, Op: ir.FilterVarNodeIsOp, Src: "m[\"f\"].Node.Is(`Ident`)", Value: "f", - Args: []ir.FilterExpr{{Line: 34, Op: ir.FilterStringOp, Src: "`Ident`", Value: "Ident"}}, + Args: []ir.FilterExpr{{Line: 29, Op: ir.FilterStringOp, Src: "`Ident`", Value: "Ident"}}, }, { - Line: 34, + Line: 29, Op: ir.FilterNeqOp, Src: "m[\"f\"].Text != \"panic\"", Args: []ir.FilterExpr{ - {Line: 34, Op: ir.FilterVarTextOp, Src: "m[\"f\"].Text", Value: "f"}, - {Line: 34, Op: ir.FilterStringOp, Src: "\"panic\"", Value: "panic"}, + {Line: 29, Op: ir.FilterVarTextOp, Src: "m[\"f\"].Text", Value: "f"}, + {Line: 29, Op: ir.FilterStringOp, Src: "\"panic\"", Value: 
"panic"}, }, }, }, }, { - Line: 34, + Line: 29, Op: ir.FilterNeqOp, Src: "m[\"f\"].Text != \"recover\"", Args: []ir.FilterExpr{ - {Line: 34, Op: ir.FilterVarTextOp, Src: "m[\"f\"].Text", Value: "f"}, - {Line: 34, Op: ir.FilterStringOp, Src: "\"recover\"", Value: "recover"}, + {Line: 29, Op: ir.FilterVarTextOp, Src: "m[\"f\"].Text", Value: "f"}, + {Line: 29, Op: ir.FilterStringOp, Src: "\"recover\"", Value: "recover"}, }, }, }, }, { - Line: 34, + Line: 29, Op: ir.FilterVarConstOp, Src: "m[\"args\"].Const", Value: "args", @@ -157,28 +140,28 @@ var PrecompiledRules = &ir.File{ }, }, { - Line: 37, - SyntaxPatterns: []ir.PatternString{{Line: 37, Value: "defer func() { $pkg.$f($*args) }()"}}, + Line: 32, + SyntaxPatterns: []ir.PatternString{{Line: 32, Value: "defer func() { $pkg.$f($*args) }()"}}, ReportTemplate: "can rewrite as `defer $pkg.$f($args)`", WhereExpr: ir.FilterExpr{ - Line: 38, + Line: 33, Op: ir.FilterAndOp, Src: "m[\"f\"].Node.Is(`Ident`) && m[\"args\"].Const && m[\"pkg\"].Object.Is(`PkgName`)", Args: []ir.FilterExpr{ { - Line: 38, + Line: 33, Op: ir.FilterAndOp, Src: "m[\"f\"].Node.Is(`Ident`) && m[\"args\"].Const", Args: []ir.FilterExpr{ { - Line: 38, + Line: 33, Op: ir.FilterVarNodeIsOp, Src: "m[\"f\"].Node.Is(`Ident`)", Value: "f", - Args: []ir.FilterExpr{{Line: 38, Op: ir.FilterStringOp, Src: "`Ident`", Value: "Ident"}}, + Args: []ir.FilterExpr{{Line: 33, Op: ir.FilterStringOp, Src: "`Ident`", Value: "Ident"}}, }, { - Line: 38, + Line: 33, Op: ir.FilterVarConstOp, Src: "m[\"args\"].Const", Value: "args", @@ -186,11 +169,11 @@ var PrecompiledRules = &ir.File{ }, }, { - Line: 38, + Line: 33, Op: ir.FilterVarObjectIsOp, Src: "m[\"pkg\"].Object.Is(`PkgName`)", Value: "pkg", - Args: []ir.FilterExpr{{Line: 38, Op: ir.FilterStringOp, Src: "`PkgName`", Value: "PkgName"}}, + Args: []ir.FilterExpr{{Line: 33, Op: ir.FilterStringOp, Src: "`PkgName`", Value: "PkgName"}}, }, }, }, @@ -198,84 +181,7 @@ var PrecompiledRules = &ir.File{ }, }, { - Line: 46, - Name: "ioutilDeprecated", - MatcherName: "m", - DocTags: []string{"style", "experimental"}, - DocSummary: "Detects deprecated io/ioutil package usages", - DocBefore: "ioutil.ReadAll(r)", - DocAfter: "io.ReadAll(r)", - Rules: []ir.Rule{ - { - Line: 47, - SyntaxPatterns: []ir.PatternString{{Line: 47, Value: "ioutil.ReadAll($_)"}}, - ReportTemplate: "ioutil.ReadAll is deprecated, use io.ReadAll instead", - WhereExpr: ir.FilterExpr{ - Line: 48, - Op: ir.FilterGoVersionGreaterEqThanOp, - Src: "m.GoVersion().GreaterEqThan(\"1.16\")", - Value: "1.16", - }, - }, - { - Line: 51, - SyntaxPatterns: []ir.PatternString{{Line: 51, Value: "ioutil.ReadFile($_)"}}, - ReportTemplate: "ioutil.ReadFile is deprecated, use os.ReadFile instead", - WhereExpr: ir.FilterExpr{ - Line: 52, - Op: ir.FilterGoVersionGreaterEqThanOp, - Src: "m.GoVersion().GreaterEqThan(\"1.16\")", - Value: "1.16", - }, - }, - { - Line: 55, - SyntaxPatterns: []ir.PatternString{{Line: 55, Value: "ioutil.WriteFile($_, $_, $_)"}}, - ReportTemplate: "ioutil.WriteFile is deprecated, use os.WriteFile instead", - WhereExpr: ir.FilterExpr{ - Line: 56, - Op: ir.FilterGoVersionGreaterEqThanOp, - Src: "m.GoVersion().GreaterEqThan(\"1.16\")", - Value: "1.16", - }, - }, - { - Line: 59, - SyntaxPatterns: []ir.PatternString{{Line: 59, Value: "ioutil.ReadDir($_)"}}, - ReportTemplate: "ioutil.ReadDir is deprecated, use os.ReadDir instead", - WhereExpr: ir.FilterExpr{ - Line: 60, - Op: ir.FilterGoVersionGreaterEqThanOp, - Src: "m.GoVersion().GreaterEqThan(\"1.16\")", - Value: "1.16", - }, - }, - { - Line: 
63, - SyntaxPatterns: []ir.PatternString{{Line: 63, Value: "ioutil.NopCloser($_)"}}, - ReportTemplate: "ioutil.NopCloser is deprecated, use io.NopCloser instead", - WhereExpr: ir.FilterExpr{ - Line: 64, - Op: ir.FilterGoVersionGreaterEqThanOp, - Src: "m.GoVersion().GreaterEqThan(\"1.16\")", - Value: "1.16", - }, - }, - { - Line: 67, - SyntaxPatterns: []ir.PatternString{{Line: 67, Value: "ioutil.Discard"}}, - ReportTemplate: "ioutil.Discard is deprecated, use io.Discard instead", - WhereExpr: ir.FilterExpr{ - Line: 68, - Op: ir.FilterGoVersionGreaterEqThanOp, - Src: "m.GoVersion().GreaterEqThan(\"1.16\")", - Value: "1.16", - }, - }, - }, - }, - { - Line: 76, + Line: 41, Name: "badLock", MatcherName: "m", DocTags: []string{"diagnostic", "experimental"}, @@ -284,91 +190,91 @@ var PrecompiledRules = &ir.File{ DocAfter: "mu.Lock(); defer mu.Unlock()", Rules: []ir.Rule{ { - Line: 80, - SyntaxPatterns: []ir.PatternString{{Line: 80, Value: "$mu1.Lock(); $mu2.Unlock()"}}, + Line: 45, + SyntaxPatterns: []ir.PatternString{{Line: 45, Value: "$mu1.Lock(); $mu2.Unlock()"}}, ReportTemplate: "defer is missing, mutex is unlocked immediately", WhereExpr: ir.FilterExpr{ - Line: 81, + Line: 46, Op: ir.FilterEqOp, Src: "m[\"mu1\"].Text == m[\"mu2\"].Text", Args: []ir.FilterExpr{ - {Line: 81, Op: ir.FilterVarTextOp, Src: "m[\"mu1\"].Text", Value: "mu1"}, - {Line: 81, Op: ir.FilterVarTextOp, Src: "m[\"mu2\"].Text", Value: "mu2"}, + {Line: 46, Op: ir.FilterVarTextOp, Src: "m[\"mu1\"].Text", Value: "mu1"}, + {Line: 46, Op: ir.FilterVarTextOp, Src: "m[\"mu2\"].Text", Value: "mu2"}, }, }, LocationVar: "mu2", }, { - Line: 85, - SyntaxPatterns: []ir.PatternString{{Line: 85, Value: "$mu1.RLock(); $mu2.RUnlock()"}}, + Line: 50, + SyntaxPatterns: []ir.PatternString{{Line: 50, Value: "$mu1.RLock(); $mu2.RUnlock()"}}, ReportTemplate: "defer is missing, mutex is unlocked immediately", WhereExpr: ir.FilterExpr{ - Line: 86, + Line: 51, Op: ir.FilterEqOp, Src: "m[\"mu1\"].Text == m[\"mu2\"].Text", Args: []ir.FilterExpr{ - {Line: 86, Op: ir.FilterVarTextOp, Src: "m[\"mu1\"].Text", Value: "mu1"}, - {Line: 86, Op: ir.FilterVarTextOp, Src: "m[\"mu2\"].Text", Value: "mu2"}, + {Line: 51, Op: ir.FilterVarTextOp, Src: "m[\"mu1\"].Text", Value: "mu1"}, + {Line: 51, Op: ir.FilterVarTextOp, Src: "m[\"mu2\"].Text", Value: "mu2"}, }, }, LocationVar: "mu2", }, { - Line: 91, - SyntaxPatterns: []ir.PatternString{{Line: 91, Value: "$mu1.Lock(); defer $mu2.RUnlock()"}}, + Line: 56, + SyntaxPatterns: []ir.PatternString{{Line: 56, Value: "$mu1.Lock(); defer $mu2.RUnlock()"}}, ReportTemplate: "suspicious unlock, maybe Unlock was intended?", WhereExpr: ir.FilterExpr{ - Line: 92, + Line: 57, Op: ir.FilterEqOp, Src: "m[\"mu1\"].Text == m[\"mu2\"].Text", Args: []ir.FilterExpr{ - {Line: 92, Op: ir.FilterVarTextOp, Src: "m[\"mu1\"].Text", Value: "mu1"}, - {Line: 92, Op: ir.FilterVarTextOp, Src: "m[\"mu2\"].Text", Value: "mu2"}, + {Line: 57, Op: ir.FilterVarTextOp, Src: "m[\"mu1\"].Text", Value: "mu1"}, + {Line: 57, Op: ir.FilterVarTextOp, Src: "m[\"mu2\"].Text", Value: "mu2"}, }, }, LocationVar: "mu2", }, { - Line: 96, - SyntaxPatterns: []ir.PatternString{{Line: 96, Value: "$mu1.RLock(); defer $mu2.Unlock()"}}, + Line: 61, + SyntaxPatterns: []ir.PatternString{{Line: 61, Value: "$mu1.RLock(); defer $mu2.Unlock()"}}, ReportTemplate: "suspicious unlock, maybe RUnlock was intended?", WhereExpr: ir.FilterExpr{ - Line: 97, + Line: 62, Op: ir.FilterEqOp, Src: "m[\"mu1\"].Text == m[\"mu2\"].Text", Args: []ir.FilterExpr{ - {Line: 97, Op: ir.FilterVarTextOp, Src: 
"m[\"mu1\"].Text", Value: "mu1"}, - {Line: 97, Op: ir.FilterVarTextOp, Src: "m[\"mu2\"].Text", Value: "mu2"}, + {Line: 62, Op: ir.FilterVarTextOp, Src: "m[\"mu1\"].Text", Value: "mu1"}, + {Line: 62, Op: ir.FilterVarTextOp, Src: "m[\"mu2\"].Text", Value: "mu2"}, }, }, LocationVar: "mu2", }, { - Line: 102, - SyntaxPatterns: []ir.PatternString{{Line: 102, Value: "$mu1.Lock(); defer $mu2.Lock()"}}, + Line: 67, + SyntaxPatterns: []ir.PatternString{{Line: 67, Value: "$mu1.Lock(); defer $mu2.Lock()"}}, ReportTemplate: "maybe defer $mu1.Unlock() was intended?", WhereExpr: ir.FilterExpr{ - Line: 103, + Line: 68, Op: ir.FilterEqOp, Src: "m[\"mu1\"].Text == m[\"mu2\"].Text", Args: []ir.FilterExpr{ - {Line: 103, Op: ir.FilterVarTextOp, Src: "m[\"mu1\"].Text", Value: "mu1"}, - {Line: 103, Op: ir.FilterVarTextOp, Src: "m[\"mu2\"].Text", Value: "mu2"}, + {Line: 68, Op: ir.FilterVarTextOp, Src: "m[\"mu1\"].Text", Value: "mu1"}, + {Line: 68, Op: ir.FilterVarTextOp, Src: "m[\"mu2\"].Text", Value: "mu2"}, }, }, LocationVar: "mu2", }, { - Line: 107, - SyntaxPatterns: []ir.PatternString{{Line: 107, Value: "$mu1.RLock(); defer $mu2.RLock()"}}, + Line: 72, + SyntaxPatterns: []ir.PatternString{{Line: 72, Value: "$mu1.RLock(); defer $mu2.RLock()"}}, ReportTemplate: "maybe defer $mu1.RUnlock() was intended?", WhereExpr: ir.FilterExpr{ - Line: 108, + Line: 73, Op: ir.FilterEqOp, Src: "m[\"mu1\"].Text == m[\"mu2\"].Text", Args: []ir.FilterExpr{ - {Line: 108, Op: ir.FilterVarTextOp, Src: "m[\"mu1\"].Text", Value: "mu1"}, - {Line: 108, Op: ir.FilterVarTextOp, Src: "m[\"mu2\"].Text", Value: "mu2"}, + {Line: 73, Op: ir.FilterVarTextOp, Src: "m[\"mu1\"].Text", Value: "mu1"}, + {Line: 73, Op: ir.FilterVarTextOp, Src: "m[\"mu2\"].Text", Value: "mu2"}, }, }, LocationVar: "mu2", @@ -376,7 +282,7 @@ var PrecompiledRules = &ir.File{ }, }, { - Line: 117, + Line: 82, Name: "httpNoBody", MatcherName: "m", DocTags: []string{"style", "experimental"}, @@ -385,39 +291,54 @@ var PrecompiledRules = &ir.File{ DocAfter: "http.NewRequest(\"GET\", url, http.NoBody)", Rules: []ir.Rule{ { - Line: 118, - SyntaxPatterns: []ir.PatternString{{Line: 118, Value: "http.NewRequest($method, $url, $nil)"}}, + Line: 83, + SyntaxPatterns: []ir.PatternString{{Line: 83, Value: "http.NewRequest($method, $url, $nil)"}}, ReportTemplate: "http.NoBody should be preferred to the nil request body", SuggestTemplate: "http.NewRequest($method, $url, http.NoBody)", WhereExpr: ir.FilterExpr{ - Line: 119, + Line: 84, Op: ir.FilterEqOp, Src: "m[\"nil\"].Text == \"nil\"", Args: []ir.FilterExpr{ - {Line: 119, Op: ir.FilterVarTextOp, Src: "m[\"nil\"].Text", Value: "nil"}, - {Line: 119, Op: ir.FilterStringOp, Src: "\"nil\"", Value: "nil"}, + {Line: 84, Op: ir.FilterVarTextOp, Src: "m[\"nil\"].Text", Value: "nil"}, + {Line: 84, Op: ir.FilterStringOp, Src: "\"nil\"", Value: "nil"}, }, }, }, { - Line: 123, - SyntaxPatterns: []ir.PatternString{{Line: 123, Value: "http.NewRequestWithContext($ctx, $method, $url, $nil)"}}, + Line: 88, + SyntaxPatterns: []ir.PatternString{{Line: 88, Value: "http.NewRequestWithContext($ctx, $method, $url, $nil)"}}, ReportTemplate: "http.NoBody should be preferred to the nil request body", SuggestTemplate: "http.NewRequestWithContext($ctx, $method, $url, http.NoBody)", WhereExpr: ir.FilterExpr{ - Line: 124, + Line: 89, Op: ir.FilterEqOp, Src: "m[\"nil\"].Text == \"nil\"", Args: []ir.FilterExpr{ - {Line: 124, Op: ir.FilterVarTextOp, Src: "m[\"nil\"].Text", Value: "nil"}, - {Line: 124, Op: ir.FilterStringOp, Src: "\"nil\"", Value: "nil"}, + {Line: 89, 
Op: ir.FilterVarTextOp, Src: "m[\"nil\"].Text", Value: "nil"}, + {Line: 89, Op: ir.FilterStringOp, Src: "\"nil\"", Value: "nil"}, + }, + }, + }, + { + Line: 93, + SyntaxPatterns: []ir.PatternString{{Line: 93, Value: "httptest.NewRequest($method, $url, $nil)"}}, + ReportTemplate: "http.NoBody should be preferred to the nil request body", + SuggestTemplate: "httptest.NewRequest($method, $url, http.NoBody)", + WhereExpr: ir.FilterExpr{ + Line: 94, + Op: ir.FilterEqOp, + Src: "m[\"nil\"].Text == \"nil\"", + Args: []ir.FilterExpr{ + {Line: 94, Op: ir.FilterVarTextOp, Src: "m[\"nil\"].Text", Value: "nil"}, + {Line: 94, Op: ir.FilterStringOp, Src: "\"nil\"", Value: "nil"}, }, }, }, }, }, { - Line: 134, + Line: 104, Name: "preferDecodeRune", MatcherName: "m", DocTags: []string{"performance", "experimental"}, @@ -426,46 +347,46 @@ var PrecompiledRules = &ir.File{ DocAfter: "r, _ := utf8.DecodeRuneInString(s)", DocNote: "See Go issue for details: https://github.com/golang/go/issues/45260", Rules: []ir.Rule{{ - Line: 135, - SyntaxPatterns: []ir.PatternString{{Line: 135, Value: "[]rune($s)[0]"}}, + Line: 105, + SyntaxPatterns: []ir.PatternString{{Line: 105, Value: "[]rune($s)[0]"}}, ReportTemplate: "consider replacing $$ with utf8.DecodeRuneInString($s)", WhereExpr: ir.FilterExpr{ - Line: 136, + Line: 106, Op: ir.FilterVarTypeIsOp, Src: "m[\"s\"].Type.Is(`string`)", Value: "s", - Args: []ir.FilterExpr{{Line: 136, Op: ir.FilterStringOp, Src: "`string`", Value: "string"}}, + Args: []ir.FilterExpr{{Line: 106, Op: ir.FilterStringOp, Src: "`string`", Value: "string"}}, }, }}, }, { - Line: 144, + Line: 114, Name: "sloppyLen", MatcherName: "m", - DocTags: []string{"style"}, + DocTags: []string{"diagnostic"}, DocSummary: "Detects usage of `len` when result is obvious or doesn't make sense", DocBefore: "len(arr) <= 0", DocAfter: "len(arr) == 0", Rules: []ir.Rule{ { - Line: 145, - SyntaxPatterns: []ir.PatternString{{Line: 145, Value: "len($_) >= 0"}}, + Line: 115, + SyntaxPatterns: []ir.PatternString{{Line: 115, Value: "len($_) >= 0"}}, ReportTemplate: "$$ is always true", }, { - Line: 146, - SyntaxPatterns: []ir.PatternString{{Line: 146, Value: "len($_) < 0"}}, + Line: 116, + SyntaxPatterns: []ir.PatternString{{Line: 116, Value: "len($_) < 0"}}, ReportTemplate: "$$ is always false", }, { - Line: 147, - SyntaxPatterns: []ir.PatternString{{Line: 147, Value: "len($x) <= 0"}}, + Line: 117, + SyntaxPatterns: []ir.PatternString{{Line: 117, Value: "len($x) <= 0"}}, ReportTemplate: "$$ can be len($x) == 0", }, }, }, { - Line: 154, + Line: 124, Name: "valSwap", MatcherName: "m", DocTags: []string{"style"}, @@ -473,13 +394,13 @@ var PrecompiledRules = &ir.File{ DocBefore: "*tmp = *x; *x = *y; *y = *tmp", DocAfter: "*x, *y = *y, *x", Rules: []ir.Rule{{ - Line: 155, - SyntaxPatterns: []ir.PatternString{{Line: 155, Value: "$tmp := $y; $y = $x; $x = $tmp"}}, + Line: 125, + SyntaxPatterns: []ir.PatternString{{Line: 125, Value: "$tmp := $y; $y = $x; $x = $tmp"}}, ReportTemplate: "can re-write as `$y, $x = $x, $y`", }}, }, { - Line: 163, + Line: 133, Name: "switchTrue", MatcherName: "m", DocTags: []string{"style"}, @@ -488,19 +409,19 @@ var PrecompiledRules = &ir.File{ DocAfter: "switch {...}", Rules: []ir.Rule{ { - Line: 164, - SyntaxPatterns: []ir.PatternString{{Line: 164, Value: "switch true { $*_ }"}}, + Line: 134, + SyntaxPatterns: []ir.PatternString{{Line: 134, Value: "switch true { $*_ }"}}, ReportTemplate: "replace 'switch true {}' with 'switch {}'", }, { - Line: 166, - SyntaxPatterns: []ir.PatternString{{Line: 166, 
Value: "switch $x; true { $*_ }"}}, + Line: 136, + SyntaxPatterns: []ir.PatternString{{Line: 136, Value: "switch $x; true { $*_ }"}}, ReportTemplate: "replace 'switch $x; true {}' with 'switch $x; {}'", }, }, }, { - Line: 174, + Line: 144, Name: "flagDeref", MatcherName: "m", DocTags: []string{"diagnostic"}, @@ -509,49 +430,49 @@ var PrecompiledRules = &ir.File{ DocAfter: "var b bool; flag.BoolVar(&b, \"b\", false, \"b docs\")", Rules: []ir.Rule{ { - Line: 175, - SyntaxPatterns: []ir.PatternString{{Line: 175, Value: "*flag.Bool($*_)"}}, + Line: 145, + SyntaxPatterns: []ir.PatternString{{Line: 145, Value: "*flag.Bool($*_)"}}, ReportTemplate: "immediate deref in $$ is most likely an error; consider using flag.BoolVar", }, { - Line: 176, - SyntaxPatterns: []ir.PatternString{{Line: 176, Value: "*flag.Duration($*_)"}}, + Line: 146, + SyntaxPatterns: []ir.PatternString{{Line: 146, Value: "*flag.Duration($*_)"}}, ReportTemplate: "immediate deref in $$ is most likely an error; consider using flag.DurationVar", }, { - Line: 177, - SyntaxPatterns: []ir.PatternString{{Line: 177, Value: "*flag.Float64($*_)"}}, + Line: 147, + SyntaxPatterns: []ir.PatternString{{Line: 147, Value: "*flag.Float64($*_)"}}, ReportTemplate: "immediate deref in $$ is most likely an error; consider using flag.Float64Var", }, { - Line: 178, - SyntaxPatterns: []ir.PatternString{{Line: 178, Value: "*flag.Int($*_)"}}, + Line: 148, + SyntaxPatterns: []ir.PatternString{{Line: 148, Value: "*flag.Int($*_)"}}, ReportTemplate: "immediate deref in $$ is most likely an error; consider using flag.IntVar", }, { - Line: 179, - SyntaxPatterns: []ir.PatternString{{Line: 179, Value: "*flag.Int64($*_)"}}, + Line: 149, + SyntaxPatterns: []ir.PatternString{{Line: 149, Value: "*flag.Int64($*_)"}}, ReportTemplate: "immediate deref in $$ is most likely an error; consider using flag.Int64Var", }, { - Line: 180, - SyntaxPatterns: []ir.PatternString{{Line: 180, Value: "*flag.String($*_)"}}, + Line: 150, + SyntaxPatterns: []ir.PatternString{{Line: 150, Value: "*flag.String($*_)"}}, ReportTemplate: "immediate deref in $$ is most likely an error; consider using flag.StringVar", }, { - Line: 181, - SyntaxPatterns: []ir.PatternString{{Line: 181, Value: "*flag.Uint($*_)"}}, + Line: 151, + SyntaxPatterns: []ir.PatternString{{Line: 151, Value: "*flag.Uint($*_)"}}, ReportTemplate: "immediate deref in $$ is most likely an error; consider using flag.UintVar", }, { - Line: 182, - SyntaxPatterns: []ir.PatternString{{Line: 182, Value: "*flag.Uint64($*_)"}}, + Line: 152, + SyntaxPatterns: []ir.PatternString{{Line: 152, Value: "*flag.Uint64($*_)"}}, ReportTemplate: "immediate deref in $$ is most likely an error; consider using flag.Uint64Var", }, }, }, { - Line: 189, + Line: 159, Name: "emptyStringTest", MatcherName: "m", DocTags: []string{"style", "experimental"}, @@ -560,33 +481,57 @@ var PrecompiledRules = &ir.File{ DocAfter: "s == \"\"", Rules: []ir.Rule{ { - Line: 190, - SyntaxPatterns: []ir.PatternString{{Line: 190, Value: "len($s) != 0"}}, + Line: 160, + SyntaxPatterns: []ir.PatternString{{Line: 160, Value: "len($s) != 0"}}, + ReportTemplate: "replace `$$` with `$s != \"\"`", + WhereExpr: ir.FilterExpr{ + Line: 161, + Op: ir.FilterVarTypeIsOp, + Src: "m[\"s\"].Type.Is(`string`)", + Value: "s", + Args: []ir.FilterExpr{{Line: 161, Op: ir.FilterStringOp, Src: "`string`", Value: "string"}}, + }, + }, + { + Line: 163, + SyntaxPatterns: []ir.PatternString{{Line: 163, Value: "len($s) > 0"}}, ReportTemplate: "replace `$$` with `$s != \"\"`", WhereExpr: ir.FilterExpr{ - 
Line: 191, + Line: 164, + Op: ir.FilterVarTypeIsOp, + Src: "m[\"s\"].Type.Is(`string`)", + Value: "s", + Args: []ir.FilterExpr{{Line: 164, Op: ir.FilterStringOp, Src: "`string`", Value: "string"}}, + }, + }, + { + Line: 167, + SyntaxPatterns: []ir.PatternString{{Line: 167, Value: "len($s) == 0"}}, + ReportTemplate: "replace `$$` with `$s == \"\"`", + WhereExpr: ir.FilterExpr{ + Line: 168, Op: ir.FilterVarTypeIsOp, Src: "m[\"s\"].Type.Is(`string`)", Value: "s", - Args: []ir.FilterExpr{{Line: 191, Op: ir.FilterStringOp, Src: "`string`", Value: "string"}}, + Args: []ir.FilterExpr{{Line: 168, Op: ir.FilterStringOp, Src: "`string`", Value: "string"}}, }, }, { - Line: 194, - SyntaxPatterns: []ir.PatternString{{Line: 194, Value: "len($s) == 0"}}, + Line: 170, + SyntaxPatterns: []ir.PatternString{{Line: 170, Value: "len($s) <= 0"}}, ReportTemplate: "replace `$$` with `$s == \"\"`", WhereExpr: ir.FilterExpr{ - Line: 195, + Line: 171, Op: ir.FilterVarTypeIsOp, Src: "m[\"s\"].Type.Is(`string`)", Value: "s", - Args: []ir.FilterExpr{{Line: 195, Op: ir.FilterStringOp, Src: "`string`", Value: "string"}}, + Args: []ir.FilterExpr{{Line: 171, Op: ir.FilterStringOp, Src: "`string`", Value: "string"}}, }, }, }, }, { - Line: 203, + Line: 179, Name: "stringXbytes", MatcherName: "m", DocTags: []string{"performance"}, @@ -595,180 +540,180 @@ var PrecompiledRules = &ir.File{ DocAfter: "copy(b, s)", Rules: []ir.Rule{ { - Line: 204, - SyntaxPatterns: []ir.PatternString{{Line: 204, Value: "copy($_, []byte($s))"}}, + Line: 180, + SyntaxPatterns: []ir.PatternString{{Line: 180, Value: "copy($_, []byte($s))"}}, ReportTemplate: "can simplify `[]byte($s)` to `$s`", }, { - Line: 206, - SyntaxPatterns: []ir.PatternString{{Line: 206, Value: "string($b) == \"\""}}, + Line: 182, + SyntaxPatterns: []ir.PatternString{{Line: 182, Value: "string($b) == \"\""}}, ReportTemplate: "suggestion: len($b) == 0", SuggestTemplate: "len($b) == 0", WhereExpr: ir.FilterExpr{ - Line: 206, + Line: 182, Op: ir.FilterVarTypeIsOp, Src: "m[\"b\"].Type.Is(`[]byte`)", Value: "b", - Args: []ir.FilterExpr{{Line: 206, Op: ir.FilterStringOp, Src: "`[]byte`", Value: "[]byte"}}, + Args: []ir.FilterExpr{{Line: 182, Op: ir.FilterStringOp, Src: "`[]byte`", Value: "[]byte"}}, }, }, { - Line: 207, - SyntaxPatterns: []ir.PatternString{{Line: 207, Value: "string($b) != \"\""}}, + Line: 183, + SyntaxPatterns: []ir.PatternString{{Line: 183, Value: "string($b) != \"\""}}, ReportTemplate: "suggestion: len($b) != 0", SuggestTemplate: "len($b) != 0", WhereExpr: ir.FilterExpr{ - Line: 207, + Line: 183, Op: ir.FilterVarTypeIsOp, Src: "m[\"b\"].Type.Is(`[]byte`)", Value: "b", - Args: []ir.FilterExpr{{Line: 207, Op: ir.FilterStringOp, Src: "`[]byte`", Value: "[]byte"}}, + Args: []ir.FilterExpr{{Line: 183, Op: ir.FilterStringOp, Src: "`[]byte`", Value: "[]byte"}}, }, }, { - Line: 209, - SyntaxPatterns: []ir.PatternString{{Line: 209, Value: "len(string($b))"}}, + Line: 185, + SyntaxPatterns: []ir.PatternString{{Line: 185, Value: "len(string($b))"}}, ReportTemplate: "suggestion: len($b)", SuggestTemplate: "len($b)", WhereExpr: ir.FilterExpr{ - Line: 209, + Line: 185, Op: ir.FilterVarTypeIsOp, Src: "m[\"b\"].Type.Is(`[]byte`)", Value: "b", - Args: []ir.FilterExpr{{Line: 209, Op: ir.FilterStringOp, Src: "`[]byte`", Value: "[]byte"}}, + Args: []ir.FilterExpr{{Line: 185, Op: ir.FilterStringOp, Src: "`[]byte`", Value: "[]byte"}}, }, }, { - Line: 211, - SyntaxPatterns: []ir.PatternString{{Line: 211, Value: "string($x) == string($y)"}}, + Line: 187, + SyntaxPatterns: 
[]ir.PatternString{{Line: 187, Value: "string($x) == string($y)"}}, ReportTemplate: "suggestion: bytes.Equal($x, $y)", SuggestTemplate: "bytes.Equal($x, $y)", WhereExpr: ir.FilterExpr{ - Line: 212, + Line: 188, Op: ir.FilterAndOp, Src: "m[\"x\"].Type.Is(`[]byte`) && m[\"y\"].Type.Is(`[]byte`)", Args: []ir.FilterExpr{ { - Line: 212, + Line: 188, Op: ir.FilterVarTypeIsOp, Src: "m[\"x\"].Type.Is(`[]byte`)", Value: "x", - Args: []ir.FilterExpr{{Line: 212, Op: ir.FilterStringOp, Src: "`[]byte`", Value: "[]byte"}}, + Args: []ir.FilterExpr{{Line: 188, Op: ir.FilterStringOp, Src: "`[]byte`", Value: "[]byte"}}, }, { - Line: 212, + Line: 188, Op: ir.FilterVarTypeIsOp, Src: "m[\"y\"].Type.Is(`[]byte`)", Value: "y", - Args: []ir.FilterExpr{{Line: 212, Op: ir.FilterStringOp, Src: "`[]byte`", Value: "[]byte"}}, + Args: []ir.FilterExpr{{Line: 188, Op: ir.FilterStringOp, Src: "`[]byte`", Value: "[]byte"}}, }, }, }, }, { - Line: 215, - SyntaxPatterns: []ir.PatternString{{Line: 215, Value: "string($x) != string($y)"}}, + Line: 191, + SyntaxPatterns: []ir.PatternString{{Line: 191, Value: "string($x) != string($y)"}}, ReportTemplate: "suggestion: !bytes.Equal($x, $y)", SuggestTemplate: "!bytes.Equal($x, $y)", WhereExpr: ir.FilterExpr{ - Line: 216, + Line: 192, Op: ir.FilterAndOp, Src: "m[\"x\"].Type.Is(`[]byte`) && m[\"y\"].Type.Is(`[]byte`)", Args: []ir.FilterExpr{ { - Line: 216, + Line: 192, Op: ir.FilterVarTypeIsOp, Src: "m[\"x\"].Type.Is(`[]byte`)", Value: "x", - Args: []ir.FilterExpr{{Line: 216, Op: ir.FilterStringOp, Src: "`[]byte`", Value: "[]byte"}}, + Args: []ir.FilterExpr{{Line: 192, Op: ir.FilterStringOp, Src: "`[]byte`", Value: "[]byte"}}, }, { - Line: 216, + Line: 192, Op: ir.FilterVarTypeIsOp, Src: "m[\"y\"].Type.Is(`[]byte`)", Value: "y", - Args: []ir.FilterExpr{{Line: 216, Op: ir.FilterStringOp, Src: "`[]byte`", Value: "[]byte"}}, + Args: []ir.FilterExpr{{Line: 192, Op: ir.FilterStringOp, Src: "`[]byte`", Value: "[]byte"}}, }, }, }, }, { - Line: 219, - SyntaxPatterns: []ir.PatternString{{Line: 219, Value: "$re.Match([]byte($s))"}}, + Line: 195, + SyntaxPatterns: []ir.PatternString{{Line: 195, Value: "$re.Match([]byte($s))"}}, ReportTemplate: "suggestion: $re.MatchString($s)", SuggestTemplate: "$re.MatchString($s)", WhereExpr: ir.FilterExpr{ - Line: 220, + Line: 196, Op: ir.FilterAndOp, Src: "m[\"re\"].Type.Is(`*regexp.Regexp`) && m[\"s\"].Type.Is(`string`)", Args: []ir.FilterExpr{ { - Line: 220, + Line: 196, Op: ir.FilterVarTypeIsOp, Src: "m[\"re\"].Type.Is(`*regexp.Regexp`)", Value: "re", - Args: []ir.FilterExpr{{Line: 220, Op: ir.FilterStringOp, Src: "`*regexp.Regexp`", Value: "*regexp.Regexp"}}, + Args: []ir.FilterExpr{{Line: 196, Op: ir.FilterStringOp, Src: "`*regexp.Regexp`", Value: "*regexp.Regexp"}}, }, { - Line: 220, + Line: 196, Op: ir.FilterVarTypeIsOp, Src: "m[\"s\"].Type.Is(`string`)", Value: "s", - Args: []ir.FilterExpr{{Line: 220, Op: ir.FilterStringOp, Src: "`string`", Value: "string"}}, + Args: []ir.FilterExpr{{Line: 196, Op: ir.FilterStringOp, Src: "`string`", Value: "string"}}, }, }, }, }, { - Line: 223, - SyntaxPatterns: []ir.PatternString{{Line: 223, Value: "$re.FindIndex([]byte($s))"}}, + Line: 199, + SyntaxPatterns: []ir.PatternString{{Line: 199, Value: "$re.FindIndex([]byte($s))"}}, ReportTemplate: "suggestion: $re.FindStringIndex($s)", SuggestTemplate: "$re.FindStringIndex($s)", WhereExpr: ir.FilterExpr{ - Line: 224, + Line: 200, Op: ir.FilterAndOp, Src: "m[\"re\"].Type.Is(`*regexp.Regexp`) && m[\"s\"].Type.Is(`string`)", Args: []ir.FilterExpr{ { - Line: 224, + Line: 
200, Op: ir.FilterVarTypeIsOp, Src: "m[\"re\"].Type.Is(`*regexp.Regexp`)", Value: "re", - Args: []ir.FilterExpr{{Line: 224, Op: ir.FilterStringOp, Src: "`*regexp.Regexp`", Value: "*regexp.Regexp"}}, + Args: []ir.FilterExpr{{Line: 200, Op: ir.FilterStringOp, Src: "`*regexp.Regexp`", Value: "*regexp.Regexp"}}, }, { - Line: 224, + Line: 200, Op: ir.FilterVarTypeIsOp, Src: "m[\"s\"].Type.Is(`string`)", Value: "s", - Args: []ir.FilterExpr{{Line: 224, Op: ir.FilterStringOp, Src: "`string`", Value: "string"}}, + Args: []ir.FilterExpr{{Line: 200, Op: ir.FilterStringOp, Src: "`string`", Value: "string"}}, }, }, }, }, { - Line: 227, - SyntaxPatterns: []ir.PatternString{{Line: 227, Value: "$re.FindAllIndex([]byte($s), $n)"}}, + Line: 203, + SyntaxPatterns: []ir.PatternString{{Line: 203, Value: "$re.FindAllIndex([]byte($s), $n)"}}, ReportTemplate: "suggestion: $re.FindAllStringIndex($s, $n)", SuggestTemplate: "$re.FindAllStringIndex($s, $n)", WhereExpr: ir.FilterExpr{ - Line: 228, + Line: 204, Op: ir.FilterAndOp, Src: "m[\"re\"].Type.Is(`*regexp.Regexp`) && m[\"s\"].Type.Is(`string`)", Args: []ir.FilterExpr{ { - Line: 228, + Line: 204, Op: ir.FilterVarTypeIsOp, Src: "m[\"re\"].Type.Is(`*regexp.Regexp`)", Value: "re", - Args: []ir.FilterExpr{{Line: 228, Op: ir.FilterStringOp, Src: "`*regexp.Regexp`", Value: "*regexp.Regexp"}}, + Args: []ir.FilterExpr{{Line: 204, Op: ir.FilterStringOp, Src: "`*regexp.Regexp`", Value: "*regexp.Regexp"}}, }, { - Line: 228, + Line: 204, Op: ir.FilterVarTypeIsOp, Src: "m[\"s\"].Type.Is(`string`)", Value: "s", - Args: []ir.FilterExpr{{Line: 228, Op: ir.FilterStringOp, Src: "`string`", Value: "string"}}, + Args: []ir.FilterExpr{{Line: 204, Op: ir.FilterStringOp, Src: "`string`", Value: "string"}}, }, }, }, @@ -776,7 +721,7 @@ var PrecompiledRules = &ir.File{ }, }, { - Line: 237, + Line: 213, Name: "indexAlloc", MatcherName: "m", DocTags: []string{"performance"}, @@ -785,22 +730,22 @@ var PrecompiledRules = &ir.File{ DocAfter: "bytes.Index(x, []byte(y))", DocNote: "See Go issue for details: https://github.com/golang/go/issues/25864", Rules: []ir.Rule{{ - Line: 238, - SyntaxPatterns: []ir.PatternString{{Line: 238, Value: "strings.Index(string($x), $y)"}}, + Line: 214, + SyntaxPatterns: []ir.PatternString{{Line: 214, Value: "strings.Index(string($x), $y)"}}, ReportTemplate: "consider replacing $$ with bytes.Index($x, []byte($y))", WhereExpr: ir.FilterExpr{ - Line: 239, + Line: 215, Op: ir.FilterAndOp, Src: "m[\"x\"].Pure && m[\"y\"].Pure", Args: []ir.FilterExpr{ - {Line: 239, Op: ir.FilterVarPureOp, Src: "m[\"x\"].Pure", Value: "x"}, - {Line: 239, Op: ir.FilterVarPureOp, Src: "m[\"y\"].Pure", Value: "y"}, + {Line: 215, Op: ir.FilterVarPureOp, Src: "m[\"x\"].Pure", Value: "x"}, + {Line: 215, Op: ir.FilterVarPureOp, Src: "m[\"y\"].Pure", Value: "y"}, }, }, }}, }, { - Line: 247, + Line: 223, Name: "wrapperFunc", MatcherName: "m", DocTags: []string{"style"}, @@ -809,83 +754,169 @@ var PrecompiledRules = &ir.File{ DocAfter: "wg.Done()", Rules: []ir.Rule{ { - Line: 248, - SyntaxPatterns: []ir.PatternString{{Line: 248, Value: "$wg.Add(-1)"}}, + Line: 224, + SyntaxPatterns: []ir.PatternString{{Line: 224, Value: "$wg.Add(-1)"}}, ReportTemplate: "use WaitGroup.Done method in `$$`", WhereExpr: ir.FilterExpr{ - Line: 249, + Line: 225, Op: ir.FilterVarTypeIsOp, Src: "m[\"wg\"].Type.Is(`sync.WaitGroup`)", Value: "wg", - Args: []ir.FilterExpr{{Line: 249, Op: ir.FilterStringOp, Src: "`sync.WaitGroup`", Value: "sync.WaitGroup"}}, + Args: []ir.FilterExpr{{Line: 225, Op: ir.FilterStringOp, Src: 
"`sync.WaitGroup`", Value: "sync.WaitGroup"}}, }, }, { - Line: 252, - SyntaxPatterns: []ir.PatternString{{Line: 252, Value: "$buf.Truncate(0)"}}, + Line: 228, + SyntaxPatterns: []ir.PatternString{{Line: 228, Value: "$buf.Truncate(0)"}}, ReportTemplate: "use Buffer.Reset method in `$$`", WhereExpr: ir.FilterExpr{ - Line: 253, + Line: 229, Op: ir.FilterVarTypeIsOp, Src: "m[\"buf\"].Type.Is(`bytes.Buffer`)", Value: "buf", - Args: []ir.FilterExpr{{Line: 253, Op: ir.FilterStringOp, Src: "`bytes.Buffer`", Value: "bytes.Buffer"}}, + Args: []ir.FilterExpr{{Line: 229, Op: ir.FilterStringOp, Src: "`bytes.Buffer`", Value: "bytes.Buffer"}}, }, }, { - Line: 256, - SyntaxPatterns: []ir.PatternString{{Line: 256, Value: "http.HandlerFunc(http.NotFound)"}}, + Line: 232, + SyntaxPatterns: []ir.PatternString{{Line: 232, Value: "http.HandlerFunc(http.NotFound)"}}, ReportTemplate: "use http.NotFoundHandler method in `$$`", }, { - Line: 258, - SyntaxPatterns: []ir.PatternString{{Line: 258, Value: "strings.SplitN($_, $_, -1)"}}, + Line: 234, + SyntaxPatterns: []ir.PatternString{{Line: 234, Value: "strings.SplitN($_, $_, -1)"}}, ReportTemplate: "use strings.Split method in `$$`", }, { - Line: 259, - SyntaxPatterns: []ir.PatternString{{Line: 259, Value: "strings.Replace($_, $_, $_, -1)"}}, + Line: 235, + SyntaxPatterns: []ir.PatternString{{Line: 235, Value: "strings.Replace($_, $_, $_, -1)"}}, ReportTemplate: "use strings.ReplaceAll method in `$$`", }, { - Line: 260, - SyntaxPatterns: []ir.PatternString{{Line: 260, Value: "strings.Map(unicode.ToTitle, $_)"}}, + Line: 236, + SyntaxPatterns: []ir.PatternString{{Line: 236, Value: "strings.Map(unicode.ToTitle, $_)"}}, ReportTemplate: "use strings.ToTitle method in `$$`", }, { - Line: 262, - SyntaxPatterns: []ir.PatternString{{Line: 262, Value: "bytes.SplitN(b, []byte(\".\"), -1)"}}, + Line: 237, + SyntaxPatterns: []ir.PatternString{ + {Line: 237, Value: "strings.Index($s1, $s2) >= 0"}, + {Line: 237, Value: "strings.Index($s1, $s2) != -1"}, + }, + ReportTemplate: "suggestion: strings.Contains($s1, $s2)", + SuggestTemplate: "strings.Contains($s1, $s2)", + }, + { + Line: 238, + SyntaxPatterns: []ir.PatternString{ + {Line: 238, Value: "strings.IndexAny($s1, $s2) >= 0"}, + {Line: 238, Value: "strings.IndexAny($s1, $s2) != -1"}, + }, + ReportTemplate: "suggestion: strings.ContainsAny($s1, $s2)", + SuggestTemplate: "strings.ContainsAny($s1, $s2)", + }, + { + Line: 239, + SyntaxPatterns: []ir.PatternString{ + {Line: 239, Value: "strings.IndexRune($s1, $s2) >= 0"}, + {Line: 239, Value: "strings.IndexRune($s1, $s2) != -1"}, + }, + ReportTemplate: "suggestion: strings.ContainsRune($s1, $s2)", + SuggestTemplate: "strings.ContainsRune($s1, $s2)", + }, + { + Line: 241, + SyntaxPatterns: []ir.PatternString{ + {Line: 241, Value: "$i := strings.Index($s, $sep); $*_; $x, $y = $s[:$i], $s[$i+1:]"}, + {Line: 242, Value: "$i := strings.Index($s, $sep); $*_; $x = $s[:$i]; $*_; $y = $s[$i+1:]"}, + }, + ReportTemplate: "suggestion: $x, $y, _ = strings.Cut($s, $sep)", + SuggestTemplate: "$x, $y, _ = strings.Cut($s, $sep)", + WhereExpr: ir.FilterExpr{ + Line: 243, + Op: ir.FilterGoVersionGreaterEqThanOp, + Src: "m.GoVersion().GreaterEqThan(\"1.18\")", + Value: "1.18", + }, + }, + { + Line: 246, + SyntaxPatterns: []ir.PatternString{ + {Line: 247, Value: "if $i := strings.Index($s, $sep); $i != -1 { $*_; $x, $y = $s[:$i], $s[$i+1:]; $*_ }"}, + {Line: 248, Value: "if $i := strings.Index($s, $sep); $i != -1 { $*_; $x = $s[:$i]; $*_; $y = $s[$i+1:]; $*_ }"}, + {Line: 249, Value: "if $i := 
strings.Index($s, $sep); $i >= 0 { $*_; $x, $y = $s[:$i], $s[$i+1:]; $*_ }"}, + {Line: 250, Value: "if $i := strings.Index($s, $sep); $i >= 0 { $*_; $x = $s[:$i]; $*_; $y = $s[$i+1:]; $*_ }"}, + }, + ReportTemplate: "suggestion: if $x, $y, ok = strings.Cut($s, $sep); ok { ... }", + SuggestTemplate: "if $x, $y, ok = strings.Cut($s, $sep); ok { ... }", + WhereExpr: ir.FilterExpr{ + Line: 251, + Op: ir.FilterGoVersionGreaterEqThanOp, + Src: "m.GoVersion().GreaterEqThan(\"1.18\")", + Value: "1.18", + }, + }, + { + Line: 254, + SyntaxPatterns: []ir.PatternString{{Line: 254, Value: "bytes.SplitN(b, []byte(\".\"), -1)"}}, ReportTemplate: "use bytes.Split method in `$$`", }, { - Line: 263, - SyntaxPatterns: []ir.PatternString{{Line: 263, Value: "bytes.Replace($_, $_, $_, -1)"}}, + Line: 255, + SyntaxPatterns: []ir.PatternString{{Line: 255, Value: "bytes.Replace($_, $_, $_, -1)"}}, ReportTemplate: "use bytes.ReplaceAll method in `$$`", }, { - Line: 264, - SyntaxPatterns: []ir.PatternString{{Line: 264, Value: "bytes.Map(unicode.ToUpper, $_)"}}, + Line: 256, + SyntaxPatterns: []ir.PatternString{{Line: 256, Value: "bytes.Map(unicode.ToUpper, $_)"}}, ReportTemplate: "use bytes.ToUpper method in `$$`", }, { - Line: 265, - SyntaxPatterns: []ir.PatternString{{Line: 265, Value: "bytes.Map(unicode.ToLower, $_)"}}, + Line: 257, + SyntaxPatterns: []ir.PatternString{{Line: 257, Value: "bytes.Map(unicode.ToLower, $_)"}}, ReportTemplate: "use bytes.ToLower method in `$$`", }, { - Line: 266, - SyntaxPatterns: []ir.PatternString{{Line: 266, Value: "bytes.Map(unicode.ToTitle, $_)"}}, + Line: 258, + SyntaxPatterns: []ir.PatternString{{Line: 258, Value: "bytes.Map(unicode.ToTitle, $_)"}}, ReportTemplate: "use bytes.ToTitle method in `$$`", }, { - Line: 268, - SyntaxPatterns: []ir.PatternString{{Line: 268, Value: "draw.DrawMask($_, $_, $_, $_, nil, image.Point{}, $_)"}}, + Line: 259, + SyntaxPatterns: []ir.PatternString{ + {Line: 259, Value: "bytes.Index($b1, $b2) >= 0"}, + {Line: 259, Value: "bytes.Index($b1, $b2) != -1"}, + }, + ReportTemplate: "suggestion: bytes.Contains($b1, $b2)", + SuggestTemplate: "bytes.Contains($b1, $b2)", + }, + { + Line: 260, + SyntaxPatterns: []ir.PatternString{ + {Line: 260, Value: "bytes.IndexAny($b1, $b2) >= 0"}, + {Line: 260, Value: "bytes.IndexAny($b1, $b2) != -1"}, + }, + ReportTemplate: "suggestion: bytes.ContainsAny($b1, $b2)", + SuggestTemplate: "bytes.ContainsAny($b1, $b2)", + }, + { + Line: 261, + SyntaxPatterns: []ir.PatternString{ + {Line: 261, Value: "bytes.IndexRune($b1, $b2) >= 0"}, + {Line: 261, Value: "bytes.IndexRune($b1, $b2) != -1"}, + }, + ReportTemplate: "suggestion: bytes.ContainsRune($b1, $b2)", + SuggestTemplate: "bytes.ContainsRune($b1, $b2)", + }, + { + Line: 263, + SyntaxPatterns: []ir.PatternString{{Line: 263, Value: "draw.DrawMask($_, $_, $_, $_, nil, image.Point{}, $_)"}}, ReportTemplate: "use draw.Draw method in `$$`", }, }, }, { - Line: 276, + Line: 271, Name: "regexpMust", MatcherName: "m", DocTags: []string{"style"}, @@ -894,22 +925,22 @@ var PrecompiledRules = &ir.File{ DocAfter: "re := regexp.MustCompile(\"const pattern\")", Rules: []ir.Rule{ { - Line: 277, - SyntaxPatterns: []ir.PatternString{{Line: 277, Value: "regexp.Compile($pat)"}}, + Line: 272, + SyntaxPatterns: []ir.PatternString{{Line: 272, Value: "regexp.Compile($pat)"}}, ReportTemplate: "for const patterns like $pat, use regexp.MustCompile", WhereExpr: ir.FilterExpr{ - Line: 278, + Line: 273, Op: ir.FilterVarConstOp, Src: "m[\"pat\"].Const", Value: "pat", }, }, { - Line: 281, - 
SyntaxPatterns: []ir.PatternString{{Line: 281, Value: "regexp.CompilePOSIX($pat)"}}, + Line: 276, + SyntaxPatterns: []ir.PatternString{{Line: 276, Value: "regexp.CompilePOSIX($pat)"}}, ReportTemplate: "for const patterns like $pat, use regexp.MustCompilePOSIX", WhereExpr: ir.FilterExpr{ - Line: 282, + Line: 277, Op: ir.FilterVarConstOp, Src: "m[\"pat\"].Const", Value: "pat", @@ -918,7 +949,7 @@ var PrecompiledRules = &ir.File{ }, }, { - Line: 290, + Line: 285, Name: "badCall", MatcherName: "m", DocTags: []string{"diagnostic"}, @@ -927,22 +958,22 @@ var PrecompiledRules = &ir.File{ DocAfter: "strings.Replace(s, from, to, -1)", Rules: []ir.Rule{ { - Line: 291, - SyntaxPatterns: []ir.PatternString{{Line: 291, Value: "strings.Replace($_, $_, $_, $zero)"}}, + Line: 286, + SyntaxPatterns: []ir.PatternString{{Line: 286, Value: "strings.Replace($_, $_, $_, $zero)"}}, ReportTemplate: "suspicious arg 0, probably meant -1", WhereExpr: ir.FilterExpr{ - Line: 292, + Line: 287, Op: ir.FilterEqOp, Src: "m[\"zero\"].Value.Int() == 0", Args: []ir.FilterExpr{ { - Line: 292, + Line: 287, Op: ir.FilterVarValueIntOp, Src: "m[\"zero\"].Value.Int()", Value: "zero", }, { - Line: 292, + Line: 287, Op: ir.FilterIntOp, Src: "0", Value: int64(0), @@ -952,22 +983,22 @@ var PrecompiledRules = &ir.File{ LocationVar: "zero", }, { - Line: 294, - SyntaxPatterns: []ir.PatternString{{Line: 294, Value: "bytes.Replace($_, $_, $_, $zero)"}}, + Line: 289, + SyntaxPatterns: []ir.PatternString{{Line: 289, Value: "bytes.Replace($_, $_, $_, $zero)"}}, ReportTemplate: "suspicious arg 0, probably meant -1", WhereExpr: ir.FilterExpr{ - Line: 295, + Line: 290, Op: ir.FilterEqOp, Src: "m[\"zero\"].Value.Int() == 0", Args: []ir.FilterExpr{ { - Line: 295, + Line: 290, Op: ir.FilterVarValueIntOp, Src: "m[\"zero\"].Value.Int()", Value: "zero", }, { - Line: 295, + Line: 290, Op: ir.FilterIntOp, Src: "0", Value: int64(0), @@ -977,22 +1008,22 @@ var PrecompiledRules = &ir.File{ LocationVar: "zero", }, { - Line: 298, - SyntaxPatterns: []ir.PatternString{{Line: 298, Value: "strings.SplitN($_, $_, $zero)"}}, + Line: 293, + SyntaxPatterns: []ir.PatternString{{Line: 293, Value: "strings.SplitN($_, $_, $zero)"}}, ReportTemplate: "suspicious arg 0, probably meant -1", WhereExpr: ir.FilterExpr{ - Line: 299, + Line: 294, Op: ir.FilterEqOp, Src: "m[\"zero\"].Value.Int() == 0", Args: []ir.FilterExpr{ { - Line: 299, + Line: 294, Op: ir.FilterVarValueIntOp, Src: "m[\"zero\"].Value.Int()", Value: "zero", }, { - Line: 299, + Line: 294, Op: ir.FilterIntOp, Src: "0", Value: int64(0), @@ -1002,22 +1033,22 @@ var PrecompiledRules = &ir.File{ LocationVar: "zero", }, { - Line: 301, - SyntaxPatterns: []ir.PatternString{{Line: 301, Value: "bytes.SplitN($_, $_, $zero)"}}, + Line: 296, + SyntaxPatterns: []ir.PatternString{{Line: 296, Value: "bytes.SplitN($_, $_, $zero)"}}, ReportTemplate: "suspicious arg 0, probably meant -1", WhereExpr: ir.FilterExpr{ - Line: 302, + Line: 297, Op: ir.FilterEqOp, Src: "m[\"zero\"].Value.Int() == 0", Args: []ir.FilterExpr{ { - Line: 302, + Line: 297, Op: ir.FilterVarValueIntOp, Src: "m[\"zero\"].Value.Int()", Value: "zero", }, { - Line: 302, + Line: 297, Op: ir.FilterIntOp, Src: "0", Value: int64(0), @@ -1027,19 +1058,19 @@ var PrecompiledRules = &ir.File{ LocationVar: "zero", }, { - Line: 305, - SyntaxPatterns: []ir.PatternString{{Line: 305, Value: "append($_)"}}, + Line: 300, + SyntaxPatterns: []ir.PatternString{{Line: 300, Value: "append($_)"}}, ReportTemplate: "no-op append call, probably missing arguments", }, { - Line: 307, - 
SyntaxPatterns: []ir.PatternString{{Line: 307, Value: "filepath.Join($_)"}}, + Line: 302, + SyntaxPatterns: []ir.PatternString{{Line: 302, Value: "filepath.Join($_)"}}, ReportTemplate: "suspicious Join on 1 argument", }, }, }, { - Line: 314, + Line: 309, Name: "assignOp", MatcherName: "m", DocTags: []string{"style"}, @@ -1048,87 +1079,87 @@ var PrecompiledRules = &ir.File{ DocAfter: "x *= 2", Rules: []ir.Rule{ { - Line: 315, - SyntaxPatterns: []ir.PatternString{{Line: 315, Value: "$x = $x + 1"}}, + Line: 310, + SyntaxPatterns: []ir.PatternString{{Line: 310, Value: "$x = $x + 1"}}, ReportTemplate: "replace `$$` with `$x++`", - WhereExpr: ir.FilterExpr{Line: 315, Op: ir.FilterVarPureOp, Src: "m[\"x\"].Pure", Value: "x"}, + WhereExpr: ir.FilterExpr{Line: 310, Op: ir.FilterVarPureOp, Src: "m[\"x\"].Pure", Value: "x"}, }, { - Line: 316, - SyntaxPatterns: []ir.PatternString{{Line: 316, Value: "$x = $x - 1"}}, + Line: 311, + SyntaxPatterns: []ir.PatternString{{Line: 311, Value: "$x = $x - 1"}}, ReportTemplate: "replace `$$` with `$x--`", - WhereExpr: ir.FilterExpr{Line: 316, Op: ir.FilterVarPureOp, Src: "m[\"x\"].Pure", Value: "x"}, + WhereExpr: ir.FilterExpr{Line: 311, Op: ir.FilterVarPureOp, Src: "m[\"x\"].Pure", Value: "x"}, }, { - Line: 318, - SyntaxPatterns: []ir.PatternString{{Line: 318, Value: "$x = $x + $y"}}, + Line: 313, + SyntaxPatterns: []ir.PatternString{{Line: 313, Value: "$x = $x + $y"}}, ReportTemplate: "replace `$$` with `$x += $y`", - WhereExpr: ir.FilterExpr{Line: 318, Op: ir.FilterVarPureOp, Src: "m[\"x\"].Pure", Value: "x"}, + WhereExpr: ir.FilterExpr{Line: 313, Op: ir.FilterVarPureOp, Src: "m[\"x\"].Pure", Value: "x"}, }, { - Line: 319, - SyntaxPatterns: []ir.PatternString{{Line: 319, Value: "$x = $x - $y"}}, + Line: 314, + SyntaxPatterns: []ir.PatternString{{Line: 314, Value: "$x = $x - $y"}}, ReportTemplate: "replace `$$` with `$x -= $y`", - WhereExpr: ir.FilterExpr{Line: 319, Op: ir.FilterVarPureOp, Src: "m[\"x\"].Pure", Value: "x"}, + WhereExpr: ir.FilterExpr{Line: 314, Op: ir.FilterVarPureOp, Src: "m[\"x\"].Pure", Value: "x"}, }, { - Line: 321, - SyntaxPatterns: []ir.PatternString{{Line: 321, Value: "$x = $x * $y"}}, + Line: 316, + SyntaxPatterns: []ir.PatternString{{Line: 316, Value: "$x = $x * $y"}}, ReportTemplate: "replace `$$` with `$x *= $y`", - WhereExpr: ir.FilterExpr{Line: 321, Op: ir.FilterVarPureOp, Src: "m[\"x\"].Pure", Value: "x"}, + WhereExpr: ir.FilterExpr{Line: 316, Op: ir.FilterVarPureOp, Src: "m[\"x\"].Pure", Value: "x"}, }, { - Line: 322, - SyntaxPatterns: []ir.PatternString{{Line: 322, Value: "$x = $x / $y"}}, + Line: 317, + SyntaxPatterns: []ir.PatternString{{Line: 317, Value: "$x = $x / $y"}}, ReportTemplate: "replace `$$` with `$x /= $y`", - WhereExpr: ir.FilterExpr{Line: 322, Op: ir.FilterVarPureOp, Src: "m[\"x\"].Pure", Value: "x"}, + WhereExpr: ir.FilterExpr{Line: 317, Op: ir.FilterVarPureOp, Src: "m[\"x\"].Pure", Value: "x"}, }, { - Line: 323, - SyntaxPatterns: []ir.PatternString{{Line: 323, Value: "$x = $x % $y"}}, + Line: 318, + SyntaxPatterns: []ir.PatternString{{Line: 318, Value: "$x = $x % $y"}}, ReportTemplate: "replace `$$` with `$x %= $y`", - WhereExpr: ir.FilterExpr{Line: 323, Op: ir.FilterVarPureOp, Src: "m[\"x\"].Pure", Value: "x"}, + WhereExpr: ir.FilterExpr{Line: 318, Op: ir.FilterVarPureOp, Src: "m[\"x\"].Pure", Value: "x"}, }, { - Line: 324, - SyntaxPatterns: []ir.PatternString{{Line: 324, Value: "$x = $x & $y"}}, + Line: 319, + SyntaxPatterns: []ir.PatternString{{Line: 319, Value: "$x = $x & $y"}}, ReportTemplate: "replace `$$` 
with `$x &= $y`", - WhereExpr: ir.FilterExpr{Line: 324, Op: ir.FilterVarPureOp, Src: "m[\"x\"].Pure", Value: "x"}, + WhereExpr: ir.FilterExpr{Line: 319, Op: ir.FilterVarPureOp, Src: "m[\"x\"].Pure", Value: "x"}, }, { - Line: 325, - SyntaxPatterns: []ir.PatternString{{Line: 325, Value: "$x = $x | $y"}}, + Line: 320, + SyntaxPatterns: []ir.PatternString{{Line: 320, Value: "$x = $x | $y"}}, ReportTemplate: "replace `$$` with `$x |= $y`", - WhereExpr: ir.FilterExpr{Line: 325, Op: ir.FilterVarPureOp, Src: "m[\"x\"].Pure", Value: "x"}, + WhereExpr: ir.FilterExpr{Line: 320, Op: ir.FilterVarPureOp, Src: "m[\"x\"].Pure", Value: "x"}, }, { - Line: 326, - SyntaxPatterns: []ir.PatternString{{Line: 326, Value: "$x = $x ^ $y"}}, + Line: 321, + SyntaxPatterns: []ir.PatternString{{Line: 321, Value: "$x = $x ^ $y"}}, ReportTemplate: "replace `$$` with `$x ^= $y`", - WhereExpr: ir.FilterExpr{Line: 326, Op: ir.FilterVarPureOp, Src: "m[\"x\"].Pure", Value: "x"}, + WhereExpr: ir.FilterExpr{Line: 321, Op: ir.FilterVarPureOp, Src: "m[\"x\"].Pure", Value: "x"}, }, { - Line: 327, - SyntaxPatterns: []ir.PatternString{{Line: 327, Value: "$x = $x << $y"}}, + Line: 322, + SyntaxPatterns: []ir.PatternString{{Line: 322, Value: "$x = $x << $y"}}, ReportTemplate: "replace `$$` with `$x <<= $y`", - WhereExpr: ir.FilterExpr{Line: 327, Op: ir.FilterVarPureOp, Src: "m[\"x\"].Pure", Value: "x"}, + WhereExpr: ir.FilterExpr{Line: 322, Op: ir.FilterVarPureOp, Src: "m[\"x\"].Pure", Value: "x"}, }, { - Line: 328, - SyntaxPatterns: []ir.PatternString{{Line: 328, Value: "$x = $x >> $y"}}, + Line: 323, + SyntaxPatterns: []ir.PatternString{{Line: 323, Value: "$x = $x >> $y"}}, ReportTemplate: "replace `$$` with `$x >>= $y`", - WhereExpr: ir.FilterExpr{Line: 328, Op: ir.FilterVarPureOp, Src: "m[\"x\"].Pure", Value: "x"}, + WhereExpr: ir.FilterExpr{Line: 323, Op: ir.FilterVarPureOp, Src: "m[\"x\"].Pure", Value: "x"}, }, { - Line: 329, - SyntaxPatterns: []ir.PatternString{{Line: 329, Value: "$x = $x &^ $y"}}, + Line: 324, + SyntaxPatterns: []ir.PatternString{{Line: 324, Value: "$x = $x &^ $y"}}, ReportTemplate: "replace `$$` with `$x &^= $y`", - WhereExpr: ir.FilterExpr{Line: 329, Op: ir.FilterVarPureOp, Src: "m[\"x\"].Pure", Value: "x"}, + WhereExpr: ir.FilterExpr{Line: 324, Op: ir.FilterVarPureOp, Src: "m[\"x\"].Pure", Value: "x"}, }, }, }, { - Line: 336, + Line: 331, Name: "preferWriteByte", MatcherName: "m", DocTags: []string{"performance", "experimental", "opinionated"}, @@ -1136,45 +1167,45 @@ var PrecompiledRules = &ir.File{ DocBefore: "w.WriteRune('\\n')", DocAfter: "w.WriteByte('\\n')", Rules: []ir.Rule{{ - Line: 340, - SyntaxPatterns: []ir.PatternString{{Line: 340, Value: "$w.WriteRune($c)"}}, + Line: 335, + SyntaxPatterns: []ir.PatternString{{Line: 335, Value: "$w.WriteRune($c)"}}, ReportTemplate: "consider writing single byte rune $c with $w.WriteByte($c)", WhereExpr: ir.FilterExpr{ - Line: 341, + Line: 336, Op: ir.FilterAndOp, Src: "m[\"w\"].Type.Implements(\"io.ByteWriter\") && (m[\"c\"].Const && m[\"c\"].Value.Int() < runeSelf)", Args: []ir.FilterExpr{ { - Line: 341, + Line: 336, Op: ir.FilterVarTypeImplementsOp, Src: "m[\"w\"].Type.Implements(\"io.ByteWriter\")", Value: "w", - Args: []ir.FilterExpr{{Line: 341, Op: ir.FilterStringOp, Src: "\"io.ByteWriter\"", Value: "io.ByteWriter"}}, + Args: []ir.FilterExpr{{Line: 336, Op: ir.FilterStringOp, Src: "\"io.ByteWriter\"", Value: "io.ByteWriter"}}, }, { - Line: 341, + Line: 336, Op: ir.FilterAndOp, Src: "(m[\"c\"].Const && m[\"c\"].Value.Int() < runeSelf)", Args: 
[]ir.FilterExpr{ { - Line: 341, + Line: 336, Op: ir.FilterVarConstOp, Src: "m[\"c\"].Const", Value: "c", }, { - Line: 341, + Line: 336, Op: ir.FilterLtOp, Src: "m[\"c\"].Value.Int() < runeSelf", Args: []ir.FilterExpr{ { - Line: 341, + Line: 336, Op: ir.FilterVarValueIntOp, Src: "m[\"c\"].Value.Int()", Value: "c", }, { - Line: 341, + Line: 336, Op: ir.FilterIntOp, Src: "runeSelf", Value: int64(128), @@ -1188,7 +1219,7 @@ var PrecompiledRules = &ir.File{ }}, }, { - Line: 349, + Line: 344, Name: "preferFprint", MatcherName: "m", DocTags: []string{"performance", "experimental"}, @@ -1197,66 +1228,147 @@ var PrecompiledRules = &ir.File{ DocAfter: "fmt.Fprintf(w, \"%x\", 10)", Rules: []ir.Rule{ { - Line: 350, - SyntaxPatterns: []ir.PatternString{{Line: 350, Value: "$w.Write([]byte(fmt.Sprint($*args)))"}}, + Line: 345, + SyntaxPatterns: []ir.PatternString{{Line: 345, Value: "$w.Write([]byte(fmt.Sprint($*args)))"}}, ReportTemplate: "fmt.Fprint($w, $args) should be preferred to the $$", SuggestTemplate: "fmt.Fprint($w, $args)", WhereExpr: ir.FilterExpr{ - Line: 351, + Line: 346, Op: ir.FilterVarTypeImplementsOp, Src: "m[\"w\"].Type.Implements(\"io.Writer\")", Value: "w", - Args: []ir.FilterExpr{{Line: 351, Op: ir.FilterStringOp, Src: "\"io.Writer\"", Value: "io.Writer"}}, + Args: []ir.FilterExpr{{Line: 346, Op: ir.FilterStringOp, Src: "\"io.Writer\"", Value: "io.Writer"}}, }, }, { - Line: 355, - SyntaxPatterns: []ir.PatternString{{Line: 355, Value: "$w.Write([]byte(fmt.Sprintf($*args)))"}}, + Line: 350, + SyntaxPatterns: []ir.PatternString{{Line: 350, Value: "$w.Write([]byte(fmt.Sprintf($*args)))"}}, ReportTemplate: "fmt.Fprintf($w, $args) should be preferred to the $$", SuggestTemplate: "fmt.Fprintf($w, $args)", WhereExpr: ir.FilterExpr{ - Line: 356, + Line: 351, Op: ir.FilterVarTypeImplementsOp, Src: "m[\"w\"].Type.Implements(\"io.Writer\")", Value: "w", - Args: []ir.FilterExpr{{Line: 356, Op: ir.FilterStringOp, Src: "\"io.Writer\"", Value: "io.Writer"}}, + Args: []ir.FilterExpr{{Line: 351, Op: ir.FilterStringOp, Src: "\"io.Writer\"", Value: "io.Writer"}}, }, }, { - Line: 360, - SyntaxPatterns: []ir.PatternString{{Line: 360, Value: "$w.Write([]byte(fmt.Sprintln($*args)))"}}, + Line: 355, + SyntaxPatterns: []ir.PatternString{{Line: 355, Value: "$w.Write([]byte(fmt.Sprintln($*args)))"}}, ReportTemplate: "fmt.Fprintln($w, $args) should be preferred to the $$", SuggestTemplate: "fmt.Fprintln($w, $args)", WhereExpr: ir.FilterExpr{ - Line: 361, + Line: 356, Op: ir.FilterVarTypeImplementsOp, Src: "m[\"w\"].Type.Implements(\"io.Writer\")", Value: "w", - Args: []ir.FilterExpr{{Line: 361, Op: ir.FilterStringOp, Src: "\"io.Writer\"", Value: "io.Writer"}}, + Args: []ir.FilterExpr{{Line: 356, Op: ir.FilterStringOp, Src: "\"io.Writer\"", Value: "io.Writer"}}, }, }, { - Line: 365, - SyntaxPatterns: []ir.PatternString{{Line: 365, Value: "io.WriteString($w, fmt.Sprint($*args))"}}, + Line: 360, + SyntaxPatterns: []ir.PatternString{{Line: 360, Value: "io.WriteString($w, fmt.Sprint($*args))"}}, ReportTemplate: "suggestion: fmt.Fprint($w, $args)", SuggestTemplate: "fmt.Fprint($w, $args)", }, { - Line: 366, - SyntaxPatterns: []ir.PatternString{{Line: 366, Value: "io.WriteString($w, fmt.Sprintf($*args))"}}, + Line: 361, + SyntaxPatterns: []ir.PatternString{{Line: 361, Value: "io.WriteString($w, fmt.Sprintf($*args))"}}, ReportTemplate: "suggestion: fmt.Fprintf($w, $args)", SuggestTemplate: "fmt.Fprintf($w, $args)", }, + { + Line: 362, + SyntaxPatterns: []ir.PatternString{{Line: 362, Value: "io.WriteString($w, 
fmt.Sprintln($*args))"}}, + ReportTemplate: "suggestion: fmt.Fprintln($w, $args)", + SuggestTemplate: "fmt.Fprintln($w, $args)", + }, + { + Line: 364, + SyntaxPatterns: []ir.PatternString{{Line: 364, Value: "$w.WriteString(fmt.Sprint($*args))"}}, + ReportTemplate: "suggestion: fmt.Fprint($w, $args)", + SuggestTemplate: "fmt.Fprint($w, $args)", + WhereExpr: ir.FilterExpr{ + Line: 365, + Op: ir.FilterAndOp, + Src: "m[\"w\"].Type.Implements(\"io.Writer\") && m[\"w\"].Type.Implements(\"io.StringWriter\")", + Args: []ir.FilterExpr{ + { + Line: 365, + Op: ir.FilterVarTypeImplementsOp, + Src: "m[\"w\"].Type.Implements(\"io.Writer\")", + Value: "w", + Args: []ir.FilterExpr{{Line: 365, Op: ir.FilterStringOp, Src: "\"io.Writer\"", Value: "io.Writer"}}, + }, + { + Line: 365, + Op: ir.FilterVarTypeImplementsOp, + Src: "m[\"w\"].Type.Implements(\"io.StringWriter\")", + Value: "w", + Args: []ir.FilterExpr{{Line: 365, Op: ir.FilterStringOp, Src: "\"io.StringWriter\"", Value: "io.StringWriter"}}, + }, + }, + }, + }, { Line: 367, - SyntaxPatterns: []ir.PatternString{{Line: 367, Value: "io.WriteString($w, fmt.Sprintln($*args))"}}, + SyntaxPatterns: []ir.PatternString{{Line: 367, Value: "$w.WriteString(fmt.Sprintf($*args))"}}, + ReportTemplate: "suggestion: fmt.Fprintf($w, $args)", + SuggestTemplate: "fmt.Fprintf($w, $args)", + WhereExpr: ir.FilterExpr{ + Line: 368, + Op: ir.FilterAndOp, + Src: "m[\"w\"].Type.Implements(\"io.Writer\") && m[\"w\"].Type.Implements(\"io.StringWriter\")", + Args: []ir.FilterExpr{ + { + Line: 368, + Op: ir.FilterVarTypeImplementsOp, + Src: "m[\"w\"].Type.Implements(\"io.Writer\")", + Value: "w", + Args: []ir.FilterExpr{{Line: 368, Op: ir.FilterStringOp, Src: "\"io.Writer\"", Value: "io.Writer"}}, + }, + { + Line: 368, + Op: ir.FilterVarTypeImplementsOp, + Src: "m[\"w\"].Type.Implements(\"io.StringWriter\")", + Value: "w", + Args: []ir.FilterExpr{{Line: 368, Op: ir.FilterStringOp, Src: "\"io.StringWriter\"", Value: "io.StringWriter"}}, + }, + }, + }, + }, + { + Line: 370, + SyntaxPatterns: []ir.PatternString{{Line: 370, Value: "$w.WriteString(fmt.Sprintln($*args))"}}, ReportTemplate: "suggestion: fmt.Fprintln($w, $args)", SuggestTemplate: "fmt.Fprintln($w, $args)", + WhereExpr: ir.FilterExpr{ + Line: 371, + Op: ir.FilterAndOp, + Src: "m[\"w\"].Type.Implements(\"io.Writer\") && m[\"w\"].Type.Implements(\"io.StringWriter\")", + Args: []ir.FilterExpr{ + { + Line: 371, + Op: ir.FilterVarTypeImplementsOp, + Src: "m[\"w\"].Type.Implements(\"io.Writer\")", + Value: "w", + Args: []ir.FilterExpr{{Line: 371, Op: ir.FilterStringOp, Src: "\"io.Writer\"", Value: "io.Writer"}}, + }, + { + Line: 371, + Op: ir.FilterVarTypeImplementsOp, + Src: "m[\"w\"].Type.Implements(\"io.StringWriter\")", + Value: "w", + Args: []ir.FilterExpr{{Line: 371, Op: ir.FilterStringOp, Src: "\"io.StringWriter\"", Value: "io.StringWriter"}}, + }, + }, + }, }, }, }, { - Line: 374, + Line: 379, Name: "dupArg", MatcherName: "m", DocTags: []string{"diagnostic"}, @@ -1265,62 +1377,62 @@ var PrecompiledRules = &ir.File{ DocAfter: "copy(dst, src)", Rules: []ir.Rule{ { - Line: 375, + Line: 380, SyntaxPatterns: []ir.PatternString{ - {Line: 375, Value: "$x.Equal($x)"}, - {Line: 375, Value: "$x.Equals($x)"}, - {Line: 375, Value: "$x.Compare($x)"}, - {Line: 375, Value: "$x.Cmp($x)"}, + {Line: 380, Value: "$x.Equal($x)"}, + {Line: 380, Value: "$x.Equals($x)"}, + {Line: 380, Value: "$x.Compare($x)"}, + {Line: 380, Value: "$x.Cmp($x)"}, }, ReportTemplate: "suspicious method call with the same argument and receiver", - WhereExpr: 
ir.FilterExpr{Line: 376, Op: ir.FilterVarPureOp, Src: "m[\"x\"].Pure", Value: "x"}, + WhereExpr: ir.FilterExpr{Line: 381, Op: ir.FilterVarPureOp, Src: "m[\"x\"].Pure", Value: "x"}, }, { - Line: 379, + Line: 384, SyntaxPatterns: []ir.PatternString{ - {Line: 379, Value: "copy($x, $x)"}, - {Line: 380, Value: "math.Max($x, $x)"}, - {Line: 381, Value: "math.Min($x, $x)"}, - {Line: 382, Value: "reflect.Copy($x, $x)"}, - {Line: 383, Value: "reflect.DeepEqual($x, $x)"}, - {Line: 384, Value: "strings.Contains($x, $x)"}, - {Line: 385, Value: "strings.Compare($x, $x)"}, - {Line: 386, Value: "strings.EqualFold($x, $x)"}, - {Line: 387, Value: "strings.HasPrefix($x, $x)"}, - {Line: 388, Value: "strings.HasSuffix($x, $x)"}, - {Line: 389, Value: "strings.Index($x, $x)"}, - {Line: 390, Value: "strings.LastIndex($x, $x)"}, - {Line: 391, Value: "strings.Split($x, $x)"}, - {Line: 392, Value: "strings.SplitAfter($x, $x)"}, - {Line: 393, Value: "strings.SplitAfterN($x, $x, $_)"}, - {Line: 394, Value: "strings.SplitN($x, $x, $_)"}, - {Line: 395, Value: "strings.Replace($_, $x, $x, $_)"}, - {Line: 396, Value: "strings.ReplaceAll($_, $x, $x)"}, - {Line: 397, Value: "bytes.Contains($x, $x)"}, - {Line: 398, Value: "bytes.Compare($x, $x)"}, - {Line: 399, Value: "bytes.Equal($x, $x)"}, - {Line: 400, Value: "bytes.EqualFold($x, $x)"}, - {Line: 401, Value: "bytes.HasPrefix($x, $x)"}, - {Line: 402, Value: "bytes.HasSuffix($x, $x)"}, - {Line: 403, Value: "bytes.Index($x, $x)"}, - {Line: 404, Value: "bytes.LastIndex($x, $x)"}, - {Line: 405, Value: "bytes.Split($x, $x)"}, - {Line: 406, Value: "bytes.SplitAfter($x, $x)"}, - {Line: 407, Value: "bytes.SplitAfterN($x, $x, $_)"}, - {Line: 408, Value: "bytes.SplitN($x, $x, $_)"}, - {Line: 409, Value: "bytes.Replace($_, $x, $x, $_)"}, - {Line: 410, Value: "bytes.ReplaceAll($_, $x, $x)"}, - {Line: 411, Value: "types.Identical($x, $x)"}, - {Line: 412, Value: "types.IdenticalIgnoreTags($x, $x)"}, - {Line: 413, Value: "draw.Draw($x, $_, $x, $_, $_)"}, + {Line: 384, Value: "copy($x, $x)"}, + {Line: 385, Value: "math.Max($x, $x)"}, + {Line: 386, Value: "math.Min($x, $x)"}, + {Line: 387, Value: "reflect.Copy($x, $x)"}, + {Line: 388, Value: "reflect.DeepEqual($x, $x)"}, + {Line: 389, Value: "strings.Contains($x, $x)"}, + {Line: 390, Value: "strings.Compare($x, $x)"}, + {Line: 391, Value: "strings.EqualFold($x, $x)"}, + {Line: 392, Value: "strings.HasPrefix($x, $x)"}, + {Line: 393, Value: "strings.HasSuffix($x, $x)"}, + {Line: 394, Value: "strings.Index($x, $x)"}, + {Line: 395, Value: "strings.LastIndex($x, $x)"}, + {Line: 396, Value: "strings.Split($x, $x)"}, + {Line: 397, Value: "strings.SplitAfter($x, $x)"}, + {Line: 398, Value: "strings.SplitAfterN($x, $x, $_)"}, + {Line: 399, Value: "strings.SplitN($x, $x, $_)"}, + {Line: 400, Value: "strings.Replace($_, $x, $x, $_)"}, + {Line: 401, Value: "strings.ReplaceAll($_, $x, $x)"}, + {Line: 402, Value: "bytes.Contains($x, $x)"}, + {Line: 403, Value: "bytes.Compare($x, $x)"}, + {Line: 404, Value: "bytes.Equal($x, $x)"}, + {Line: 405, Value: "bytes.EqualFold($x, $x)"}, + {Line: 406, Value: "bytes.HasPrefix($x, $x)"}, + {Line: 407, Value: "bytes.HasSuffix($x, $x)"}, + {Line: 408, Value: "bytes.Index($x, $x)"}, + {Line: 409, Value: "bytes.LastIndex($x, $x)"}, + {Line: 410, Value: "bytes.Split($x, $x)"}, + {Line: 411, Value: "bytes.SplitAfter($x, $x)"}, + {Line: 412, Value: "bytes.SplitAfterN($x, $x, $_)"}, + {Line: 413, Value: "bytes.SplitN($x, $x, $_)"}, + {Line: 414, Value: "bytes.Replace($_, $x, $x, $_)"}, + {Line: 415, Value: 
"bytes.ReplaceAll($_, $x, $x)"}, + {Line: 416, Value: "types.Identical($x, $x)"}, + {Line: 417, Value: "types.IdenticalIgnoreTags($x, $x)"}, + {Line: 418, Value: "draw.Draw($x, $_, $x, $_, $_)"}, }, ReportTemplate: "suspicious duplicated args in $$", - WhereExpr: ir.FilterExpr{Line: 414, Op: ir.FilterVarPureOp, Src: "m[\"x\"].Pure", Value: "x"}, + WhereExpr: ir.FilterExpr{Line: 419, Op: ir.FilterVarPureOp, Src: "m[\"x\"].Pure", Value: "x"}, }, }, }, { - Line: 422, + Line: 427, Name: "returnAfterHttpError", MatcherName: "m", DocTags: []string{"diagnostic", "experimental"}, @@ -1328,14 +1440,14 @@ var PrecompiledRules = &ir.File{ DocBefore: "if err != nil { http.Error(...); }", DocAfter: "if err != nil { http.Error(...); return; }", Rules: []ir.Rule{{ - Line: 423, - SyntaxPatterns: []ir.PatternString{{Line: 423, Value: "if $_ { $*_; http.Error($w, $err, $code) }"}}, + Line: 428, + SyntaxPatterns: []ir.PatternString{{Line: 428, Value: "if $_ { $*_; http.Error($w, $err, $code) }"}}, ReportTemplate: "Possibly return is missed after the http.Error call", LocationVar: "w", }}, }, { - Line: 432, + Line: 437, Name: "preferFilepathJoin", MatcherName: "m", DocTags: []string{"style", "experimental"}, @@ -1343,35 +1455,35 @@ var PrecompiledRules = &ir.File{ DocBefore: "x + string(os.PathSeparator) + y", DocAfter: "filepath.Join(x, y)", Rules: []ir.Rule{{ - Line: 433, - SyntaxPatterns: []ir.PatternString{{Line: 433, Value: "$x + string(os.PathSeparator) + $y"}}, + Line: 438, + SyntaxPatterns: []ir.PatternString{{Line: 438, Value: "$x + string(os.PathSeparator) + $y"}}, ReportTemplate: "filepath.Join($x, $y) should be preferred to the $$", SuggestTemplate: "filepath.Join($x, $y)", WhereExpr: ir.FilterExpr{ - Line: 434, + Line: 439, Op: ir.FilterAndOp, Src: "m[\"x\"].Type.Is(`string`) && m[\"y\"].Type.Is(`string`)", Args: []ir.FilterExpr{ { - Line: 434, + Line: 439, Op: ir.FilterVarTypeIsOp, Src: "m[\"x\"].Type.Is(`string`)", Value: "x", - Args: []ir.FilterExpr{{Line: 434, Op: ir.FilterStringOp, Src: "`string`", Value: "string"}}, + Args: []ir.FilterExpr{{Line: 439, Op: ir.FilterStringOp, Src: "`string`", Value: "string"}}, }, { - Line: 434, + Line: 439, Op: ir.FilterVarTypeIsOp, Src: "m[\"y\"].Type.Is(`string`)", Value: "y", - Args: []ir.FilterExpr{{Line: 434, Op: ir.FilterStringOp, Src: "`string`", Value: "string"}}, + Args: []ir.FilterExpr{{Line: 439, Op: ir.FilterStringOp, Src: "`string`", Value: "string"}}, }, }, }, }}, }, { - Line: 443, + Line: 448, Name: "preferStringWriter", MatcherName: "m", DocTags: []string{"performance", "experimental"}, @@ -1380,35 +1492,35 @@ var PrecompiledRules = &ir.File{ DocAfter: "w.WriteString(\"foo\")", Rules: []ir.Rule{ { - Line: 444, - SyntaxPatterns: []ir.PatternString{{Line: 444, Value: "$w.Write([]byte($s))"}}, + Line: 449, + SyntaxPatterns: []ir.PatternString{{Line: 449, Value: "$w.Write([]byte($s))"}}, ReportTemplate: "$w.WriteString($s) should be preferred to the $$", SuggestTemplate: "$w.WriteString($s)", WhereExpr: ir.FilterExpr{ - Line: 445, + Line: 450, Op: ir.FilterVarTypeImplementsOp, Src: "m[\"w\"].Type.Implements(\"io.StringWriter\")", Value: "w", - Args: []ir.FilterExpr{{Line: 445, Op: ir.FilterStringOp, Src: "\"io.StringWriter\"", Value: "io.StringWriter"}}, + Args: []ir.FilterExpr{{Line: 450, Op: ir.FilterStringOp, Src: "\"io.StringWriter\"", Value: "io.StringWriter"}}, }, }, { - Line: 449, - SyntaxPatterns: []ir.PatternString{{Line: 449, Value: "io.WriteString($w, $s)"}}, + Line: 454, + SyntaxPatterns: []ir.PatternString{{Line: 454, Value: 
"io.WriteString($w, $s)"}}, ReportTemplate: "$w.WriteString($s) should be preferred to the $$", SuggestTemplate: "$w.WriteString($s)", WhereExpr: ir.FilterExpr{ - Line: 450, + Line: 455, Op: ir.FilterVarTypeImplementsOp, Src: "m[\"w\"].Type.Implements(\"io.StringWriter\")", Value: "w", - Args: []ir.FilterExpr{{Line: 450, Op: ir.FilterStringOp, Src: "\"io.StringWriter\"", Value: "io.StringWriter"}}, + Args: []ir.FilterExpr{{Line: 455, Op: ir.FilterStringOp, Src: "\"io.StringWriter\"", Value: "io.StringWriter"}}, }, }, }, }, { - Line: 459, + Line: 464, Name: "sliceClear", MatcherName: "m", DocTags: []string{"performance", "experimental"}, @@ -1416,22 +1528,22 @@ var PrecompiledRules = &ir.File{ DocBefore: "for i := 0; i < len(buf); i++ { buf[i] = 0 }", DocAfter: "for i := range buf { buf[i] = 0 }", Rules: []ir.Rule{{ - Line: 460, - SyntaxPatterns: []ir.PatternString{{Line: 460, Value: "for $i := 0; $i < len($xs); $i++ { $xs[$i] = $zero }"}}, + Line: 465, + SyntaxPatterns: []ir.PatternString{{Line: 465, Value: "for $i := 0; $i < len($xs); $i++ { $xs[$i] = $zero }"}}, ReportTemplate: "rewrite as for-range so compiler can recognize this pattern", WhereExpr: ir.FilterExpr{ - Line: 461, + Line: 466, Op: ir.FilterEqOp, Src: "m[\"zero\"].Value.Int() == 0", Args: []ir.FilterExpr{ { - Line: 461, + Line: 466, Op: ir.FilterVarValueIntOp, Src: "m[\"zero\"].Value.Int()", Value: "zero", }, { - Line: 461, + Line: 466, Op: ir.FilterIntOp, Src: "0", Value: int64(0), @@ -1441,7 +1553,7 @@ var PrecompiledRules = &ir.File{ }}, }, { - Line: 469, + Line: 474, Name: "syncMapLoadAndDelete", MatcherName: "m", DocTags: []string{"diagnostic", "experimental"}, @@ -1449,33 +1561,33 @@ var PrecompiledRules = &ir.File{ DocBefore: "v, ok := m.Load(k); if ok { m.Delete($k); f(v); }", DocAfter: "v, deleted := m.LoadAndDelete(k); if deleted { f(v) }", Rules: []ir.Rule{{ - Line: 470, - SyntaxPatterns: []ir.PatternString{{Line: 470, Value: "$_, $ok := $m.Load($k); if $ok { $m.Delete($k); $*_ }"}}, + Line: 475, + SyntaxPatterns: []ir.PatternString{{Line: 475, Value: "$_, $ok := $m.Load($k); if $ok { $m.Delete($k); $*_ }"}}, ReportTemplate: "use $m.LoadAndDelete to perform load+delete operations atomically", WhereExpr: ir.FilterExpr{ - Line: 471, + Line: 476, Op: ir.FilterAndOp, Src: "m.GoVersion().GreaterEqThan(\"1.15\") &&\n\tm[\"m\"].Type.Is(`*sync.Map`)", Args: []ir.FilterExpr{ { - Line: 471, + Line: 476, Op: ir.FilterGoVersionGreaterEqThanOp, Src: "m.GoVersion().GreaterEqThan(\"1.15\")", Value: "1.15", }, { - Line: 472, + Line: 477, Op: ir.FilterVarTypeIsOp, Src: "m[\"m\"].Type.Is(`*sync.Map`)", Value: "m", - Args: []ir.FilterExpr{{Line: 472, Op: ir.FilterStringOp, Src: "`*sync.Map`", Value: "*sync.Map"}}, + Args: []ir.FilterExpr{{Line: 477, Op: ir.FilterStringOp, Src: "`*sync.Map`", Value: "*sync.Map"}}, }, }, }, }}, }, { - Line: 480, + Line: 485, Name: "sprintfQuotedString", MatcherName: "m", DocTags: []string{"diagnostic", "experimental"}, @@ -1483,34 +1595,34 @@ var PrecompiledRules = &ir.File{ DocBefore: "fmt.Sprintf(`\"%s\"`, s)", DocAfter: "fmt.Sprintf(`%q`, s)", Rules: []ir.Rule{{ - Line: 481, - SyntaxPatterns: []ir.PatternString{{Line: 481, Value: "fmt.Sprintf($s, $*_)"}}, + Line: 486, + SyntaxPatterns: []ir.PatternString{{Line: 486, Value: "fmt.Sprintf($s, $*_)"}}, ReportTemplate: "use %q instead of \"%s\" for quoted strings", WhereExpr: ir.FilterExpr{ - Line: 482, + Line: 487, Op: ir.FilterOrOp, Src: "m[\"s\"].Text.Matches(\"^`.*\\\"%s\\\".*`$\") ||\n\tm[\"s\"].Text.Matches(`^\".*\\\\\"%s\\\\\".*\"$`)", Args: 
[]ir.FilterExpr{ { - Line: 482, + Line: 487, Op: ir.FilterVarTextMatchesOp, Src: "m[\"s\"].Text.Matches(\"^`.*\\\"%s\\\".*`$\")", Value: "s", - Args: []ir.FilterExpr{{Line: 482, Op: ir.FilterStringOp, Src: "\"^`.*\\\"%s\\\".*`$\"", Value: "^`.*\"%s\".*`$"}}, + Args: []ir.FilterExpr{{Line: 487, Op: ir.FilterStringOp, Src: "\"^`.*\\\"%s\\\".*`$\"", Value: "^`.*\"%s\".*`$"}}, }, { - Line: 483, + Line: 488, Op: ir.FilterVarTextMatchesOp, Src: "m[\"s\"].Text.Matches(`^\".*\\\\\"%s\\\\\".*\"$`)", Value: "s", - Args: []ir.FilterExpr{{Line: 483, Op: ir.FilterStringOp, Src: "`^\".*\\\\\"%s\\\\\".*\"$`", Value: "^\".*\\\\\"%s\\\\\".*\"$"}}, + Args: []ir.FilterExpr{{Line: 488, Op: ir.FilterStringOp, Src: "`^\".*\\\\\"%s\\\\\".*\"$`", Value: "^\".*\\\\\"%s\\\\\".*\"$"}}, }, }, }, }}, }, { - Line: 491, + Line: 496, Name: "offBy1", MatcherName: "m", DocTags: []string{"diagnostic"}, @@ -1519,80 +1631,80 @@ var PrecompiledRules = &ir.File{ DocAfter: "xs[len(xs)-1]", Rules: []ir.Rule{ { - Line: 492, - SyntaxPatterns: []ir.PatternString{{Line: 492, Value: "$x[len($x)]"}}, + Line: 497, + SyntaxPatterns: []ir.PatternString{{Line: 497, Value: "$x[len($x)]"}}, ReportTemplate: "index expr always panics; maybe you wanted $x[len($x)-1]?", SuggestTemplate: "$x[len($x)-1]", WhereExpr: ir.FilterExpr{ - Line: 493, + Line: 498, Op: ir.FilterAndOp, Src: "m[\"x\"].Pure && m[\"x\"].Type.Is(`[]$_`)", Args: []ir.FilterExpr{ - {Line: 493, Op: ir.FilterVarPureOp, Src: "m[\"x\"].Pure", Value: "x"}, + {Line: 498, Op: ir.FilterVarPureOp, Src: "m[\"x\"].Pure", Value: "x"}, { - Line: 493, + Line: 498, Op: ir.FilterVarTypeIsOp, Src: "m[\"x\"].Type.Is(`[]$_`)", Value: "x", - Args: []ir.FilterExpr{{Line: 493, Op: ir.FilterStringOp, Src: "`[]$_`", Value: "[]$_"}}, + Args: []ir.FilterExpr{{Line: 498, Op: ir.FilterStringOp, Src: "`[]$_`", Value: "[]$_"}}, }, }, }, }, { - Line: 500, + Line: 505, SyntaxPatterns: []ir.PatternString{ - {Line: 501, Value: "$i := strings.Index($s, $_); $_ := $slicing[$i:]"}, - {Line: 502, Value: "$i := strings.Index($s, $_); $_ = $slicing[$i:]"}, - {Line: 503, Value: "$i := bytes.Index($s, $_); $_ := $slicing[$i:]"}, - {Line: 504, Value: "$i := bytes.Index($s, $_); $_ = $slicing[$i:]"}, + {Line: 506, Value: "$i := strings.Index($s, $_); $_ := $slicing[$i:]"}, + {Line: 507, Value: "$i := strings.Index($s, $_); $_ = $slicing[$i:]"}, + {Line: 508, Value: "$i := bytes.Index($s, $_); $_ := $slicing[$i:]"}, + {Line: 509, Value: "$i := bytes.Index($s, $_); $_ = $slicing[$i:]"}, }, ReportTemplate: "Index() can return -1; maybe you wanted to do $s[$i+1:]", WhereExpr: ir.FilterExpr{ - Line: 505, + Line: 510, Op: ir.FilterEqOp, Src: "m[\"s\"].Text == m[\"slicing\"].Text", Args: []ir.FilterExpr{ - {Line: 505, Op: ir.FilterVarTextOp, Src: "m[\"s\"].Text", Value: "s"}, - {Line: 505, Op: ir.FilterVarTextOp, Src: "m[\"slicing\"].Text", Value: "slicing"}, + {Line: 510, Op: ir.FilterVarTextOp, Src: "m[\"s\"].Text", Value: "s"}, + {Line: 510, Op: ir.FilterVarTextOp, Src: "m[\"slicing\"].Text", Value: "slicing"}, }, }, LocationVar: "slicing", }, { - Line: 509, + Line: 514, SyntaxPatterns: []ir.PatternString{ - {Line: 510, Value: "$i := strings.Index($s, $_); $_ := $slicing[:$i]"}, - {Line: 511, Value: "$i := strings.Index($s, $_); $_ = $slicing[:$i]"}, - {Line: 512, Value: "$i := bytes.Index($s, $_); $_ := $slicing[:$i]"}, - {Line: 513, Value: "$i := bytes.Index($s, $_); $_ = $slicing[:$i]"}, + {Line: 515, Value: "$i := strings.Index($s, $_); $_ := $slicing[:$i]"}, + {Line: 516, Value: "$i := strings.Index($s, $_); $_ = 
$slicing[:$i]"}, + {Line: 517, Value: "$i := bytes.Index($s, $_); $_ := $slicing[:$i]"}, + {Line: 518, Value: "$i := bytes.Index($s, $_); $_ = $slicing[:$i]"}, }, ReportTemplate: "Index() can return -1; maybe you wanted to do $s[:$i+1]", WhereExpr: ir.FilterExpr{ - Line: 514, + Line: 519, Op: ir.FilterEqOp, Src: "m[\"s\"].Text == m[\"slicing\"].Text", Args: []ir.FilterExpr{ - {Line: 514, Op: ir.FilterVarTextOp, Src: "m[\"s\"].Text", Value: "s"}, - {Line: 514, Op: ir.FilterVarTextOp, Src: "m[\"slicing\"].Text", Value: "slicing"}, + {Line: 519, Op: ir.FilterVarTextOp, Src: "m[\"s\"].Text", Value: "s"}, + {Line: 519, Op: ir.FilterVarTextOp, Src: "m[\"slicing\"].Text", Value: "slicing"}, }, }, LocationVar: "slicing", }, { - Line: 518, + Line: 523, SyntaxPatterns: []ir.PatternString{ - {Line: 519, Value: "$s[strings.Index($s, $_):]"}, - {Line: 520, Value: "$s[:strings.Index($s, $_)]"}, - {Line: 521, Value: "$s[bytes.Index($s, $_):]"}, - {Line: 522, Value: "$s[:bytes.Index($s, $_)]"}, + {Line: 524, Value: "$s[strings.Index($s, $_):]"}, + {Line: 525, Value: "$s[:strings.Index($s, $_)]"}, + {Line: 526, Value: "$s[bytes.Index($s, $_):]"}, + {Line: 527, Value: "$s[:bytes.Index($s, $_)]"}, }, ReportTemplate: "Index() can return -1; maybe you wanted to do Index()+1", }, }, }, { - Line: 530, + Line: 535, Name: "unslice", MatcherName: "m", DocTags: []string{"style"}, @@ -1600,35 +1712,35 @@ var PrecompiledRules = &ir.File{ DocBefore: "copy(b[:], values...)", DocAfter: "copy(b, values...)", Rules: []ir.Rule{{ - Line: 531, - SyntaxPatterns: []ir.PatternString{{Line: 531, Value: "$s[:]"}}, + Line: 536, + SyntaxPatterns: []ir.PatternString{{Line: 536, Value: "$s[:]"}}, ReportTemplate: "could simplify $$ to $s", SuggestTemplate: "$s", WhereExpr: ir.FilterExpr{ - Line: 532, + Line: 537, Op: ir.FilterOrOp, Src: "m[\"s\"].Type.Is(`string`) || m[\"s\"].Type.Is(`[]$_`)", Args: []ir.FilterExpr{ { - Line: 532, + Line: 537, Op: ir.FilterVarTypeIsOp, Src: "m[\"s\"].Type.Is(`string`)", Value: "s", - Args: []ir.FilterExpr{{Line: 532, Op: ir.FilterStringOp, Src: "`string`", Value: "string"}}, + Args: []ir.FilterExpr{{Line: 537, Op: ir.FilterStringOp, Src: "`string`", Value: "string"}}, }, { - Line: 532, + Line: 537, Op: ir.FilterVarTypeIsOp, Src: "m[\"s\"].Type.Is(`[]$_`)", Value: "s", - Args: []ir.FilterExpr{{Line: 532, Op: ir.FilterStringOp, Src: "`[]$_`", Value: "[]$_"}}, + Args: []ir.FilterExpr{{Line: 537, Op: ir.FilterStringOp, Src: "`[]$_`", Value: "[]$_"}}, }, }, }, }}, }, { - Line: 541, + Line: 546, Name: "yodaStyleExpr", MatcherName: "m", DocTags: []string{"style", "experimental"}, @@ -1637,105 +1749,105 @@ var PrecompiledRules = &ir.File{ DocAfter: "return ptr != nil", Rules: []ir.Rule{ { - Line: 542, - SyntaxPatterns: []ir.PatternString{{Line: 542, Value: "$constval != $x"}}, + Line: 547, + SyntaxPatterns: []ir.PatternString{{Line: 547, Value: "$constval != $x"}}, ReportTemplate: "consider to change order in expression to $x != $constval", WhereExpr: ir.FilterExpr{ - Line: 542, + Line: 547, Op: ir.FilterAndOp, Src: "m[\"constval\"].Node.Is(`BasicLit`) && !m[\"x\"].Node.Is(`BasicLit`)", Args: []ir.FilterExpr{ { - Line: 542, + Line: 547, Op: ir.FilterVarNodeIsOp, Src: "m[\"constval\"].Node.Is(`BasicLit`)", Value: "constval", - Args: []ir.FilterExpr{{Line: 542, Op: ir.FilterStringOp, Src: "`BasicLit`", Value: "BasicLit"}}, + Args: []ir.FilterExpr{{Line: 547, Op: ir.FilterStringOp, Src: "`BasicLit`", Value: "BasicLit"}}, }, { - Line: 542, + Line: 547, Op: ir.FilterNotOp, Src: "!m[\"x\"].Node.Is(`BasicLit`)", 
Args: []ir.FilterExpr{{ - Line: 542, + Line: 547, Op: ir.FilterVarNodeIsOp, Src: "m[\"x\"].Node.Is(`BasicLit`)", Value: "x", - Args: []ir.FilterExpr{{Line: 542, Op: ir.FilterStringOp, Src: "`BasicLit`", Value: "BasicLit"}}, + Args: []ir.FilterExpr{{Line: 547, Op: ir.FilterStringOp, Src: "`BasicLit`", Value: "BasicLit"}}, }}, }, }, }, }, { - Line: 544, - SyntaxPatterns: []ir.PatternString{{Line: 544, Value: "$constval == $x"}}, + Line: 549, + SyntaxPatterns: []ir.PatternString{{Line: 549, Value: "$constval == $x"}}, ReportTemplate: "consider to change order in expression to $x == $constval", WhereExpr: ir.FilterExpr{ - Line: 544, + Line: 549, Op: ir.FilterAndOp, Src: "m[\"constval\"].Node.Is(`BasicLit`) && !m[\"x\"].Node.Is(`BasicLit`)", Args: []ir.FilterExpr{ { - Line: 544, + Line: 549, Op: ir.FilterVarNodeIsOp, Src: "m[\"constval\"].Node.Is(`BasicLit`)", Value: "constval", - Args: []ir.FilterExpr{{Line: 544, Op: ir.FilterStringOp, Src: "`BasicLit`", Value: "BasicLit"}}, + Args: []ir.FilterExpr{{Line: 549, Op: ir.FilterStringOp, Src: "`BasicLit`", Value: "BasicLit"}}, }, { - Line: 544, + Line: 549, Op: ir.FilterNotOp, Src: "!m[\"x\"].Node.Is(`BasicLit`)", Args: []ir.FilterExpr{{ - Line: 544, + Line: 549, Op: ir.FilterVarNodeIsOp, Src: "m[\"x\"].Node.Is(`BasicLit`)", Value: "x", - Args: []ir.FilterExpr{{Line: 544, Op: ir.FilterStringOp, Src: "`BasicLit`", Value: "BasicLit"}}, + Args: []ir.FilterExpr{{Line: 549, Op: ir.FilterStringOp, Src: "`BasicLit`", Value: "BasicLit"}}, }}, }, }, }, }, { - Line: 547, - SyntaxPatterns: []ir.PatternString{{Line: 547, Value: "nil != $x"}}, + Line: 552, + SyntaxPatterns: []ir.PatternString{{Line: 552, Value: "nil != $x"}}, ReportTemplate: "consider to change order in expression to $x != nil", WhereExpr: ir.FilterExpr{ - Line: 547, + Line: 552, Op: ir.FilterNotOp, Src: "!m[\"x\"].Node.Is(`BasicLit`)", Args: []ir.FilterExpr{{ - Line: 547, + Line: 552, Op: ir.FilterVarNodeIsOp, Src: "m[\"x\"].Node.Is(`BasicLit`)", Value: "x", - Args: []ir.FilterExpr{{Line: 547, Op: ir.FilterStringOp, Src: "`BasicLit`", Value: "BasicLit"}}, + Args: []ir.FilterExpr{{Line: 552, Op: ir.FilterStringOp, Src: "`BasicLit`", Value: "BasicLit"}}, }}, }, }, { - Line: 549, - SyntaxPatterns: []ir.PatternString{{Line: 549, Value: "nil == $x"}}, + Line: 554, + SyntaxPatterns: []ir.PatternString{{Line: 554, Value: "nil == $x"}}, ReportTemplate: "consider to change order in expression to $x == nil", WhereExpr: ir.FilterExpr{ - Line: 549, + Line: 554, Op: ir.FilterNotOp, Src: "!m[\"x\"].Node.Is(`BasicLit`)", Args: []ir.FilterExpr{{ - Line: 549, + Line: 554, Op: ir.FilterVarNodeIsOp, Src: "m[\"x\"].Node.Is(`BasicLit`)", Value: "x", - Args: []ir.FilterExpr{{Line: 549, Op: ir.FilterStringOp, Src: "`BasicLit`", Value: "BasicLit"}}, + Args: []ir.FilterExpr{{Line: 554, Op: ir.FilterStringOp, Src: "`BasicLit`", Value: "BasicLit"}}, }}, }, }, }, }, { - Line: 557, + Line: 562, Name: "equalFold", MatcherName: "m", DocTags: []string{"performance", "experimental"}, @@ -1744,114 +1856,114 @@ var PrecompiledRules = &ir.File{ DocAfter: "strings.EqualFold(x, y)", Rules: []ir.Rule{ { - Line: 566, + Line: 571, SyntaxPatterns: []ir.PatternString{ - {Line: 567, Value: "strings.ToLower($x) == $y"}, - {Line: 568, Value: "strings.ToLower($x) == strings.ToLower($y)"}, - {Line: 569, Value: "$x == strings.ToLower($y)"}, - {Line: 570, Value: "strings.ToUpper($x) == $y"}, - {Line: 571, Value: "strings.ToUpper($x) == strings.ToUpper($y)"}, - {Line: 572, Value: "$x == strings.ToUpper($y)"}, + {Line: 572, Value: 
"strings.ToLower($x) == $y"}, + {Line: 573, Value: "strings.ToLower($x) == strings.ToLower($y)"}, + {Line: 574, Value: "$x == strings.ToLower($y)"}, + {Line: 575, Value: "strings.ToUpper($x) == $y"}, + {Line: 576, Value: "strings.ToUpper($x) == strings.ToUpper($y)"}, + {Line: 577, Value: "$x == strings.ToUpper($y)"}, }, ReportTemplate: "consider replacing with strings.EqualFold($x, $y)", SuggestTemplate: "strings.EqualFold($x, $y)", WhereExpr: ir.FilterExpr{ - Line: 573, + Line: 578, Op: ir.FilterAndOp, Src: "m[\"x\"].Pure && m[\"y\"].Pure && m[\"x\"].Text != m[\"y\"].Text", Args: []ir.FilterExpr{ { - Line: 573, + Line: 578, Op: ir.FilterAndOp, Src: "m[\"x\"].Pure && m[\"y\"].Pure", Args: []ir.FilterExpr{ - {Line: 573, Op: ir.FilterVarPureOp, Src: "m[\"x\"].Pure", Value: "x"}, - {Line: 573, Op: ir.FilterVarPureOp, Src: "m[\"y\"].Pure", Value: "y"}, + {Line: 578, Op: ir.FilterVarPureOp, Src: "m[\"x\"].Pure", Value: "x"}, + {Line: 578, Op: ir.FilterVarPureOp, Src: "m[\"y\"].Pure", Value: "y"}, }, }, { - Line: 573, + Line: 578, Op: ir.FilterNeqOp, Src: "m[\"x\"].Text != m[\"y\"].Text", Args: []ir.FilterExpr{ - {Line: 573, Op: ir.FilterVarTextOp, Src: "m[\"x\"].Text", Value: "x"}, - {Line: 573, Op: ir.FilterVarTextOp, Src: "m[\"y\"].Text", Value: "y"}, + {Line: 578, Op: ir.FilterVarTextOp, Src: "m[\"x\"].Text", Value: "x"}, + {Line: 578, Op: ir.FilterVarTextOp, Src: "m[\"y\"].Text", Value: "y"}, }, }, }, }, }, { - Line: 578, + Line: 583, SyntaxPatterns: []ir.PatternString{ - {Line: 579, Value: "strings.ToLower($x) != $y"}, - {Line: 580, Value: "strings.ToLower($x) != strings.ToLower($y)"}, - {Line: 581, Value: "$x != strings.ToLower($y)"}, - {Line: 582, Value: "strings.ToUpper($x) != $y"}, - {Line: 583, Value: "strings.ToUpper($x) != strings.ToUpper($y)"}, - {Line: 584, Value: "$x != strings.ToUpper($y)"}, + {Line: 584, Value: "strings.ToLower($x) != $y"}, + {Line: 585, Value: "strings.ToLower($x) != strings.ToLower($y)"}, + {Line: 586, Value: "$x != strings.ToLower($y)"}, + {Line: 587, Value: "strings.ToUpper($x) != $y"}, + {Line: 588, Value: "strings.ToUpper($x) != strings.ToUpper($y)"}, + {Line: 589, Value: "$x != strings.ToUpper($y)"}, }, ReportTemplate: "consider replacing with !strings.EqualFold($x, $y)", SuggestTemplate: "!strings.EqualFold($x, $y)", WhereExpr: ir.FilterExpr{ - Line: 585, + Line: 590, Op: ir.FilterAndOp, Src: "m[\"x\"].Pure && m[\"y\"].Pure && m[\"x\"].Text != m[\"y\"].Text", Args: []ir.FilterExpr{ { - Line: 585, + Line: 590, Op: ir.FilterAndOp, Src: "m[\"x\"].Pure && m[\"y\"].Pure", Args: []ir.FilterExpr{ - {Line: 585, Op: ir.FilterVarPureOp, Src: "m[\"x\"].Pure", Value: "x"}, - {Line: 585, Op: ir.FilterVarPureOp, Src: "m[\"y\"].Pure", Value: "y"}, + {Line: 590, Op: ir.FilterVarPureOp, Src: "m[\"x\"].Pure", Value: "x"}, + {Line: 590, Op: ir.FilterVarPureOp, Src: "m[\"y\"].Pure", Value: "y"}, }, }, { - Line: 585, + Line: 590, Op: ir.FilterNeqOp, Src: "m[\"x\"].Text != m[\"y\"].Text", Args: []ir.FilterExpr{ - {Line: 585, Op: ir.FilterVarTextOp, Src: "m[\"x\"].Text", Value: "x"}, - {Line: 585, Op: ir.FilterVarTextOp, Src: "m[\"y\"].Text", Value: "y"}, + {Line: 590, Op: ir.FilterVarTextOp, Src: "m[\"x\"].Text", Value: "x"}, + {Line: 590, Op: ir.FilterVarTextOp, Src: "m[\"y\"].Text", Value: "y"}, }, }, }, }, }, { - Line: 590, + Line: 595, SyntaxPatterns: []ir.PatternString{ - {Line: 591, Value: "bytes.Equal(bytes.ToLower($x), $y)"}, - {Line: 592, Value: "bytes.Equal(bytes.ToLower($x), bytes.ToLower($y))"}, - {Line: 593, Value: "bytes.Equal($x, bytes.ToLower($y))"}, - {Line: 
594, Value: "bytes.Equal(bytes.ToUpper($x), $y)"}, - {Line: 595, Value: "bytes.Equal(bytes.ToUpper($x), bytes.ToUpper($y))"}, - {Line: 596, Value: "bytes.Equal($x, bytes.ToUpper($y))"}, + {Line: 596, Value: "bytes.Equal(bytes.ToLower($x), $y)"}, + {Line: 597, Value: "bytes.Equal(bytes.ToLower($x), bytes.ToLower($y))"}, + {Line: 598, Value: "bytes.Equal($x, bytes.ToLower($y))"}, + {Line: 599, Value: "bytes.Equal(bytes.ToUpper($x), $y)"}, + {Line: 600, Value: "bytes.Equal(bytes.ToUpper($x), bytes.ToUpper($y))"}, + {Line: 601, Value: "bytes.Equal($x, bytes.ToUpper($y))"}, }, ReportTemplate: "consider replacing with bytes.EqualFold($x, $y)", SuggestTemplate: "bytes.EqualFold($x, $y)", WhereExpr: ir.FilterExpr{ - Line: 597, + Line: 602, Op: ir.FilterAndOp, Src: "m[\"x\"].Pure && m[\"y\"].Pure && m[\"x\"].Text != m[\"y\"].Text", Args: []ir.FilterExpr{ { - Line: 597, + Line: 602, Op: ir.FilterAndOp, Src: "m[\"x\"].Pure && m[\"y\"].Pure", Args: []ir.FilterExpr{ - {Line: 597, Op: ir.FilterVarPureOp, Src: "m[\"x\"].Pure", Value: "x"}, - {Line: 597, Op: ir.FilterVarPureOp, Src: "m[\"y\"].Pure", Value: "y"}, + {Line: 602, Op: ir.FilterVarPureOp, Src: "m[\"x\"].Pure", Value: "x"}, + {Line: 602, Op: ir.FilterVarPureOp, Src: "m[\"y\"].Pure", Value: "y"}, }, }, { - Line: 597, + Line: 602, Op: ir.FilterNeqOp, Src: "m[\"x\"].Text != m[\"y\"].Text", Args: []ir.FilterExpr{ - {Line: 597, Op: ir.FilterVarTextOp, Src: "m[\"x\"].Text", Value: "x"}, - {Line: 597, Op: ir.FilterVarTextOp, Src: "m[\"y\"].Text", Value: "y"}, + {Line: 602, Op: ir.FilterVarTextOp, Src: "m[\"x\"].Text", Value: "x"}, + {Line: 602, Op: ir.FilterVarTextOp, Src: "m[\"y\"].Text", Value: "y"}, }, }, }, @@ -1860,7 +1972,7 @@ var PrecompiledRules = &ir.File{ }, }, { - Line: 606, + Line: 611, Name: "argOrder", MatcherName: "m", DocTags: []string{"diagnostic"}, @@ -1868,45 +1980,45 @@ var PrecompiledRules = &ir.File{ DocBefore: "strings.HasPrefix(\"#\", userpass)", DocAfter: "strings.HasPrefix(userpass, \"#\")", Rules: []ir.Rule{{ - Line: 607, + Line: 612, SyntaxPatterns: []ir.PatternString{ - {Line: 608, Value: "strings.HasPrefix($lit, $s)"}, - {Line: 609, Value: "bytes.HasPrefix($lit, $s)"}, - {Line: 610, Value: "strings.HasSuffix($lit, $s)"}, - {Line: 611, Value: "bytes.HasSuffix($lit, $s)"}, - {Line: 612, Value: "strings.Contains($lit, $s)"}, - {Line: 613, Value: "bytes.Contains($lit, $s)"}, - {Line: 614, Value: "strings.TrimPrefix($lit, $s)"}, - {Line: 615, Value: "bytes.TrimPrefix($lit, $s)"}, - {Line: 616, Value: "strings.TrimSuffix($lit, $s)"}, - {Line: 617, Value: "bytes.TrimSuffix($lit, $s)"}, - {Line: 618, Value: "strings.Split($lit, $s)"}, - {Line: 619, Value: "bytes.Split($lit, $s)"}, + {Line: 613, Value: "strings.HasPrefix($lit, $s)"}, + {Line: 614, Value: "bytes.HasPrefix($lit, $s)"}, + {Line: 615, Value: "strings.HasSuffix($lit, $s)"}, + {Line: 616, Value: "bytes.HasSuffix($lit, $s)"}, + {Line: 617, Value: "strings.Contains($lit, $s)"}, + {Line: 618, Value: "bytes.Contains($lit, $s)"}, + {Line: 619, Value: "strings.TrimPrefix($lit, $s)"}, + {Line: 620, Value: "bytes.TrimPrefix($lit, $s)"}, + {Line: 621, Value: "strings.TrimSuffix($lit, $s)"}, + {Line: 622, Value: "bytes.TrimSuffix($lit, $s)"}, + {Line: 623, Value: "strings.Split($lit, $s)"}, + {Line: 624, Value: "bytes.Split($lit, $s)"}, }, ReportTemplate: "$lit and $s arguments order looks reversed", WhereExpr: ir.FilterExpr{ - Line: 620, + Line: 625, Op: ir.FilterAndOp, Src: "(m[\"lit\"].Const || m[\"lit\"].ConstSlice) &&\n\t!(m[\"s\"].Const || m[\"s\"].ConstSlice) 
&&\n\t!m[\"lit\"].Node.Is(`Ident`)", Args: []ir.FilterExpr{ { - Line: 620, + Line: 625, Op: ir.FilterAndOp, Src: "(m[\"lit\"].Const || m[\"lit\"].ConstSlice) &&\n\t!(m[\"s\"].Const || m[\"s\"].ConstSlice)", Args: []ir.FilterExpr{ { - Line: 620, + Line: 625, Op: ir.FilterOrOp, Src: "(m[\"lit\"].Const || m[\"lit\"].ConstSlice)", Args: []ir.FilterExpr{ { - Line: 620, + Line: 625, Op: ir.FilterVarConstOp, Src: "m[\"lit\"].Const", Value: "lit", }, { - Line: 620, + Line: 625, Op: ir.FilterVarConstSliceOp, Src: "m[\"lit\"].ConstSlice", Value: "lit", @@ -1914,22 +2026,22 @@ var PrecompiledRules = &ir.File{ }, }, { - Line: 621, + Line: 626, Op: ir.FilterNotOp, Src: "!(m[\"s\"].Const || m[\"s\"].ConstSlice)", Args: []ir.FilterExpr{{ - Line: 621, + Line: 626, Op: ir.FilterOrOp, Src: "(m[\"s\"].Const || m[\"s\"].ConstSlice)", Args: []ir.FilterExpr{ { - Line: 621, + Line: 626, Op: ir.FilterVarConstOp, Src: "m[\"s\"].Const", Value: "s", }, { - Line: 621, + Line: 626, Op: ir.FilterVarConstSliceOp, Src: "m[\"s\"].ConstSlice", Value: "s", @@ -1940,15 +2052,15 @@ var PrecompiledRules = &ir.File{ }, }, { - Line: 622, + Line: 627, Op: ir.FilterNotOp, Src: "!m[\"lit\"].Node.Is(`Ident`)", Args: []ir.FilterExpr{{ - Line: 622, + Line: 627, Op: ir.FilterVarNodeIsOp, Src: "m[\"lit\"].Node.Is(`Ident`)", Value: "lit", - Args: []ir.FilterExpr{{Line: 622, Op: ir.FilterStringOp, Src: "`Ident`", Value: "Ident"}}, + Args: []ir.FilterExpr{{Line: 627, Op: ir.FilterStringOp, Src: "`Ident`", Value: "Ident"}}, }}, }, }, @@ -1956,7 +2068,7 @@ var PrecompiledRules = &ir.File{ }}, }, { - Line: 630, + Line: 635, Name: "stringConcatSimplify", MatcherName: "m", DocTags: []string{"style", "experimental"}, @@ -1965,27 +2077,27 @@ var PrecompiledRules = &ir.File{ DocAfter: "x + \"_\" + y", Rules: []ir.Rule{ { - Line: 631, - SyntaxPatterns: []ir.PatternString{{Line: 631, Value: "strings.Join([]string{$x, $y}, \"\")"}}, + Line: 636, + SyntaxPatterns: []ir.PatternString{{Line: 636, Value: "strings.Join([]string{$x, $y}, \"\")"}}, ReportTemplate: "suggestion: $x + $y", SuggestTemplate: "$x + $y", }, { - Line: 632, - SyntaxPatterns: []ir.PatternString{{Line: 632, Value: "strings.Join([]string{$x, $y, $z}, \"\")"}}, + Line: 637, + SyntaxPatterns: []ir.PatternString{{Line: 637, Value: "strings.Join([]string{$x, $y, $z}, \"\")"}}, ReportTemplate: "suggestion: $x + $y + $z", SuggestTemplate: "$x + $y + $z", }, { - Line: 633, - SyntaxPatterns: []ir.PatternString{{Line: 633, Value: "strings.Join([]string{$x, $y}, $glue)"}}, + Line: 638, + SyntaxPatterns: []ir.PatternString{{Line: 638, Value: "strings.Join([]string{$x, $y}, $glue)"}}, ReportTemplate: "suggestion: $x + $glue + $y", SuggestTemplate: "$x + $glue + $y", }, }, }, { - Line: 640, + Line: 645, Name: "timeExprSimplify", MatcherName: "m", DocTags: []string{"style", "experimental"}, @@ -1994,39 +2106,39 @@ var PrecompiledRules = &ir.File{ DocAfter: "t.UnixMilli()", Rules: []ir.Rule{ { - Line: 645, - SyntaxPatterns: []ir.PatternString{{Line: 645, Value: "$t.Unix() / 1000"}}, + Line: 650, + SyntaxPatterns: []ir.PatternString{{Line: 650, Value: "$t.Unix() / 1000"}}, ReportTemplate: "use $t.UnixMilli() instead of $$", SuggestTemplate: "$t.UnixMilli()", WhereExpr: ir.FilterExpr{ - Line: 646, + Line: 651, Op: ir.FilterAndOp, Src: "m.GoVersion().GreaterEqThan(\"1.17\") && isTime(m[\"t\"])", Args: []ir.FilterExpr{ { - Line: 646, + Line: 651, Op: ir.FilterGoVersionGreaterEqThanOp, Src: "m.GoVersion().GreaterEqThan(\"1.17\")", Value: "1.17", }, { - Line: 646, + Line: 651, Op: ir.FilterOrOp, Src: 
"isTime(m[\"t\"])", Args: []ir.FilterExpr{ { - Line: 646, + Line: 651, Op: ir.FilterVarTypeIsOp, Src: "m[\"t\"].Type.Is(`time.Time`)", Value: "t", - Args: []ir.FilterExpr{{Line: 642, Op: ir.FilterStringOp, Src: "`time.Time`", Value: "time.Time"}}, + Args: []ir.FilterExpr{{Line: 647, Op: ir.FilterStringOp, Src: "`time.Time`", Value: "time.Time"}}, }, { - Line: 646, + Line: 651, Op: ir.FilterVarTypeIsOp, Src: "m[\"t\"].Type.Is(`*time.Time`)", Value: "t", - Args: []ir.FilterExpr{{Line: 642, Op: ir.FilterStringOp, Src: "`*time.Time`", Value: "*time.Time"}}, + Args: []ir.FilterExpr{{Line: 647, Op: ir.FilterStringOp, Src: "`*time.Time`", Value: "*time.Time"}}, }, }, }, @@ -2034,39 +2146,39 @@ var PrecompiledRules = &ir.File{ }, }, { - Line: 650, - SyntaxPatterns: []ir.PatternString{{Line: 650, Value: "$t.UnixNano() * 1000"}}, + Line: 655, + SyntaxPatterns: []ir.PatternString{{Line: 655, Value: "$t.UnixNano() * 1000"}}, ReportTemplate: "use $t.UnixMicro() instead of $$", SuggestTemplate: "$t.UnixMicro()", WhereExpr: ir.FilterExpr{ - Line: 651, + Line: 656, Op: ir.FilterAndOp, Src: "m.GoVersion().GreaterEqThan(\"1.17\") && isTime(m[\"t\"])", Args: []ir.FilterExpr{ { - Line: 651, + Line: 656, Op: ir.FilterGoVersionGreaterEqThanOp, Src: "m.GoVersion().GreaterEqThan(\"1.17\")", Value: "1.17", }, { - Line: 651, + Line: 656, Op: ir.FilterOrOp, Src: "isTime(m[\"t\"])", Args: []ir.FilterExpr{ { - Line: 651, + Line: 656, Op: ir.FilterVarTypeIsOp, Src: "m[\"t\"].Type.Is(`time.Time`)", Value: "t", - Args: []ir.FilterExpr{{Line: 642, Op: ir.FilterStringOp, Src: "`time.Time`", Value: "time.Time"}}, + Args: []ir.FilterExpr{{Line: 647, Op: ir.FilterStringOp, Src: "`time.Time`", Value: "time.Time"}}, }, { - Line: 651, + Line: 656, Op: ir.FilterVarTypeIsOp, Src: "m[\"t\"].Type.Is(`*time.Time`)", Value: "t", - Args: []ir.FilterExpr{{Line: 642, Op: ir.FilterStringOp, Src: "`*time.Time`", Value: "*time.Time"}}, + Args: []ir.FilterExpr{{Line: 647, Op: ir.FilterStringOp, Src: "`*time.Time`", Value: "*time.Time"}}, }, }, }, @@ -2076,7 +2188,7 @@ var PrecompiledRules = &ir.File{ }, }, { - Line: 660, + Line: 665, Name: "timeCmpSimplify", MatcherName: "m", DocTags: []string{"style", "experimental"}, @@ -2085,55 +2197,55 @@ var PrecompiledRules = &ir.File{ DocAfter: "t.After(tt)", Rules: []ir.Rule{ { - Line: 665, - SyntaxPatterns: []ir.PatternString{{Line: 665, Value: "!$t.Before($tt)"}}, + Line: 670, + SyntaxPatterns: []ir.PatternString{{Line: 670, Value: "!$t.Before($tt)"}}, ReportTemplate: "suggestion: $t.After($tt)", SuggestTemplate: "$t.After($tt)", WhereExpr: ir.FilterExpr{ - Line: 666, + Line: 671, Op: ir.FilterOrOp, Src: "isTime(m[\"t\"])", Args: []ir.FilterExpr{ { - Line: 666, + Line: 671, Op: ir.FilterVarTypeIsOp, Src: "m[\"t\"].Type.Is(`time.Time`)", Value: "t", - Args: []ir.FilterExpr{{Line: 662, Op: ir.FilterStringOp, Src: "`time.Time`", Value: "time.Time"}}, + Args: []ir.FilterExpr{{Line: 667, Op: ir.FilterStringOp, Src: "`time.Time`", Value: "time.Time"}}, }, { - Line: 666, + Line: 671, Op: ir.FilterVarTypeIsOp, Src: "m[\"t\"].Type.Is(`*time.Time`)", Value: "t", - Args: []ir.FilterExpr{{Line: 662, Op: ir.FilterStringOp, Src: "`*time.Time`", Value: "*time.Time"}}, + Args: []ir.FilterExpr{{Line: 667, Op: ir.FilterStringOp, Src: "`*time.Time`", Value: "*time.Time"}}, }, }, }, }, { - Line: 669, - SyntaxPatterns: []ir.PatternString{{Line: 669, Value: "!$t.After($tt)"}}, + Line: 674, + SyntaxPatterns: []ir.PatternString{{Line: 674, Value: "!$t.After($tt)"}}, ReportTemplate: "suggestion: $t.Before($tt)", 
SuggestTemplate: "$t.Before($tt)", WhereExpr: ir.FilterExpr{ - Line: 670, + Line: 675, Op: ir.FilterOrOp, Src: "isTime(m[\"t\"])", Args: []ir.FilterExpr{ { - Line: 670, + Line: 675, Op: ir.FilterVarTypeIsOp, Src: "m[\"t\"].Type.Is(`time.Time`)", Value: "t", - Args: []ir.FilterExpr{{Line: 662, Op: ir.FilterStringOp, Src: "`time.Time`", Value: "time.Time"}}, + Args: []ir.FilterExpr{{Line: 667, Op: ir.FilterStringOp, Src: "`time.Time`", Value: "time.Time"}}, }, { - Line: 670, + Line: 675, Op: ir.FilterVarTypeIsOp, Src: "m[\"t\"].Type.Is(`*time.Time`)", Value: "t", - Args: []ir.FilterExpr{{Line: 662, Op: ir.FilterStringOp, Src: "`*time.Time`", Value: "*time.Time"}}, + Args: []ir.FilterExpr{{Line: 667, Op: ir.FilterStringOp, Src: "`*time.Time`", Value: "*time.Time"}}, }, }, }, @@ -2141,7 +2253,7 @@ var PrecompiledRules = &ir.File{ }, }, { - Line: 678, + Line: 683, Name: "exposedSyncMutex", MatcherName: "m", DocTags: []string{"style", "experimental"}, @@ -2150,57 +2262,57 @@ var PrecompiledRules = &ir.File{ DocAfter: "type Foo struct{ ...; mu sync.Mutex; ... }", Rules: []ir.Rule{ { - Line: 683, - SyntaxPatterns: []ir.PatternString{{Line: 683, Value: "type $x struct { $*_; sync.Mutex; $*_ }"}}, + Line: 688, + SyntaxPatterns: []ir.PatternString{{Line: 688, Value: "type $x struct { $*_; sync.Mutex; $*_ }"}}, ReportTemplate: "don't embed sync.Mutex", WhereExpr: ir.FilterExpr{ - Line: 684, + Line: 689, Op: ir.FilterVarTextMatchesOp, Src: "isExported(m[\"x\"])", Value: "x", - Args: []ir.FilterExpr{{Line: 680, Op: ir.FilterStringOp, Src: "`^\\p{Lu}`", Value: "^\\p{Lu}"}}, + Args: []ir.FilterExpr{{Line: 685, Op: ir.FilterStringOp, Src: "`^\\p{Lu}`", Value: "^\\p{Lu}"}}, }, }, { - Line: 687, - SyntaxPatterns: []ir.PatternString{{Line: 687, Value: "type $x struct { $*_; *sync.Mutex; $*_ }"}}, + Line: 692, + SyntaxPatterns: []ir.PatternString{{Line: 692, Value: "type $x struct { $*_; *sync.Mutex; $*_ }"}}, ReportTemplate: "don't embed *sync.Mutex", WhereExpr: ir.FilterExpr{ - Line: 688, + Line: 693, Op: ir.FilterVarTextMatchesOp, Src: "isExported(m[\"x\"])", Value: "x", - Args: []ir.FilterExpr{{Line: 680, Op: ir.FilterStringOp, Src: "`^\\p{Lu}`", Value: "^\\p{Lu}"}}, + Args: []ir.FilterExpr{{Line: 685, Op: ir.FilterStringOp, Src: "`^\\p{Lu}`", Value: "^\\p{Lu}"}}, }, }, { - Line: 691, - SyntaxPatterns: []ir.PatternString{{Line: 691, Value: "type $x struct { $*_; sync.RWMutex; $*_ }"}}, + Line: 696, + SyntaxPatterns: []ir.PatternString{{Line: 696, Value: "type $x struct { $*_; sync.RWMutex; $*_ }"}}, ReportTemplate: "don't embed sync.RWMutex", WhereExpr: ir.FilterExpr{ - Line: 692, + Line: 697, Op: ir.FilterVarTextMatchesOp, Src: "isExported(m[\"x\"])", Value: "x", - Args: []ir.FilterExpr{{Line: 680, Op: ir.FilterStringOp, Src: "`^\\p{Lu}`", Value: "^\\p{Lu}"}}, + Args: []ir.FilterExpr{{Line: 685, Op: ir.FilterStringOp, Src: "`^\\p{Lu}`", Value: "^\\p{Lu}"}}, }, }, { - Line: 695, - SyntaxPatterns: []ir.PatternString{{Line: 695, Value: "type $x struct { $*_; *sync.RWMutex; $*_ }"}}, + Line: 700, + SyntaxPatterns: []ir.PatternString{{Line: 700, Value: "type $x struct { $*_; *sync.RWMutex; $*_ }"}}, ReportTemplate: "don't embed *sync.RWMutex", WhereExpr: ir.FilterExpr{ - Line: 696, + Line: 701, Op: ir.FilterVarTextMatchesOp, Src: "isExported(m[\"x\"])", Value: "x", - Args: []ir.FilterExpr{{Line: 680, Op: ir.FilterStringOp, Src: "`^\\p{Lu}`", Value: "^\\p{Lu}"}}, + Args: []ir.FilterExpr{{Line: 685, Op: ir.FilterStringOp, Src: "`^\\p{Lu}`", Value: "^\\p{Lu}"}}, }, }, }, }, { - Line: 704, + Line: 709, Name: 
"badSorting", MatcherName: "m", DocTags: []string{"diagnostic", "experimental"}, @@ -2209,83 +2321,83 @@ var PrecompiledRules = &ir.File{ DocAfter: "sort.Strings(xs)", Rules: []ir.Rule{ { - Line: 705, - SyntaxPatterns: []ir.PatternString{{Line: 705, Value: "$x = sort.IntSlice($x)"}}, + Line: 710, + SyntaxPatterns: []ir.PatternString{{Line: 710, Value: "$x = sort.IntSlice($x)"}}, ReportTemplate: "suspicious sort.IntSlice usage, maybe sort.Ints was intended?", SuggestTemplate: "sort.Ints($x)", WhereExpr: ir.FilterExpr{ - Line: 706, + Line: 711, Op: ir.FilterVarTypeIsOp, Src: "m[\"x\"].Type.Is(`[]int`)", Value: "x", - Args: []ir.FilterExpr{{Line: 706, Op: ir.FilterStringOp, Src: "`[]int`", Value: "[]int"}}, + Args: []ir.FilterExpr{{Line: 711, Op: ir.FilterStringOp, Src: "`[]int`", Value: "[]int"}}, }, }, { - Line: 710, - SyntaxPatterns: []ir.PatternString{{Line: 710, Value: "$x = sort.Float64Slice($x)"}}, + Line: 715, + SyntaxPatterns: []ir.PatternString{{Line: 715, Value: "$x = sort.Float64Slice($x)"}}, ReportTemplate: "suspicious sort.Float64s usage, maybe sort.Float64s was intended?", SuggestTemplate: "sort.Float64s($x)", WhereExpr: ir.FilterExpr{ - Line: 711, + Line: 716, Op: ir.FilterVarTypeIsOp, Src: "m[\"x\"].Type.Is(`[]float64`)", Value: "x", - Args: []ir.FilterExpr{{Line: 711, Op: ir.FilterStringOp, Src: "`[]float64`", Value: "[]float64"}}, + Args: []ir.FilterExpr{{Line: 716, Op: ir.FilterStringOp, Src: "`[]float64`", Value: "[]float64"}}, }, }, { - Line: 715, - SyntaxPatterns: []ir.PatternString{{Line: 715, Value: "$x = sort.StringSlice($x)"}}, + Line: 720, + SyntaxPatterns: []ir.PatternString{{Line: 720, Value: "$x = sort.StringSlice($x)"}}, ReportTemplate: "suspicious sort.StringSlice usage, maybe sort.Strings was intended?", SuggestTemplate: "sort.Strings($x)", WhereExpr: ir.FilterExpr{ - Line: 716, + Line: 721, Op: ir.FilterVarTypeIsOp, Src: "m[\"x\"].Type.Is(`[]string`)", Value: "x", - Args: []ir.FilterExpr{{Line: 716, Op: ir.FilterStringOp, Src: "`[]string`", Value: "[]string"}}, + Args: []ir.FilterExpr{{Line: 721, Op: ir.FilterStringOp, Src: "`[]string`", Value: "[]string"}}, }, }, }, }, { - Line: 725, + Line: 730, Name: "externalErrorReassign", MatcherName: "m", DocTags: []string{"diagnostic", "experimental"}, - DocSummary: "Detects suspicious reassigment of error from another package", + DocSummary: "Detects suspicious reassignment of error from another package", DocBefore: "io.EOF = nil", DocAfter: "/* don't do it */", Rules: []ir.Rule{{ - Line: 726, - SyntaxPatterns: []ir.PatternString{{Line: 726, Value: "$pkg.$err = $x"}}, - ReportTemplate: "suspicious reassigment of error from another package", + Line: 731, + SyntaxPatterns: []ir.PatternString{{Line: 731, Value: "$pkg.$err = $x"}}, + ReportTemplate: "suspicious reassignment of error from another package", WhereExpr: ir.FilterExpr{ - Line: 727, + Line: 732, Op: ir.FilterAndOp, Src: "m[\"err\"].Type.Is(`error`) && m[\"pkg\"].Object.Is(`PkgName`)", Args: []ir.FilterExpr{ { - Line: 727, + Line: 732, Op: ir.FilterVarTypeIsOp, Src: "m[\"err\"].Type.Is(`error`)", Value: "err", - Args: []ir.FilterExpr{{Line: 727, Op: ir.FilterStringOp, Src: "`error`", Value: "error"}}, + Args: []ir.FilterExpr{{Line: 732, Op: ir.FilterStringOp, Src: "`error`", Value: "error"}}, }, { - Line: 727, + Line: 732, Op: ir.FilterVarObjectIsOp, Src: "m[\"pkg\"].Object.Is(`PkgName`)", Value: "pkg", - Args: []ir.FilterExpr{{Line: 727, Op: ir.FilterStringOp, Src: "`PkgName`", Value: "PkgName"}}, + Args: []ir.FilterExpr{{Line: 732, Op: ir.FilterStringOp, 
Src: "`PkgName`", Value: "PkgName"}}, }, }, }, }}, }, { - Line: 735, + Line: 740, Name: "emptyDecl", MatcherName: "m", DocTags: []string{"diagnostic", "experimental"}, @@ -2294,24 +2406,24 @@ var PrecompiledRules = &ir.File{ DocAfter: "/* nothing */", Rules: []ir.Rule{ { - Line: 736, - SyntaxPatterns: []ir.PatternString{{Line: 736, Value: "var()"}}, + Line: 741, + SyntaxPatterns: []ir.PatternString{{Line: 741, Value: "var()"}}, ReportTemplate: "empty var() block", }, { - Line: 737, - SyntaxPatterns: []ir.PatternString{{Line: 737, Value: "const()"}}, + Line: 742, + SyntaxPatterns: []ir.PatternString{{Line: 742, Value: "const()"}}, ReportTemplate: "empty const() block", }, { - Line: 738, - SyntaxPatterns: []ir.PatternString{{Line: 738, Value: "type()"}}, + Line: 743, + SyntaxPatterns: []ir.PatternString{{Line: 743, Value: "type()"}}, ReportTemplate: "empty type() block", }, }, }, { - Line: 745, + Line: 750, Name: "dynamicFmtString", MatcherName: "m", DocTags: []string{"diagnostic", "experimental"}, @@ -2320,16 +2432,16 @@ var PrecompiledRules = &ir.File{ DocAfter: "fmt.Errorf(\"%s\", msg)", Rules: []ir.Rule{ { - Line: 746, - SyntaxPatterns: []ir.PatternString{{Line: 746, Value: "fmt.Errorf($f)"}}, + Line: 751, + SyntaxPatterns: []ir.PatternString{{Line: 751, Value: "fmt.Errorf($f)"}}, ReportTemplate: "use errors.New($f) or fmt.Errorf(\"%s\", $f) instead", SuggestTemplate: "errors.New($f)", WhereExpr: ir.FilterExpr{ - Line: 747, + Line: 752, Op: ir.FilterNotOp, Src: "!m[\"f\"].Const", Args: []ir.FilterExpr{{ - Line: 747, + Line: 752, Op: ir.FilterVarConstOp, Src: "m[\"f\"].Const", Value: "f", @@ -2337,15 +2449,15 @@ var PrecompiledRules = &ir.File{ }, }, { - Line: 751, - SyntaxPatterns: []ir.PatternString{{Line: 751, Value: "fmt.Errorf($f($*args))"}}, + Line: 756, + SyntaxPatterns: []ir.PatternString{{Line: 756, Value: "fmt.Errorf($f($*args))"}}, ReportTemplate: "use errors.New($f($*args)) or fmt.Errorf(\"%s\", $f($*args)) instead", SuggestTemplate: "errors.New($f($*args))", }, }, }, { - Line: 760, + Line: 765, Name: "stringsCompare", MatcherName: "m", DocTags: []string{"style", "experimental"}, @@ -2354,31 +2466,121 @@ var PrecompiledRules = &ir.File{ DocAfter: "x < y", Rules: []ir.Rule{ { - Line: 761, - SyntaxPatterns: []ir.PatternString{{Line: 761, Value: "strings.Compare($s1, $s2) == 0"}}, + Line: 766, + SyntaxPatterns: []ir.PatternString{{Line: 766, Value: "strings.Compare($s1, $s2) == 0"}}, ReportTemplate: "suggestion: $s1 == $s2", SuggestTemplate: "$s1 == $s2", }, { - Line: 764, + Line: 769, SyntaxPatterns: []ir.PatternString{ - {Line: 764, Value: "strings.Compare($s1, $s2) == -1"}, - {Line: 765, Value: "strings.Compare($s1, $s2) < 0"}, + {Line: 769, Value: "strings.Compare($s1, $s2) == -1"}, + {Line: 770, Value: "strings.Compare($s1, $s2) < 0"}, }, ReportTemplate: "suggestion: $s1 < $s2", SuggestTemplate: "$s1 < $s2", }, { - Line: 768, + Line: 773, SyntaxPatterns: []ir.PatternString{ - {Line: 768, Value: "strings.Compare($s1, $s2) == 1"}, - {Line: 769, Value: "strings.Compare($s1, $s2) > 0"}, + {Line: 773, Value: "strings.Compare($s1, $s2) == 1"}, + {Line: 774, Value: "strings.Compare($s1, $s2) > 0"}, }, ReportTemplate: "suggestion: $s1 > $s2", SuggestTemplate: "$s1 > $s2", }, }, }, + { + Line: 782, + Name: "uncheckedInlineErr", + MatcherName: "m", + DocTags: []string{"diagnostic", "experimental"}, + DocSummary: "Detects unchecked errors in if statements", + DocBefore: "if err := expr(); err2 != nil { /*...*/ }", + DocAfter: "if err := expr(); err != nil { /*...*/ }", + Rules: 
[]ir.Rule{{ + Line: 783, + SyntaxPatterns: []ir.PatternString{ + {Line: 784, Value: "if $err := $_($*_); $err2 != nil { $*_ }"}, + {Line: 785, Value: "if $err = $_($*_); $err2 != nil { $*_ }"}, + {Line: 786, Value: "if $*_, $err := $_($*_); $err2 != nil { $*_ }"}, + {Line: 787, Value: "if $*_, $err = $_($*_); $err2 != nil { $*_ }"}, + }, + ReportTemplate: "$err error is unchecked, maybe intended to check it instead of $err2", + WhereExpr: ir.FilterExpr{ + Line: 788, + Op: ir.FilterAndOp, + Src: "m[\"err\"].Type.Implements(\"error\") && m[\"err2\"].Type.Implements(\"error\") &&\n\tm[\"err\"].Text != m[\"err2\"].Text", + Args: []ir.FilterExpr{ + { + Line: 788, + Op: ir.FilterAndOp, + Src: "m[\"err\"].Type.Implements(\"error\") && m[\"err2\"].Type.Implements(\"error\")", + Args: []ir.FilterExpr{ + { + Line: 788, + Op: ir.FilterVarTypeImplementsOp, + Src: "m[\"err\"].Type.Implements(\"error\")", + Value: "err", + Args: []ir.FilterExpr{{Line: 788, Op: ir.FilterStringOp, Src: "\"error\"", Value: "error"}}, + }, + { + Line: 788, + Op: ir.FilterVarTypeImplementsOp, + Src: "m[\"err2\"].Type.Implements(\"error\")", + Value: "err2", + Args: []ir.FilterExpr{{Line: 788, Op: ir.FilterStringOp, Src: "\"error\"", Value: "error"}}, + }, + }, + }, + { + Line: 789, + Op: ir.FilterNeqOp, + Src: "m[\"err\"].Text != m[\"err2\"].Text", + Args: []ir.FilterExpr{ + {Line: 789, Op: ir.FilterVarTextOp, Src: "m[\"err\"].Text", Value: "err"}, + {Line: 789, Op: ir.FilterVarTextOp, Src: "m[\"err2\"].Text", Value: "err2"}, + }, + }, + }, + }, + LocationVar: "err", + }}, + }, + { + Line: 798, + Name: "badSyncOnceFunc", + MatcherName: "m", + DocTags: []string{"diagnostic", "experimental"}, + DocSummary: "Detects bad usage of sync.OnceFunc", + DocBefore: "sync.OnceFunc(foo)()", + DocAfter: "fooOnce := sync.OnceFunc(foo); ...; fooOnce()", + Rules: []ir.Rule{ + { + Line: 799, + SyntaxPatterns: []ir.PatternString{{Line: 799, Value: "$*_; sync.OnceFunc($x); $*_;"}}, + ReportTemplate: "possible sync.OnceFunc misuse, sync.OnceFunc($x) result is not used", + WhereExpr: ir.FilterExpr{ + Line: 801, + Op: ir.FilterGoVersionGreaterEqThanOp, + Src: "m.GoVersion().GreaterEqThan(\"1.21\")", + Value: "1.21", + }, + }, + { + Line: 803, + SyntaxPatterns: []ir.PatternString{{Line: 803, Value: "sync.OnceFunc($x)()"}}, + ReportTemplate: "possible sync.OnceFunc misuse, consider to assign sync.OnceFunc($x) to a variable", + WhereExpr: ir.FilterExpr{ + Line: 805, + Op: ir.FilterGoVersionGreaterEqThanOp, + Src: "m.GoVersion().GreaterEqThan(\"1.21\")", + Value: "1.21", + }, + }, + }, + }, }, } diff --git a/vendor/github.com/go-critic/go-critic/checkers/singleCaseSwitch_checker.go b/vendor/github.com/go-critic/go-critic/checkers/singleCaseSwitch_checker.go index b369a43447..a1a399fdaa 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/singleCaseSwitch_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/singleCaseSwitch_checker.go @@ -5,14 +5,15 @@ import ( "go/token" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" + "golang.org/x/tools/go/ast/astutil" ) func init() { var info linter.CheckerInfo info.Name = "singleCaseSwitch" - info.Tags = []string{"style"} + info.Tags = []string{linter.StyleTag} info.Summary = "Detects switch statements that could be better written as if statement" info.Before = ` switch x := x.(type) { diff --git a/vendor/github.com/go-critic/go-critic/checkers/sloppyReassign_checker.go 
b/vendor/github.com/go-critic/go-critic/checkers/sloppyReassign_checker.go index 2f9ac62e1f..d83d7fd5a1 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/sloppyReassign_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/sloppyReassign_checker.go @@ -5,7 +5,8 @@ import ( "go/token" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" + "github.com/go-toolsmith/astcast" "github.com/go-toolsmith/astcopy" "github.com/go-toolsmith/astequal" @@ -14,7 +15,7 @@ import ( func init() { var info linter.CheckerInfo info.Name = "sloppyReassign" - info.Tags = []string{"diagnostic", "experimental"} + info.Tags = []string{linter.DiagnosticTag, linter.ExperimentalTag} info.Summary = "Detects suspicious/confusing re-assignments" info.Before = `if err = f(); err != nil { return err }` info.After = `if err := f(); err != nil { return err }` diff --git a/vendor/github.com/go-critic/go-critic/checkers/sloppyTypeAssert_checker.go b/vendor/github.com/go-critic/go-critic/checkers/sloppyTypeAssert_checker.go index 554197768e..454ab78b19 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/sloppyTypeAssert_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/sloppyTypeAssert_checker.go @@ -5,14 +5,15 @@ import ( "go/types" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" + "github.com/go-toolsmith/astcast" ) func init() { var info linter.CheckerInfo info.Name = "sloppyTypeAssert" - info.Tags = []string{"diagnostic"} + info.Tags = []string{linter.DiagnosticTag} info.Summary = "Detects redundant type assertions" info.Before = ` func f(r io.Reader) interface{} { diff --git a/vendor/github.com/go-critic/go-critic/checkers/sortSlice_checker.go b/vendor/github.com/go-critic/go-critic/checkers/sortSlice_checker.go index 29550da3fb..22ef3b16a7 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/sortSlice_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/sortSlice_checker.go @@ -6,7 +6,8 @@ import ( "github.com/go-critic/go-critic/checkers/internal/astwalk" "github.com/go-critic/go-critic/checkers/internal/lintutil" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" + "github.com/go-toolsmith/astcast" "github.com/go-toolsmith/astequal" "github.com/go-toolsmith/typep" @@ -16,7 +17,7 @@ import ( func init() { var info linter.CheckerInfo info.Name = "sortSlice" - info.Tags = []string{"diagnostic", "experimental"} + info.Tags = []string{linter.DiagnosticTag, linter.ExperimentalTag} info.Summary = "Detects suspicious sort.Slice calls" info.Before = `sort.Slice(xs, func(i, j) bool { return keys[i] < keys[j] })` info.After = `sort.Slice(kv, func(i, j) bool { return kv[i].key < kv[j].key })` diff --git a/vendor/github.com/go-critic/go-critic/checkers/sqlQuery_checker.go b/vendor/github.com/go-critic/go-critic/checkers/sqlQuery_checker.go index eb3b49d881..8a132b5860 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/sqlQuery_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/sqlQuery_checker.go @@ -5,14 +5,15 @@ import ( "go/types" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" + "github.com/go-toolsmith/astcast" ) func init() { var info linter.CheckerInfo info.Name = "sqlQuery" - info.Tags = 
[]string{"diagnostic", "experimental"} + info.Tags = []string{linter.DiagnosticTag, linter.ExperimentalTag} info.Summary = "Detects issue in Query() and Exec() calls" info.Before = `_, err := db.Query("UPDATE ...")` info.After = `_, err := db.Exec("UPDATE ...")` diff --git a/vendor/github.com/go-critic/go-critic/checkers/todoCommentWithoutDetail_checker.go b/vendor/github.com/go-critic/go-critic/checkers/todoCommentWithoutDetail_checker.go index 5ec2881b4b..f8e4b9b3c0 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/todoCommentWithoutDetail_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/todoCommentWithoutDetail_checker.go @@ -5,13 +5,13 @@ import ( "regexp" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" ) func init() { var info linter.CheckerInfo info.Name = "todoCommentWithoutDetail" - info.Tags = []string{"style", "opinionated", "experimental"} + info.Tags = []string{linter.StyleTag, linter.OpinionatedTag, linter.ExperimentalTag} info.Summary = "Detects TODO comments without detail/assignee" info.Before = ` // TODO diff --git a/vendor/github.com/go-critic/go-critic/checkers/tooManyResults_checker.go b/vendor/github.com/go-critic/go-critic/checkers/tooManyResults_checker.go index 4d4dcc26e7..57411ba249 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/tooManyResults_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/tooManyResults_checker.go @@ -5,13 +5,13 @@ import ( "go/types" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" ) func init() { var info linter.CheckerInfo info.Name = "tooManyResultsChecker" - info.Tags = []string{"style", "opinionated", "experimental"} + info.Tags = []string{linter.StyleTag, linter.OpinionatedTag, linter.ExperimentalTag} info.Params = linter.CheckerParams{ "maxResults": { Value: 5, diff --git a/vendor/github.com/go-critic/go-critic/checkers/truncateCmp_checker.go b/vendor/github.com/go-critic/go-critic/checkers/truncateCmp_checker.go index 9d40c2b631..b369025267 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/truncateCmp_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/truncateCmp_checker.go @@ -6,7 +6,8 @@ import ( "go/types" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" + "github.com/go-toolsmith/astcast" "github.com/go-toolsmith/astp" ) @@ -14,7 +15,7 @@ import ( func init() { var info linter.CheckerInfo info.Name = "truncateCmp" - info.Tags = []string{"diagnostic", "experimental"} + info.Tags = []string{linter.DiagnosticTag, linter.ExperimentalTag} info.Params = linter.CheckerParams{ "skipArchDependent": { Value: true, diff --git a/vendor/github.com/go-critic/go-critic/checkers/typeAssertChain_checker.go b/vendor/github.com/go-critic/go-critic/checkers/typeAssertChain_checker.go index d87657c3b9..e0d20fd4c5 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/typeAssertChain_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/typeAssertChain_checker.go @@ -6,7 +6,8 @@ import ( "github.com/go-critic/go-critic/checkers/internal/astwalk" "github.com/go-critic/go-critic/checkers/internal/lintutil" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" + "github.com/go-toolsmith/astcast" 
"github.com/go-toolsmith/astequal" "github.com/go-toolsmith/astp" @@ -15,7 +16,7 @@ import ( func init() { var info linter.CheckerInfo info.Name = "typeAssertChain" - info.Tags = []string{"style", "experimental"} + info.Tags = []string{linter.StyleTag, linter.ExperimentalTag} info.Summary = "Detects repeated type assertions and suggests to replace them with type switch statement" info.Before = ` if x, ok := v.(T1); ok { diff --git a/vendor/github.com/go-critic/go-critic/checkers/typeDefFirst_checker.go b/vendor/github.com/go-critic/go-critic/checkers/typeDefFirst_checker.go index bc59eef1ca..11381c4014 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/typeDefFirst_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/typeDefFirst_checker.go @@ -5,14 +5,13 @@ import ( "go/token" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" - "golang.org/x/exp/typeparams" + "github.com/go-critic/go-critic/linter" ) func init() { var info linter.CheckerInfo info.Name = "typeDefFirst" - info.Tags = []string{"style", "experimental"} + info.Tags = []string{linter.StyleTag, linter.ExperimentalTag} info.Summary = "Detects method declarations preceding the type definition itself" info.Before = ` func (r rec) Method() {} @@ -81,7 +80,7 @@ func (c *typeDefFirstChecker) receiverType(e ast.Expr) string { return e.Name case *ast.IndexExpr: return c.receiverType(e.X) - case *typeparams.IndexListExpr: + case *ast.IndexListExpr: return c.receiverType(e.X) default: panic("unreachable") diff --git a/vendor/github.com/go-critic/go-critic/checkers/typeSwitchVar_checker.go b/vendor/github.com/go-critic/go-critic/checkers/typeSwitchVar_checker.go index 1e11e49372..4b27b17928 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/typeSwitchVar_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/typeSwitchVar_checker.go @@ -5,7 +5,8 @@ import ( "github.com/go-critic/go-critic/checkers/internal/astwalk" "github.com/go-critic/go-critic/checkers/internal/lintutil" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" + "github.com/go-toolsmith/astequal" "github.com/go-toolsmith/astp" ) @@ -13,7 +14,7 @@ import ( func init() { var info linter.CheckerInfo info.Name = "typeSwitchVar" - info.Tags = []string{"style"} + info.Tags = []string{linter.StyleTag} info.Summary = "Detects type switches that can benefit from type guard clause with variable" info.Before = ` switch v.(type) { diff --git a/vendor/github.com/go-critic/go-critic/checkers/typeUnparen_checker.go b/vendor/github.com/go-critic/go-critic/checkers/typeUnparen_checker.go index cd8e04337a..e2e225ebf2 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/typeUnparen_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/typeUnparen_checker.go @@ -4,7 +4,8 @@ import ( "go/ast" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" + "github.com/go-toolsmith/astcopy" "github.com/go-toolsmith/astequal" ) @@ -12,8 +13,8 @@ import ( func init() { var info linter.CheckerInfo info.Name = "typeUnparen" - info.Tags = []string{"style", "opinionated"} - info.Summary = "Detects unneded parenthesis inside type expressions and suggests to remove them" + info.Tags = []string{linter.StyleTag, linter.OpinionatedTag} + info.Summary = "Detects unneeded parenthesis inside type expressions and suggests to remove them" info.Before = `type foo 
[](func([](func())))` info.After = `type foo []func([]func())` diff --git a/vendor/github.com/go-critic/go-critic/checkers/underef_checker.go b/vendor/github.com/go-critic/go-critic/checkers/underef_checker.go index d0426a9a50..0ce2c89ba7 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/underef_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/underef_checker.go @@ -5,7 +5,8 @@ import ( "go/types" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" + "github.com/go-toolsmith/astcast" "github.com/go-toolsmith/astp" ) @@ -13,7 +14,7 @@ import ( func init() { var info linter.CheckerInfo info.Name = "underef" - info.Tags = []string{"style"} + info.Tags = []string{linter.StyleTag} info.Params = linter.CheckerParams{ "skipRecvDeref": { Value: true, diff --git a/vendor/github.com/go-critic/go-critic/checkers/unlabelStmt_checker.go b/vendor/github.com/go-critic/go-critic/checkers/unlabelStmt_checker.go index bcca24d2a8..d0e83f3c2e 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/unlabelStmt_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/unlabelStmt_checker.go @@ -6,13 +6,13 @@ import ( "github.com/go-critic/go-critic/checkers/internal/astwalk" "github.com/go-critic/go-critic/checkers/internal/lintutil" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" ) func init() { var info linter.CheckerInfo info.Name = "unlabelStmt" - info.Tags = []string{"style", "experimental"} + info.Tags = []string{linter.StyleTag, linter.ExperimentalTag} info.Summary = "Detects redundant statement labels" info.Before = ` derp: diff --git a/vendor/github.com/go-critic/go-critic/checkers/unlambda_checker.go b/vendor/github.com/go-critic/go-critic/checkers/unlambda_checker.go index cce995d7a2..0401bf5d37 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/unlambda_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/unlambda_checker.go @@ -7,7 +7,8 @@ import ( "github.com/go-critic/go-critic/checkers/internal/astwalk" "github.com/go-critic/go-critic/checkers/internal/lintutil" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" + "github.com/go-toolsmith/astcast" "github.com/go-toolsmith/astequal" "github.com/go-toolsmith/typep" @@ -16,7 +17,7 @@ import ( func init() { var info linter.CheckerInfo info.Name = "unlambda" - info.Tags = []string{"style"} + info.Tags = []string{linter.StyleTag} info.Summary = "Detects function literals that can be simplified" info.Before = `func(x int) int { return fn(x) }` info.After = `fn` @@ -90,7 +91,7 @@ func (c *unlambdaChecker) VisitExpr(x ast.Expr) { } } - if len(result.Args) == n { + if c.lenArgs(result.Args) == n { c.warn(fn, callable) } } @@ -98,3 +99,20 @@ func (c *unlambdaChecker) VisitExpr(x ast.Expr) { func (c *unlambdaChecker) warn(cause ast.Node, suggestion string) { c.ctx.Warn(cause, "replace `%s` with `%s`", cause, suggestion) } + +func (c *unlambdaChecker) lenArgs(args []ast.Expr) int { + lenArgs := len(args) + + for _, arg := range args { + callExp, ok := arg.(*ast.CallExpr) + if !ok { + continue + } + + // Don't count function call. only args. 
+ lenArgs-- + lenArgs += c.lenArgs(callExp.Args) + } + + return lenArgs +} diff --git a/vendor/github.com/go-critic/go-critic/checkers/unnamedResult_checker.go b/vendor/github.com/go-critic/go-critic/checkers/unnamedResult_checker.go index 3149d9e87d..0d40addf75 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/unnamedResult_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/unnamedResult_checker.go @@ -5,13 +5,13 @@ import ( "go/types" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" ) func init() { var info linter.CheckerInfo info.Name = "unnamedResult" - info.Tags = []string{"style", "opinionated", "experimental"} + info.Tags = []string{linter.StyleTag, linter.OpinionatedTag, linter.ExperimentalTag} info.Params = linter.CheckerParams{ "checkExported": { Value: false, diff --git a/vendor/github.com/go-critic/go-critic/checkers/unnecessaryBlock_checker.go b/vendor/github.com/go-critic/go-critic/checkers/unnecessaryBlock_checker.go index 6cbdfdfd0b..b577ff4219 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/unnecessaryBlock_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/unnecessaryBlock_checker.go @@ -5,14 +5,15 @@ import ( "go/token" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" + "github.com/go-toolsmith/astp" ) func init() { var info linter.CheckerInfo info.Name = "unnecessaryBlock" - info.Tags = []string{"style", "opinionated", "experimental"} + info.Tags = []string{linter.StyleTag, linter.OpinionatedTag, linter.ExperimentalTag} info.Summary = "Detects unnecessary braced statement blocks" info.Before = ` x := 1 diff --git a/vendor/github.com/go-critic/go-critic/checkers/unnecessaryDefer_checker.go b/vendor/github.com/go-critic/go-critic/checkers/unnecessaryDefer_checker.go index ef72142a10..4c1ed41f6f 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/unnecessaryDefer_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/unnecessaryDefer_checker.go @@ -4,14 +4,15 @@ import ( "go/ast" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" + "github.com/go-toolsmith/astfmt" ) func init() { var info linter.CheckerInfo info.Name = "unnecessaryDefer" - info.Tags = []string{"diagnostic", "experimental"} + info.Tags = []string{linter.DiagnosticTag, linter.ExperimentalTag} info.Summary = "Detects redundantly deferred calls" info.Before = ` func() { @@ -36,7 +37,7 @@ type unnecessaryDeferChecker struct { // Visit implements the ast.Visitor. This visitor keeps track of the block // statement belongs to a function or any other block. If the block is not a // function and ends with a defer statement that should be OK since it's -// defering the outer function. +// deferring the outer function. 
func (c *unnecessaryDeferChecker) Visit(node ast.Node) ast.Visitor { switch n := node.(type) { case *ast.FuncDecl, *ast.FuncLit: diff --git a/vendor/github.com/go-critic/go-critic/checkers/utils.go b/vendor/github.com/go-critic/go-critic/checkers/utils.go index b71f24d749..4757bbf5f3 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/utils.go +++ b/vendor/github.com/go-critic/go-critic/checkers/utils.go @@ -5,7 +5,7 @@ import ( "go/types" "strings" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" ) // goStdlib contains `go list std` command output list. @@ -260,7 +260,7 @@ func isUnitTestFunc(ctx *linter.CheckerContext, fn *ast.FuncDecl) bool { return false } -// qualifiedName returns called expr fully-quallified name. +// qualifiedName returns called expr fully-qualified name. // // It works for simple identifiers like f => "f" and identifiers // from other package like pkg.f => "pkg.f". diff --git a/vendor/github.com/go-critic/go-critic/checkers/weakCond_checker.go b/vendor/github.com/go-critic/go-critic/checkers/weakCond_checker.go index 831857c41a..3d7c9c1225 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/weakCond_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/weakCond_checker.go @@ -6,7 +6,8 @@ import ( "github.com/go-critic/go-critic/checkers/internal/astwalk" "github.com/go-critic/go-critic/checkers/internal/lintutil" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" + "github.com/go-toolsmith/astcast" "github.com/go-toolsmith/astequal" "github.com/go-toolsmith/typep" @@ -16,7 +17,7 @@ import ( func init() { var info linter.CheckerInfo info.Name = "weakCond" - info.Tags = []string{"diagnostic", "experimental"} + info.Tags = []string{linter.DiagnosticTag, linter.ExperimentalTag} info.Summary = "Detects conditions that are unsafe due to not being exhaustive" info.Before = `xs != nil && xs[0] != nil` info.After = `len(xs) != 0 && xs[0] != nil` diff --git a/vendor/github.com/go-critic/go-critic/checkers/whyNoLint_checker.go b/vendor/github.com/go-critic/go-critic/checkers/whyNoLint_checker.go index 6829433ea0..eaa53e5d5b 100644 --- a/vendor/github.com/go-critic/go-critic/checkers/whyNoLint_checker.go +++ b/vendor/github.com/go-critic/go-critic/checkers/whyNoLint_checker.go @@ -6,17 +6,16 @@ import ( "strings" "github.com/go-critic/go-critic/checkers/internal/astwalk" - "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/linter" ) func init() { - info := linter.CheckerInfo{ - Name: "whyNoLint", - Tags: []string{"style", "experimental"}, - Summary: "Ensures that `//nolint` comments include an explanation", - Before: `//nolint`, - After: `//nolint // reason`, - } + var info linter.CheckerInfo + info.Name = "whyNoLint" + info.Tags = []string{linter.StyleTag, linter.ExperimentalTag} + info.Summary = "Ensures that `//nolint` comments include an explanation" + info.Before = `//nolint` + info.After = `//nolint // reason` collection.AddChecker(&info, func(ctx *linter.CheckerContext) (linter.FileWalker, error) { return astwalk.WalkerForComment(&whyNoLintChecker{ diff --git a/vendor/github.com/go-critic/go-critic/framework/linter/context.go b/vendor/github.com/go-critic/go-critic/framework/linter/context.go deleted file mode 100644 index 6e108ab6a5..0000000000 --- a/vendor/github.com/go-critic/go-critic/framework/linter/context.go +++ /dev/null @@ -1,35 +0,0 @@ -package linter - -import ( - "go/ast" - "go/types" - "strconv" -) - -func 
resolvePkgObjects(ctx *Context, f *ast.File) { - ctx.PkgObjects = make(map[*types.PkgName]string, len(f.Imports)) - - for _, spec := range f.Imports { - if spec.Name != nil { - obj := ctx.TypesInfo.ObjectOf(spec.Name) - ctx.PkgObjects[obj.(*types.PkgName)] = spec.Name.Name - } else { - obj := ctx.TypesInfo.Implicits[spec] - ctx.PkgObjects[obj.(*types.PkgName)] = obj.Name() - } - } -} - -func resolvePkgRenames(ctx *Context, f *ast.File) { - ctx.PkgRenames = make(map[string]string) - - for _, spec := range f.Imports { - if spec.Name != nil { - path, err := strconv.Unquote(spec.Path.Value) - if err != nil { - panic(err) - } - ctx.PkgRenames[path] = spec.Name.Name - } - } -} diff --git a/vendor/github.com/go-critic/go-critic/framework/linter/go_version.go b/vendor/github.com/go-critic/go-critic/linter/go_version.go similarity index 91% rename from vendor/github.com/go-critic/go-critic/framework/linter/go_version.go rename to vendor/github.com/go-critic/go-critic/linter/go_version.go index d8091d4535..b5ef2f75ff 100644 --- a/vendor/github.com/go-critic/go-critic/framework/linter/go_version.go +++ b/vendor/github.com/go-critic/go-critic/linter/go_version.go @@ -18,13 +18,14 @@ type GoVersion struct { // // As a special case, Major=0 covers all versions. func (v GoVersion) GreaterOrEqual(other GoVersion) bool { - if v.Major == 0 { + switch { + case v.Major == 0: return true - } - if v.Major == other.Major { + case v.Major == other.Major: return v.Minor >= other.Minor + default: + return v.Major >= other.Major } - return v.Major >= other.Major } func ParseGoVersion(version string) (GoVersion, error) { diff --git a/vendor/github.com/go-critic/go-critic/framework/linter/checkers_db.go b/vendor/github.com/go-critic/go-critic/linter/helpers.go similarity index 100% rename from vendor/github.com/go-critic/go-critic/framework/linter/checkers_db.go rename to vendor/github.com/go-critic/go-critic/linter/helpers.go diff --git a/vendor/github.com/go-critic/go-critic/framework/linter/linter.go b/vendor/github.com/go-critic/go-critic/linter/linter.go similarity index 81% rename from vendor/github.com/go-critic/go-critic/framework/linter/linter.go rename to vendor/github.com/go-critic/go-critic/linter/linter.go index 750ff7cd9f..d4bc17536e 100644 --- a/vendor/github.com/go-critic/go-critic/framework/linter/linter.go +++ b/vendor/github.com/go-critic/go-critic/linter/linter.go @@ -4,11 +4,33 @@ import ( "go/ast" "go/token" "go/types" + "strconv" + "strings" "github.com/go-toolsmith/astfmt" - "golang.org/x/exp/typeparams" ) +const ( + DiagnosticTag = "diagnostic" + ExperimentalTag = "experimental" + OpinionatedTag = "opinionated" + PerformanceTag = "performance" + SecurityTag = "security" + StyleTag = "style" +) + +// UnknownType is a special sentinel value that is returned from the CheckerContext.TypeOf +// method instead of the nil type. +var UnknownType types.Type = types.Typ[types.Invalid] + +// FileWalker is an interface every checker should implement. +// +// The WalkFile method is executed for every Go file inside the +// package that is being checked. +type FileWalker interface { + WalkFile(*ast.File) +} + // CheckerCollection provides additional information for a group of checkers. type CheckerCollection struct { // URL is a link for a main source of information on the collection. @@ -124,6 +146,14 @@ type Checker struct { fileWalker FileWalker } +// NewChecker returns initialized checker identified by an info. +// info must be non-nil. 
+// Returns an error if info describes a checker that was not properly registered, +// or if checker fails to initialize. +func NewChecker(ctx *Context, info *CheckerInfo) (*Checker, error) { + return newChecker(ctx, info) +} + // Check runs rule checker over file f. func (c *Checker) Check(f *ast.File) []Warning { c.ctx.warnings = c.ctx.warnings[:0] @@ -141,9 +171,7 @@ type QuickFix struct { // Warning represents issue that is found by checker. type Warning struct { - // Node is an AST node that caused warning to trigger. - // Can be used to obtain proper error location. - Node ast.Node + Pos token.Pos // Text is warning message without source location info. Text string @@ -162,14 +190,6 @@ func (warn Warning) HasQuickFix() bool { return warn.Suggestion.Replacement != nil } -// NewChecker returns initialized checker identified by an info. -// info must be non-nil. -// Returns an error if info describes a checker that was not properly registered, -// or if checker fails to initialize. -func NewChecker(ctx *Context, info *CheckerInfo) (*Checker, error) { - return newChecker(ctx, info) -} - // Context is a readonly state shared among every checker. type Context struct { // TypesInfo carries parsed packages types information. @@ -279,25 +299,31 @@ type CheckerContext struct { // Warn adds a Warning to checker output. func (ctx *CheckerContext) Warn(node ast.Node, format string, args ...interface{}) { + ctx.WarnWithPos(node.Pos(), format, args...) +} + +// WarnFixable emits a warning with a fix suggestion provided by the caller. +func (ctx *CheckerContext) WarnFixable(node ast.Node, fix QuickFix, format string, args ...interface{}) { + ctx.WarnFixableWithPos(node.Pos(), fix, format, args...) +} + +// WarnWithPos adds a Warning to checker output. Useful for ruleguard's Report func. +func (ctx *CheckerContext) WarnWithPos(pos token.Pos, format string, args ...interface{}) { ctx.warnings = append(ctx.warnings, Warning{ Text: ctx.printer.Sprintf(format, args...), - Node: node, + Pos: pos, }) } -// WarnFixable emits a warning with a fix suggestion provided by the caller. -func (ctx *CheckerContext) WarnFixable(node ast.Node, fix QuickFix, format string, args ...interface{}) { +// WarnFixableWithPos adds a Warning to checker output. Useful for ruleguard's Report func. +func (ctx *CheckerContext) WarnFixableWithPos(pos token.Pos, fix QuickFix, format string, args ...interface{}) { ctx.warnings = append(ctx.warnings, Warning{ Text: ctx.printer.Sprintf(format, args...), - Node: node, + Pos: pos, Suggestion: fix, }) } -// UnknownType is a special sentinel value that is returned from the CheckerContext.TypeOf -// method instead of the nil type. -var UnknownType types.Type = types.Typ[types.Invalid] - // TypeOf returns the type of expression x. // // Unlike TypesInfo.TypeOf, it never returns nil. @@ -319,16 +345,57 @@ func (ctx *CheckerContext) TypeOf(x ast.Expr) types.Type { // // Unlike SizesInfo.SizeOf, it will not panic on generic types. func (ctx *CheckerContext) SizeOf(typ types.Type) (int64, bool) { - if _, ok := typ.(*typeparams.TypeParam); ok { + if _, ok := typ.(*types.TypeParam); ok { + return 0, false + } + if named, ok := typ.(*types.Named); ok && named.TypeParams() != nil { return 0, false } - return ctx.SizesInfo.Sizeof(typ), true + return ctx.safeSizesInfoSizeof(typ) } -// FileWalker is an interface every checker should implement. -// -// The WalkFile method is executed for every Go file inside the -// package that is being checked. 
-type FileWalker interface { - WalkFile(*ast.File) +// safeSizesInfoSizeof unlike SizesInfo.Sizeof will not panic on struct with generic fields. +// it will catch a panic and recover from it, see https://github.com/go-critic/go-critic/issues/1354 +func (ctx *CheckerContext) safeSizesInfoSizeof(typ types.Type) (size int64, ok bool) { + ok = true + defer func() { + if r := recover(); r != nil { + if strings.Contains(r.(string), "assertion failed") { + size, ok = 0, false + } else { + panic(r) + } + } + }() + + size = ctx.SizesInfo.Sizeof(typ) + return size, ok +} + +func resolvePkgObjects(ctx *Context, f *ast.File) { + ctx.PkgObjects = make(map[*types.PkgName]string, len(f.Imports)) + + for _, spec := range f.Imports { + if spec.Name != nil { + obj := ctx.TypesInfo.ObjectOf(spec.Name) + ctx.PkgObjects[obj.(*types.PkgName)] = spec.Name.Name + } else { + obj := ctx.TypesInfo.Implicits[spec] + ctx.PkgObjects[obj.(*types.PkgName)] = obj.Name() + } + } +} + +func resolvePkgRenames(ctx *Context, f *ast.File) { + ctx.PkgRenames = make(map[string]string) + + for _, spec := range f.Imports { + if spec.Name != nil { + path, err := strconv.Unquote(spec.Path.Value) + if err != nil { + panic(err) + } + ctx.PkgRenames[path] = spec.Name.Name + } + } } diff --git a/vendor/github.com/go-toolsmith/astcast/.travis.yml b/vendor/github.com/go-toolsmith/astcast/.travis.yml deleted file mode 100644 index c32ac00627..0000000000 --- a/vendor/github.com/go-toolsmith/astcast/.travis.yml +++ /dev/null @@ -1,9 +0,0 @@ -language: go -go: - - 1.x -install: - - # Prevent default install action "go get -t -v ./...". -script: - - go get -t -v ./... - - go tool vet . - - go test -v -race ./... diff --git a/vendor/github.com/go-toolsmith/astcast/README.md b/vendor/github.com/go-toolsmith/astcast/README.md index b618da4615..19ca0e71d2 100644 --- a/vendor/github.com/go-toolsmith/astcast/README.md +++ b/vendor/github.com/go-toolsmith/astcast/README.md @@ -1,15 +1,19 @@ -[![Go Report Card](https://goreportcard.com/badge/github.com/go-toolsmith/astcast)](https://goreportcard.com/report/github.com/go-toolsmith/astcast) -[![GoDoc](https://godoc.org/github.com/go-toolsmith/astcast?status.svg)](https://godoc.org/github.com/go-toolsmith/astcast) - # astcast -Package astcast wraps type assertion operations in such way that you don't have +[![build-img]][build-url] +[![pkg-img]][pkg-url] +[![reportcard-img]][reportcard-url] +[![version-img]][version-url] + +Package `astcast` wraps type assertion operations in such way that you don't have to worry about nil pointer results anymore. ## Installation +Go version 1.16+ + ```bash -go get -v github.com/go-toolsmith/astcast +go get github.com/go-toolsmith/astcast ``` ## Example @@ -84,3 +88,16 @@ func main() { fmt.Printf("%T %s\n", bar, bar.Name) } ``` + +## License + +[MIT License](LICENSE). 
+ +[build-img]: https://github.com/go-toolsmith/astcast/workflows/build/badge.svg +[build-url]: https://github.com/go-toolsmith/astcast/actions +[pkg-img]: https://pkg.go.dev/badge/go-toolsmith/astcast +[pkg-url]: https://pkg.go.dev/github.com/go-toolsmith/astcast +[reportcard-img]: https://goreportcard.com/badge/go-toolsmith/astcast +[reportcard-url]: https://goreportcard.com/report/go-toolsmith/astcast +[version-img]: https://img.shields.io/github/v/release/go-toolsmith/astcast +[version-url]: https://github.com/go-toolsmith/astcast/releases diff --git a/vendor/github.com/go-toolsmith/astcopy/.travis.yml b/vendor/github.com/go-toolsmith/astcopy/.travis.yml deleted file mode 100644 index 8994d395c6..0000000000 --- a/vendor/github.com/go-toolsmith/astcopy/.travis.yml +++ /dev/null @@ -1,9 +0,0 @@ -language: go -go: - - 1.x -install: - - # Prevent default install action "go get -t -v ./...". -script: - - go get -t -v ./... - - go tool vet . - - go test -v -race ./... \ No newline at end of file diff --git a/vendor/github.com/go-toolsmith/astcopy/README.md b/vendor/github.com/go-toolsmith/astcopy/README.md index 4dae5c41b2..7adc665250 100644 --- a/vendor/github.com/go-toolsmith/astcopy/README.md +++ b/vendor/github.com/go-toolsmith/astcopy/README.md @@ -1,13 +1,16 @@ -[![Go Report Card](https://goreportcard.com/badge/github.com/go-toolsmith/astcopy)](https://goreportcard.com/report/github.com/go-toolsmith/astcopy) -[![GoDoc](https://godoc.org/github.com/go-toolsmith/astcopy?status.svg)](https://godoc.org/github.com/go-toolsmith/astcopy) -[![Build Status](https://travis-ci.org/go-toolsmith/astcopy.svg?branch=master)](https://travis-ci.org/go-toolsmith/astcopy) - # astcopy -Package astcopy implements Go AST reflection-free deep copy operations. +[![build-img]][build-url] +[![pkg-img]][pkg-url] +[![reportcard-img]][reportcard-url] +[![version-img]][version-url] + +Package `astcopy` implements Go AST reflection-free deep copy operations. ## Installation: +Go version 1.16+ + ```bash go get github.com/go-toolsmith/astcopy ``` @@ -39,3 +42,16 @@ func main() { fmt.Println(astequal.Expr(x, y)) // => false } ``` + +## License + +[MIT License](LICENSE). + +[build-img]: https://github.com/go-toolsmith/astp/workflows/build/badge.svg +[build-url]: https://github.com/go-toolsmith/astp/actions +[pkg-img]: https://pkg.go.dev/badge/go-toolsmith/astp +[pkg-url]: https://pkg.go.dev/github.com/go-toolsmith/astp +[reportcard-img]: https://goreportcard.com/badge/go-toolsmith/astp +[reportcard-url]: https://goreportcard.com/report/go-toolsmith/astp +[version-img]: https://img.shields.io/github/v/release/go-toolsmith/astp +[version-url]: https://github.com/go-toolsmith/astp/releases diff --git a/vendor/github.com/go-toolsmith/astcopy/astcopy.go b/vendor/github.com/go-toolsmith/astcopy/astcopy.go index 91e1f31096..72bc58ce6d 100644 --- a/vendor/github.com/go-toolsmith/astcopy/astcopy.go +++ b/vendor/github.com/go-toolsmith/astcopy/astcopy.go @@ -346,21 +346,6 @@ func FieldList(x *ast.FieldList) *ast.FieldList { return &cp } -// FuncType returns x deep copy. -// Copy of nil argument is nil. -func FuncType(x *ast.FuncType) *ast.FuncType { - if x == nil { - return nil - } - cp := *x - cp.Params = FieldList(x.Params) - cp.Results = FieldList(x.Results) - if typeParams := typeparams.ForFuncType(x); typeParams != nil { - *typeparams.ForFuncType(&cp) = *FieldList(typeParams) - } - return &cp -} - // InterfaceType returns x deep copy. // Copy of nil argument is nil. 
func InterfaceType(x *ast.InterfaceType) *ast.InterfaceType { @@ -435,23 +420,6 @@ func ValueSpec(x *ast.ValueSpec) *ast.ValueSpec { return &cp } -// TypeSpec returns x deep copy. -// Copy of nil argument is nil. -func TypeSpec(x *ast.TypeSpec) *ast.TypeSpec { - if x == nil { - return nil - } - cp := *x - cp.Name = Ident(x.Name) - cp.Type = copyExpr(x.Type) - cp.Doc = CommentGroup(x.Doc) - cp.Comment = CommentGroup(x.Comment) - if typeParams := typeparams.ForTypeSpec(x); typeParams != nil { - *typeparams.ForTypeSpec(&cp) = *FieldList(typeParams) - } - return &cp -} - // Spec returns x deep copy. // Copy of nil argument is nil. func Spec(x ast.Spec) ast.Spec { diff --git a/vendor/github.com/go-toolsmith/astcopy/astcopy_go117.go b/vendor/github.com/go-toolsmith/astcopy/astcopy_go117.go new file mode 100644 index 0000000000..1b748bae50 --- /dev/null +++ b/vendor/github.com/go-toolsmith/astcopy/astcopy_go117.go @@ -0,0 +1,30 @@ +//go:build !go1.18 +// +build !go1.18 + +package astcopy + +// FuncType returns x deep copy. +// Copy of nil argument is nil. +func FuncType(x *ast.FuncType) *ast.FuncType { + if x == nil { + return nil + } + cp := *x + cp.Params = FieldList(x.Params) + cp.Results = FieldList(x.Results) + return &cp +} + +// TypeSpec returns x deep copy. +// Copy of nil argument is nil. +func TypeSpec(x *ast.TypeSpec) *ast.TypeSpec { + if x == nil { + return nil + } + cp := *x + cp.Name = Ident(x.Name) + cp.Type = copyExpr(x.Type) + cp.Doc = CommentGroup(x.Doc) + cp.Comment = CommentGroup(x.Comment) + return &cp +} diff --git a/vendor/github.com/go-toolsmith/astcopy/astcopy_go118.go b/vendor/github.com/go-toolsmith/astcopy/astcopy_go118.go new file mode 100644 index 0000000000..72f800acc1 --- /dev/null +++ b/vendor/github.com/go-toolsmith/astcopy/astcopy_go118.go @@ -0,0 +1,36 @@ +//go:build go1.18 +// +build go1.18 + +package astcopy + +import ( + "go/ast" +) + +// FuncType returns x deep copy. +// Copy of nil argument is nil. +func FuncType(x *ast.FuncType) *ast.FuncType { + if x == nil { + return nil + } + cp := *x + cp.Params = FieldList(x.Params) + cp.Results = FieldList(x.Results) + cp.TypeParams = FieldList(x.TypeParams) + return &cp +} + +// TypeSpec returns x deep copy. +// Copy of nil argument is nil. +func TypeSpec(x *ast.TypeSpec) *ast.TypeSpec { + if x == nil { + return nil + } + cp := *x + cp.Name = Ident(x.Name) + cp.Type = copyExpr(x.Type) + cp.Doc = CommentGroup(x.Doc) + cp.Comment = CommentGroup(x.Comment) + cp.TypeParams = FieldList(x.TypeParams) + return &cp +} diff --git a/vendor/github.com/go-toolsmith/astequal/.travis.yml b/vendor/github.com/go-toolsmith/astequal/.travis.yml deleted file mode 100644 index 8994d395c6..0000000000 --- a/vendor/github.com/go-toolsmith/astequal/.travis.yml +++ /dev/null @@ -1,9 +0,0 @@ -language: go -go: - - 1.x -install: - - # Prevent default install action "go get -t -v ./...". -script: - - go get -t -v ./... - - go tool vet . - - go test -v -race ./... 
\ No newline at end of file diff --git a/vendor/github.com/go-toolsmith/astequal/README.md b/vendor/github.com/go-toolsmith/astequal/README.md index b14f80f6f7..db5e3a8c97 100644 --- a/vendor/github.com/go-toolsmith/astequal/README.md +++ b/vendor/github.com/go-toolsmith/astequal/README.md @@ -1,14 +1,16 @@ -[![Go Report Card](https://goreportcard.com/badge/github.com/go-toolsmith/astequal)](https://goreportcard.com/report/github.com/go-toolsmith/astequal) -[![GoDoc](https://godoc.org/github.com/go-toolsmith/astequal?status.svg)](https://godoc.org/github.com/go-toolsmith/astequal) -[![Build Status](https://travis-ci.org/go-toolsmith/astequal.svg?branch=master)](https://travis-ci.org/go-toolsmith/astequal) - - # astequal -Package astequal provides AST (deep) equallity check operations. +[![build-img]][build-url] +[![pkg-img]][pkg-url] +[![reportcard-img]][reportcard-url] +[![version-img]][version-url] + +Package `astequal` provides AST (deep) equallity check operations. ## Installation: +Go version 1.16+ + ```bash go get github.com/go-toolsmith/astequal ``` @@ -65,3 +67,16 @@ BenchmarkEqualExpr/astequal.Expr-8 5000000 298 ns/op 0 B/op 0 BenchmarkEqualExpr/astequal.Node-8 3000000 409 ns/op 0 B/op 0 allocs/op BenchmarkEqualExpr/reflect.DeepEqual-8 50000 38898 ns/op 10185 B/op 156 allocs/op ``` + +## License + +[MIT License](LICENSE). + +[build-img]: https://github.com/go-toolsmith/astequal/workflows/build/badge.svg +[build-url]: https://github.com/go-toolsmith/astequal/actions +[pkg-img]: https://pkg.go.dev/badge/go-toolsmith/astequal +[pkg-url]: https://pkg.go.dev/github.com/go-toolsmith/astequal +[reportcard-img]: https://goreportcard.com/badge/go-toolsmith/astequal +[reportcard-url]: https://goreportcard.com/report/go-toolsmith/astequal +[version-img]: https://img.shields.io/github/v/release/go-toolsmith/astequal +[version-url]: https://github.com/go-toolsmith/astequal/releases diff --git a/vendor/github.com/go-toolsmith/astfmt/.travis.yml b/vendor/github.com/go-toolsmith/astfmt/.travis.yml deleted file mode 100644 index c32ac00627..0000000000 --- a/vendor/github.com/go-toolsmith/astfmt/.travis.yml +++ /dev/null @@ -1,9 +0,0 @@ -language: go -go: - - 1.x -install: - - # Prevent default install action "go get -t -v ./...". -script: - - go get -t -v ./... - - go tool vet . - - go test -v -race ./... diff --git a/vendor/github.com/go-toolsmith/astfmt/README.md b/vendor/github.com/go-toolsmith/astfmt/README.md index 954c92bf46..00f790fd2f 100644 --- a/vendor/github.com/go-toolsmith/astfmt/README.md +++ b/vendor/github.com/go-toolsmith/astfmt/README.md @@ -1,13 +1,16 @@ -[![Go Report Card](https://goreportcard.com/badge/github.com/go-toolsmith/strparse)](https://goreportcard.com/report/github.com/go-toolsmith/strparse) -[![GoDoc](https://godoc.org/github.com/go-toolsmith/strparse?status.svg)](https://godoc.org/github.com/go-toolsmith/strparse) - - # astfmt -Package astfmt implements ast.Node formatting with fmt-like API. +[![build-img]][build-url] +[![pkg-img]][pkg-url] +[![reportcard-img]][reportcard-url] +[![version-img]][version-url] + +Package `astfmt` implements ast.Node formatting with fmt-like API. ## Installation +Go version 1.16+ + ```bash go get github.com/go-toolsmith/astfmt ``` @@ -37,3 +40,16 @@ func Example() { pp.Println(x) // => foo(bar(baz(1 + 2))) } ``` + +## License + +[MIT License](LICENSE). 
+ +[build-img]: https://github.com/go-toolsmith/astfmt/workflows/build/badge.svg +[build-url]: https://github.com/go-toolsmith/astfmt/actions +[pkg-img]: https://pkg.go.dev/badge/go-toolsmith/astfmt +[pkg-url]: https://pkg.go.dev/github.com/go-toolsmith/astfmt +[reportcard-img]: https://goreportcard.com/badge/go-toolsmith/astfmt +[reportcard-url]: https://goreportcard.com/report/go-toolsmith/astfmt +[version-img]: https://img.shields.io/github/v/release/go-toolsmith/astfmt +[version-url]: https://github.com/go-toolsmith/astfmt/releases diff --git a/vendor/github.com/go-toolsmith/astp/.gitignore b/vendor/github.com/go-toolsmith/astp/.gitignore deleted file mode 100644 index 1f6187ecd6..0000000000 --- a/vendor/github.com/go-toolsmith/astp/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -bin -pkg -src/main -tmp \ No newline at end of file diff --git a/vendor/github.com/go-toolsmith/astp/.travis.yml b/vendor/github.com/go-toolsmith/astp/.travis.yml deleted file mode 100644 index 8994d395c6..0000000000 --- a/vendor/github.com/go-toolsmith/astp/.travis.yml +++ /dev/null @@ -1,9 +0,0 @@ -language: go -go: - - 1.x -install: - - # Prevent default install action "go get -t -v ./...". -script: - - go get -t -v ./... - - go tool vet . - - go test -v -race ./... \ No newline at end of file diff --git a/vendor/github.com/go-toolsmith/astp/README.md b/vendor/github.com/go-toolsmith/astp/README.md index 7313c6ab8e..cf5197e811 100644 --- a/vendor/github.com/go-toolsmith/astp/README.md +++ b/vendor/github.com/go-toolsmith/astp/README.md @@ -1,14 +1,16 @@ -[![Go Report Card](https://goreportcard.com/badge/github.com/go-toolsmith/astp)](https://goreportcard.com/report/github.com/go-toolsmith/astp) -[![GoDoc](https://godoc.org/github.com/go-toolsmith/astp?status.svg)](https://godoc.org/github.com/go-toolsmith/astp) -[![Build Status](https://travis-ci.org/go-toolsmith/astp.svg?branch=master)](https://travis-ci.org/go-toolsmith/astp) - - # astp -Package astp provides AST predicates. +[![build-img]][build-url] +[![pkg-img]][pkg-url] +[![reportcard-img]][reportcard-url] +[![version-img]][version-url] + +Package `astp` provides AST predicates. ## Installation: +Go version 1.16+ + ```bash go get github.com/go-toolsmith/astp ``` @@ -37,3 +39,16 @@ func main() { } } ``` + +## License + +[MIT License](LICENSE). + +[build-img]: https://github.com/go-toolsmith/astp/workflows/build/badge.svg +[build-url]: https://github.com/go-toolsmith/astp/actions +[pkg-img]: https://pkg.go.dev/badge/go-toolsmith/astp +[pkg-url]: https://pkg.go.dev/github.com/go-toolsmith/astp +[reportcard-img]: https://goreportcard.com/badge/go-toolsmith/astp +[reportcard-url]: https://goreportcard.com/report/go-toolsmith/astp +[version-img]: https://img.shields.io/github/v/release/go-toolsmith/astp +[version-url]: https://github.com/go-toolsmith/astp/releases diff --git a/vendor/github.com/go-toolsmith/strparse/.travis.yml b/vendor/github.com/go-toolsmith/strparse/.travis.yml deleted file mode 100644 index 8994d395c6..0000000000 --- a/vendor/github.com/go-toolsmith/strparse/.travis.yml +++ /dev/null @@ -1,9 +0,0 @@ -language: go -go: - - 1.x -install: - - # Prevent default install action "go get -t -v ./...". -script: - - go get -t -v ./... - - go tool vet . - - go test -v -race ./... 
\ No newline at end of file diff --git a/vendor/github.com/go-toolsmith/strparse/README.md b/vendor/github.com/go-toolsmith/strparse/README.md index ae80a5398a..ac04d516fe 100644 --- a/vendor/github.com/go-toolsmith/strparse/README.md +++ b/vendor/github.com/go-toolsmith/strparse/README.md @@ -1,15 +1,17 @@ -[![Go Report Card](https://goreportcard.com/badge/github.com/go-toolsmith/strparse)](https://goreportcard.com/report/github.com/go-toolsmith/strparse) -[![GoDoc](https://godoc.org/github.com/go-toolsmith/strparse?status.svg)](https://godoc.org/github.com/go-toolsmith/strparse) -[![Build Status](https://travis-ci.org/go-toolsmith/strparse.svg?branch=master)](https://travis-ci.org/go-toolsmith/strparse) - - # strparse -Package strparse provides convenience wrappers around `go/parser` for simple +[![build-img]][build-url] +[![pkg-img]][pkg-url] +[![reportcard-img]][reportcard-url] +[![version-img]][version-url] + +Package `strparse` provides convenience wrappers around `go/parser` for simple expression, statement and declaretion parsing from string. ## Installation +Go version 1.16+ + ```bash go get github.com/go-toolsmith/strparse ``` @@ -20,8 +22,8 @@ go get github.com/go-toolsmith/strparse package main import ( - "go-toolsmith/astequal" - "go-toolsmith/strparse" + "github.com/go-toolsmith/astequal" + "github.com/go-toolsmith/strparse" ) func main() { @@ -30,5 +32,17 @@ func main() { y := strparse.Expr(` 1+f( v[0].X ) `) fmt.Println(astequal.Expr(x, y)) // => true } - ``` + +## License + +[MIT License](LICENSE). + +[build-img]: https://github.com/go-toolsmith/strparse/workflows/build/badge.svg +[build-url]: https://github.com/go-toolsmith/strparse/actions +[pkg-img]: https://pkg.go.dev/badge/go-toolsmith/strparse +[pkg-url]: https://pkg.go.dev/github.com/go-toolsmith/strparse +[reportcard-img]: https://goreportcard.com/badge/go-toolsmith/strparse +[reportcard-url]: https://goreportcard.com/report/go-toolsmith/strparse +[version-img]: https://img.shields.io/github/v/release/go-toolsmith/strparse +[version-url]: https://github.com/go-toolsmith/strparse/releases diff --git a/vendor/github.com/go-toolsmith/typep/.travis.yml b/vendor/github.com/go-toolsmith/typep/.travis.yml deleted file mode 100644 index d3ff3cca8b..0000000000 --- a/vendor/github.com/go-toolsmith/typep/.travis.yml +++ /dev/null @@ -1,9 +0,0 @@ -language: go -go: - - 1.x -install: - - # Prevent default install action "go get -t -v ./...". -script: - - go get -t -v ./... - - go vet ./... - - go test -v -race ./... diff --git a/vendor/github.com/go-toolsmith/typep/README.md b/vendor/github.com/go-toolsmith/typep/README.md index f7979148fa..77478c4434 100644 --- a/vendor/github.com/go-toolsmith/typep/README.md +++ b/vendor/github.com/go-toolsmith/typep/README.md @@ -1,15 +1,18 @@ -[![Go Report Card](https://goreportcard.com/badge/github.com/go-toolsmith/typep)](https://goreportcard.com/report/github.com/go-toolsmith/typep) -[![GoDoc](https://godoc.org/github.com/go-toolsmith/typep?status.svg)](https://godoc.org/github.com/go-toolsmith/typep) -[![Build Status](https://travis-ci.org/go-toolsmith/typep.svg?branch=master)](https://travis-ci.org/go-toolsmith/typep) - # typep -Package typep provides type predicates. +[![build-img]][build-url] +[![pkg-img]][pkg-url] +[![reportcard-img]][reportcard-url] +[![version-img]][version-url] + +Package `typep` provides type predicates. 
## Installation: +Go version 1.16+ + ```bash -go get -v github.com/go-toolsmith/typep +go get github.com/go-toolsmith/typep ``` ## Example @@ -29,9 +32,23 @@ func main() { intTyp := types.Typ[types.Int] ptr := types.NewPointer(intTyp) arr := types.NewArray(intTyp, 64) + fmt.Println(typep.HasFloatProp(floatTyp)) // => true fmt.Println(typep.HasFloatProp(intTyp)) // => false fmt.Println(typep.IsPointer(ptr)) // => true fmt.Println(typep.IsArray(arr)) // => true } ``` + +## License + +[MIT License](LICENSE). + +[build-img]: https://github.com/go-toolsmith/typep/workflows/build/badge.svg +[build-url]: https://github.com/go-toolsmith/typep/actions +[pkg-img]: https://pkg.go.dev/badge/go-toolsmith/typep +[pkg-url]: https://pkg.go.dev/github.com/go-toolsmith/typep +[reportcard-img]: https://goreportcard.com/badge/go-toolsmith/typep +[reportcard-url]: https://goreportcard.com/report/go-toolsmith/typep +[version-img]: https://img.shields.io/github/v/release/go-toolsmith/typep +[version-url]: https://github.com/go-toolsmith/typep/releases diff --git a/vendor/github.com/go-toolsmith/typep/safeExpr.go b/vendor/github.com/go-toolsmith/typep/safe_expr.go similarity index 100% rename from vendor/github.com/go-toolsmith/typep/safeExpr.go rename to vendor/github.com/go-toolsmith/typep/safe_expr.go diff --git a/vendor/github.com/go-toolsmith/typep/simplePredicates.go b/vendor/github.com/go-toolsmith/typep/simple_predicates.go similarity index 99% rename from vendor/github.com/go-toolsmith/typep/simplePredicates.go rename to vendor/github.com/go-toolsmith/typep/simple_predicates.go index 3bc9c29c8f..61e7d5b7f7 100644 --- a/vendor/github.com/go-toolsmith/typep/simplePredicates.go +++ b/vendor/github.com/go-toolsmith/typep/simple_predicates.go @@ -1,4 +1,4 @@ -// Code generated by simplePredicates_generate.go; DO NOT EDIT +// Code generated by simple_predicates_generate.go; DO NOT EDIT package typep diff --git a/vendor/github.com/go-viper/mapstructure/v2/.editorconfig b/vendor/github.com/go-viper/mapstructure/v2/.editorconfig new file mode 100644 index 0000000000..1f664d13a5 --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/.editorconfig @@ -0,0 +1,18 @@ +root = true + +[*] +charset = utf-8 +end_of_line = lf +indent_size = 4 +indent_style = space +insert_final_newline = true +trim_trailing_whitespace = true + +[*.go] +indent_style = tab + +[{Makefile,*.mk}] +indent_style = tab + +[*.nix] +indent_size = 2 diff --git a/vendor/github.com/go-viper/mapstructure/v2/.envrc b/vendor/github.com/go-viper/mapstructure/v2/.envrc new file mode 100644 index 0000000000..c66fc0d354 --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/.envrc @@ -0,0 +1,4 @@ +if ! has nix_direnv_version || ! nix_direnv_version 3.0.1; then + source_url "https://raw.githubusercontent.com/nix-community/nix-direnv/3.0.1/direnvrc" "sha256-17G+Mvt/JsyJrwsf7bqMr7ho7liHP+0Lo4RMIHgp0F8=" +fi +use flake . 
--impure diff --git a/vendor/github.com/go-viper/mapstructure/v2/.gitignore b/vendor/github.com/go-viper/mapstructure/v2/.gitignore new file mode 100644 index 0000000000..470e7ca2bd --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/.gitignore @@ -0,0 +1,6 @@ +/.devenv/ +/.direnv/ +/.pre-commit-config.yaml +/bin/ +/build/ +/var/ diff --git a/vendor/github.com/go-viper/mapstructure/v2/.golangci.yaml b/vendor/github.com/go-viper/mapstructure/v2/.golangci.yaml new file mode 100644 index 0000000000..763143aa77 --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/.golangci.yaml @@ -0,0 +1,23 @@ +run: + timeout: 5m + +linters-settings: + gci: + sections: + - standard + - default + - prefix(github.com/go-viper/mapstructure) + golint: + min-confidence: 0 + goimports: + local-prefixes: github.com/go-viper/maptstructure + +linters: + disable-all: true + enable: + - gci + - gofmt + - gofumpt + - goimports + - staticcheck + # - stylecheck diff --git a/vendor/github.com/go-viper/mapstructure/v2/CHANGELOG.md b/vendor/github.com/go-viper/mapstructure/v2/CHANGELOG.md new file mode 100644 index 0000000000..ae634d1cc0 --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/CHANGELOG.md @@ -0,0 +1,101 @@ +## 1.5.1 + +* Wrap errors so they're compatible with `errors.Is` and `errors.As` [GH-282] +* Fix map of slices not decoding properly in certain cases. [GH-266] + +## 1.5.0 + +* New option `IgnoreUntaggedFields` to ignore decoding to any fields + without `mapstructure` (or the configured tag name) set [GH-277] +* New option `ErrorUnset` which makes it an error if any fields + in a target struct are not set by the decoding process. [GH-225] +* New function `OrComposeDecodeHookFunc` to help compose decode hooks. [GH-240] +* Decoding to slice from array no longer crashes [GH-265] +* Decode nested struct pointers to map [GH-271] +* Fix issue where `,squash` was ignored if `Squash` option was set. [GH-280] +* Fix issue where fields with `,omitempty` would sometimes decode + into a map with an empty string key [GH-281] + +## 1.4.3 + +* Fix cases where `json.Number` didn't decode properly [GH-261] + +## 1.4.2 + +* Custom name matchers to support any sort of casing, formatting, etc. for + field names. [GH-250] +* Fix possible panic in ComposeDecodeHookFunc [GH-251] + +## 1.4.1 + +* Fix regression where `*time.Time` value would be set to empty and not be sent + to decode hooks properly [GH-232] + +## 1.4.0 + +* A new decode hook type `DecodeHookFuncValue` has been added that has + access to the full values. [GH-183] +* Squash is now supported with embedded fields that are struct pointers [GH-205] +* Empty strings will convert to 0 for all numeric types when weakly decoding [GH-206] + +## 1.3.3 + +* Decoding maps from maps creates a settable value for decode hooks [GH-203] + +## 1.3.2 + +* Decode into interface type with a struct value is supported [GH-187] + +## 1.3.1 + +* Squash should only squash embedded structs. [GH-194] + +## 1.3.0 + +* Added `",omitempty"` support. This will ignore zero values in the source + structure when encoding. [GH-145] + +## 1.2.3 + +* Fix duplicate entries in Keys list with pointer values. [GH-185] + +## 1.2.2 + +* Do not add unsettable (unexported) values to the unused metadata key + or "remain" value. [GH-150] + +## 1.2.1 + +* Go modules checksum mismatch fix + +## 1.2.0 + +* Added support to capture unused values in a field using the `",remain"` value + in the mapstructure tag. There is an example to showcase usage. 
+* Added `DecoderConfig` option to always squash embedded structs +* `json.Number` can decode into `uint` types +* Empty slices are preserved and not replaced with nil slices +* Fix panic that can occur in when decoding a map into a nil slice of structs +* Improved package documentation for godoc + +## 1.1.2 + +* Fix error when decode hook decodes interface implementation into interface + type. [GH-140] + +## 1.1.1 + +* Fix panic that can happen in `decodePtr` + +## 1.1.0 + +* Added `StringToIPHookFunc` to convert `string` to `net.IP` and `net.IPNet` [GH-133] +* Support struct to struct decoding [GH-137] +* If source map value is nil, then destination map value is nil (instead of empty) +* If source slice value is nil, then destination slice value is nil (instead of empty) +* If source pointer is nil, then destination pointer is set to nil (instead of + allocated zero value of type) + +## 1.0.0 + +* Initial tagged stable release. diff --git a/vendor/github.com/go-viper/mapstructure/v2/LICENSE b/vendor/github.com/go-viper/mapstructure/v2/LICENSE new file mode 100644 index 0000000000..f9c841a51e --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/go-viper/mapstructure/v2/README.md b/vendor/github.com/go-viper/mapstructure/v2/README.md new file mode 100644 index 0000000000..2b28db8948 --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/README.md @@ -0,0 +1,59 @@ +# mapstructure + +[![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/go-viper/mapstructure/ci.yaml?branch=main&style=flat-square)](https://github.com/go-viper/mapstructure/actions?query=workflow%3ACI) +[![go.dev reference](https://img.shields.io/badge/go.dev-reference-007d9c?logo=go&logoColor=white&style=flat-square)](https://pkg.go.dev/mod/github.com/go-viper/mapstructure/v2) +![Go Version](https://img.shields.io/badge/go%20version-%3E=1.18-61CFDD.svg?style=flat-square) + +mapstructure is a Go library for decoding generic map values to structures +and vice versa, while providing helpful error handling. + +This library is most useful when decoding values from some data stream (JSON, +Gob, etc.) where you don't _quite_ know the structure of the underlying data +until you read a part of it. You can therefore read a `map[string]interface{}` +and use this library to decode it into the proper underlying native Go +structure. 
+ +## Installation + +```shell +go get github.com/go-viper/mapstructure/v2 +``` + +## Usage & Example + +For usage and examples see the [documentation](https://pkg.go.dev/mod/github.com/go-viper/mapstructure/v2). + +The `Decode` function has examples associated with it there. + +## But Why?! + +Go offers fantastic standard libraries for decoding formats such as JSON. +The standard method is to have a struct pre-created, and populate that struct +from the bytes of the encoded format. This is great, but the problem is if +you have configuration or an encoding that changes slightly depending on +specific fields. For example, consider this JSON: + +```json +{ + "type": "person", + "name": "Mitchell" +} +``` + +Perhaps we can't populate a specific structure without first reading +the "type" field from the JSON. We could always do two passes over the +decoding of the JSON (reading the "type" first, and the rest later). +However, it is much simpler to just decode this into a `map[string]interface{}` +structure, read the "type" key, then use something like this library +to decode it into the proper structure. + +## Credits + +Mapstructure was originally created by [@mitchellh](https://github.com/mitchellh). +This is a maintained fork of the original library. + +Read more about the reasons for the fork [here](https://github.com/mitchellh/mapstructure/issues/349). + +## License + +The project is licensed under the [MIT License](LICENSE). diff --git a/vendor/github.com/go-viper/mapstructure/v2/decode_hooks.go b/vendor/github.com/go-viper/mapstructure/v2/decode_hooks.go new file mode 100644 index 0000000000..840d6adce0 --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/decode_hooks.go @@ -0,0 +1,334 @@ +package mapstructure + +import ( + "encoding" + "errors" + "fmt" + "net" + "net/netip" + "reflect" + "strconv" + "strings" + "time" +) + +// typedDecodeHook takes a raw DecodeHookFunc (an interface{}) and turns +// it into the proper DecodeHookFunc type, such as DecodeHookFuncType. +func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc { + // Create variables here so we can reference them with the reflect pkg + var f1 DecodeHookFuncType + var f2 DecodeHookFuncKind + var f3 DecodeHookFuncValue + + // Fill in the variables into this interface and the rest is done + // automatically using the reflect package. + potential := []interface{}{f1, f2, f3} + + v := reflect.ValueOf(h) + vt := v.Type() + for _, raw := range potential { + pt := reflect.ValueOf(raw).Type() + if vt.ConvertibleTo(pt) { + return v.Convert(pt).Interface() + } + } + + return nil +} + +// DecodeHookExec executes the given decode hook. This should be used +// since it'll naturally degrade to the older backwards compatible DecodeHookFunc +// that took reflect.Kind instead of reflect.Type. +func DecodeHookExec( + raw DecodeHookFunc, + from reflect.Value, to reflect.Value, +) (interface{}, error) { + switch f := typedDecodeHook(raw).(type) { + case DecodeHookFuncType: + return f(from.Type(), to.Type(), from.Interface()) + case DecodeHookFuncKind: + return f(from.Kind(), to.Kind(), from.Interface()) + case DecodeHookFuncValue: + return f(from, to) + default: + return nil, errors.New("invalid decode hook signature") + } +} + +// ComposeDecodeHookFunc creates a single DecodeHookFunc that +// automatically composes multiple DecodeHookFuncs. +// +// The composed funcs are called in order, with the result of the +// previous transformation. 
+func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc { + return func(f reflect.Value, t reflect.Value) (interface{}, error) { + var err error + data := f.Interface() + + newFrom := f + for _, f1 := range fs { + data, err = DecodeHookExec(f1, newFrom, t) + if err != nil { + return nil, err + } + newFrom = reflect.ValueOf(data) + } + + return data, nil + } +} + +// OrComposeDecodeHookFunc executes all input hook functions until one of them returns no error. In that case its value is returned. +// If all hooks return an error, OrComposeDecodeHookFunc returns an error concatenating all error messages. +func OrComposeDecodeHookFunc(ff ...DecodeHookFunc) DecodeHookFunc { + return func(a, b reflect.Value) (interface{}, error) { + var allErrs string + var out interface{} + var err error + + for _, f := range ff { + out, err = DecodeHookExec(f, a, b) + if err != nil { + allErrs += err.Error() + "\n" + continue + } + + return out, nil + } + + return nil, errors.New(allErrs) + } +} + +// StringToSliceHookFunc returns a DecodeHookFunc that converts +// string to []string by splitting on the given sep. +func StringToSliceHookFunc(sep string) DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}, + ) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.SliceOf(f) { + return data, nil + } + + raw := data.(string) + if raw == "" { + return []string{}, nil + } + + return strings.Split(raw, sep), nil + } +} + +// StringToTimeDurationHookFunc returns a DecodeHookFunc that converts +// strings to time.Duration. +func StringToTimeDurationHookFunc() DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}, + ) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(time.Duration(5)) { + return data, nil + } + + // Convert it by parsing + return time.ParseDuration(data.(string)) + } +} + +// StringToIPHookFunc returns a DecodeHookFunc that converts +// strings to net.IP +func StringToIPHookFunc() DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}, + ) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(net.IP{}) { + return data, nil + } + + // Convert it by parsing + ip := net.ParseIP(data.(string)) + if ip == nil { + return net.IP{}, fmt.Errorf("failed parsing ip %v", data) + } + + return ip, nil + } +} + +// StringToIPNetHookFunc returns a DecodeHookFunc that converts +// strings to net.IPNet +func StringToIPNetHookFunc() DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}, + ) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(net.IPNet{}) { + return data, nil + } + + // Convert it by parsing + _, net, err := net.ParseCIDR(data.(string)) + return net, err + } +} + +// StringToTimeHookFunc returns a DecodeHookFunc that converts +// strings to time.Time. +func StringToTimeHookFunc(layout string) DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}, + ) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(time.Time{}) { + return data, nil + } + + // Convert it by parsing + return time.Parse(layout, data.(string)) + } +} + +// WeaklyTypedHook is a DecodeHookFunc which adds support for weak typing to +// the decoder. 
+// +// Note that this is significantly different from the WeaklyTypedInput option +// of the DecoderConfig. +func WeaklyTypedHook( + f reflect.Kind, + t reflect.Kind, + data interface{}, +) (interface{}, error) { + dataVal := reflect.ValueOf(data) + switch t { + case reflect.String: + switch f { + case reflect.Bool: + if dataVal.Bool() { + return "1", nil + } + return "0", nil + case reflect.Float32: + return strconv.FormatFloat(dataVal.Float(), 'f', -1, 64), nil + case reflect.Int: + return strconv.FormatInt(dataVal.Int(), 10), nil + case reflect.Slice: + dataType := dataVal.Type() + elemKind := dataType.Elem().Kind() + if elemKind == reflect.Uint8 { + return string(dataVal.Interface().([]uint8)), nil + } + case reflect.Uint: + return strconv.FormatUint(dataVal.Uint(), 10), nil + } + } + + return data, nil +} + +func RecursiveStructToMapHookFunc() DecodeHookFunc { + return func(f reflect.Value, t reflect.Value) (interface{}, error) { + if f.Kind() != reflect.Struct { + return f.Interface(), nil + } + + var i interface{} = struct{}{} + if t.Type() != reflect.TypeOf(&i).Elem() { + return f.Interface(), nil + } + + m := make(map[string]interface{}) + t.Set(reflect.ValueOf(m)) + + return f.Interface(), nil + } +} + +// TextUnmarshallerHookFunc returns a DecodeHookFunc that applies +// strings to the UnmarshalText function, when the target type +// implements the encoding.TextUnmarshaler interface +func TextUnmarshallerHookFunc() DecodeHookFuncType { + return func( + f reflect.Type, + t reflect.Type, + data interface{}, + ) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + result := reflect.New(t).Interface() + unmarshaller, ok := result.(encoding.TextUnmarshaler) + if !ok { + return data, nil + } + str, ok := data.(string) + if !ok { + str = reflect.Indirect(reflect.ValueOf(&data)).Elem().String() + } + if err := unmarshaller.UnmarshalText([]byte(str)); err != nil { + return nil, err + } + return result, nil + } +} + +// StringToNetIPAddrHookFunc returns a DecodeHookFunc that converts +// strings to netip.Addr. +func StringToNetIPAddrHookFunc() DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}, + ) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(netip.Addr{}) { + return data, nil + } + + // Convert it by parsing + return netip.ParseAddr(data.(string)) + } +} + +// StringToNetIPAddrPortHookFunc returns a DecodeHookFunc that converts +// strings to netip.AddrPort. +func StringToNetIPAddrPortHookFunc() DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}, + ) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(netip.AddrPort{}) { + return data, nil + } + + // Convert it by parsing + return netip.ParseAddrPort(data.(string)) + } +} diff --git a/vendor/github.com/go-viper/mapstructure/v2/error.go b/vendor/github.com/go-viper/mapstructure/v2/error.go new file mode 100644 index 0000000000..47a99e5af3 --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/error.go @@ -0,0 +1,50 @@ +package mapstructure + +import ( + "errors" + "fmt" + "sort" + "strings" +) + +// Error implements the error interface and can represents multiple +// errors that occur in the course of a single decode. 
+type Error struct { + Errors []string +} + +func (e *Error) Error() string { + points := make([]string, len(e.Errors)) + for i, err := range e.Errors { + points[i] = fmt.Sprintf("* %s", err) + } + + sort.Strings(points) + return fmt.Sprintf( + "%d error(s) decoding:\n\n%s", + len(e.Errors), strings.Join(points, "\n")) +} + +// WrappedErrors implements the errwrap.Wrapper interface to make this +// return value more useful with the errwrap and go-multierror libraries. +func (e *Error) WrappedErrors() []error { + if e == nil { + return nil + } + + result := make([]error, len(e.Errors)) + for i, e := range e.Errors { + result[i] = errors.New(e) + } + + return result +} + +func appendErrors(errors []string, err error) []string { + switch e := err.(type) { + case *Error: + return append(errors, e.Errors...) + default: + return append(errors, e.Error()) + } +} diff --git a/vendor/github.com/go-viper/mapstructure/v2/flake.lock b/vendor/github.com/go-viper/mapstructure/v2/flake.lock new file mode 100644 index 0000000000..5a387d3299 --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/flake.lock @@ -0,0 +1,273 @@ +{ + "nodes": { + "devenv": { + "inputs": { + "flake-compat": "flake-compat", + "nix": "nix", + "nixpkgs": "nixpkgs", + "pre-commit-hooks": "pre-commit-hooks" + }, + "locked": { + "lastModified": 1702549996, + "narHash": "sha256-mEN+8gjWUXRxBCcixeth+jlDNuzxbpFwZNOEc4K22vw=", + "owner": "cachix", + "repo": "devenv", + "rev": "e681a99ffe2d2882f413a5d771129223c838ddce", + "type": "github" + }, + "original": { + "owner": "cachix", + "repo": "devenv", + "type": "github" + } + }, + "flake-compat": { + "flake": false, + "locked": { + "lastModified": 1673956053, + "narHash": "sha256-4gtG9iQuiKITOjNQQeQIpoIB6b16fm+504Ch3sNKLd8=", + "owner": "edolstra", + "repo": "flake-compat", + "rev": "35bb57c0c8d8b62bbfd284272c928ceb64ddbde9", + "type": "github" + }, + "original": { + "owner": "edolstra", + "repo": "flake-compat", + "type": "github" + } + }, + "flake-parts": { + "inputs": { + "nixpkgs-lib": "nixpkgs-lib" + }, + "locked": { + "lastModified": 1701473968, + "narHash": "sha256-YcVE5emp1qQ8ieHUnxt1wCZCC3ZfAS+SRRWZ2TMda7E=", + "owner": "hercules-ci", + "repo": "flake-parts", + "rev": "34fed993f1674c8d06d58b37ce1e0fe5eebcb9f5", + "type": "github" + }, + "original": { + "owner": "hercules-ci", + "repo": "flake-parts", + "type": "github" + } + }, + "flake-utils": { + "inputs": { + "systems": "systems" + }, + "locked": { + "lastModified": 1685518550, + "narHash": "sha256-o2d0KcvaXzTrPRIo0kOLV0/QXHhDQ5DTi+OxcjO8xqY=", + "owner": "numtide", + "repo": "flake-utils", + "rev": "a1720a10a6cfe8234c0e93907ffe81be440f4cef", + "type": "github" + }, + "original": { + "owner": "numtide", + "repo": "flake-utils", + "type": "github" + } + }, + "gitignore": { + "inputs": { + "nixpkgs": [ + "devenv", + "pre-commit-hooks", + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1660459072, + "narHash": "sha256-8DFJjXG8zqoONA1vXtgeKXy68KdJL5UaXR8NtVMUbx8=", + "owner": "hercules-ci", + "repo": "gitignore.nix", + "rev": "a20de23b925fd8264fd7fad6454652e142fd7f73", + "type": "github" + }, + "original": { + "owner": "hercules-ci", + "repo": "gitignore.nix", + "type": "github" + } + }, + "lowdown-src": { + "flake": false, + "locked": { + "lastModified": 1633514407, + "narHash": "sha256-Dw32tiMjdK9t3ETl5fzGrutQTzh2rufgZV4A/BbxuD4=", + "owner": "kristapsdz", + "repo": "lowdown", + "rev": "d2c2b44ff6c27b936ec27358a2653caaef8f73b8", + "type": "github" + }, + "original": { + "owner": "kristapsdz", + "repo": "lowdown", + 
"type": "github" + } + }, + "nix": { + "inputs": { + "lowdown-src": "lowdown-src", + "nixpkgs": [ + "devenv", + "nixpkgs" + ], + "nixpkgs-regression": "nixpkgs-regression" + }, + "locked": { + "lastModified": 1676545802, + "narHash": "sha256-EK4rZ+Hd5hsvXnzSzk2ikhStJnD63odF7SzsQ8CuSPU=", + "owner": "domenkozar", + "repo": "nix", + "rev": "7c91803598ffbcfe4a55c44ac6d49b2cf07a527f", + "type": "github" + }, + "original": { + "owner": "domenkozar", + "ref": "relaxed-flakes", + "repo": "nix", + "type": "github" + } + }, + "nixpkgs": { + "locked": { + "lastModified": 1678875422, + "narHash": "sha256-T3o6NcQPwXjxJMn2shz86Chch4ljXgZn746c2caGxd8=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "126f49a01de5b7e35a43fd43f891ecf6d3a51459", + "type": "github" + }, + "original": { + "owner": "NixOS", + "ref": "nixpkgs-unstable", + "repo": "nixpkgs", + "type": "github" + } + }, + "nixpkgs-lib": { + "locked": { + "dir": "lib", + "lastModified": 1701253981, + "narHash": "sha256-ztaDIyZ7HrTAfEEUt9AtTDNoCYxUdSd6NrRHaYOIxtk=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "e92039b55bcd58469325ded85d4f58dd5a4eaf58", + "type": "github" + }, + "original": { + "dir": "lib", + "owner": "NixOS", + "ref": "nixos-unstable", + "repo": "nixpkgs", + "type": "github" + } + }, + "nixpkgs-regression": { + "locked": { + "lastModified": 1643052045, + "narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", + "type": "github" + }, + "original": { + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", + "type": "github" + } + }, + "nixpkgs-stable": { + "locked": { + "lastModified": 1685801374, + "narHash": "sha256-otaSUoFEMM+LjBI1XL/xGB5ao6IwnZOXc47qhIgJe8U=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "c37ca420157f4abc31e26f436c1145f8951ff373", + "type": "github" + }, + "original": { + "owner": "NixOS", + "ref": "nixos-23.05", + "repo": "nixpkgs", + "type": "github" + } + }, + "nixpkgs_2": { + "locked": { + "lastModified": 1702539185, + "narHash": "sha256-KnIRG5NMdLIpEkZTnN5zovNYc0hhXjAgv6pfd5Z4c7U=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "aa9d4729cbc99dabacb50e3994dcefb3ea0f7447", + "type": "github" + }, + "original": { + "owner": "NixOS", + "ref": "nixpkgs-unstable", + "repo": "nixpkgs", + "type": "github" + } + }, + "pre-commit-hooks": { + "inputs": { + "flake-compat": [ + "devenv", + "flake-compat" + ], + "flake-utils": "flake-utils", + "gitignore": "gitignore", + "nixpkgs": [ + "devenv", + "nixpkgs" + ], + "nixpkgs-stable": "nixpkgs-stable" + }, + "locked": { + "lastModified": 1688056373, + "narHash": "sha256-2+SDlNRTKsgo3LBRiMUcoEUb6sDViRNQhzJquZ4koOI=", + "owner": "cachix", + "repo": "pre-commit-hooks.nix", + "rev": "5843cf069272d92b60c3ed9e55b7a8989c01d4c7", + "type": "github" + }, + "original": { + "owner": "cachix", + "repo": "pre-commit-hooks.nix", + "type": "github" + } + }, + "root": { + "inputs": { + "devenv": "devenv", + "flake-parts": "flake-parts", + "nixpkgs": "nixpkgs_2" + } + }, + "systems": { + "locked": { + "lastModified": 1681028828, + "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", + "owner": "nix-systems", + "repo": "default", + "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", + "type": "github" + }, + "original": { + "owner": "nix-systems", + "repo": "default", + "type": "github" + } + } + }, + "root": "root", + "version": 7 +} diff --git a/vendor/github.com/go-viper/mapstructure/v2/flake.nix 
b/vendor/github.com/go-viper/mapstructure/v2/flake.nix new file mode 100644 index 0000000000..4ed0f53311 --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/flake.nix @@ -0,0 +1,39 @@ +{ + inputs = { + nixpkgs.url = "github:NixOS/nixpkgs/nixpkgs-unstable"; + flake-parts.url = "github:hercules-ci/flake-parts"; + devenv.url = "github:cachix/devenv"; + }; + + outputs = inputs@{ flake-parts, ... }: + flake-parts.lib.mkFlake { inherit inputs; } { + imports = [ + inputs.devenv.flakeModule + ]; + + systems = [ "x86_64-linux" "x86_64-darwin" "aarch64-darwin" ]; + + perSystem = { config, self', inputs', pkgs, system, ... }: rec { + devenv.shells = { + default = { + languages = { + go.enable = true; + }; + + pre-commit.hooks = { + nixpkgs-fmt.enable = true; + }; + + packages = with pkgs; [ + golangci-lint + ]; + + # https://github.com/cachix/devenv/issues/528#issuecomment-1556108767 + containers = pkgs.lib.mkForce { }; + }; + + ci = devenv.shells.default; + }; + }; + }; +} diff --git a/vendor/github.com/go-viper/mapstructure/v2/mapstructure.go b/vendor/github.com/go-viper/mapstructure/v2/mapstructure.go new file mode 100644 index 0000000000..27f21bc721 --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/mapstructure.go @@ -0,0 +1,1562 @@ +// Package mapstructure exposes functionality to convert one arbitrary +// Go type into another, typically to convert a map[string]interface{} +// into a native Go structure. +// +// The Go structure can be arbitrarily complex, containing slices, +// other structs, etc. and the decoder will properly decode nested +// maps and so on into the proper structures in the native Go struct. +// See the examples to see what the decoder is capable of. +// +// The simplest function to start with is Decode. +// +// # Field Tags +// +// When decoding to a struct, mapstructure will use the field name by +// default to perform the mapping. For example, if a struct has a field +// "Username" then mapstructure will look for a key in the source value +// of "username" (case insensitive). +// +// type User struct { +// Username string +// } +// +// You can change the behavior of mapstructure by using struct tags. +// The default struct tag that mapstructure looks for is "mapstructure" +// but you can customize it using DecoderConfig. +// +// # Renaming Fields +// +// To rename the key that mapstructure looks for, use the "mapstructure" +// tag and set a value directly. For example, to change the "username" example +// above to "user": +// +// type User struct { +// Username string `mapstructure:"user"` +// } +// +// # Embedded Structs and Squashing +// +// Embedded structs are treated as if they're another field with that name. +// By default, the two structs below are equivalent when decoding with +// mapstructure: +// +// type Person struct { +// Name string +// } +// +// type Friend struct { +// Person +// } +// +// type Friend struct { +// Person Person +// } +// +// This would require an input that looks like below: +// +// map[string]interface{}{ +// "person": map[string]interface{}{"name": "alice"}, +// } +// +// If your "person" value is NOT nested, then you can append ",squash" to +// your tag value and mapstructure will treat it as if the embedded struct +// were part of the struct directly. 
Example: +// +// type Friend struct { +// Person `mapstructure:",squash"` +// } +// +// Now the following input would be accepted: +// +// map[string]interface{}{ +// "name": "alice", +// } +// +// When decoding from a struct to a map, the squash tag squashes the struct +// fields into a single map. Using the example structs from above: +// +// Friend{Person: Person{Name: "alice"}} +// +// Will be decoded into a map: +// +// map[string]interface{}{ +// "name": "alice", +// } +// +// DecoderConfig has a field that changes the behavior of mapstructure +// to always squash embedded structs. +// +// # Remainder Values +// +// If there are any unmapped keys in the source value, mapstructure by +// default will silently ignore them. You can error by setting ErrorUnused +// in DecoderConfig. If you're using Metadata you can also maintain a slice +// of the unused keys. +// +// You can also use the ",remain" suffix on your tag to collect all unused +// values in a map. The field with this tag MUST be a map type and should +// probably be a "map[string]interface{}" or "map[interface{}]interface{}". +// See example below: +// +// type Friend struct { +// Name string +// Other map[string]interface{} `mapstructure:",remain"` +// } +// +// Given the input below, Other would be populated with the other +// values that weren't used (everything but "name"): +// +// map[string]interface{}{ +// "name": "bob", +// "address": "123 Maple St.", +// } +// +// # Omit Empty Values +// +// When decoding from a struct to any other value, you may use the +// ",omitempty" suffix on your tag to omit that value if it equates to +// the zero value. The zero value of all types is specified in the Go +// specification. +// +// For example, the zero type of a numeric type is zero ("0"). If the struct +// field value is zero and a numeric type, the field is empty, and it won't +// be encoded into the destination type. +// +// type Source struct { +// Age int `mapstructure:",omitempty"` +// } +// +// # Unexported fields +// +// Since unexported (private) struct fields cannot be set outside the package +// where they are defined, the decoder will simply skip them. +// +// For this output type definition: +// +// type Exported struct { +// private string // this unexported field will be skipped +// Public string +// } +// +// Using this map as input: +// +// map[string]interface{}{ +// "private": "I will be ignored", +// "Public": "I made it through!", +// } +// +// The following struct will be decoded: +// +// type Exported struct { +// private: "" // field is left with an empty string (zero value) +// Public: "I made it through!" +// } +// +// # Other Configuration +// +// mapstructure is highly configurable. See the DecoderConfig struct +// for other features and options that are supported. +package mapstructure + +import ( + "encoding/json" + "errors" + "fmt" + "reflect" + "sort" + "strconv" + "strings" +) + +// DecodeHookFunc is the callback function that can be used for +// data transformations. See "DecodeHook" in the DecoderConfig +// struct. +// +// The type must be one of DecodeHookFuncType, DecodeHookFuncKind, or +// DecodeHookFuncValue. +// Values are a superset of Types (Values can return types), and Types are a +// superset of Kinds (Types can return Kinds) and are generally a richer thing +// to use, but Kinds are simpler if you only need those. 
+// +// The reason DecodeHookFunc is multi-typed is for backwards compatibility: +// we started with Kinds and then realized Types were the better solution, +// but have a promise to not break backwards compat so we now support +// both. +type DecodeHookFunc interface{} + +// DecodeHookFuncType is a DecodeHookFunc which has complete information about +// the source and target types. +type DecodeHookFuncType func(reflect.Type, reflect.Type, interface{}) (interface{}, error) + +// DecodeHookFuncKind is a DecodeHookFunc which knows only the Kinds of the +// source and target types. +type DecodeHookFuncKind func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error) + +// DecodeHookFuncValue is a DecodeHookFunc which has complete access to both the source and target +// values. +type DecodeHookFuncValue func(from reflect.Value, to reflect.Value) (interface{}, error) + +// DecoderConfig is the configuration that is used to create a new decoder +// and allows customization of various aspects of decoding. +type DecoderConfig struct { + // DecodeHook, if set, will be called before any decoding and any + // type conversion (if WeaklyTypedInput is on). This lets you modify + // the values before they're set down onto the resulting struct. The + // DecodeHook is called for every map and value in the input. This means + // that if a struct has embedded fields with squash tags the decode hook + // is called only once with all of the input data, not once for each + // embedded struct. + // + // If an error is returned, the entire decode will fail with that error. + DecodeHook DecodeHookFunc + + // If ErrorUnused is true, then it is an error for there to exist + // keys in the original map that were unused in the decoding process + // (extra keys). + ErrorUnused bool + + // If ErrorUnset is true, then it is an error for there to exist + // fields in the result that were not set in the decoding process + // (extra fields). This only applies to decoding to a struct. This + // will affect all nested structs as well. + ErrorUnset bool + + // ZeroFields, if set to true, will zero fields before writing them. + // For example, a map will be emptied before decoded values are put in + // it. If this is false, a map will be merged. + ZeroFields bool + + // If WeaklyTypedInput is true, the decoder will make the following + // "weak" conversions: + // + // - bools to string (true = "1", false = "0") + // - numbers to string (base 10) + // - bools to int/uint (true = 1, false = 0) + // - strings to int/uint (base implied by prefix) + // - int to bool (true if value != 0) + // - string to bool (accepts: 1, t, T, TRUE, true, True, 0, f, F, + // FALSE, false, False. Anything else is an error) + // - empty array = empty map and vice versa + // - negative numbers to overflowed uint values (base 10) + // - slice of maps to a merged map + // - single values are converted to slices if required. Each + // element is weakly decoded. For example: "4" can become []int{4} + // if the target type is an int slice. + // + WeaklyTypedInput bool + + // Squash will squash embedded structs. A squash tag may also be + // added to an individual struct field using a tag. For example: + // + // type Parent struct { + // Child `mapstructure:",squash"` + // } + Squash bool + + // Metadata is the struct that will contain extra metadata about + // the decoding. If this is nil, then no metadata will be tracked. + Metadata *Metadata + + // Result is a pointer to the struct that will contain the decoded + // value. 
+ Result interface{} + + // The tag name that mapstructure reads for field names. This + // defaults to "mapstructure" + TagName string + + // IgnoreUntaggedFields ignores all struct fields without explicit + // TagName, comparable to `mapstructure:"-"` as default behaviour. + IgnoreUntaggedFields bool + + // MatchName is the function used to match the map key to the struct + // field name or tag. Defaults to `strings.EqualFold`. This can be used + // to implement case-sensitive tag values, support snake casing, etc. + MatchName func(mapKey, fieldName string) bool +} + +// A Decoder takes a raw interface value and turns it into structured +// data, keeping track of rich error information along the way in case +// anything goes wrong. Unlike the basic top-level Decode method, you can +// more finely control how the Decoder behaves using the DecoderConfig +// structure. The top-level Decode method is just a convenience that sets +// up the most basic Decoder. +type Decoder struct { + config *DecoderConfig +} + +// Metadata contains information about decoding a structure that +// is tedious or difficult to get otherwise. +type Metadata struct { + // Keys are the keys of the structure which were successfully decoded + Keys []string + + // Unused is a slice of keys that were found in the raw value but + // weren't decoded since there was no matching field in the result interface + Unused []string + + // Unset is a slice of field names that were found in the result interface + // but weren't set in the decoding process since there was no matching value + // in the input + Unset []string +} + +// Decode takes an input structure and uses reflection to translate it to +// the output structure. output must be a pointer to a map or struct. +func Decode(input interface{}, output interface{}) error { + config := &DecoderConfig{ + Metadata: nil, + Result: output, + } + + decoder, err := NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +// WeakDecode is the same as Decode but is shorthand to enable +// WeaklyTypedInput. See DecoderConfig for more info. +func WeakDecode(input, output interface{}) error { + config := &DecoderConfig{ + Metadata: nil, + Result: output, + WeaklyTypedInput: true, + } + + decoder, err := NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +// DecodeMetadata is the same as Decode, but is shorthand to +// enable metadata collection. See DecoderConfig for more info. +func DecodeMetadata(input interface{}, output interface{}, metadata *Metadata) error { + config := &DecoderConfig{ + Metadata: metadata, + Result: output, + } + + decoder, err := NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +// WeakDecodeMetadata is the same as Decode, but is shorthand to +// enable both WeaklyTypedInput and metadata collection. See +// DecoderConfig for more info. +func WeakDecodeMetadata(input interface{}, output interface{}, metadata *Metadata) error { + config := &DecoderConfig{ + Metadata: metadata, + Result: output, + WeaklyTypedInput: true, + } + + decoder, err := NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +// NewDecoder returns a new decoder for the given configuration. Once +// a decoder has been returned, the same configuration must not be used +// again. 
+func NewDecoder(config *DecoderConfig) (*Decoder, error) { + val := reflect.ValueOf(config.Result) + if val.Kind() != reflect.Ptr { + return nil, errors.New("result must be a pointer") + } + + val = val.Elem() + if !val.CanAddr() { + return nil, errors.New("result must be addressable (a pointer)") + } + + if config.Metadata != nil { + if config.Metadata.Keys == nil { + config.Metadata.Keys = make([]string, 0) + } + + if config.Metadata.Unused == nil { + config.Metadata.Unused = make([]string, 0) + } + + if config.Metadata.Unset == nil { + config.Metadata.Unset = make([]string, 0) + } + } + + if config.TagName == "" { + config.TagName = "mapstructure" + } + + if config.MatchName == nil { + config.MatchName = strings.EqualFold + } + + result := &Decoder{ + config: config, + } + + return result, nil +} + +// Decode decodes the given raw interface to the target pointer specified +// by the configuration. +func (d *Decoder) Decode(input interface{}) error { + return d.decode("", input, reflect.ValueOf(d.config.Result).Elem()) +} + +// Decodes an unknown data type into a specific reflection value. +func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) error { + var inputVal reflect.Value + if input != nil { + inputVal = reflect.ValueOf(input) + + // We need to check here if input is a typed nil. Typed nils won't + // match the "input == nil" below so we check that here. + if inputVal.Kind() == reflect.Ptr && inputVal.IsNil() { + input = nil + } + } + + if input == nil { + // If the data is nil, then we don't set anything, unless ZeroFields is set + // to true. + if d.config.ZeroFields { + outVal.Set(reflect.Zero(outVal.Type())) + + if d.config.Metadata != nil && name != "" { + d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) + } + } + return nil + } + + if !inputVal.IsValid() { + // If the input value is invalid, then we just set the value + // to be the zero value. + outVal.Set(reflect.Zero(outVal.Type())) + if d.config.Metadata != nil && name != "" { + d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) + } + return nil + } + + if d.config.DecodeHook != nil { + // We have a DecodeHook, so let's pre-process the input. + var err error + input, err = DecodeHookExec(d.config.DecodeHook, inputVal, outVal) + if err != nil { + return fmt.Errorf("error decoding '%s': %w", name, err) + } + } + + var err error + outputKind := getKind(outVal) + addMetaKey := true + switch outputKind { + case reflect.Bool: + err = d.decodeBool(name, input, outVal) + case reflect.Interface: + err = d.decodeBasic(name, input, outVal) + case reflect.String: + err = d.decodeString(name, input, outVal) + case reflect.Int: + err = d.decodeInt(name, input, outVal) + case reflect.Uint: + err = d.decodeUint(name, input, outVal) + case reflect.Float32: + err = d.decodeFloat(name, input, outVal) + case reflect.Struct: + err = d.decodeStruct(name, input, outVal) + case reflect.Map: + err = d.decodeMap(name, input, outVal) + case reflect.Ptr: + addMetaKey, err = d.decodePtr(name, input, outVal) + case reflect.Slice: + err = d.decodeSlice(name, input, outVal) + case reflect.Array: + err = d.decodeArray(name, input, outVal) + case reflect.Func: + err = d.decodeFunc(name, input, outVal) + default: + // If we reached this point then we weren't able to decode it + return fmt.Errorf("%s: unsupported type: %s", name, outputKind) + } + + // If we reached here, then we successfully decoded SOMETHING, so + // mark the key as used if we're tracking metainput. 
+ if addMetaKey && d.config.Metadata != nil && name != "" { + d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) + } + + return err +} + +// This decodes a basic type (bool, int, string, etc.) and sets the +// value to "data" of that type. +func (d *Decoder) decodeBasic(name string, data interface{}, val reflect.Value) error { + if val.IsValid() && val.Elem().IsValid() { + elem := val.Elem() + + // If we can't address this element, then its not writable. Instead, + // we make a copy of the value (which is a pointer and therefore + // writable), decode into that, and replace the whole value. + copied := false + if !elem.CanAddr() { + copied = true + + // Make *T + copy := reflect.New(elem.Type()) + + // *T = elem + copy.Elem().Set(elem) + + // Set elem so we decode into it + elem = copy + } + + // Decode. If we have an error then return. We also return right + // away if we're not a copy because that means we decoded directly. + if err := d.decode(name, data, elem); err != nil || !copied { + return err + } + + // If we're a copy, we need to set te final result + val.Set(elem.Elem()) + return nil + } + + dataVal := reflect.ValueOf(data) + + // If the input data is a pointer, and the assigned type is the dereference + // of that exact pointer, then indirect it so that we can assign it. + // Example: *string to string + if dataVal.Kind() == reflect.Ptr && dataVal.Type().Elem() == val.Type() { + dataVal = reflect.Indirect(dataVal) + } + + if !dataVal.IsValid() { + dataVal = reflect.Zero(val.Type()) + } + + dataValType := dataVal.Type() + if !dataValType.AssignableTo(val.Type()) { + return fmt.Errorf( + "'%s' expected type '%s', got '%s'", + name, val.Type(), dataValType) + } + + val.Set(dataVal) + return nil +} + +func (d *Decoder) decodeString(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataKind := getKind(dataVal) + + converted := true + switch { + case dataKind == reflect.String: + val.SetString(dataVal.String()) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetString("1") + } else { + val.SetString("0") + } + case dataKind == reflect.Int && d.config.WeaklyTypedInput: + val.SetString(strconv.FormatInt(dataVal.Int(), 10)) + case dataKind == reflect.Uint && d.config.WeaklyTypedInput: + val.SetString(strconv.FormatUint(dataVal.Uint(), 10)) + case dataKind == reflect.Float32 && d.config.WeaklyTypedInput: + val.SetString(strconv.FormatFloat(dataVal.Float(), 'f', -1, 64)) + case dataKind == reflect.Slice && d.config.WeaklyTypedInput, + dataKind == reflect.Array && d.config.WeaklyTypedInput: + dataType := dataVal.Type() + elemKind := dataType.Elem().Kind() + switch elemKind { + case reflect.Uint8: + var uints []uint8 + if dataKind == reflect.Array { + uints = make([]uint8, dataVal.Len(), dataVal.Len()) + for i := range uints { + uints[i] = dataVal.Index(i).Interface().(uint8) + } + } else { + uints = dataVal.Interface().([]uint8) + } + val.SetString(string(uints)) + default: + converted = false + } + default: + converted = false + } + + if !converted { + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", + name, val.Type(), dataVal.Type(), data) + } + + return nil +} + +func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataKind := getKind(dataVal) + dataType := dataVal.Type() + + switch { + case dataKind == reflect.Int: + val.SetInt(dataVal.Int()) + 
case dataKind == reflect.Uint: + val.SetInt(int64(dataVal.Uint())) + case dataKind == reflect.Float32: + val.SetInt(int64(dataVal.Float())) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetInt(1) + } else { + val.SetInt(0) + } + case dataKind == reflect.String && d.config.WeaklyTypedInput: + str := dataVal.String() + if str == "" { + str = "0" + } + + i, err := strconv.ParseInt(str, 0, val.Type().Bits()) + if err == nil { + val.SetInt(i) + } else { + return fmt.Errorf("cannot parse '%s' as int: %s", name, err) + } + case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": + jn := data.(json.Number) + i, err := jn.Int64() + if err != nil { + return fmt.Errorf( + "error decoding json.Number into %s: %s", name, err) + } + val.SetInt(i) + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", + name, val.Type(), dataVal.Type(), data) + } + + return nil +} + +func (d *Decoder) decodeUint(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataKind := getKind(dataVal) + dataType := dataVal.Type() + + switch { + case dataKind == reflect.Int: + i := dataVal.Int() + if i < 0 && !d.config.WeaklyTypedInput { + return fmt.Errorf("cannot parse '%s', %d overflows uint", + name, i) + } + val.SetUint(uint64(i)) + case dataKind == reflect.Uint: + val.SetUint(dataVal.Uint()) + case dataKind == reflect.Float32: + f := dataVal.Float() + if f < 0 && !d.config.WeaklyTypedInput { + return fmt.Errorf("cannot parse '%s', %f overflows uint", + name, f) + } + val.SetUint(uint64(f)) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetUint(1) + } else { + val.SetUint(0) + } + case dataKind == reflect.String && d.config.WeaklyTypedInput: + str := dataVal.String() + if str == "" { + str = "0" + } + + i, err := strconv.ParseUint(str, 0, val.Type().Bits()) + if err == nil { + val.SetUint(i) + } else { + return fmt.Errorf("cannot parse '%s' as uint: %s", name, err) + } + case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": + jn := data.(json.Number) + i, err := strconv.ParseUint(string(jn), 0, 64) + if err != nil { + return fmt.Errorf( + "error decoding json.Number into %s: %s", name, err) + } + val.SetUint(i) + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", + name, val.Type(), dataVal.Type(), data) + } + + return nil +} + +func (d *Decoder) decodeBool(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataKind := getKind(dataVal) + + switch { + case dataKind == reflect.Bool: + val.SetBool(dataVal.Bool()) + case dataKind == reflect.Int && d.config.WeaklyTypedInput: + val.SetBool(dataVal.Int() != 0) + case dataKind == reflect.Uint && d.config.WeaklyTypedInput: + val.SetBool(dataVal.Uint() != 0) + case dataKind == reflect.Float32 && d.config.WeaklyTypedInput: + val.SetBool(dataVal.Float() != 0) + case dataKind == reflect.String && d.config.WeaklyTypedInput: + b, err := strconv.ParseBool(dataVal.String()) + if err == nil { + val.SetBool(b) + } else if dataVal.String() == "" { + val.SetBool(false) + } else { + return fmt.Errorf("cannot parse '%s' as bool: %s", name, err) + } + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", + name, val.Type(), dataVal.Type(), data) + } + + return nil +} + +func (d *Decoder) decodeFloat(name string, 
data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataKind := getKind(dataVal) + dataType := dataVal.Type() + + switch { + case dataKind == reflect.Int: + val.SetFloat(float64(dataVal.Int())) + case dataKind == reflect.Uint: + val.SetFloat(float64(dataVal.Uint())) + case dataKind == reflect.Float32: + val.SetFloat(dataVal.Float()) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetFloat(1) + } else { + val.SetFloat(0) + } + case dataKind == reflect.String && d.config.WeaklyTypedInput: + str := dataVal.String() + if str == "" { + str = "0" + } + + f, err := strconv.ParseFloat(str, val.Type().Bits()) + if err == nil { + val.SetFloat(f) + } else { + return fmt.Errorf("cannot parse '%s' as float: %s", name, err) + } + case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": + jn := data.(json.Number) + i, err := jn.Float64() + if err != nil { + return fmt.Errorf( + "error decoding json.Number into %s: %s", name, err) + } + val.SetFloat(i) + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", + name, val.Type(), dataVal.Type(), data) + } + + return nil +} + +func (d *Decoder) decodeMap(name string, data interface{}, val reflect.Value) error { + valType := val.Type() + valKeyType := valType.Key() + valElemType := valType.Elem() + + // By default we overwrite keys in the current map + valMap := val + + // If the map is nil or we're purposely zeroing fields, make a new map + if valMap.IsNil() || d.config.ZeroFields { + // Make a new map to hold our result + mapType := reflect.MapOf(valKeyType, valElemType) + valMap = reflect.MakeMap(mapType) + } + + dataVal := reflect.ValueOf(data) + + // Resolve any levels of indirection + for dataVal.Kind() == reflect.Pointer { + dataVal = reflect.Indirect(dataVal) + } + + // Check input type and based on the input type jump to the proper func + switch dataVal.Kind() { + case reflect.Map: + return d.decodeMapFromMap(name, dataVal, val, valMap) + + case reflect.Struct: + return d.decodeMapFromStruct(name, dataVal, val, valMap) + + case reflect.Array, reflect.Slice: + if d.config.WeaklyTypedInput { + return d.decodeMapFromSlice(name, dataVal, val, valMap) + } + + fallthrough + + default: + return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind()) + } +} + +func (d *Decoder) decodeMapFromSlice(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error { + // Special case for BC reasons (covered by tests) + if dataVal.Len() == 0 { + val.Set(valMap) + return nil + } + + for i := 0; i < dataVal.Len(); i++ { + err := d.decode( + name+"["+strconv.Itoa(i)+"]", + dataVal.Index(i).Interface(), val) + if err != nil { + return err + } + } + + return nil +} + +func (d *Decoder) decodeMapFromMap(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error { + valType := val.Type() + valKeyType := valType.Key() + valElemType := valType.Elem() + + // Accumulate errors + errors := make([]string, 0) + + // If the input data is empty, then we just match what the input data is. 
+ if dataVal.Len() == 0 { + if dataVal.IsNil() { + if !val.IsNil() { + val.Set(dataVal) + } + } else { + // Set to empty allocated value + val.Set(valMap) + } + + return nil + } + + for _, k := range dataVal.MapKeys() { + fieldName := name + "[" + k.String() + "]" + + // First decode the key into the proper type + currentKey := reflect.Indirect(reflect.New(valKeyType)) + if err := d.decode(fieldName, k.Interface(), currentKey); err != nil { + errors = appendErrors(errors, err) + continue + } + + // Next decode the data into the proper type + v := dataVal.MapIndex(k).Interface() + currentVal := reflect.Indirect(reflect.New(valElemType)) + if err := d.decode(fieldName, v, currentVal); err != nil { + errors = appendErrors(errors, err) + continue + } + + valMap.SetMapIndex(currentKey, currentVal) + } + + // Set the built up map to the value + val.Set(valMap) + + // If we had errors, return those + if len(errors) > 0 { + return &Error{errors} + } + + return nil +} + +func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error { + typ := dataVal.Type() + for i := 0; i < typ.NumField(); i++ { + // Get the StructField first since this is a cheap operation. If the + // field is unexported, then ignore it. + f := typ.Field(i) + if f.PkgPath != "" { + continue + } + + // Next get the actual value of this field and verify it is assignable + // to the map value. + v := dataVal.Field(i) + if !v.Type().AssignableTo(valMap.Type().Elem()) { + return fmt.Errorf("cannot assign type '%s' to map value field of type '%s'", v.Type(), valMap.Type().Elem()) + } + + tagValue := f.Tag.Get(d.config.TagName) + keyName := f.Name + + if tagValue == "" && d.config.IgnoreUntaggedFields { + continue + } + + // If Squash is set in the config, we squash the field down. + squash := d.config.Squash && v.Kind() == reflect.Struct && f.Anonymous + + v = dereferencePtrToStructIfNeeded(v, d.config.TagName) + + // Determine the name of the key in the map + if index := strings.Index(tagValue, ","); index != -1 { + if tagValue[:index] == "-" { + continue + } + // If "omitempty" is specified in the tag, it ignores empty values. + if strings.Index(tagValue[index+1:], "omitempty") != -1 && isEmptyValue(v) { + continue + } + + // If "squash" is specified in the tag, we squash the field down. + squash = squash || strings.Index(tagValue[index+1:], "squash") != -1 + if squash { + // When squashing, the embedded type can be a pointer to a struct. 
+ if v.Kind() == reflect.Ptr && v.Elem().Kind() == reflect.Struct { + v = v.Elem() + } + + // The final type must be a struct + if v.Kind() != reflect.Struct { + return fmt.Errorf("cannot squash non-struct type '%s'", v.Type()) + } + } else { + if strings.Index(tagValue[index+1:], "remain") != -1 { + if v.Kind() != reflect.Map { + return fmt.Errorf("error remain-tag field with invalid type: '%s'", v.Type()) + } + + ptr := v.MapRange() + for ptr.Next() { + valMap.SetMapIndex(ptr.Key(), ptr.Value()) + } + continue + } + } + if keyNameTagValue := tagValue[:index]; keyNameTagValue != "" { + keyName = keyNameTagValue + } + } else if len(tagValue) > 0 { + if tagValue == "-" { + continue + } + keyName = tagValue + } + + switch v.Kind() { + // this is an embedded struct, so handle it differently + case reflect.Struct: + x := reflect.New(v.Type()) + x.Elem().Set(v) + + vType := valMap.Type() + vKeyType := vType.Key() + vElemType := vType.Elem() + mType := reflect.MapOf(vKeyType, vElemType) + vMap := reflect.MakeMap(mType) + + // Creating a pointer to a map so that other methods can completely + // overwrite the map if need be (looking at you decodeMapFromMap). The + // indirection allows the underlying map to be settable (CanSet() == true) + // where as reflect.MakeMap returns an unsettable map. + addrVal := reflect.New(vMap.Type()) + reflect.Indirect(addrVal).Set(vMap) + + err := d.decode(keyName, x.Interface(), reflect.Indirect(addrVal)) + if err != nil { + return err + } + + // the underlying map may have been completely overwritten so pull + // it indirectly out of the enclosing value. + vMap = reflect.Indirect(addrVal) + + if squash { + for _, k := range vMap.MapKeys() { + valMap.SetMapIndex(k, vMap.MapIndex(k)) + } + } else { + valMap.SetMapIndex(reflect.ValueOf(keyName), vMap) + } + + default: + valMap.SetMapIndex(reflect.ValueOf(keyName), v) + } + } + + if val.CanAddr() { + val.Set(valMap) + } + + return nil +} + +func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) (bool, error) { + // If the input data is nil, then we want to just set the output + // pointer to be nil as well. + isNil := data == nil + if !isNil { + switch v := reflect.Indirect(reflect.ValueOf(data)); v.Kind() { + case reflect.Chan, + reflect.Func, + reflect.Interface, + reflect.Map, + reflect.Ptr, + reflect.Slice: + isNil = v.IsNil() + } + } + if isNil { + if !val.IsNil() && val.CanSet() { + nilValue := reflect.New(val.Type()).Elem() + val.Set(nilValue) + } + + return true, nil + } + + // Create an element of the concrete (non pointer) type and decode + // into that. Then set the value of the pointer to this type. + valType := val.Type() + valElemType := valType.Elem() + if val.CanSet() { + realVal := val + if realVal.IsNil() || d.config.ZeroFields { + realVal = reflect.New(valElemType) + } + + if err := d.decode(name, data, reflect.Indirect(realVal)); err != nil { + return false, err + } + + val.Set(realVal) + } else { + if err := d.decode(name, data, reflect.Indirect(val)); err != nil { + return false, err + } + } + return false, nil +} + +func (d *Decoder) decodeFunc(name string, data interface{}, val reflect.Value) error { + // Create an element of the concrete (non pointer) type and decode + // into that. Then set the value of the pointer to this type. 
+ dataVal := reflect.Indirect(reflect.ValueOf(data)) + if val.Type() != dataVal.Type() { + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", + name, val.Type(), dataVal.Type(), data) + } + val.Set(dataVal) + return nil +} + +func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataValKind := dataVal.Kind() + valType := val.Type() + valElemType := valType.Elem() + sliceType := reflect.SliceOf(valElemType) + + // If we have a non array/slice type then we first attempt to convert. + if dataValKind != reflect.Array && dataValKind != reflect.Slice { + if d.config.WeaklyTypedInput { + switch { + // Slice and array we use the normal logic + case dataValKind == reflect.Slice, dataValKind == reflect.Array: + break + + // Empty maps turn into empty slices + case dataValKind == reflect.Map: + if dataVal.Len() == 0 { + val.Set(reflect.MakeSlice(sliceType, 0, 0)) + return nil + } + // Create slice of maps of other sizes + return d.decodeSlice(name, []interface{}{data}, val) + + case dataValKind == reflect.String && valElemType.Kind() == reflect.Uint8: + return d.decodeSlice(name, []byte(dataVal.String()), val) + + // All other types we try to convert to the slice type + // and "lift" it into it. i.e. a string becomes a string slice. + default: + // Just re-try this function with data as a slice. + return d.decodeSlice(name, []interface{}{data}, val) + } + } + + return fmt.Errorf( + "'%s': source data must be an array or slice, got %s", name, dataValKind) + } + + // If the input value is nil, then don't allocate since empty != nil + if dataValKind != reflect.Array && dataVal.IsNil() { + return nil + } + + valSlice := val + if valSlice.IsNil() || d.config.ZeroFields { + // Make a new slice to hold our result, same size as the original data. + valSlice = reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len()) + } else if valSlice.Len() > dataVal.Len() { + valSlice = valSlice.Slice(0, dataVal.Len()) + } + + // Accumulate any errors + errors := make([]string, 0) + + for i := 0; i < dataVal.Len(); i++ { + currentData := dataVal.Index(i).Interface() + for valSlice.Len() <= i { + valSlice = reflect.Append(valSlice, reflect.Zero(valElemType)) + } + currentField := valSlice.Index(i) + + fieldName := name + "[" + strconv.Itoa(i) + "]" + if err := d.decode(fieldName, currentData, currentField); err != nil { + errors = appendErrors(errors, err) + } + } + + // Finally, set the value to the slice we built up + val.Set(valSlice) + + // If there were errors, we return those + if len(errors) > 0 { + return &Error{errors} + } + + return nil +} + +func (d *Decoder) decodeArray(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataValKind := dataVal.Kind() + valType := val.Type() + valElemType := valType.Elem() + arrayType := reflect.ArrayOf(valType.Len(), valElemType) + + valArray := val + + if isComparable(valArray) && valArray.Interface() == reflect.Zero(valArray.Type()).Interface() || d.config.ZeroFields { + // Check input type + if dataValKind != reflect.Array && dataValKind != reflect.Slice { + if d.config.WeaklyTypedInput { + switch { + // Empty maps turn into empty arrays + case dataValKind == reflect.Map: + if dataVal.Len() == 0 { + val.Set(reflect.Zero(arrayType)) + return nil + } + + // All other types we try to convert to the array type + // and "lift" it into it. i.e. a string becomes a string array. 
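Under WeaklyTypedInput, decodeSlice and decodeArray above also "lift" a single scalar into a one-element slice, and copy a string into []byte when the element type is uint8. A short sketch of that behaviour, using the same assumed public API as the previous example:

```go
package main

import (
	"fmt"

	"github.com/go-viper/mapstructure/v2"
)

type Payload struct {
	Tags []string `mapstructure:"tags"`
	Raw  []byte   `mapstructure:"raw"`
}

func main() {
	input := map[string]interface{}{
		"tags": "solo",  // lifted into []string{"solo"}
		"raw":  "bytes", // copied into []byte because the element type is uint8
	}

	var out Payload
	dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		WeaklyTypedInput: true,
		Result:           &out,
	})
	if err != nil {
		panic(err)
	}
	if err := dec.Decode(input); err != nil {
		panic(err)
	}
	fmt.Printf("%q %q\n", out.Tags, out.Raw) // ["solo"] "bytes"
}
```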
+ default: + // Just re-try this function with data as a slice. + return d.decodeArray(name, []interface{}{data}, val) + } + } + + return fmt.Errorf( + "'%s': source data must be an array or slice, got %s", name, dataValKind) + + } + if dataVal.Len() > arrayType.Len() { + return fmt.Errorf( + "'%s': expected source data to have length less or equal to %d, got %d", name, arrayType.Len(), dataVal.Len()) + } + + // Make a new array to hold our result, same size as the original data. + valArray = reflect.New(arrayType).Elem() + } + + // Accumulate any errors + errors := make([]string, 0) + + for i := 0; i < dataVal.Len(); i++ { + currentData := dataVal.Index(i).Interface() + currentField := valArray.Index(i) + + fieldName := name + "[" + strconv.Itoa(i) + "]" + if err := d.decode(fieldName, currentData, currentField); err != nil { + errors = appendErrors(errors, err) + } + } + + // Finally, set the value to the array we built up + val.Set(valArray) + + // If there were errors, we return those + if len(errors) > 0 { + return &Error{errors} + } + + return nil +} + +func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + + // If the type of the value to write to and the data match directly, + // then we just set it directly instead of recursing into the structure. + if dataVal.Type() == val.Type() { + val.Set(dataVal) + return nil + } + + dataValKind := dataVal.Kind() + switch dataValKind { + case reflect.Map: + return d.decodeStructFromMap(name, dataVal, val) + + case reflect.Struct: + // Not the most efficient way to do this but we can optimize later if + // we want to. To convert from struct to struct we go to map first + // as an intermediary. + + // Make a new map to hold our result + mapType := reflect.TypeOf((map[string]interface{})(nil)) + mval := reflect.MakeMap(mapType) + + // Creating a pointer to a map so that other methods can completely + // overwrite the map if need be (looking at you decodeMapFromMap). The + // indirection allows the underlying map to be settable (CanSet() == true) + // where as reflect.MakeMap returns an unsettable map. + addrVal := reflect.New(mval.Type()) + + reflect.Indirect(addrVal).Set(mval) + if err := d.decodeMapFromStruct(name, dataVal, reflect.Indirect(addrVal), mval); err != nil { + return err + } + + result := d.decodeStructFromMap(name, reflect.Indirect(addrVal), val) + return result + + default: + return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind()) + } +} + +func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) error { + dataValType := dataVal.Type() + if kind := dataValType.Key().Kind(); kind != reflect.String && kind != reflect.Interface { + return fmt.Errorf( + "'%s' needs a map with string keys, has '%s' keys", + name, dataValType.Key().Kind()) + } + + dataValKeys := make(map[reflect.Value]struct{}) + dataValKeysUnused := make(map[interface{}]struct{}) + for _, dataValKey := range dataVal.MapKeys() { + dataValKeys[dataValKey] = struct{}{} + dataValKeysUnused[dataValKey.Interface()] = struct{}{} + } + + targetValKeysUnused := make(map[interface{}]struct{}) + errors := make([]string, 0) + + // This slice will keep track of all the structs we'll be decoding. + // There can be more than one struct if there are embedded structs + // that are squashed. + structs := make([]reflect.Value, 1, 5) + structs[0] = val + + // Compile the list of all the fields that we're going to be decoding + // from all the structs. 
+ type field struct { + field reflect.StructField + val reflect.Value + } + + // remainField is set to a valid field set with the "remain" tag if + // we are keeping track of remaining values. + var remainField *field + + fields := []field{} + for len(structs) > 0 { + structVal := structs[0] + structs = structs[1:] + + structType := structVal.Type() + + for i := 0; i < structType.NumField(); i++ { + fieldType := structType.Field(i) + fieldVal := structVal.Field(i) + if fieldVal.Kind() == reflect.Ptr && fieldVal.Elem().Kind() == reflect.Struct { + // Handle embedded struct pointers as embedded structs. + fieldVal = fieldVal.Elem() + } + + // If "squash" is specified in the tag, we squash the field down. + squash := d.config.Squash && fieldVal.Kind() == reflect.Struct && fieldType.Anonymous + remain := false + + // We always parse the tags cause we're looking for other tags too + tagParts := strings.Split(fieldType.Tag.Get(d.config.TagName), ",") + for _, tag := range tagParts[1:] { + if tag == "squash" { + squash = true + break + } + + if tag == "remain" { + remain = true + break + } + } + + if squash { + if fieldVal.Kind() != reflect.Struct { + errors = appendErrors(errors, + fmt.Errorf("%s: unsupported type for squash: %s", fieldType.Name, fieldVal.Kind())) + } else { + structs = append(structs, fieldVal) + } + continue + } + + // Build our field + if remain { + remainField = &field{fieldType, fieldVal} + } else { + // Normal struct field, store it away + fields = append(fields, field{fieldType, fieldVal}) + } + } + } + + // for fieldType, field := range fields { + for _, f := range fields { + field, fieldValue := f.field, f.val + fieldName := field.Name + + tagValue := field.Tag.Get(d.config.TagName) + if tagValue == "" && d.config.IgnoreUntaggedFields { + continue + } + tagValue = strings.SplitN(tagValue, ",", 2)[0] + if tagValue != "" { + fieldName = tagValue + } + + rawMapKey := reflect.ValueOf(fieldName) + rawMapVal := dataVal.MapIndex(rawMapKey) + if !rawMapVal.IsValid() { + // Do a slower search by iterating over each key and + // doing case-insensitive search. + for dataValKey := range dataValKeys { + mK, ok := dataValKey.Interface().(string) + if !ok { + // Not a string key + continue + } + + if d.config.MatchName(mK, fieldName) { + rawMapKey = dataValKey + rawMapVal = dataVal.MapIndex(dataValKey) + break + } + } + + if !rawMapVal.IsValid() { + // There was no matching key in the map for the value in + // the struct. Remember it for potential errors and metadata. + targetValKeysUnused[fieldName] = struct{}{} + continue + } + } + + if !fieldValue.IsValid() { + // This should never happen + panic("field is not valid") + } + + // If we can't set the field, then it is unexported or something, + // and we just continue onwards. + if !fieldValue.CanSet() { + continue + } + + // Delete the key we're using from the unused map so we stop tracking + delete(dataValKeysUnused, rawMapKey.Interface()) + + // If the name is empty string, then we're at the root, and we + // don't dot-join the fields. + if name != "" { + fieldName = name + "." + fieldName + } + + if err := d.decode(fieldName, rawMapVal.Interface(), fieldValue); err != nil { + errors = appendErrors(errors, err) + } + } + + // If we have a "remain"-tagged field and we have unused keys then + // we put the unused keys directly into the remain field. 
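The field-collection loop above is what gives the ",squash" and ",remain" tags their meaning: squashed embedded structs share the top-level key space, and a remain-tagged map soaks up every key that matched nothing else. A minimal sketch with the conventional mapstructure tags, assuming the package-level Decode helper:

```go
package main

import (
	"fmt"

	"github.com/go-viper/mapstructure/v2"
)

type Base struct {
	ID string `mapstructure:"id"`
}

type Config struct {
	// The embedded struct is decoded as if its fields were declared here.
	Base `mapstructure:",squash"`

	Name string `mapstructure:"name"`

	// Every input key that matched no other field ends up in this map.
	Extra map[string]interface{} `mapstructure:",remain"`
}

func main() {
	input := map[string]interface{}{
		"id":     "abc",
		"name":   "example",
		"unused": 42,
	}

	var out Config
	if err := mapstructure.Decode(input, &out); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", out) // {Base:{ID:abc} Name:example Extra:map[unused:42]}
}
```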
+ if remainField != nil && len(dataValKeysUnused) > 0 { + // Build a map of only the unused values + remain := map[interface{}]interface{}{} + for key := range dataValKeysUnused { + remain[key] = dataVal.MapIndex(reflect.ValueOf(key)).Interface() + } + + // Decode it as-if we were just decoding this map onto our map. + if err := d.decodeMap(name, remain, remainField.val); err != nil { + errors = appendErrors(errors, err) + } + + // Set the map to nil so we have none so that the next check will + // not error (ErrorUnused) + dataValKeysUnused = nil + } + + if d.config.ErrorUnused && len(dataValKeysUnused) > 0 { + keys := make([]string, 0, len(dataValKeysUnused)) + for rawKey := range dataValKeysUnused { + keys = append(keys, rawKey.(string)) + } + sort.Strings(keys) + + err := fmt.Errorf("'%s' has invalid keys: %s", name, strings.Join(keys, ", ")) + errors = appendErrors(errors, err) + } + + if d.config.ErrorUnset && len(targetValKeysUnused) > 0 { + keys := make([]string, 0, len(targetValKeysUnused)) + for rawKey := range targetValKeysUnused { + keys = append(keys, rawKey.(string)) + } + sort.Strings(keys) + + err := fmt.Errorf("'%s' has unset fields: %s", name, strings.Join(keys, ", ")) + errors = appendErrors(errors, err) + } + + if len(errors) > 0 { + return &Error{errors} + } + + // Add the unused keys to the list of unused keys if we're tracking metadata + if d.config.Metadata != nil { + for rawKey := range dataValKeysUnused { + key := rawKey.(string) + if name != "" { + key = name + "." + key + } + + d.config.Metadata.Unused = append(d.config.Metadata.Unused, key) + } + for rawKey := range targetValKeysUnused { + key := rawKey.(string) + if name != "" { + key = name + "." + key + } + + d.config.Metadata.Unset = append(d.config.Metadata.Unset, key) + } + } + + return nil +} + +func isEmptyValue(v reflect.Value) bool { + switch getKind(v) { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + } + return false +} + +func getKind(val reflect.Value) reflect.Kind { + kind := val.Kind() + + switch { + case kind >= reflect.Int && kind <= reflect.Int64: + return reflect.Int + case kind >= reflect.Uint && kind <= reflect.Uint64: + return reflect.Uint + case kind >= reflect.Float32 && kind <= reflect.Float64: + return reflect.Float32 + default: + return kind + } +} + +func isStructTypeConvertibleToMap(typ reflect.Type, checkMapstructureTags bool, tagName string) bool { + for i := 0; i < typ.NumField(); i++ { + f := typ.Field(i) + if f.PkgPath == "" && !checkMapstructureTags { // check for unexported fields + return true + } + if checkMapstructureTags && f.Tag.Get(tagName) != "" { // check for mapstructure tags inside + return true + } + } + return false +} + +func dereferencePtrToStructIfNeeded(v reflect.Value, tagName string) reflect.Value { + if v.Kind() != reflect.Ptr || v.Elem().Kind() != reflect.Struct { + return v + } + deref := v.Elem() + derefT := deref.Type() + if isStructTypeConvertibleToMap(derefT, true, tagName) { + return deref + } + return v +} diff --git a/vendor/github.com/go-viper/mapstructure/v2/reflect_go1_19.go 
b/vendor/github.com/go-viper/mapstructure/v2/reflect_go1_19.go new file mode 100644 index 0000000000..d0913fff6c --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/reflect_go1_19.go @@ -0,0 +1,44 @@ +//go:build !go1.20 + +package mapstructure + +import "reflect" + +func isComparable(v reflect.Value) bool { + k := v.Kind() + switch k { + case reflect.Invalid: + return false + + case reflect.Array: + switch v.Type().Elem().Kind() { + case reflect.Interface, reflect.Array, reflect.Struct: + for i := 0; i < v.Type().Len(); i++ { + // if !v.Index(i).Comparable() { + if !isComparable(v.Index(i)) { + return false + } + } + return true + } + return v.Type().Comparable() + + case reflect.Interface: + // return v.Elem().Comparable() + return isComparable(v.Elem()) + + case reflect.Struct: + for i := 0; i < v.NumField(); i++ { + return false + + // if !v.Field(i).Comparable() { + if !isComparable(v.Field(i)) { + return false + } + } + return true + + default: + return v.Type().Comparable() + } +} diff --git a/vendor/github.com/go-viper/mapstructure/v2/reflect_go1_20.go b/vendor/github.com/go-viper/mapstructure/v2/reflect_go1_20.go new file mode 100644 index 0000000000..f8255a1b17 --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/reflect_go1_20.go @@ -0,0 +1,10 @@ +//go:build go1.20 + +package mapstructure + +import "reflect" + +// TODO: remove once we drop support for Go <1.20 +func isComparable(v reflect.Value) bool { + return v.Comparable() +} diff --git a/vendor/github.com/go-xmlfmt/xmlfmt/README.md b/vendor/github.com/go-xmlfmt/xmlfmt/README.md index 4eb6d69a03..9f661ea2d2 100644 --- a/vendor/github.com/go-xmlfmt/xmlfmt/README.md +++ b/vendor/github.com/go-xmlfmt/xmlfmt/README.md @@ -3,7 +3,6 @@ [![MIT License](http://img.shields.io/badge/License-MIT-blue.svg)](LICENSE) [![Go Doc](https://img.shields.io/badge/godoc-reference-4b68a3.svg)](https://godoc.org/github.com/go-xmlfmt/xmlfmt) [![Go Report Card](https://goreportcard.com/badge/github.com/go-xmlfmt/xmlfmt)](https://goreportcard.com/report/github.com/go-xmlfmt/xmlfmt) -[![Codeship Status](https://codeship.com/projects/c49f02b0-a384-0134-fb20-2e0351080565/status?branch=master)](https://codeship.com/projects/190297) ## Synopsis @@ -15,9 +14,18 @@ package main import "github.com/go-xmlfmt/xmlfmt" func main() { - xml1 := `aSome org-or-otherWouldnt you like to knowPatCalifia` + xml1 := `aSome org-or-otherWouldnt you like to knowPatCalifia` x := xmlfmt.FormatXML(xml1, "\t", " ") print(x) + + // If the XML Comments have nested tags in them + xml1 = ` Fred + + 23456 ` + x = xmlfmt.FormatXML(xml1, "", " ", true) + print(x) } ``` @@ -27,48 +35,107 @@ Output: ```xml - a - + a - Some org-or-other - - Wouldnt you like to know - + Some org-or-other + Wouldnt you like to know - Pat - - Califia - + Pat + Califia + + + + Fred + + 23456 + ``` There is no XML decoding and encoding involved, only pure regular expression matching and replacing. So it is much faster than going through decoding and encoding procedures. Moreover, the exact XML source string is preserved, instead of being changed by the encoder. This is why this package exists in the first place. +Note that + +- the default line ending is handled by the package automatically now. For Windows it's `CRLF`, and standard for anywhere else. No need to change the default line ending now. +- the case of XML comments nested within XML comments is ***not*** supported. Please avoid them or use any other tools to correct them before using this package. 
+- don't turn on the `nestedTagsInComments` parameter blindly, as the code has become 10+ times more complicated because of it. + ## Command To use it on command line, check out [xmlfmt](https://github.com/AntonioSun/xmlfmt): ``` -$ xmlfmt -XML Formatter -built on 2019-12-08 +$ xmlfmt -V +xmlfmt - XML Formatter +Copyright (C) 2016-2022, Antonio Sun The xmlfmt will format the XML string without rewriting the document -Options: - - -h, --help display help information - -f, --file *The xml file to read from (or stdin) - -p, --prefix each element begins on a new line and this prefix - -i, --indent[= ] indent string for nested elements +Built on 2022-02-06 +Version 1.1.1 + +$ xmlfmt +the required flag `-f, --file' was not specified + +Usage: + xmlfmt [OPTIONS] + +Application Options: + -f, --file= The xml file to read from (or "-" for stdin) [$XMLFMT_FILEI] + -p, --prefix= Each element begins on a new line and this prefix [$XMLFMT_PREFIX] + -i, --indent= Indent string for nested elements (default: ) [$XMLFMT_INDENT] + -n, --nested Nested tags in comments [$XMLFMT_NESTED] + -v, --verbose Verbose mode (Multiple -v options increase the verbosity) + -V, --version Show program version and exit + +Help Options: + -h, --help Show this help message + + +$ curl -sL https://pastebin.com/raw/z3euQ5PR | xmlfmt -f - + + + + a + + + + + Some org-or-other + Wouldnt you like to know + + + Pat + Califia + + + + + +$ curl -sL https://pastebin.com/raw/Zs0qy0qz | tee /tmp/xmlfmt.xml | xmlfmt -f - -n + + + Fred + + 23456 + + +$ XMLFMT_NESTED=true XMLFMT_PREFIX='|' xmlfmt -f /tmp/xmlfmt.xml + +| +| +| Fred +| +| 23456 +| ``` @@ -76,7 +143,7 @@ Options: ### The format -The Go XML Formatter is not called XML Beautifier because the result is not *exactly* as what people would expect -- some, but not all, closing tags stays on the same line, just as shown above. Having been looking at the result and thinking over it, I now think it is actually a better way to present it, as those closing tags on the same line are better stay that way in my opinion. I.e., +The Go XML Formatter is not called XML Beautifier because the result is not *exactly* as what people would expect -- most of the closing tags stays on the same line, just as shown above. Having been looking at the result and thinking over it, I now think it is actually a better way to present it, as those closing tags on the same line are better stay that way in my opinion. I.e., When it comes to very big XML strings, which is what I’m dealing every day, saving spaces by not allowing those closing tags taking extra lines is plus instead of negative to me. @@ -175,4 +242,4 @@ echo ']+?)(/?)>`) - // NL is the newline string used in XML output, define for DOS-convenient. - NL = "\r\n" + // NL is the newline string used in XML output. + NL = "\n" ) -// FormatXML will (purly) reformat the XML string in a readable way, without any rewriting/altering the structure -func FormatXML(xmls, prefix, indent string) string { - src := regexp.MustCompile(`(?s)>\s+<`).ReplaceAllString(xmls, "><") +func init() { + // define NL for Windows + if runtime.GOOS == "windows" { + NL = "\r\n" + } +} +// FormatXML will (purly) reformat the XML string in a readable way, without any rewriting/altering the structure. +// If your XML Comments have nested tags in them, or you're not 100% sure otherwise, pass `true` as the third parameter to this function. But don't turn it on blindly, as the code has become ten times more complicated because of it. 
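Because the new nestedTagsInComments parameter is variadic, existing calls of the form FormatXML(xml, prefix, indent) keep compiling unchanged; passing true as the extra argument enables the comment-escaping pass described above. A small hedged sketch (output omitted, as it depends on the input):

```go
package main

import (
	"fmt"

	"github.com/go-xmlfmt/xmlfmt"
)

func main() {
	doc := `<root><a>1</a><!-- a comment with a <nested/> tag --></root>`

	// Existing call sites keep working: the new parameter is variadic.
	fmt.Println(xmlfmt.FormatXML(doc, "", "  "))

	// Opt in only when comments may contain nested tags.
	fmt.Println(xmlfmt.FormatXML(doc, "", "  ", true))
}
```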
+func FormatXML(xmls, prefix, indent string, nestedTagsInComments ...bool) string { + nestedTagsInComment := false + if len(nestedTagsInComments) > 0 { + nestedTagsInComment = nestedTagsInComments[0] + } + reXmlComments := regexp.MustCompile(`(?s)()`) + src := regexp.MustCompile(`(?s)>\s+<`).ReplaceAllString(xmls, "><") + if nestedTagsInComment { + src = reXmlComments.ReplaceAllStringFunc(src, func(m string) string { + parts := reXmlComments.FindStringSubmatch(m) + p2 := regexp.MustCompile(`\r*\n`).ReplaceAllString(parts[2], " ") + return parts[1] + html.EscapeString(p2) + parts[3] + }) + } rf := replaceTag(prefix, indent) - return (prefix + reg.ReplaceAllStringFunc(src, rf)) + r := prefix + reg.ReplaceAllStringFunc(src, rf) + if nestedTagsInComment { + r = reXmlComments.ReplaceAllStringFunc(r, func(m string) string { + parts := reXmlComments.FindStringSubmatch(m) + return parts[1] + html.UnescapeString(parts[2]) + parts[3] + }) + } + + return r } // replaceTag returns a closure function to do 's/(?<=>)\s+(?=<)//g; s(<(/?)([^>]+?)(/?)>)($indent+=$3?0:$1?-1:1;"<$1$2$3>"."\n".(" "x$indent))ge' as in Perl // and deal with comments as well func replaceTag(prefix, indent string) func(string) string { indentLevel := 0 + lastEndElem := true return func(m string) string { // head elem if strings.HasPrefix(m, "") { + lastEndElem = true return NL + prefix + strings.Repeat(indent, indentLevel) + m } // comment elem @@ -45,12 +76,17 @@ func replaceTag(prefix, indent string) func(string) string { // end elem if strings.HasPrefix(m, " replacement @@ -57,7 +59,7 @@ such a fragment, gofmt preserves leading indentation as well as leading and trailing spaces, so that individual sections of a Go program can be formatted by piping them through gofmt. -Examples +# Examples To check files for unnecessary parentheses: @@ -71,7 +73,7 @@ To convert the package tree from explicit slice upper bounds to implicit ones: gofmt -r 'α[β:len(α)] -> α[β:]' -w $GOROOT/src -The simplify command +# The simplify command When invoked with -s gofmt will make the following source transformations where possible. diff --git a/vendor/github.com/golangci/gofmt/gofmt/gofmt.go b/vendor/github.com/golangci/gofmt/gofmt/gofmt.go index e7612afae0..be046f34cf 100644 --- a/vendor/github.com/golangci/gofmt/gofmt/gofmt.go +++ b/vendor/github.com/golangci/gofmt/gofmt/gofmt.go @@ -76,6 +76,11 @@ func initParserMode() { if *allErrors { parserMode |= parser.AllErrors } + // It's only -r that makes use of go/ast's object resolution, + // so avoid the unnecessary work if the flag isn't used. + if *rewriteRule == "" { + parserMode |= parser.SkipObjectResolution + } } func isGoFile(f fs.DirEntry) bool { @@ -286,12 +291,9 @@ func processFile(filename string, info fs.FileInfo, in io.Reader, r *reporter) e } } if *doDiff { - data, err := diffWithReplaceTempFile(src, res, filename) - if err != nil { - return fmt.Errorf("computing diff: %s", err) - } - fmt.Fprintf(r, "diff -u %s %s\n", filepath.ToSlash(filename+".orig"), filepath.ToSlash(filename)) - r.Write(data) + newName := filepath.ToSlash(filename) + oldName := newName + ".orig" + r.Write(diff.Diff(oldName, src, newName, res)) } } @@ -350,7 +352,12 @@ func readFile(filename string, info fs.FileInfo, in io.Reader) ([]byte, error) { // stop to avoid corrupting it.) 
src := make([]byte, size+1) n, err := io.ReadFull(in, src) - if err != nil && err != io.ErrUnexpectedEOF { + switch err { + case nil, io.EOF, io.ErrUnexpectedEOF: + // io.ReadFull returns io.EOF (for an empty file) or io.ErrUnexpectedEOF + // (for a non-empty file) if the file was changed unexpectedly. Continue + // with comparing file sizes in those cases. + default: return nil, err } if n < size { @@ -463,43 +470,6 @@ func fileWeight(path string, info fs.FileInfo) int64 { return info.Size() } -func diffWithReplaceTempFile(b1, b2 []byte, filename string) ([]byte, error) { - data, err := diff.Diff("gofmt", b1, b2) - if len(data) > 0 { - return replaceTempFilename(data, filename) - } - return data, err -} - -// replaceTempFilename replaces temporary filenames in diff with actual one. -// -// --- /tmp/gofmt316145376 2017-02-03 19:13:00.280468375 -0500 -// +++ /tmp/gofmt617882815 2017-02-03 19:13:00.280468375 -0500 -// ... -// -> -// --- path/to/file.go.orig 2017-02-03 19:13:00.280468375 -0500 -// +++ path/to/file.go 2017-02-03 19:13:00.280468375 -0500 -// ... -func replaceTempFilename(diff []byte, filename string) ([]byte, error) { - bs := bytes.SplitN(diff, []byte{'\n'}, 3) - if len(bs) < 3 { - return nil, fmt.Errorf("got unexpected diff for %s", filename) - } - // Preserve timestamps. - var t0, t1 []byte - if i := bytes.LastIndexByte(bs[0], '\t'); i != -1 { - t0 = bs[0][i:] - } - if i := bytes.LastIndexByte(bs[1], '\t'); i != -1 { - t1 = bs[1][i:] - } - // Always print filepath with slash separator. - f := filepath.ToSlash(filename) - bs[0] = []byte(fmt.Sprintf("--- %s%s", f+".orig", t0)) - bs[1] = []byte(fmt.Sprintf("+++ %s%s", f, t1)) - return bytes.Join(bs, []byte{'\n'}), nil -} - const chmodSupported = runtime.GOOS != "windows" // backupFile writes data to a new file named filename with permissions perm, diff --git a/vendor/github.com/golangci/gofmt/gofmt/golangci.go b/vendor/github.com/golangci/gofmt/gofmt/golangci.go index c9c3fe2ae5..a69611e1d3 100644 --- a/vendor/github.com/golangci/gofmt/gofmt/golangci.go +++ b/vendor/github.com/golangci/gofmt/gofmt/golangci.go @@ -8,8 +8,14 @@ import ( "go/printer" "go/token" "os" + "path/filepath" + "sync" + + "github.com/golangci/gofmt/gofmt/internal/diff" ) +var parserModeMu sync.RWMutex + type RewriteRule struct { Pattern string Replacement string @@ -31,7 +37,9 @@ func RunRewrite(filename string, needSimplify bool, rewriteRules []RewriteRule) fset := token.NewFileSet() + parserModeMu.Lock() initParserMode() + parserModeMu.Unlock() file, sourceAdj, indentAdj, err := parse(fset, filename, src, false) if err != nil { @@ -59,12 +67,10 @@ func RunRewrite(filename string, needSimplify bool, rewriteRules []RewriteRule) } // formatting has changed - data, err := diffWithReplaceTempFile(src, res, filename) - if err != nil { - return nil, fmt.Errorf("error computing diff: %s", err) - } + newName := filepath.ToSlash(filename) + oldName := newName + ".orig" - return data, nil + return diff.Diff(oldName, src, newName, res), nil } func rewriteFileContent(fset *token.FileSet, file *ast.File, rewriteRules []RewriteRule) (*ast.File, error) { diff --git a/vendor/github.com/golangci/gofmt/gofmt/internal.go b/vendor/github.com/golangci/gofmt/gofmt/internal.go index 1abbdd6989..31a825bf83 100644 --- a/vendor/github.com/golangci/gofmt/gofmt/internal.go +++ b/vendor/github.com/golangci/gofmt/gofmt/internal.go @@ -26,6 +26,13 @@ func parse(fset *token.FileSet, filename string, src []byte, fragmentOk bool) ( indentAdj int, err error, ) { + + // START - Change 
related to usgae inside golangci-lint + parserModeMu.Lock() + parserMode := parserMode + parserModeMu.Unlock() + // END - Change related to usgae inside golangci-lint + // Try as whole source file. file, err = parser.ParseFile(fset, filename, src, parserMode) // If there's no error, return. If the error is that the source file didn't begin with a diff --git a/vendor/github.com/golangci/gofmt/gofmt/internal/diff/diff.go b/vendor/github.com/golangci/gofmt/gofmt/internal/diff/diff.go index cbd0529ec6..47b2856714 100644 --- a/vendor/github.com/golangci/gofmt/gofmt/internal/diff/diff.go +++ b/vendor/github.com/golangci/gofmt/gofmt/internal/diff/diff.go @@ -1,79 +1,261 @@ -// Copyright 2019 The Go Authors. All rights reserved. +// Copyright 2022 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package diff implements a Diff function that compare two inputs -// using the 'diff' tool. package diff import ( "bytes" - "io/ioutil" - "os" - "runtime" - - exec "github.com/golangci/gofmt/gofmt/internal/execabs" + "fmt" + "sort" + "strings" ) -// Returns diff of two arrays of bytes in diff tool format. -func Diff(prefix string, b1, b2 []byte) ([]byte, error) { - f1, err := writeTempFile(prefix, b1) - if err != nil { - return nil, err +// A pair is a pair of values tracked for both the x and y side of a diff. +// It is typically a pair of line indexes. +type pair struct{ x, y int } + +// Diff returns an anchored diff of the two texts old and new +// in the “unified diff” format. If old and new are identical, +// Diff returns a nil slice (no output). +// +// Unix diff implementations typically look for a diff with +// the smallest number of lines inserted and removed, +// which can in the worst case take time quadratic in the +// number of lines in the texts. As a result, many implementations +// either can be made to run for a long time or cut off the search +// after a predetermined amount of work. +// +// In contrast, this implementation looks for a diff with the +// smallest number of “unique” lines inserted and removed, +// where unique means a line that appears just once in both old and new. +// We call this an “anchored diff” because the unique lines anchor +// the chosen matching regions. An anchored diff is usually clearer +// than a standard diff, because the algorithm does not try to +// reuse unrelated blank lines or closing braces. +// The algorithm also guarantees to run in O(n log n) time +// instead of the standard O(n²) time. +// +// Some systems call this approach a “patience diff,” named for +// the “patience sorting” algorithm, itself named for a solitaire card game. +// We avoid that name for two reasons. First, the name has been used +// for a few different variants of the algorithm, so it is imprecise. +// Second, the name is frequently interpreted as meaning that you have +// to wait longer (to be patient) for the diff, meaning that it is a slower algorithm, +// when in fact the algorithm is faster than the standard one. +func Diff(oldName string, old []byte, newName string, new []byte) []byte { + if bytes.Equal(old, new) { + return nil } - defer os.Remove(f1) + x := lines(old) + y := lines(new) + + // Print diff header. 
+ var out bytes.Buffer + fmt.Fprintf(&out, "diff %s %s\n", oldName, newName) + fmt.Fprintf(&out, "--- %s\n", oldName) + fmt.Fprintf(&out, "+++ %s\n", newName) + + // Loop over matches to consider, + // expanding each match to include surrounding lines, + // and then printing diff chunks. + // To avoid setup/teardown cases outside the loop, + // tgs returns a leading {0,0} and trailing {len(x), len(y)} pair + // in the sequence of matches. + var ( + done pair // printed up to x[:done.x] and y[:done.y] + chunk pair // start lines of current chunk + count pair // number of lines from each side in current chunk + ctext []string // lines for current chunk + ) + for _, m := range tgs(x, y) { + if m.x < done.x { + // Already handled scanning forward from earlier match. + continue + } - f2, err := writeTempFile(prefix, b2) - if err != nil { - return nil, err + // Expand matching lines as far possible, + // establishing that x[start.x:end.x] == y[start.y:end.y]. + // Note that on the first (or last) iteration we may (or definitey do) + // have an empty match: start.x==end.x and start.y==end.y. + start := m + for start.x > done.x && start.y > done.y && x[start.x-1] == y[start.y-1] { + start.x-- + start.y-- + } + end := m + for end.x < len(x) && end.y < len(y) && x[end.x] == y[end.y] { + end.x++ + end.y++ + } + + // Emit the mismatched lines before start into this chunk. + // (No effect on first sentinel iteration, when start = {0,0}.) + for _, s := range x[done.x:start.x] { + ctext = append(ctext, "-"+s) + count.x++ + } + for _, s := range y[done.y:start.y] { + ctext = append(ctext, "+"+s) + count.y++ + } + + // If we're not at EOF and have too few common lines, + // the chunk includes all the common lines and continues. + const C = 3 // number of context lines + if (end.x < len(x) || end.y < len(y)) && + (end.x-start.x < C || (len(ctext) > 0 && end.x-start.x < 2*C)) { + for _, s := range x[start.x:end.x] { + ctext = append(ctext, " "+s) + count.x++ + count.y++ + } + done = end + continue + } + + // End chunk with common lines for context. + if len(ctext) > 0 { + n := end.x - start.x + if n > C { + n = C + } + for _, s := range x[start.x : start.x+n] { + ctext = append(ctext, " "+s) + count.x++ + count.y++ + } + done = pair{start.x + n, start.y + n} + + // Format and emit chunk. + // Convert line numbers to 1-indexed. + // Special case: empty file shows up as 0,0 not 1,0. + if count.x > 0 { + chunk.x++ + } + if count.y > 0 { + chunk.y++ + } + fmt.Fprintf(&out, "@@ -%d,%d +%d,%d @@\n", chunk.x, count.x, chunk.y, count.y) + for _, s := range ctext { + out.WriteString(s) + } + count.x = 0 + count.y = 0 + ctext = ctext[:0] + } + + // If we reached EOF, we're done. + if end.x >= len(x) && end.y >= len(y) { + break + } + + // Otherwise start a new chunk. + chunk = pair{end.x - C, end.y - C} + for _, s := range x[chunk.x:end.x] { + ctext = append(ctext, " "+s) + count.x++ + count.y++ + } + done = end } - defer os.Remove(f2) - cmd := "diff" - if runtime.GOOS == "plan9" { - cmd = "/bin/ape/diff" + return out.Bytes() +} + +// lines returns the lines in the file x, including newlines. +// If the file does not end in a newline, one is supplied +// along with a warning about the missing newline. +func lines(x []byte) []string { + l := strings.SplitAfter(string(x), "\n") + if l[len(l)-1] == "" { + l = l[:len(l)-1] + } else { + // Treat last line as having a message about the missing newline attached, + // using the same text as BSD/GNU diff (including the leading backslash). 
+ l[len(l)-1] += "\n\\ No newline at end of file\n" } + return l +} - data, err := exec.Command(cmd, "-u", f1, f2).CombinedOutput() - if len(data) > 0 { - // diff exits with a non-zero status when the files don't match. - // Ignore that failure as long as we get output. - err = nil +// tgs returns the pairs of indexes of the longest common subsequence +// of unique lines in x and y, where a unique line is one that appears +// once in x and once in y. +// +// The longest common subsequence algorithm is as described in +// Thomas G. Szymanski, “A Special Case of the Maximal Common +// Subsequence Problem,” Princeton TR #170 (January 1975), +// available at https://research.swtch.com/tgs170.pdf. +func tgs(x, y []string) []pair { + // Count the number of times each string appears in a and b. + // We only care about 0, 1, many, counted as 0, -1, -2 + // for the x side and 0, -4, -8 for the y side. + // Using negative numbers now lets us distinguish positive line numbers later. + m := make(map[string]int) + for _, s := range x { + if c := m[s]; c > -2 { + m[s] = c - 1 + } + } + for _, s := range y { + if c := m[s]; c > -8 { + m[s] = c - 4 + } } - // If we are on Windows and the diff is Cygwin diff, - // machines can get into a state where every Cygwin - // command works fine but prints a useless message like: + // Now unique strings can be identified by m[s] = -1+-4. // - // Cygwin WARNING: - // Couldn't compute FAST_CWD pointer. This typically occurs if you're using - // an older Cygwin version on a newer Windows. Please update to the latest - // available Cygwin version from https://cygwin.com/. If the problem persists, - // please see https://cygwin.com/problems.html - // - // Skip over that message and just return the actual diff. - if len(data) > 0 && !bytes.HasPrefix(data, []byte("--- ")) { - i := bytes.Index(data, []byte("\n--- ")) - if i >= 0 && i < 80*10 && bytes.Contains(data[:i], []byte("://cygwin.com/")) { - data = data[i+1:] + // Gather the indexes of those strings in x and y, building: + // xi[i] = increasing indexes of unique strings in x. + // yi[i] = increasing indexes of unique strings in y. + // inv[i] = index j such that x[xi[i]] = y[yi[j]]. + var xi, yi, inv []int + for i, s := range y { + if m[s] == -1+-4 { + m[s] = len(yi) + yi = append(yi, i) + } + } + for i, s := range x { + if j, ok := m[s]; ok && j >= 0 { + xi = append(xi, i) + inv = append(inv, j) } } - return data, err -} - -func writeTempFile(prefix string, data []byte) (string, error) { - file, err := ioutil.TempFile("", prefix) - if err != nil { - return "", err + // Apply Algorithm A from Szymanski's paper. + // In those terms, A = J = inv and B = [0, n). + // We add sentinel pairs {0,0}, and {len(x),len(y)} + // to the returned sequence, to help the processing loop. 
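To make the "unique lines" idea concrete, here is a hand-worked example of the anchors tgs would produce for two tiny inputs; the pairs are stated in comments rather than computed, since tgs is unexported, and they follow from the definition above:

```go
package main

import "fmt"

func main() {
	// Two inputs already split into lines, as lines() does above.
	x := []string{"p\n", "a\n", "q\n", "c\n"} // "p" and "q" occur only in x
	y := []string{"a\n", "r\n", "c\n"}        // "r" occurs only in y

	// "a" and "c" are the only lines appearing exactly once in both x and y,
	// so they become the anchors. With the leading and trailing sentinels,
	// tgs(x, y) would yield the pairs
	//
	//	{0,0} {1,0} {3,2} {4,3}
	//
	// and Diff then expands each anchor into a matching region, emitting
	// the -/+ and context lines for the gaps in between.
	fmt.Println(x, y)
}
```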
+ J := inv + n := len(xi) + T := make([]int, n) + L := make([]int, n) + for i := range T { + T[i] = n + 1 + } + for i := 0; i < n; i++ { + k := sort.Search(n, func(k int) bool { + return T[k] >= J[i] + }) + T[k] = J[i] + L[i] = k + 1 } - _, err = file.Write(data) - if err1 := file.Close(); err == nil { - err = err1 + k := 0 + for _, v := range L { + if k < v { + k = v + } } - if err != nil { - os.Remove(file.Name()) - return "", err + seq := make([]pair, 2+k) + seq[1+k] = pair{len(x), len(y)} // sentinel at end + lastj := n + for i := n - 1; i >= 0; i-- { + if L[i] == k && J[i] < lastj { + seq[k] = pair{xi[i], yi[J[i]]} + k-- + } } - return file.Name(), nil + seq[0] = pair{0, 0} // sentinel at start + return seq } diff --git a/vendor/github.com/golangci/gofmt/gofmt/internal/execabs/execabs.go b/vendor/github.com/golangci/gofmt/gofmt/internal/execabs/execabs.go deleted file mode 100644 index 9a05d971da..0000000000 --- a/vendor/github.com/golangci/gofmt/gofmt/internal/execabs/execabs.go +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package execabs is a drop-in replacement for os/exec -// that requires PATH lookups to find absolute paths. -// That is, execabs.Command("cmd") runs the same PATH lookup -// as exec.Command("cmd"), but if the result is a path -// which is relative, the Run and Start methods will report -// an error instead of running the executable. -package execabs - -import ( - "context" - "fmt" - "os/exec" - "path/filepath" - "reflect" - "unsafe" -) - -var ErrNotFound = exec.ErrNotFound - -type ( - Cmd = exec.Cmd - Error = exec.Error - ExitError = exec.ExitError -) - -func relError(file, path string) error { - return fmt.Errorf("%s resolves to executable relative to current directory (.%c%s)", file, filepath.Separator, path) -} - -func LookPath(file string) (string, error) { - path, err := exec.LookPath(file) - if err != nil { - return "", err - } - if filepath.Base(file) == file && !filepath.IsAbs(path) { - return "", relError(file, path) - } - return path, nil -} - -func fixCmd(name string, cmd *exec.Cmd) { - if filepath.Base(name) == name && !filepath.IsAbs(cmd.Path) { - // exec.Command was called with a bare binary name and - // exec.LookPath returned a path which is not absolute. - // Set cmd.lookPathErr and clear cmd.Path so that it - // cannot be run. - lookPathErr := (*error)(unsafe.Pointer(reflect.ValueOf(cmd).Elem().FieldByName("lookPathErr").Addr().Pointer())) - if *lookPathErr == nil { - *lookPathErr = relError(name, cmd.Path) - } - cmd.Path = "" - } -} - -func CommandContext(ctx context.Context, name string, arg ...string) *exec.Cmd { - cmd := exec.CommandContext(ctx, name, arg...) - fixCmd(name, cmd) - return cmd - -} - -func Command(name string, arg ...string) *exec.Cmd { - cmd := exec.Command(name, arg...) 
- fixCmd(name, cmd) - return cmd -} diff --git a/vendor/github.com/golangci/gofmt/gofmt/readme.md b/vendor/github.com/golangci/gofmt/gofmt/readme.md index 36a716d819..c2faaab82d 100644 --- a/vendor/github.com/golangci/gofmt/gofmt/readme.md +++ b/vendor/github.com/golangci/gofmt/gofmt/readme.md @@ -1,3 +1,5 @@ # Hard Fork of gofmt 2022-08-31: Sync with go1.18.5 +2023-10-04: Sync with go1.19.13 +2023-10-04: Sync with go1.20.8 diff --git a/vendor/github.com/golangci/gofmt/gofmt/simplify.go b/vendor/github.com/golangci/gofmt/gofmt/simplify.go index 2c75495a69..3b34d562ba 100644 --- a/vendor/github.com/golangci/gofmt/gofmt/simplify.go +++ b/vendor/github.com/golangci/gofmt/gofmt/simplify.go @@ -53,22 +53,26 @@ func (s simplifier) Visit(node ast.Node) ast.Visitor { // can be simplified to: s[a:] // if s is "simple enough" (for now we only accept identifiers) // - // Note: This may not be correct because len may have been redeclared in another - // file belonging to the same package. However, this is extremely unlikely - // and so far (April 2016, after years of supporting this rewrite feature) + // Note: This may not be correct because len may have been redeclared in + // the same package. However, this is extremely unlikely and so far + // (April 2022, after years of supporting this rewrite feature) // has never come up, so let's keep it working as is (see also #15153). + // + // Also note that this code used to use go/ast's object tracking, + // which was removed in exchange for go/parser.Mode.SkipObjectResolution. + // False positives are extremely unlikely as described above, + // and go/ast's object tracking is incomplete in any case. if n.Max != nil { // - 3-index slices always require the 2nd and 3rd index break } - if s, _ := n.X.(*ast.Ident); s != nil && s.Obj != nil { - // the array/slice object is a single, resolved identifier + if s, _ := n.X.(*ast.Ident); s != nil { + // the array/slice object is a single identifier if call, _ := n.High.(*ast.CallExpr); call != nil && len(call.Args) == 1 && !call.Ellipsis.IsValid() { // the high expression is a function call with a single argument - if fun, _ := call.Fun.(*ast.Ident); fun != nil && fun.Name == "len" && fun.Obj == nil { - // the function called is "len" and it is not locally defined; and - // because we don't have dot imports, it must be the predefined len() - if arg, _ := call.Args[0].(*ast.Ident); arg != nil && arg.Obj == s.Obj { + if fun, _ := call.Fun.(*ast.Ident); fun != nil && fun.Name == "len" { + // the function called is "len" + if arg, _ := call.Args[0].(*ast.Ident); arg != nil && arg.Name == s.Name { // the len argument is the array/slice object n.High = nil } diff --git a/vendor/github.com/golangci/gofmt/goimports/goimports.go b/vendor/github.com/golangci/gofmt/goimports/goimports.go index 1fa3328f8a..20d92e119c 100644 --- a/vendor/github.com/golangci/gofmt/goimports/goimports.go +++ b/vendor/github.com/golangci/gofmt/goimports/goimports.go @@ -14,7 +14,7 @@ import ( "runtime" ) -// Extracted from golang.org/x/tools@v0.1.12/cmd/goimports/goimports.go +// Extracted from golang.org/x/tools@v0.13.0/cmd/goimports/goimports.go func writeTempFile(dir, prefix string, data []byte) (string, error) { file, err := ioutil.TempFile(dir, prefix) diff --git a/vendor/github.com/golangci/gofmt/goimports/golangci.go b/vendor/github.com/golangci/gofmt/goimports/golangci.go index 7edc37937c..6ff286ae06 100644 --- a/vendor/github.com/golangci/gofmt/goimports/golangci.go +++ b/vendor/github.com/golangci/gofmt/goimports/golangci.go @@ -3,7 +3,7 
@@ package goimports import ( "bytes" "fmt" - "io/ioutil" + "os" "golang.org/x/tools/imports" ) @@ -11,7 +11,7 @@ import ( // Run runs goimports. // The local prefixes (comma separated) must be defined through the global variable imports.LocalPrefix. func Run(filename string) ([]byte, error) { - src, err := ioutil.ReadFile(filename) + src, err := os.ReadFile(filename) if err != nil { return nil, err } diff --git a/vendor/github.com/golangci/gofmt/goimports/readme.md b/vendor/github.com/golangci/gofmt/goimports/readme.md index 6c793eb7d1..e57ed550b1 100644 --- a/vendor/github.com/golangci/gofmt/goimports/readme.md +++ b/vendor/github.com/golangci/gofmt/goimports/readme.md @@ -1,3 +1,4 @@ # Hard Fork of goimports 2022-08-31: Sync with golang.org/x/tools v0.1.12 +2023-10-04: Sync with golang.org/x/tools v0.13.0 diff --git a/vendor/github.com/golangci/golangci-lint/cmd/golangci-lint/main.go b/vendor/github.com/golangci/golangci-lint/cmd/golangci-lint/main.go index 282d794b82..9d1daa81df 100644 --- a/vendor/github.com/golangci/golangci-lint/cmd/golangci-lint/main.go +++ b/vendor/github.com/golangci/golangci-lint/cmd/golangci-lint/main.go @@ -3,12 +3,15 @@ package main import ( "fmt" "os" + "runtime/debug" "github.com/golangci/golangci-lint/pkg/commands" "github.com/golangci/golangci-lint/pkg/exitcodes" ) var ( + goVersion = "unknown" + // Populated by goreleaser during build version = "master" commit = "?" @@ -16,7 +19,24 @@ var ( ) func main() { - e := commands.NewExecutor(version, commit, date) + if buildInfo, available := debug.ReadBuildInfo(); available { + goVersion = buildInfo.GoVersion + + if date == "" { + version = buildInfo.Main.Version + commit = fmt.Sprintf("(unknown, mod sum: %q)", buildInfo.Main.Sum) + date = "(unknown)" + } + } + + info := commands.BuildInfo{ + GoVersion: goVersion, + Version: version, + Commit: commit, + Date: date, + } + + e := commands.NewExecutor(info) if err := e.Execute(); err != nil { fmt.Fprintf(os.Stderr, "failed executing command with error %v\n", err) diff --git a/vendor/github.com/golangci/golangci-lint/cmd/golangci-lint/mod_version.go b/vendor/github.com/golangci/golangci-lint/cmd/golangci-lint/mod_version.go deleted file mode 100644 index 119a8a60db..0000000000 --- a/vendor/github.com/golangci/golangci-lint/cmd/golangci-lint/mod_version.go +++ /dev/null @@ -1,17 +0,0 @@ -package main - -import ( - "fmt" - "runtime/debug" -) - -//nolint:gochecknoinits -func init() { - if info, available := debug.ReadBuildInfo(); available { - if date == "" { - version = info.Main.Version - commit = fmt.Sprintf("(unknown, mod sum: %q)", info.Main.Sum) - date = "(unknown)" - } - } -} diff --git a/vendor/github.com/golangci/golangci-lint/internal/cache/cache.go b/vendor/github.com/golangci/golangci-lint/internal/cache/cache.go index 51c75a77d2..299fd52790 100644 --- a/vendor/github.com/golangci/golangci-lint/internal/cache/cache.go +++ b/vendor/github.com/golangci/golangci-lint/internal/cache/cache.go @@ -12,6 +12,7 @@ import ( "bytes" "crypto/sha256" "encoding/hex" + "errors" "fmt" "io" "os" @@ -20,8 +21,6 @@ import ( "strings" "time" - "github.com/pkg/errors" - "github.com/golangci/golangci-lint/internal/renameio" "github.com/golangci/golangci-lint/internal/robustio" ) @@ -51,14 +50,13 @@ type Cache struct { // to share a cache directory (for example, if the directory were stored // in a network file system). File locking is notoriously unreliable in // network file systems and may not suffice to protect the cache. 
-// func Open(dir string) (*Cache, error) { info, err := os.Stat(dir) if err != nil { return nil, err } if !info.IsDir() { - return nil, &os.PathError{Op: "open", Path: dir, Err: fmt.Errorf("not a directory")} + return nil, &os.PathError{Op: "open", Path: dir, Err: errors.New("not a directory")} } for i := 0; i < 256; i++ { name := filepath.Join(dir, fmt.Sprintf("%02x", i)) @@ -81,7 +79,7 @@ func (c *Cache) fileName(id [HashSize]byte, key string) string { var errMissing = errors.New("cache entry not found") func IsErrMissing(err error) bool { - return errors.Cause(err) == errMissing + return errors.Is(err, errMissing) } const ( @@ -159,7 +157,7 @@ func (c *Cache) get(id ActionID) (Entry, error) { defer f.Close() entry := make([]byte, entrySize+1) // +1 to detect whether f is too long if n, readErr := io.ReadFull(f, entry); n != entrySize || readErr != io.ErrUnexpectedEOF { - return failed(fmt.Errorf("read %d/%d bytes from %s with error %s", n, entrySize, fileName, readErr)) + return failed(fmt.Errorf("read %d/%d bytes from %s with error %w", n, entrySize, fileName, readErr)) } if entry[0] != 'v' || entry[1] != '1' || entry[2] != ' ' || entry[3+hexSize] != ' ' || entry[3+hexSize+1+hexSize] != ' ' || entry[3+hexSize+1+hexSize+1+20] != ' ' || entry[entrySize-1] != '\n' { return failed(fmt.Errorf("bad data in %s", fileName)) @@ -170,10 +168,10 @@ func (c *Cache) get(id ActionID) (Entry, error) { etime := entry[1 : 1+20] var buf [HashSize]byte if _, err = hex.Decode(buf[:], eid); err != nil || buf != id { - return failed(errors.Wrapf(err, "failed to hex decode eid data in %s", fileName)) + return failed(fmt.Errorf("failed to hex decode eid data in %s: %w", fileName, err)) } if _, err = hex.Decode(buf[:], eout); err != nil { - return failed(errors.Wrapf(err, "failed to hex decode eout data in %s", fileName)) + return failed(fmt.Errorf("failed to hex decode eout data in %s: %w", fileName, err)) } i := 0 for i < len(esize) && esize[i] == ' ' { @@ -181,7 +179,7 @@ func (c *Cache) get(id ActionID) (Entry, error) { } size, err := strconv.ParseInt(string(esize[i:]), 10, 64) if err != nil || size < 0 { - return failed(fmt.Errorf("failed to parse esize int from %s with error %s", fileName, err)) + return failed(fmt.Errorf("failed to parse esize int from %s with error %w", fileName, err)) } i = 0 for i < len(etime) && etime[i] == ' ' { @@ -189,11 +187,11 @@ func (c *Cache) get(id ActionID) (Entry, error) { } tm, err := strconv.ParseInt(string(etime[i:]), 10, 64) if err != nil || tm < 0 { - return failed(fmt.Errorf("failed to parse etime int from %s with error %s", fileName, err)) + return failed(fmt.Errorf("failed to parse etime int from %s with error %w", fileName, err)) } if err = c.used(fileName); err != nil { - return failed(errors.Wrapf(err, "failed to mark %s as used", fileName)) + return failed(fmt.Errorf("failed to mark %s as used: %w", fileName, err)) } return Entry{buf, size, time.Unix(0, tm)}, nil @@ -257,7 +255,7 @@ const ( // and to reduce the amount of disk activity caused by using // cache entries, used only updates the mtime if the current // mtime is more than an hour old. This heuristic eliminates -// nearly all of the mtime updates that would otherwise happen, +// nearly all the mtime updates that would otherwise happen, // while still keeping the mtimes useful for cache trimming. 
func (c *Cache) used(file string) error { info, err := os.Stat(file) @@ -265,7 +263,7 @@ func (c *Cache) used(file string) error { if os.IsNotExist(err) { return errMissing } - return errors.Wrapf(err, "failed to stat file %s", file) + return fmt.Errorf("failed to stat file %s: %w", file, err) } if c.now().Sub(info.ModTime()) < mtimeInterval { @@ -273,7 +271,7 @@ func (c *Cache) used(file string) error { } if err := os.Chtimes(file, c.now(), c.now()); err != nil { - return errors.Wrapf(err, "failed to change time of file %s", file) + return fmt.Errorf("failed to change time of file %s: %w", file, err) } return nil @@ -311,7 +309,7 @@ func (c *Cache) trimSubdir(subdir string, cutoff time.Time) { // Read all directory entries from subdir before removing // any files, in case removing files invalidates the file offset // in the directory scan. Also, ignore error from f.Readdirnames, - // because we don't care about reporting the error and we still + // because we don't care about reporting the error, and we still // want to process any entries found before the error. f, err := os.Open(subdir) if err != nil { @@ -370,7 +368,7 @@ func (c *Cache) putIndexEntry(id ActionID, out OutputID, size int64, allowVerify // Truncate the file only *after* writing it. // (This should be a no-op, but truncate just in case of previous corruption.) // - // This differs from ioutil.WriteFile, which truncates to 0 *before* writing + // This differs from os.WriteFile, which truncates to 0 *before* writing // via os.O_TRUNC. Truncating only after writing ensures that a second write // of the same content to the same file is idempotent, and does not — even // temporarily! — undo the effect of the first write. @@ -386,7 +384,7 @@ func (c *Cache) putIndexEntry(id ActionID, out OutputID, size int64, allowVerify return err } if err = os.Chtimes(file, c.now(), c.now()); err != nil { // mainly for tests - return errors.Wrapf(err, "failed to change time of file %s", file) + return fmt.Errorf("failed to change time of file %s: %w", file, err) } return nil @@ -444,7 +442,7 @@ func (c *Cache) copyFile(file io.ReadSeeker, out OutputID, size int64) error { if f, openErr := os.Open(name); openErr == nil { h := sha256.New() if _, copyErr := io.Copy(h, f); copyErr != nil { - return errors.Wrap(copyErr, "failed to copy to sha256") + return fmt.Errorf("failed to copy to sha256: %w", copyErr) } f.Close() @@ -498,13 +496,13 @@ func (c *Cache) copyFile(file io.ReadSeeker, out OutputID, size int64) error { return err } if n, wErr := h.Write(buf); n != len(buf) { - return fmt.Errorf("wrote to hash %d/%d bytes with error %s", n, len(buf), wErr) + return fmt.Errorf("wrote to hash %d/%d bytes with error %w", n, len(buf), wErr) } sum := h.Sum(nil) if !bytes.Equal(sum, out[:]) { _ = f.Truncate(0) - return fmt.Errorf("file content changed underfoot") + return errors.New("file content changed underfoot") } // Commit cache file entry. 
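The cache changes in this file replace github.com/pkg/errors with the standard library: errors are wrapped with fmt.Errorf and the %w verb, which is what allows the updated IsErrMissing to rely on errors.Is instead of errors.Cause. A standalone illustration of that pattern:

```go
package main

import (
	"errors"
	"fmt"
)

var errMissing = errors.New("cache entry not found")

func lookup(key string) error {
	// Wrapping with %w keeps errMissing in the error chain, just as the
	// cache code above now does instead of calling errors.Wrapf.
	return fmt.Errorf("failed to read entry %q: %w", key, errMissing)
}

func main() {
	err := lookup("abc123")

	// errors.Is walks the %w chain, so the wrapped sentinel is still detected.
	fmt.Println(errors.Is(err, errMissing)) // true
	fmt.Println(err)
}
```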
@@ -520,7 +518,7 @@ func (c *Cache) copyFile(file io.ReadSeeker, out OutputID, size int64) error { return err } if err = os.Chtimes(name, c.now(), c.now()); err != nil { // mainly for tests - return errors.Wrapf(err, "failed to change time of file %s", name) + return fmt.Errorf("failed to change time of file %s: %w", name, err) } return nil diff --git a/vendor/github.com/golangci/golangci-lint/internal/cache/default.go b/vendor/github.com/golangci/golangci-lint/internal/cache/default.go index e8866cb30c..399cc84cf0 100644 --- a/vendor/github.com/golangci/golangci-lint/internal/cache/default.go +++ b/vendor/github.com/golangci/golangci-lint/internal/cache/default.go @@ -6,13 +6,14 @@ package cache import ( "fmt" - "io/ioutil" "log" "os" "path/filepath" "sync" ) +const envGolangciLintCache = "GOLANGCI_LINT_CACHE" + // Default returns the default cache to use. func Default() (*Cache, error) { defaultOnce.Do(initDefaultCache) @@ -39,7 +40,7 @@ func initDefaultCache() { } if _, err := os.Stat(filepath.Join(dir, "README")); err != nil { // Best effort. - if wErr := ioutil.WriteFile(filepath.Join(dir, "README"), []byte(cacheREADME), 0666); wErr != nil { + if wErr := os.WriteFile(filepath.Join(dir, "README"), []byte(cacheREADME), 0666); wErr != nil { log.Fatalf("Failed to write README file to cache dir %s: %s", dir, err) } } @@ -65,19 +66,19 @@ func DefaultDir() string { // otherwise distinguish between an explicit "off" and a UserCacheDir error. defaultDirOnce.Do(func() { - defaultDir = os.Getenv("GOLANGCI_LINT_CACHE") + defaultDir = os.Getenv(envGolangciLintCache) if filepath.IsAbs(defaultDir) { return } if defaultDir != "" { - defaultDirErr = fmt.Errorf("GOLANGCI_LINT_CACHE is not an absolute path") + defaultDirErr = fmt.Errorf("%s is not an absolute path", envGolangciLintCache) return } // Compute default location. dir, err := os.UserCacheDir() if err != nil { - defaultDirErr = fmt.Errorf("GOLANGCI_LINT_CACHE is not defined and %v", err) + defaultDirErr = fmt.Errorf("%s is not defined and %w", envGolangciLintCache, err) return } defaultDir = filepath.Join(dir, "golangci-lint") diff --git a/vendor/github.com/golangci/golangci-lint/internal/cache/readme.md b/vendor/github.com/golangci/golangci-lint/internal/cache/readme.md new file mode 100644 index 0000000000..b469711edd --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/internal/cache/readme.md @@ -0,0 +1,18 @@ +# cache + +Extracted from go/src/cmd/go/internal/cache/ +I don't know what version of Go this package was pulled from. 
+ +Adapted for golangci-lint: +- https://github.com/golangci/golangci-lint/pull/699 +- https://github.com/golangci/golangci-lint/pull/779 +- https://github.com/golangci/golangci-lint/pull/788 +- https://github.com/golangci/golangci-lint/pull/808 +- https://github.com/golangci/golangci-lint/pull/1063 +- https://github.com/golangci/golangci-lint/pull/1070 +- https://github.com/golangci/golangci-lint/pull/1162 +- https://github.com/golangci/golangci-lint/pull/2318 +- https://github.com/golangci/golangci-lint/pull/2352 +- https://github.com/golangci/golangci-lint/pull/3012 +- https://github.com/golangci/golangci-lint/pull/3096 +- https://github.com/golangci/golangci-lint/pull/3204 diff --git a/vendor/github.com/golangci/golangci-lint/internal/errorutil/errors.go b/vendor/github.com/golangci/golangci-lint/internal/errorutil/errors.go index 5cb86d6698..c8a3a0357e 100644 --- a/vendor/github.com/golangci/golangci-lint/internal/errorutil/errors.go +++ b/vendor/github.com/golangci/golangci-lint/internal/errorutil/errors.go @@ -6,11 +6,11 @@ import ( // PanicError can be used to not print stacktrace twice type PanicError struct { - recovered interface{} + recovered any stack []byte } -func NewPanicError(recovered interface{}, stack []byte) *PanicError { +func NewPanicError(recovered any, stack []byte) *PanicError { return &PanicError{recovered: recovered, stack: stack} } diff --git a/vendor/github.com/golangci/golangci-lint/internal/pkgcache/pkgcache.go b/vendor/github.com/golangci/golangci-lint/internal/pkgcache/pkgcache.go index 86007d0427..3b3422eb7a 100644 --- a/vendor/github.com/golangci/golangci-lint/internal/pkgcache/pkgcache.go +++ b/vendor/github.com/golangci/golangci-lint/internal/pkgcache/pkgcache.go @@ -4,12 +4,12 @@ import ( "bytes" "encoding/gob" "encoding/hex" + "errors" "fmt" "runtime" "sort" "sync" - "github.com/pkg/errors" "golang.org/x/tools/go/packages" "github.com/golangci/golangci-lint/internal/cache" @@ -26,7 +26,7 @@ const ( ) // Cache is a per-package data cache. A cached data is invalidated when -// package or it's dependencies change. +// package, or it's dependencies change. 
type Cache struct { lowLevelCache *cache.Cache pkgHashes sync.Map @@ -54,14 +54,14 @@ func (c *Cache) Trim() { }) } -func (c *Cache) Put(pkg *packages.Package, mode HashMode, key string, data interface{}) error { +func (c *Cache) Put(pkg *packages.Package, mode HashMode, key string, data any) error { var err error buf := &bytes.Buffer{} c.sw.TrackStage("gob", func() { err = gob.NewEncoder(buf).Encode(data) }) if err != nil { - return errors.Wrap(err, "failed to gob encode") + return fmt.Errorf("failed to gob encode: %w", err) } var aID cache.ActionID @@ -71,13 +71,13 @@ func (c *Cache) Put(pkg *packages.Package, mode HashMode, key string, data inter if err == nil { subkey, subkeyErr := cache.Subkey(aID, key) if subkeyErr != nil { - err = errors.Wrap(subkeyErr, "failed to build subkey") + err = fmt.Errorf("failed to build subkey: %w", subkeyErr) } aID = subkey } }) if err != nil { - return errors.Wrapf(err, "failed to calculate package %s action id", pkg.Name) + return fmt.Errorf("failed to calculate package %s action id: %w", pkg.Name, err) } c.ioSem <- struct{}{} c.sw.TrackStage("cache io", func() { @@ -85,7 +85,7 @@ func (c *Cache) Put(pkg *packages.Package, mode HashMode, key string, data inter }) <-c.ioSem if err != nil { - return errors.Wrapf(err, "failed to save data to low-level cache by key %s for package %s", key, pkg.Name) + return fmt.Errorf("failed to save data to low-level cache by key %s for package %s: %w", key, pkg.Name, err) } return nil @@ -93,7 +93,7 @@ func (c *Cache) Put(pkg *packages.Package, mode HashMode, key string, data inter var ErrMissing = errors.New("missing data") -func (c *Cache) Get(pkg *packages.Package, mode HashMode, key string, data interface{}) error { +func (c *Cache) Get(pkg *packages.Package, mode HashMode, key string, data any) error { var aID cache.ActionID var err error c.sw.TrackStage("key build", func() { @@ -101,13 +101,13 @@ func (c *Cache) Get(pkg *packages.Package, mode HashMode, key string, data inter if err == nil { subkey, subkeyErr := cache.Subkey(aID, key) if subkeyErr != nil { - err = errors.Wrap(subkeyErr, "failed to build subkey") + err = fmt.Errorf("failed to build subkey: %w", subkeyErr) } aID = subkey } }) if err != nil { - return errors.Wrapf(err, "failed to calculate package %s action id", pkg.Name) + return fmt.Errorf("failed to calculate package %s action id: %w", pkg.Name, err) } var b []byte @@ -120,14 +120,14 @@ func (c *Cache) Get(pkg *packages.Package, mode HashMode, key string, data inter if cache.IsErrMissing(err) { return ErrMissing } - return errors.Wrapf(err, "failed to get data from low-level cache by key %s for package %s", key, pkg.Name) + return fmt.Errorf("failed to get data from low-level cache by key %s for package %s: %w", key, pkg.Name, err) } c.sw.TrackStage("gob", func() { err = gob.NewDecoder(bytes.NewReader(b)).Decode(data) }) if err != nil { - return errors.Wrap(err, "failed to gob decode") + return fmt.Errorf("failed to gob decode: %w", err) } return nil @@ -136,12 +136,12 @@ func (c *Cache) Get(pkg *packages.Package, mode HashMode, key string, data inter func (c *Cache) pkgActionID(pkg *packages.Package, mode HashMode) (cache.ActionID, error) { hash, err := c.packageHash(pkg, mode) if err != nil { - return cache.ActionID{}, errors.Wrap(err, "failed to get package hash") + return cache.ActionID{}, fmt.Errorf("failed to get package hash: %w", err) } key, err := cache.NewHash("action ID") if err != nil { - return cache.ActionID{}, errors.Wrap(err, "failed to make a hash") + return cache.ActionID{}, 
fmt.Errorf("failed to make a hash: %w", err) } fmt.Fprintf(key, "pkgpath %s\n", pkg.PkgPath) fmt.Fprintf(key, "pkghash %s\n", hash) @@ -167,7 +167,7 @@ func (c *Cache) packageHash(pkg *packages.Package, mode HashMode) (string, error key, err := cache.NewHash("package hash") if err != nil { - return "", errors.Wrap(err, "failed to make a hash") + return "", fmt.Errorf("failed to make a hash: %w", err) } fmt.Fprintf(key, "pkgpath %s\n", pkg.PkgPath) @@ -176,7 +176,7 @@ func (c *Cache) packageHash(pkg *packages.Package, mode HashMode) (string, error h, fErr := cache.FileHash(f) <-c.ioSem if fErr != nil { - return "", errors.Wrapf(fErr, "failed to calculate file %s hash", f) + return "", fmt.Errorf("failed to calculate file %s hash: %w", f, fErr) } fmt.Fprintf(key, "file %s %x\n", f, h) } @@ -199,7 +199,7 @@ func (c *Cache) packageHash(pkg *packages.Package, mode HashMode) (string, error depHash, depErr := c.packageHash(dep, depMode) if depErr != nil { - return errors.Wrapf(depErr, "failed to calculate hash for dependency %s with mode %d", dep.Name, depMode) + return fmt.Errorf("failed to calculate hash for dependency %s with mode %d: %w", dep.Name, depMode, depErr) } fmt.Fprintf(key, "import %s %s\n", dep.PkgPath, depHash) diff --git a/vendor/github.com/golangci/golangci-lint/internal/renameio/readme.md b/vendor/github.com/golangci/golangci-lint/internal/renameio/readme.md new file mode 100644 index 0000000000..36ec6ed499 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/internal/renameio/readme.md @@ -0,0 +1,10 @@ +# renameio + +Extracted from go/src/cmd/go/internal/renameio/ +I don't know what version of Go this package was pulled from. + +Adapted for golangci-lint: +- https://github.com/golangci/golangci-lint/pull/699 +- https://github.com/golangci/golangci-lint/pull/808 +- https://github.com/golangci/golangci-lint/pull/1063 +- https://github.com/golangci/golangci-lint/pull/3204 diff --git a/vendor/github.com/golangci/golangci-lint/internal/renameio/renameio.go b/vendor/github.com/golangci/golangci-lint/internal/renameio/renameio.go index fa9d93bf7a..2f88f4f7cc 100644 --- a/vendor/github.com/golangci/golangci-lint/internal/renameio/renameio.go +++ b/vendor/github.com/golangci/golangci-lint/internal/renameio/renameio.go @@ -24,7 +24,7 @@ func Pattern(filename string) string { return filepath.Join(filepath.Dir(filename), filepath.Base(filename)+patternSuffix) } -// WriteFile is like ioutil.WriteFile, but first writes data to an arbitrary +// WriteFile is like os.WriteFile, but first writes data to an arbitrary // file in the same directory as filename, then renames it atomically to the // final name. // @@ -79,7 +79,7 @@ func tempFile(dir, prefix string, perm os.FileMode) (f *os.File, err error) { return } -// ReadFile is like ioutil.ReadFile, but on Windows retries spurious errors that +// ReadFile is like os.ReadFile, but on Windows retries spurious errors that // may occur if the file is concurrently replaced. 
// // Errors are classified heuristically and retries are bounded, so even this diff --git a/vendor/github.com/golangci/golangci-lint/internal/robustio/readme.md b/vendor/github.com/golangci/golangci-lint/internal/robustio/readme.md new file mode 100644 index 0000000000..7c7ba0483a --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/internal/robustio/readme.md @@ -0,0 +1,6 @@ +# robustio + +Extracted from go1.19.1/src/cmd/go/internal/robustio + +There is only one modification: +- ERROR_SHARING_VIOLATION extracted from go1.19.1/src/internal/syscall/windows/syscall_windows.go to remove the dependencies to `internal/syscall/windows` diff --git a/vendor/github.com/golangci/golangci-lint/internal/robustio/robustio.go b/vendor/github.com/golangci/golangci-lint/internal/robustio/robustio.go index 76e47ad1ff..15b33773cf 100644 --- a/vendor/github.com/golangci/golangci-lint/internal/robustio/robustio.go +++ b/vendor/github.com/golangci/golangci-lint/internal/robustio/robustio.go @@ -22,7 +22,7 @@ func Rename(oldpath, newpath string) error { return rename(oldpath, newpath) } -// ReadFile is like ioutil.ReadFile, but on Windows retries errors that may +// ReadFile is like os.ReadFile, but on Windows retries errors that may // occur if the file is concurrently replaced. // // (See golang.org/issue/31247 and golang.org/issue/32188.) @@ -42,9 +42,9 @@ func RemoveAll(path string) error { // in this package attempt to mitigate. // // Errors considered ephemeral include: -// - syscall.ERROR_ACCESS_DENIED -// - syscall.ERROR_FILE_NOT_FOUND -// - internal/syscall/windows.ERROR_SHARING_VIOLATION +// - syscall.ERROR_ACCESS_DENIED +// - syscall.ERROR_FILE_NOT_FOUND +// - internal/syscall/windows.ERROR_SHARING_VIOLATION // // This set may be expanded in the future; programs must not rely on the // non-ephemerality of any given error. diff --git a/vendor/github.com/golangci/golangci-lint/internal/robustio/robustio_darwin.go b/vendor/github.com/golangci/golangci-lint/internal/robustio/robustio_darwin.go index 1ac0d10d7f..99fd8ebc2f 100644 --- a/vendor/github.com/golangci/golangci-lint/internal/robustio/robustio_darwin.go +++ b/vendor/github.com/golangci/golangci-lint/internal/robustio/robustio_darwin.go @@ -5,7 +5,7 @@ package robustio import ( - "os" + "errors" "syscall" ) @@ -13,16 +13,8 @@ const errFileNotFound = syscall.ENOENT // isEphemeralError returns true if err may be resolved by waiting. func isEphemeralError(err error) bool { - switch werr := err.(type) { - case *os.PathError: - err = werr.Err - case *os.LinkError: - err = werr.Err - case *os.SyscallError: - err = werr.Err - - } - if errno, ok := err.(syscall.Errno); ok { + var errno syscall.Errno + if errors.As(err, &errno) { return errno == errFileNotFound } return false diff --git a/vendor/github.com/golangci/golangci-lint/internal/robustio/robustio_flaky.go b/vendor/github.com/golangci/golangci-lint/internal/robustio/robustio_flaky.go index e0bf5b9b3b..c56e36ca62 100644 --- a/vendor/github.com/golangci/golangci-lint/internal/robustio/robustio_flaky.go +++ b/vendor/github.com/golangci/golangci-lint/internal/robustio/robustio_flaky.go @@ -2,21 +2,19 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build windows darwin +//go:build windows || darwin package robustio import ( - "io/ioutil" + "errors" "math/rand" "os" "syscall" "time" ) -const arbitraryTimeout = 500 * time.Millisecond - -const ERROR_SHARING_VIOLATION = 32 +const arbitraryTimeout = 2000 * time.Millisecond // retry retries ephemeral errors from f up to an arbitrary timeout // to work around filesystem flakiness on Windows and Darwin. @@ -33,7 +31,8 @@ func retry(f func() (err error, mayRetry bool)) error { return err } - if errno, ok := err.(syscall.Errno); ok && (lowestErrno == 0 || errno < lowestErrno) { + var errno syscall.Errno + if errors.As(err, &errno) && (lowestErrno == 0 || errno < lowestErrno) { bestErr = err lowestErrno = errno } else if bestErr == nil { @@ -54,7 +53,7 @@ func retry(f func() (err error, mayRetry bool)) error { // rename is like os.Rename, but retries ephemeral errors. // -// On windows it wraps os.Rename, which (as of 2019-06-04) uses MoveFileEx with +// On Windows it wraps os.Rename, which (as of 2019-06-04) uses MoveFileEx with // MOVEFILE_REPLACE_EXISTING. // // Windows also provides a different system call, ReplaceFile, @@ -70,17 +69,16 @@ func rename(oldpath, newpath string) (err error) { }) } -// readFile is like ioutil.ReadFile, but retries ephemeral errors. +// readFile is like os.ReadFile, but retries ephemeral errors. func readFile(filename string) ([]byte, error) { var b []byte err := retry(func() (err error, mayRetry bool) { - b, err = ioutil.ReadFile(filename) + b, err = os.ReadFile(filename) // Unlike in rename, we do not retry errFileNotFound here: it can occur // as a spurious error, but the file may also genuinely not exist, so the // increase in robustness is probably not worth the extra latency. - - return err, isEphemeralError(err) && err != errFileNotFound + return err, isEphemeralError(err) && !errors.Is(err, errFileNotFound) }) return b, err } diff --git a/vendor/github.com/golangci/golangci-lint/internal/robustio/robustio_other.go b/vendor/github.com/golangci/golangci-lint/internal/robustio/robustio_other.go index a2428856f2..da9a46e4fa 100644 --- a/vendor/github.com/golangci/golangci-lint/internal/robustio/robustio_other.go +++ b/vendor/github.com/golangci/golangci-lint/internal/robustio/robustio_other.go @@ -2,12 +2,11 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//+build !windows,!darwin +//go:build !windows && !darwin package robustio import ( - "io/ioutil" "os" ) @@ -16,7 +15,7 @@ func rename(oldpath, newpath string) error { } func readFile(filename string) ([]byte, error) { - return ioutil.ReadFile(filename) + return os.ReadFile(filename) } func removeAll(path string) error { diff --git a/vendor/github.com/golangci/golangci-lint/internal/robustio/robustio_windows.go b/vendor/github.com/golangci/golangci-lint/internal/robustio/robustio_windows.go index a35237d44a..fe1728954c 100644 --- a/vendor/github.com/golangci/golangci-lint/internal/robustio/robustio_windows.go +++ b/vendor/github.com/golangci/golangci-lint/internal/robustio/robustio_windows.go @@ -5,23 +5,20 @@ package robustio import ( - "os" + "errors" "syscall" ) const errFileNotFound = syscall.ERROR_FILE_NOT_FOUND +// ERROR_SHARING_VIOLATION (ldez) extract from go1.19.1/src/internal/syscall/windows/syscall_windows.go. +// This is the only modification of this file. +const ERROR_SHARING_VIOLATION syscall.Errno = 32 + // isEphemeralError returns true if err may be resolved by waiting. 
func isEphemeralError(err error) bool { - switch werr := err.(type) { - case *os.PathError: - err = werr.Err - case *os.LinkError: - err = werr.Err - case *os.SyscallError: - err = werr.Err - } - if errno, ok := err.(syscall.Errno); ok { + var errno syscall.Errno + if errors.As(err, &errno) { switch errno { case syscall.ERROR_ACCESS_DENIED, syscall.ERROR_FILE_NOT_FOUND, diff --git a/vendor/github.com/golangci/golangci-lint/pkg/commands/cache.go b/vendor/github.com/golangci/golangci-lint/pkg/commands/cache.go index 359e2d63c7..6fdaebaede 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/commands/cache.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/commands/cache.go @@ -16,66 +16,54 @@ func (e *Executor) initCache() { cacheCmd := &cobra.Command{ Use: "cache", Short: "Cache control and information", - Run: func(cmd *cobra.Command, args []string) { - if len(args) != 0 { - e.log.Fatalf("Usage: golangci-lint cache") - } - if err := cmd.Help(); err != nil { - e.log.Fatalf("Can't run cache: %s", err) - } + Args: cobra.NoArgs, + RunE: func(cmd *cobra.Command, _ []string) error { + return cmd.Help() }, } e.rootCmd.AddCommand(cacheCmd) cacheCmd.AddCommand(&cobra.Command{ - Use: "clean", - Short: "Clean cache", - Run: e.executeCleanCache, + Use: "clean", + Short: "Clean cache", + Args: cobra.NoArgs, + ValidArgsFunction: cobra.NoFileCompletions, + RunE: e.executeCleanCache, }) cacheCmd.AddCommand(&cobra.Command{ - Use: "status", - Short: "Show cache status", - Run: e.executeCacheStatus, + Use: "status", + Short: "Show cache status", + Args: cobra.NoArgs, + ValidArgsFunction: cobra.NoFileCompletions, + Run: e.executeCacheStatus, }) // TODO: add trim command? } -func (e *Executor) executeCleanCache(_ *cobra.Command, args []string) { - if len(args) != 0 { - e.log.Fatalf("Usage: golangci-lint cache clean") - } - +func (e *Executor) executeCleanCache(_ *cobra.Command, _ []string) error { cacheDir := cache.DefaultDir() if err := os.RemoveAll(cacheDir); err != nil { - e.log.Fatalf("Failed to remove dir %s: %s", cacheDir, err) + return fmt.Errorf("failed to remove dir %s: %w", cacheDir, err) } - os.Exit(0) + return nil } -func (e *Executor) executeCacheStatus(_ *cobra.Command, args []string) { - if len(args) != 0 { - e.log.Fatalf("Usage: golangci-lint cache status") - } - +func (e *Executor) executeCacheStatus(_ *cobra.Command, _ []string) { cacheDir := cache.DefaultDir() fmt.Fprintf(logutils.StdOut, "Dir: %s\n", cacheDir) + cacheSizeBytes, err := dirSizeBytes(cacheDir) if err == nil { fmt.Fprintf(logutils.StdOut, "Size: %s\n", fsutils.PrettifyBytesCount(cacheSizeBytes)) } - - os.Exit(0) } func dirSizeBytes(path string) (int64, error) { var size int64 err := filepath.Walk(path, func(_ string, info os.FileInfo, err error) error { - if err != nil { - return err - } - if !info.IsDir() { + if err == nil && !info.IsDir() { size += info.Size() } return err diff --git a/vendor/github.com/golangci/golangci-lint/pkg/commands/completion.go b/vendor/github.com/golangci/golangci-lint/pkg/commands/completion.go deleted file mode 100644 index e2be6f2929..0000000000 --- a/vendor/github.com/golangci/golangci-lint/pkg/commands/completion.go +++ /dev/null @@ -1,85 +0,0 @@ -package commands - -import ( - "fmt" - "os" - - "github.com/pkg/errors" - "github.com/spf13/cobra" -) - -func (e *Executor) initCompletion() { - completionCmd := &cobra.Command{ - Use: "completion", - Short: "Output completion script", - } - e.rootCmd.AddCommand(completionCmd) - - bashCmd := &cobra.Command{ - Use: "bash", - Short: "Output bash 
completion script", - RunE: e.executeBashCompletion, - } - completionCmd.AddCommand(bashCmd) - - zshCmd := &cobra.Command{ - Use: "zsh", - Short: "Output zsh completion script", - RunE: e.executeZshCompletion, - } - completionCmd.AddCommand(zshCmd) - - fishCmd := &cobra.Command{ - Use: "fish", - Short: "Output fish completion script", - RunE: e.executeFishCompletion, - } - completionCmd.AddCommand(fishCmd) - - powerShell := &cobra.Command{ - Use: "powershell", - Short: "Output powershell completion script", - RunE: e.executePowerShellCompletion, - } - completionCmd.AddCommand(powerShell) -} - -func (e *Executor) executeBashCompletion(cmd *cobra.Command, args []string) error { - err := cmd.Root().GenBashCompletion(os.Stdout) - if err != nil { - return errors.Wrap(err, "unable to generate bash completions: %v") - } - - return nil -} - -func (e *Executor) executeZshCompletion(cmd *cobra.Command, args []string) error { - err := cmd.Root().GenZshCompletion(os.Stdout) - if err != nil { - return errors.Wrap(err, "unable to generate zsh completions: %v") - } - // Add extra compdef directive to support sourcing command directly. - // https://github.com/spf13/cobra/issues/881 - // https://github.com/spf13/cobra/pull/887 - fmt.Println("compdef _golangci-lint golangci-lint") - - return nil -} - -func (e *Executor) executeFishCompletion(cmd *cobra.Command, args []string) error { - err := cmd.Root().GenFishCompletion(os.Stdout, true) - if err != nil { - return errors.Wrap(err, "generate fish completion") - } - - return nil -} - -func (e *Executor) executePowerShellCompletion(cmd *cobra.Command, args []string) error { - err := cmd.Root().GenPowerShellCompletion(os.Stdout) - if err != nil { - return errors.Wrap(err, "generate powershell completion") - } - - return nil -} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/commands/config.go b/vendor/github.com/golangci/golangci-lint/pkg/commands/config.go index 4b63e2e523..45c4fcd77c 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/commands/config.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/commands/config.go @@ -5,8 +5,10 @@ import ( "os" "github.com/spf13/cobra" + "github.com/spf13/pflag" "github.com/spf13/viper" + "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/exitcodes" "github.com/golangci/golangci-lint/pkg/fsutils" ) @@ -14,27 +16,32 @@ import ( func (e *Executor) initConfig() { cmd := &cobra.Command{ Use: "config", - Short: "Config", - Run: func(cmd *cobra.Command, args []string) { - if len(args) != 0 { - e.log.Fatalf("Usage: golangci-lint config") - } - if err := cmd.Help(); err != nil { - e.log.Fatalf("Can't run help: %s", err) - } + Short: "Config file information", + Args: cobra.NoArgs, + RunE: func(cmd *cobra.Command, _ []string) error { + return cmd.Help() }, } e.rootCmd.AddCommand(cmd) pathCmd := &cobra.Command{ - Use: "path", - Short: "Print used config path", - Run: e.executePathCmd, + Use: "path", + Short: "Print used config path", + Args: cobra.NoArgs, + ValidArgsFunction: cobra.NoFileCompletions, + Run: e.executePathCmd, } - e.initRunConfiguration(pathCmd) // allow --config + + fs := pathCmd.Flags() + fs.SortFlags = false // sort them as they are defined here + + initConfigFileFlagSet(fs, &e.cfg.Run) + cmd.AddCommand(pathCmd) } +// getUsedConfig returns the resolved path to the golangci config file, or the empty string +// if no configuration could be found. 
func (e *Executor) getUsedConfig() string { usedConfigFile := viper.ConfigFileUsed() if usedConfigFile == "" { @@ -50,11 +57,7 @@ func (e *Executor) getUsedConfig() string { return prettyUsedConfigFile } -func (e *Executor) executePathCmd(_ *cobra.Command, args []string) { - if len(args) != 0 { - e.log.Fatalf("Usage: golangci-lint config path") - } - +func (e *Executor) executePathCmd(_ *cobra.Command, _ []string) { usedConfigFile := e.getUsedConfig() if usedConfigFile == "" { e.log.Warnf("No config file detected") @@ -62,5 +65,9 @@ func (e *Executor) executePathCmd(_ *cobra.Command, args []string) { } fmt.Println(usedConfigFile) - os.Exit(0) +} + +func initConfigFileFlagSet(fs *pflag.FlagSet, cfg *config.Run) { + fs.StringVarP(&cfg.Config, "config", "c", "", wh("Read config from file path `PATH`")) + fs.BoolVar(&cfg.NoConfig, "no-config", false, wh("Don't read config file")) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/commands/executor.go b/vendor/github.com/golangci/golangci-lint/pkg/commands/executor.go index a060709ef7..d241f5656a 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/commands/executor.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/commands/executor.go @@ -4,6 +4,8 @@ import ( "bytes" "context" "crypto/sha256" + "errors" + "fmt" "io" "os" "path/filepath" @@ -12,7 +14,6 @@ import ( "github.com/fatih/color" "github.com/gofrs/flock" - "github.com/pkg/errors" "github.com/spf13/cobra" "github.com/spf13/pflag" "gopkg.in/yaml.v3" @@ -30,15 +31,22 @@ import ( "github.com/golangci/golangci-lint/pkg/timeutils" ) +type BuildInfo struct { + GoVersion string `json:"goVersion"` + Version string `json:"version"` + Commit string `json:"commit"` + Date string `json:"date"` +} + type Executor struct { rootCmd *cobra.Command runCmd *cobra.Command lintersCmd *cobra.Command - exitCode int - version, commit, date string + exitCode int + buildInfo BuildInfo - cfg *config.Config + cfg *config.Config // cfg is the unmarshaled data from the golangci config file. log logutils.Log reportData report.Data DBManager *lintersdb.Manager @@ -55,24 +63,23 @@ type Executor struct { flock *flock.Flock } -func NewExecutor(version, commit, date string) *Executor { +// NewExecutor creates and initializes a new command executor. +func NewExecutor(buildInfo BuildInfo) *Executor { startedAt := time.Now() e := &Executor{ cfg: config.NewDefault(), - version: version, - commit: commit, - date: date, + buildInfo: buildInfo, DBManager: lintersdb.NewManager(nil, nil), - debugf: logutils.Debug("exec"), + debugf: logutils.Debug(logutils.DebugKeyExec), } e.debugf("Starting execution...") - e.log = report.NewLogWrapper(logutils.NewStderrLog(""), &e.reportData) + e.log = report.NewLogWrapper(logutils.NewStderrLog(logutils.DebugKeyEmpty), &e.reportData) // to setup log level early we need to parse config from command line extra time to // find `-v` option commandLineCfg, err := e.getConfigForCommandLine() - if err != nil && err != pflag.ErrHelp { + if err != nil && !errors.Is(err, pflag.ErrHelp) { e.log.Fatalf("Can't get config for command line: %s", err) } if commandLineCfg != nil { @@ -97,7 +104,6 @@ func NewExecutor(version, commit, date string) *Executor { e.initHelp() e.initLinters() e.initConfig() - e.initCompletion() e.initVersion() e.initCache() @@ -105,38 +111,37 @@ func NewExecutor(version, commit, date string) *Executor { // like the default ones. It will overwrite them only if the same option // is found in command-line: it's ok, command-line has higher priority. 
- r := config.NewFileReader(e.cfg, commandLineCfg, e.log.Child("config_reader")) + r := config.NewFileReader(e.cfg, commandLineCfg, e.log.Child(logutils.DebugKeyConfigReader)) if err = r.Read(); err != nil { e.log.Fatalf("Can't read config: %s", err) } - // recreate after getting config - e.DBManager = lintersdb.NewManager(e.cfg, e.log).WithCustomLinters() - - e.cfg.LintersSettings.Gocritic.InferEnabledChecks(e.log) - if err = e.cfg.LintersSettings.Gocritic.Validate(e.log); err != nil { - e.log.Fatalf("Invalid gocritic settings: %s", err) + if (commandLineCfg == nil || commandLineCfg.Run.Go == "") && e.cfg != nil && e.cfg.Run.Go == "" { + e.cfg.Run.Go = config.DetectGoVersion() } + // recreate after getting config + e.DBManager = lintersdb.NewManager(e.cfg, e.log) + // Slice options must be explicitly set for proper merging of config and command-line options. fixSlicesFlags(e.runCmd.Flags()) fixSlicesFlags(e.lintersCmd.Flags()) e.EnabledLintersSet = lintersdb.NewEnabledSet(e.DBManager, - lintersdb.NewValidator(e.DBManager), e.log.Child("lintersdb"), e.cfg) - e.goenv = goutil.NewEnv(e.log.Child("goenv")) + lintersdb.NewValidator(e.DBManager), e.log.Child(logutils.DebugKeyLintersDB), e.cfg) + e.goenv = goutil.NewEnv(e.log.Child(logutils.DebugKeyGoEnv)) e.fileCache = fsutils.NewFileCache() e.lineCache = fsutils.NewLineCache(e.fileCache) - e.sw = timeutils.NewStopwatch("pkgcache", e.log.Child("stopwatch")) - e.pkgCache, err = pkgcache.NewCache(e.sw, e.log.Child("pkgcache")) + e.sw = timeutils.NewStopwatch("pkgcache", e.log.Child(logutils.DebugKeyStopwatch)) + e.pkgCache, err = pkgcache.NewCache(e.sw, e.log.Child(logutils.DebugKeyPkgCache)) if err != nil { e.log.Fatalf("Failed to build packages cache: %s", err) } e.loadGuard = load.NewGuard() - e.contextLoader = lint.NewContextLoader(e.cfg, e.log.Child("loader"), e.goenv, + e.contextLoader = lint.NewContextLoader(e.cfg, e.log.Child(logutils.DebugKeyLoader), e.goenv, e.lineCache, e.fileCache, e.pkgCache, e.loadGuard) - if err = e.initHashSalt(version); err != nil { + if err = e.initHashSalt(buildInfo.Version); err != nil { e.log.Fatalf("Failed to init hash salt: %s", err) } e.debugf("Initialized executor in %s", time.Since(startedAt)) @@ -150,16 +155,15 @@ func (e *Executor) Execute() error { func (e *Executor) initHashSalt(version string) error { binSalt, err := computeBinarySalt(version) if err != nil { - return errors.Wrap(err, "failed to calculate binary salt") + return fmt.Errorf("failed to calculate binary salt: %w", err) } configSalt, err := computeConfigSalt(e.cfg) if err != nil { - return errors.Wrap(err, "failed to calculate config salt") + return fmt.Errorf("failed to calculate config salt: %w", err) } - var b bytes.Buffer - b.Write(binSalt) + b := bytes.NewBuffer(binSalt) b.Write(configSalt) cache.SetSalt(b.Bytes()) return nil @@ -170,7 +174,7 @@ func computeBinarySalt(version string) ([]byte, error) { return []byte(version), nil } - if logutils.HaveDebugTag("bin_salt") { + if logutils.HaveDebugTag(logutils.DebugKeyBinSalt) { return []byte("debug"), nil } @@ -196,11 +200,10 @@ func computeConfigSalt(cfg *config.Config) ([]byte, error) { lintersSettingsBytes, err := yaml.Marshal(cfg.LintersSettings) if err != nil { - return nil, errors.Wrap(err, "failed to json marshal config linter settings") + return nil, fmt.Errorf("failed to json marshal config linter settings: %w", err) } - var configData bytes.Buffer - configData.WriteString("linters-settings=") + configData := bytes.NewBufferString("linters-settings=") 
configData.Write(lintersSettingsBytes) configData.WriteString("\nbuild-tags=%s" + strings.Join(cfg.Run.BuildTags, ",")) diff --git a/vendor/github.com/golangci/golangci-lint/pkg/commands/help.go b/vendor/github.com/golangci/golangci-lint/pkg/commands/help.go index ef276481c7..a06d508f27 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/commands/help.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/commands/help.go @@ -2,7 +2,6 @@ package commands import ( "fmt" - "os" "sort" "strings" @@ -17,28 +16,26 @@ func (e *Executor) initHelp() { helpCmd := &cobra.Command{ Use: "help", Short: "Help", - Run: func(cmd *cobra.Command, args []string) { - if len(args) != 0 { - e.log.Fatalf("Usage: golangci-lint help") - } - if err := cmd.Help(); err != nil { - e.log.Fatalf("Can't run help: %s", err) - } + Args: cobra.NoArgs, + RunE: func(cmd *cobra.Command, _ []string) error { + return cmd.Help() }, } e.rootCmd.SetHelpCommand(helpCmd) lintersHelpCmd := &cobra.Command{ - Use: "linters", - Short: "Help about linters", - Run: e.executeLintersHelp, + Use: "linters", + Short: "Help about linters", + Args: cobra.NoArgs, + ValidArgsFunction: cobra.NoFileCompletions, + Run: e.executeLintersHelp, } helpCmd.AddCommand(lintersHelpCmd) } func printLinterConfigs(lcs []*linter.Config) { sort.Slice(lcs, func(i, j int) bool { - return strings.Compare(lcs[i].Name(), lcs[j].Name()) < 0 + return lcs[i].Name() < lcs[j].Name() }) for _, lc := range lcs { altNamesStr := "" @@ -53,18 +50,23 @@ func printLinterConfigs(lcs []*linter.Config) { linterDescription = linterDescription[:firstNewline] } - fmt.Fprintf(logutils.StdOut, "%s%s: %s [fast: %t, auto-fix: %t]\n", color.YellowString(lc.Name()), - altNamesStr, linterDescription, !lc.IsSlowLinter(), lc.CanAutoFix) - } -} + deprecatedMark := "" + if lc.IsDeprecated() { + deprecatedMark = " [" + color.RedString("deprecated") + "]" + } -func (e *Executor) executeLintersHelp(_ *cobra.Command, args []string) { - if len(args) != 0 { - e.log.Fatalf("Usage: golangci-lint help linters") + fmt.Fprintf(logutils.StdOut, "%s%s%s: %s [fast: %t, auto-fix: %t]\n", color.YellowString(lc.Name()), + altNamesStr, deprecatedMark, linterDescription, !lc.IsSlowLinter(), lc.CanAutoFix) } +} +func (e *Executor) executeLintersHelp(_ *cobra.Command, _ []string) { var enabledLCs, disabledLCs []*linter.Config for _, lc := range e.DBManager.GetAllSupportedLinterConfigs() { + if lc.Internal { + continue + } + if lc.EnabledByDefault { enabledLCs = append(enabledLCs, lc) } else { @@ -80,13 +82,15 @@ func (e *Executor) executeLintersHelp(_ *cobra.Command, args []string) { color.Green("\nLinters presets:") for _, p := range e.DBManager.AllPresets() { linters := e.DBManager.GetAllLinterConfigsForPreset(p) - linterNames := []string{} + var linterNames []string for _, lc := range linters { + if lc.Internal { + continue + } + linterNames = append(linterNames, lc.Name()) } sort.Strings(linterNames) fmt.Fprintf(logutils.StdOut, "%s: %s\n", color.YellowString(p), strings.Join(linterNames, ", ")) } - - os.Exit(0) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/commands/linters.go b/vendor/github.com/golangci/golangci-lint/pkg/commands/linters.go index 873dab8177..69df211542 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/commands/linters.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/commands/linters.go @@ -1,51 +1,72 @@ package commands import ( - "log" - "os" + "fmt" + "strings" "github.com/fatih/color" "github.com/spf13/cobra" + "github.com/spf13/pflag" + 
"github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/lint/linter" ) func (e *Executor) initLinters() { e.lintersCmd = &cobra.Command{ - Use: "linters", - Short: "List current linters configuration", - Run: e.executeLinters, + Use: "linters", + Short: "List current linters configuration", + Args: cobra.NoArgs, + ValidArgsFunction: cobra.NoFileCompletions, + RunE: e.executeLinters, } + + fs := e.lintersCmd.Flags() + fs.SortFlags = false // sort them as they are defined here + + initConfigFileFlagSet(fs, &e.cfg.Run) + e.initLintersFlagSet(fs, &e.cfg.Linters) + e.rootCmd.AddCommand(e.lintersCmd) - e.initRunConfiguration(e.lintersCmd) } -func (e *Executor) executeLinters(_ *cobra.Command, args []string) { - if len(args) != 0 { - e.log.Fatalf("Usage: golangci-lint linters") - } +func (e *Executor) initLintersFlagSet(fs *pflag.FlagSet, cfg *config.Linters) { + fs.StringSliceVarP(&cfg.Disable, "disable", "D", nil, wh("Disable specific linter")) + fs.BoolVar(&cfg.DisableAll, "disable-all", false, wh("Disable all linters")) + fs.StringSliceVarP(&cfg.Enable, "enable", "E", nil, wh("Enable specific linter")) + fs.BoolVar(&cfg.EnableAll, "enable-all", false, wh("Enable all linters")) + fs.BoolVar(&cfg.Fast, "fast", false, wh("Enable only fast linters from enabled linters set (first run won't be fast)")) + fs.StringSliceVarP(&cfg.Presets, "presets", "p", nil, + wh(fmt.Sprintf("Enable presets (%s) of linters. Run 'golangci-lint help linters' to see "+ + "them. This option implies option --disable-all", strings.Join(e.DBManager.AllPresets(), "|")))) +} +// executeLinters runs the 'linters' CLI command, which displays the supported linters. +func (e *Executor) executeLinters(_ *cobra.Command, _ []string) error { enabledLintersMap, err := e.EnabledLintersSet.GetEnabledLintersMap() if err != nil { - log.Fatalf("Can't get enabled linters: %s", err) - } - - color.Green("Enabled by your configuration linters:\n") - enabledLinters := make([]*linter.Config, 0, len(enabledLintersMap)) - for _, linter := range enabledLintersMap { - enabledLinters = append(enabledLinters, linter) + return fmt.Errorf("can't get enabled linters: %w", err) } - printLinterConfigs(enabledLinters) + var enabledLinters []*linter.Config var disabledLCs []*linter.Config + for _, lc := range e.DBManager.GetAllSupportedLinterConfigs() { + if lc.Internal { + continue + } + if enabledLintersMap[lc.Name()] == nil { disabledLCs = append(disabledLCs, lc) + } else { + enabledLinters = append(enabledLinters, lc) } } + color.Green("Enabled by your configuration linters:\n") + printLinterConfigs(enabledLinters) color.Red("\nDisabled by your configuration linters:\n") printLinterConfigs(disabledLCs) - os.Exit(0) + return nil } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/commands/root.go b/vendor/github.com/golangci/golangci-lint/pkg/commands/root.go index f90df9901f..4425be10fd 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/commands/root.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/commands/root.go @@ -12,13 +12,20 @@ import ( "github.com/spf13/pflag" "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/exitcodes" "github.com/golangci/golangci-lint/pkg/logutils" ) -func (e *Executor) persistentPreRun(_ *cobra.Command, _ []string) { +const ( + // envHelpRun value: "1". 
+ envHelpRun = "HELP_RUN" + envMemProfileRate = "GL_MEM_PROFILE_RATE" +) + +func (e *Executor) persistentPreRun(_ *cobra.Command, _ []string) error { if e.cfg.Run.PrintVersion { - fmt.Fprintf(logutils.StdOut, "golangci-lint has version %s built from %s on %s\n", e.version, e.commit, e.date) - os.Exit(0) + _ = printVersion(logutils.StdOut, e.buildInfo) + os.Exit(exitcodes.Success) // a return nil is not enough to stop the process because we are inside the `preRun`. } runtime.GOMAXPROCS(e.cfg.Run.Concurrency) @@ -26,15 +33,15 @@ func (e *Executor) persistentPreRun(_ *cobra.Command, _ []string) { if e.cfg.Run.CPUProfilePath != "" { f, err := os.Create(e.cfg.Run.CPUProfilePath) if err != nil { - e.log.Fatalf("Can't create file %s: %s", e.cfg.Run.CPUProfilePath, err) + return fmt.Errorf("can't create file %s: %w", e.cfg.Run.CPUProfilePath, err) } if err := pprof.StartCPUProfile(f); err != nil { - e.log.Fatalf("Can't start CPU profiling: %s", err) + return fmt.Errorf("can't start CPU profiling: %w", err) } } if e.cfg.Run.MemProfilePath != "" { - if rate := os.Getenv("GL_MEMPROFILE_RATE"); rate != "" { + if rate := os.Getenv(envMemProfileRate); rate != "" { runtime.MemProfileRate, _ = strconv.Atoi(rate) } } @@ -42,22 +49,25 @@ func (e *Executor) persistentPreRun(_ *cobra.Command, _ []string) { if e.cfg.Run.TracePath != "" { f, err := os.Create(e.cfg.Run.TracePath) if err != nil { - e.log.Fatalf("Can't create file %s: %s", e.cfg.Run.TracePath, err) + return fmt.Errorf("can't create file %s: %w", e.cfg.Run.TracePath, err) } if err = trace.Start(f); err != nil { - e.log.Fatalf("Can't start tracing: %s", err) + return fmt.Errorf("can't start tracing: %w", err) } } + + return nil } -func (e *Executor) persistentPostRun(_ *cobra.Command, _ []string) { +func (e *Executor) persistentPostRun(_ *cobra.Command, _ []string) error { if e.cfg.Run.CPUProfilePath != "" { pprof.StopCPUProfile() } + if e.cfg.Run.MemProfilePath != "" { f, err := os.Create(e.cfg.Run.MemProfilePath) if err != nil { - e.log.Fatalf("Can't create file %s: %s", e.cfg.Run.MemProfilePath, err) + return fmt.Errorf("can't create file %s: %w", e.cfg.Run.MemProfilePath, err) } var ms runtime.MemStats @@ -65,15 +75,18 @@ func (e *Executor) persistentPostRun(_ *cobra.Command, _ []string) { printMemStats(&ms, e.log) if err := pprof.WriteHeapProfile(f); err != nil { - e.log.Fatalf("Can't write heap profile: %s", err) + return fmt.Errorf("cCan't write heap profile: %w", err) } - f.Close() + _ = f.Close() } + if e.cfg.Run.TracePath != "" { trace.Stop() } os.Exit(e.exitCode) + + return nil } func printMemStats(ms *runtime.MemStats, logger logutils.Log) { @@ -105,8 +118,8 @@ func formatMemory(memBytes uint64) string { } func getDefaultConcurrency() int { - if os.Getenv("HELP_RUN") == "1" { - // Make stable concurrency for README help generating builds. + if os.Getenv(envHelpRun) == "1" { + // Make stable concurrency for generating help documentation. const prettyConcurrency = 8 return prettyConcurrency } @@ -118,17 +131,13 @@ func (e *Executor) initRoot() { rootCmd := &cobra.Command{ Use: "golangci-lint", Short: "golangci-lint is a smart linters runner.", - Long: `Smart, fast linters runner. 
Run it in cloud for every GitHub pull request on https://golangci.com`, - Run: func(cmd *cobra.Command, args []string) { - if len(args) != 0 { - e.log.Fatalf("Usage: golangci-lint") - } - if err := cmd.Help(); err != nil { - e.log.Fatalf("Can't run help: %s", err) - } + Long: `Smart, fast linters runner.`, + Args: cobra.NoArgs, + RunE: func(cmd *cobra.Command, _ []string) error { + return cmd.Help() }, - PersistentPreRun: e.persistentPreRun, - PersistentPostRun: e.persistentPostRun, + PersistentPreRunE: e.persistentPreRun, + PersistentPostRunE: e.persistentPostRun, } initRootFlagSet(rootCmd.PersistentFlags(), e.cfg, e.needVersionOption()) @@ -136,14 +145,14 @@ func (e *Executor) initRoot() { } func (e *Executor) needVersionOption() bool { - return e.date != "" + return e.buildInfo.Date != "" } func initRootFlagSet(fs *pflag.FlagSet, cfg *config.Config, needVersionOption bool) { - fs.BoolVarP(&cfg.Run.IsVerbose, "verbose", "v", false, wh("verbose output")) + fs.BoolVarP(&cfg.Run.IsVerbose, "verbose", "v", false, wh("Verbose output")) var silent bool - fs.BoolVarP(&silent, "silent", "s", false, wh("disables congrats outputs")) + fs.BoolVarP(&silent, "silent", "s", false, wh("Disables congrats outputs")) if err := fs.MarkHidden("silent"); err != nil { panic(err) } @@ -156,7 +165,8 @@ func initRootFlagSet(fs *pflag.FlagSet, cfg *config.Config, needVersionOption bo fs.StringVar(&cfg.Run.CPUProfilePath, "cpu-profile-path", "", wh("Path to CPU profile output file")) fs.StringVar(&cfg.Run.MemProfilePath, "mem-profile-path", "", wh("Path to memory profile output file")) fs.StringVar(&cfg.Run.TracePath, "trace-path", "", wh("Path to trace output file")) - fs.IntVarP(&cfg.Run.Concurrency, "concurrency", "j", getDefaultConcurrency(), wh("Concurrency (default NumCPU)")) + fs.IntVarP(&cfg.Run.Concurrency, "concurrency", "j", getDefaultConcurrency(), + wh("Number of CPUs to use (Default: number of logical CPUs)")) if needVersionOption { fs.BoolVar(&cfg.Run.PrintVersion, "version", false, wh("Print version")) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/commands/run.go b/vendor/github.com/golangci/golangci-lint/pkg/commands/run.go index 271fffe94f..5c7083c307 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/commands/run.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/commands/run.go @@ -2,18 +2,20 @@ package commands import ( "context" + "errors" "fmt" - "io/ioutil" + "io" "log" "os" "runtime" + "sort" "strings" "time" "github.com/fatih/color" - "github.com/pkg/errors" "github.com/spf13/cobra" "github.com/spf13/pflag" + "golang.org/x/exp/maps" "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/exitcodes" @@ -23,38 +25,21 @@ import ( "github.com/golangci/golangci-lint/pkg/packages" "github.com/golangci/golangci-lint/pkg/printers" "github.com/golangci/golangci-lint/pkg/result" - "github.com/golangci/golangci-lint/pkg/result/processors" ) -func getDefaultIssueExcludeHelp() string { - parts := []string{"Use or not use default excludes:"} - for _, ep := range config.DefaultExcludePatterns { - parts = append(parts, - fmt.Sprintf(" # %s %s: %s", ep.ID, ep.Linter, ep.Why), - fmt.Sprintf(" - %s", color.YellowString(ep.Pattern)), - "", - ) - } - return strings.Join(parts, "\n") -} - -func getDefaultDirectoryExcludeHelp() string { - parts := []string{"Use or not use default excluded directories:"} - for _, dir := range packages.StdExcludeDirRegexps { - parts = append(parts, fmt.Sprintf(" - %s", color.YellowString(dir))) - } - parts = append(parts, "") 
- return strings.Join(parts, "\n") -} - -func wh(text string) string { - return color.GreenString(text) -} +const defaultFileMode = 0644 const defaultTimeout = time.Minute -//nolint:funlen -func initFlagSet(fs *pflag.FlagSet, cfg *config.Config, m *lintersdb.Manager, isFinalInit bool) { +const ( + // envFailOnWarnings value: "1" + envFailOnWarnings = "FAIL_ON_WARNINGS" + // envMemLogEvery value: "1" + envMemLogEvery = "GL_MEM_LOG_EVERY" +) + +//nolint:funlen,gomnd +func (e *Executor) initFlagSet(fs *pflag.FlagSet, cfg *config.Config, isFinalInit bool) { hideFlag := func(name string) { if err := fs.MarkHidden(name); err != nil { panic(err) @@ -69,6 +54,10 @@ func initFlagSet(fs *pflag.FlagSet, cfg *config.Config, m *lintersdb.Manager, is } } + // Config file config + rc := &cfg.Run + initConfigFileFlagSet(fs, rc) + // Output config oc := &cfg.Output fs.StringVar(&oc.Format, "out-format", @@ -88,11 +77,11 @@ func initFlagSet(fs *pflag.FlagSet, cfg *config.Config, m *lintersdb.Manager, is } // Run config - rc := &cfg.Run fs.StringVar(&rc.ModulesDownloadMode, "modules-download-mode", "", - "Modules download mode. If not empty, passed as -mod= to go tools") + wh("Modules download mode. If not empty, passed as -mod= to go tools")) fs.IntVar(&rc.ExitCodeIfIssuesFound, "issues-exit-code", exitcodes.IssuesFound, wh("Exit code when issues were found")) + fs.StringVar(&rc.Go, "go", "", wh("Targeted Go version")) fs.StringSliceVar(&rc.BuildTags, "build-tags", nil, wh("Build tags")) fs.DurationVar(&rc.Timeout, "deadline", defaultTimeout, wh("Deadline for total work")) @@ -104,8 +93,6 @@ func initFlagSet(fs *pflag.FlagSet, cfg *config.Config, m *lintersdb.Manager, is fs.BoolVar(&rc.AnalyzeTests, "tests", true, wh("Analyze tests (*_test.go)")) fs.BoolVar(&rc.PrintResourcesUsage, "print-resources-usage", false, wh("Print avg and max memory usage of golangci-lint and total time")) - fs.StringVarP(&rc.Config, "config", "c", "", wh("Read config from file path `PATH`")) - fs.BoolVar(&rc.NoConfig, "no-config", false, wh("Don't read config")) fs.StringSliceVar(&rc.SkipDirs, "skip-dirs", nil, wh("Regexps of directories to skip")) fs.BoolVar(&rc.UseDefaultSkipDirs, "skip-dirs-use-default", true, getDefaultDirectoryExcludeHelp()) fs.StringSliceVar(&rc.SkipFiles, "skip-files", nil, wh("Regexps of files to skip")) @@ -113,9 +100,10 @@ func initFlagSet(fs *pflag.FlagSet, cfg *config.Config, m *lintersdb.Manager, is const allowParallelDesc = "Allow multiple parallel golangci-lint instances running. " + "If false (default) - golangci-lint acquires file lock on start." fs.BoolVar(&rc.AllowParallelRunners, "allow-parallel-runners", false, wh(allowParallelDesc)) - const allowSerialDesc = "Allow multiple golangci-lint instances running, but serialize them around a lock. " + + const allowSerialDesc = "Allow multiple golangci-lint instances running, but serialize them around a lock. " + "If false (default) - golangci-lint exits with an error if it fails to acquire file lock on start." fs.BoolVar(&rc.AllowSerialRunners, "allow-serial-runners", false, wh(allowSerialDesc)) + fs.BoolVar(&rc.ShowStats, "show-stats", false, wh("Show statistics per linter")) // Linters settings config lsc := &cfg.LintersSettings @@ -182,33 +170,13 @@ func initFlagSet(fs *pflag.FlagSet, cfg *config.Config, m *lintersdb.Manager, is true, "Goconst: ignore when constant is not used as function argument") hideFlag("goconst.ignore-calls") - // (@dixonwille) These flag is only used for testing purposes. 
- fs.StringSliceVar(&lsc.Depguard.Packages, "depguard.packages", nil, - "Depguard: packages to add to the list") - hideFlag("depguard.packages") - - fs.BoolVar(&lsc.Depguard.IncludeGoRoot, "depguard.include-go-root", false, - "Depguard: check list against standard lib") - hideFlag("depguard.include-go-root") - fs.IntVar(&lsc.Lll.TabWidth, "lll.tab-width", 1, "Lll: tab width in spaces") hideFlag("lll.tab-width") // Linters config lc := &cfg.Linters - fs.StringSliceVarP(&lc.Enable, "enable", "E", nil, wh("Enable specific linter")) - fs.StringSliceVarP(&lc.Disable, "disable", "D", nil, wh("Disable specific linter")) - fs.BoolVar(&lc.EnableAll, "enable-all", false, wh("Enable all linters")) - if err := fs.MarkHidden("enable-all"); err != nil { - panic(err) - } - - fs.BoolVar(&lc.DisableAll, "disable-all", false, wh("Disable all linters")) - fs.StringSliceVarP(&lc.Presets, "presets", "p", nil, - wh(fmt.Sprintf("Enable presets (%s) of linters. Run 'golangci-lint linters' to see "+ - "them. This option implies option --disable-all", strings.Join(m.AllPresets(), "|")))) - fs.BoolVar(&lc.Fast, "fast", false, wh("Run only fast linters from enabled linters set (first run won't be fast)")) + e.initLintersFlagSet(fs, lc) // Issues config ic := &cfg.Issues @@ -233,18 +201,20 @@ func initFlagSet(fs *pflag.FlagSet, cfg *config.Config, m *lintersdb.Manager, is wh("Show only new issues created after git revision `REV`")) fs.StringVar(&ic.DiffPatchFilePath, "new-from-patch", "", wh("Show only new issues created in git patch with file path `PATH`")) - fs.BoolVar(&ic.NeedFix, "fix", false, "Fix found issues (if it's supported by the linter)") + fs.BoolVar(&ic.WholeFiles, "whole-files", false, + wh("Show issues in any part of update files (requires new-from-rev or new-from-patch)")) + fs.BoolVar(&ic.NeedFix, "fix", false, wh("Fix found issues (if it's supported by the linter)")) } func (e *Executor) initRunConfiguration(cmd *cobra.Command) { fs := cmd.Flags() fs.SortFlags = false // sort them as they are defined here - initFlagSet(fs, e.cfg, e.DBManager, true) + e.initFlagSet(fs, e.cfg, true) } func (e *Executor) getConfigForCommandLine() (*config.Config, error) { // We use another pflag.FlagSet here to not set `changed` flag - // on cmd.Flags() options. Otherwise string slice options will be duplicated. + // on cmd.Flags() options. Otherwise, string slice options will be duplicated. fs := pflag.NewFlagSet("config flag set", pflag.ContinueOnError) var cfg config.Config @@ -252,7 +222,7 @@ func (e *Executor) getConfigForCommandLine() (*config.Config, error) { // `changed` variable inside string slice vars will be shared. // Use another config variable here, not e.cfg, to not // affect main parsing by this parsing of only config option. - initFlagSet(fs, &cfg, e.DBManager, false) + e.initFlagSet(fs, &cfg, false) initVersionFlagSet(fs, &cfg) // Parse max options, even force version option: don't want @@ -260,13 +230,13 @@ func (e *Executor) getConfigForCommandLine() (*config.Config, error) { // cfg vs e.cfg. 
initRootFlagSet(fs, &cfg, true) - fs.Usage = func() {} // otherwise help text will be printed twice + fs.Usage = func() {} // otherwise, help text will be printed twice if err := fs.Parse(os.Args); err != nil { - if err == pflag.ErrHelp { + if errors.Is(err, pflag.ErrHelp) { return nil, err } - return nil, fmt.Errorf("can't parse args: %s", err) + return nil, fmt.Errorf("can't parse args: %w", err) } return &cfg, nil @@ -277,10 +247,11 @@ func (e *Executor) initRun() { Use: "run", Short: "Run the linters", Run: e.executeRun, - PreRun: func(_ *cobra.Command, _ []string) { + PreRunE: func(_ *cobra.Command, _ []string) error { if ok := e.acquireFileLock(); !ok { - e.log.Fatalf("Parallel golangci-lint is running") + return errors.New("parallel golangci-lint is running") } + return nil }, PostRun: func(_ *cobra.Command, _ []string) { e.releaseFileLock() @@ -324,6 +295,7 @@ func fixSlicesFlags(fs *pflag.FlagSet) { }) } +// runAnalysis executes the linters that have been enabled in the configuration. func (e *Executor) runAnalysis(ctx context.Context, args []string) ([]result.Issue, error) { e.cfg.Run.Args = args @@ -344,23 +316,17 @@ func (e *Executor) runAnalysis(ctx context.Context, args []string) ([]result.Iss lintCtx, err := e.contextLoader.Load(ctx, lintersToRun) if err != nil { - return nil, errors.Wrap(err, "context loading failed") + return nil, fmt.Errorf("context loading failed: %w", err) } - lintCtx.Log = e.log.Child("linters context") + lintCtx.Log = e.log.Child(logutils.DebugKeyLintersContext) - runner, err := lint.NewRunner(e.cfg, e.log.Child("runner"), - e.goenv, e.EnabledLintersSet, e.lineCache, e.DBManager, lintCtx.Packages) + runner, err := lint.NewRunner(e.cfg, e.log.Child(logutils.DebugKeyRunner), + e.goenv, e.EnabledLintersSet, e.lineCache, e.fileCache, e.DBManager, lintCtx.Packages) if err != nil { return nil, err } - issues, err := runner.Run(ctx, lintersToRun, lintCtx) - if err != nil { - return nil, err - } - - fixer := processors.NewFixer(e.cfg, e.log, e.fileCache) - return fixer.Process(issues), nil + return runner.Run(ctx, lintersToRun, lintCtx) } func (e *Executor) setOutputToDevNull() (savedStdout, savedStderr *os.File) { @@ -386,9 +352,9 @@ func (e *Executor) runAndPrint(ctx context.Context, args []string) error { e.log.Warnf("Failed to discover go env: %s", err) } - if !logutils.HaveDebugTag("linters_output") { + if !logutils.HaveDebugTag(logutils.DebugKeyLintersOutput) { // Don't allow linters and loader to print anything - log.SetOutput(ioutil.Discard) + log.SetOutput(io.Discard) savedStdout, savedStderr := e.setOutputToDevNull() defer func() { os.Stdout, os.Stderr = savedStdout, savedStderr @@ -400,44 +366,95 @@ func (e *Executor) runAndPrint(ctx context.Context, args []string) error { return err // XXX: don't loose type } - p, err := e.createPrinter() - if err != nil { - return err + formats := strings.Split(e.cfg.Output.Format, ",") + for _, format := range formats { + out := strings.SplitN(format, ":", 2) + if len(out) < 2 { + out = append(out, "") + } + + err := e.printReports(issues, out[1], out[0]) + if err != nil { + return err + } } + e.printStats(issues) + e.setExitCodeIfIssuesFound(issues) - if err = p.Print(ctx, issues); err != nil { - return fmt.Errorf("can't print %d issues: %s", len(issues), err) + e.fileCache.PrintStats(e.log) + + return nil +} + +func (e *Executor) printReports(issues []result.Issue, path, format string) error { + w, shouldClose, err := e.createWriter(path) + if err != nil { + return fmt.Errorf("can't create output for %s: %w", 
path, err) } - e.fileCache.PrintStats(e.log) + p, err := e.createPrinter(format, w) + if err != nil { + if file, ok := w.(io.Closer); shouldClose && ok { + _ = file.Close() + } + return err + } + + if err = p.Print(issues); err != nil { + if file, ok := w.(io.Closer); shouldClose && ok { + _ = file.Close() + } + return fmt.Errorf("can't print %d issues: %w", len(issues), err) + } + + if file, ok := w.(io.Closer); shouldClose && ok { + _ = file.Close() + } return nil } -func (e *Executor) createPrinter() (printers.Printer, error) { +func (e *Executor) createWriter(path string) (io.Writer, bool, error) { + if path == "" || path == "stdout" { + return logutils.StdOut, false, nil + } + if path == "stderr" { + return logutils.StdErr, false, nil + } + f, err := os.OpenFile(path, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, defaultFileMode) + if err != nil { + return nil, false, err + } + return f, true, nil +} + +func (e *Executor) createPrinter(format string, w io.Writer) (printers.Printer, error) { var p printers.Printer - format := e.cfg.Output.Format switch format { case config.OutFormatJSON: - p = printers.NewJSON(&e.reportData) + p = printers.NewJSON(&e.reportData, w) case config.OutFormatColoredLineNumber, config.OutFormatLineNumber: p = printers.NewText(e.cfg.Output.PrintIssuedLine, format == config.OutFormatColoredLineNumber, e.cfg.Output.PrintLinterName, - e.log.Child("text_printer")) - case config.OutFormatTab: - p = printers.NewTab(e.cfg.Output.PrintLinterName, e.log.Child("tab_printer")) + e.log.Child(logutils.DebugKeyTextPrinter), w) + case config.OutFormatTab, config.OutFormatColoredTab: + p = printers.NewTab(e.cfg.Output.PrintLinterName, + format == config.OutFormatColoredTab, + e.log.Child(logutils.DebugKeyTabPrinter), w) case config.OutFormatCheckstyle: - p = printers.NewCheckstyle() + p = printers.NewCheckstyle(w) case config.OutFormatCodeClimate: - p = printers.NewCodeClimate() + p = printers.NewCodeClimate(w) case config.OutFormatHTML: - p = printers.NewHTML() + p = printers.NewHTML(w) case config.OutFormatJunitXML: - p = printers.NewJunitXML() + p = printers.NewJunitXML(w) case config.OutFormatGithubActions: - p = printers.NewGithub() + p = printers.NewGithub(w) + case config.OutFormatTeamCity: + p = printers.NewTeamCity(w) default: return nil, fmt.Errorf("unknown output format %s", format) } @@ -445,6 +462,32 @@ func (e *Executor) createPrinter() (printers.Printer, error) { return p, nil } +func (e *Executor) printStats(issues []result.Issue) { + if !e.cfg.Run.ShowStats { + return + } + + if len(issues) == 0 { + e.runCmd.Println("0 issues.") + return + } + + stats := map[string]int{} + for idx := range issues { + stats[issues[idx].FromLinter]++ + } + + e.runCmd.Printf("%d issues:\n", len(issues)) + + keys := maps.Keys(stats) + sort.Strings(keys) + + for _, key := range keys { + e.runCmd.Printf("* %s: %d\n", key, stats[key]) + } +} + +// executeRun executes the 'run' CLI command, which runs the linters. 
func (e *Executor) executeRun(_ *cobra.Command, args []string) { needTrackResources := e.cfg.Run.IsVerbose || e.cfg.Run.PrintResourcesUsage trackResourcesEndCh := make(chan struct{}) @@ -465,7 +508,8 @@ func (e *Executor) executeRun(_ *cobra.Command, args []string) { if err := e.runAndPrint(ctx, args); err != nil { e.log.Errorf("Running error: %s", err) if e.exitCode == exitcodes.Success { - if exitErr, ok := errors.Cause(err).(*exitcodes.ExitError); ok { + var exitErr *exitcodes.ExitError + if errors.As(err, &exitErr) { e.exitCode = exitErr.Code } else { e.exitCode = exitcodes.Failure @@ -478,7 +522,6 @@ func (e *Executor) executeRun(_ *cobra.Command, args []string) { // to be removed when deadline is finally decommissioned func (e *Executor) setTimeoutToDeadlineIfOnlyDeadlineIsSet() { - // nolint:staticcheck deadlineValue := e.cfg.Run.Deadline if deadlineValue != 0 && e.cfg.Run.Timeout == defaultTimeout { e.cfg.Run.Timeout = deadlineValue @@ -496,7 +539,7 @@ func (e *Executor) setupExitCode(ctx context.Context) { return } - needFailOnWarnings := (os.Getenv("GL_TEST_RUN") == "1" || os.Getenv("FAIL_ON_WARNINGS") == "1") + needFailOnWarnings := os.Getenv(lintersdb.EnvTestRun) == "1" || os.Getenv(envFailOnWarnings) == "1" if needFailOnWarnings && len(e.reportData.Warnings) != 0 { e.exitCode = exitcodes.WarningInTest return @@ -520,7 +563,7 @@ func watchResources(ctx context.Context, done chan struct{}, logger logutils.Log ticker := time.NewTicker(intervalMS * time.Millisecond) defer ticker.Stop() - logEveryRecord := os.Getenv("GL_MEM_LOG_EVERY") == "1" + logEveryRecord := os.Getenv(envMemLogEvery) == "1" const MB = 1024 * 1024 track := func() { @@ -564,3 +607,28 @@ func watchResources(ctx context.Context, done chan struct{}, logger logutils.Log logger.Infof("Execution took %s", time.Since(startedAt)) close(done) } + +func getDefaultIssueExcludeHelp() string { + parts := []string{color.GreenString("Use or not use default excludes:")} + for _, ep := range config.DefaultExcludePatterns { + parts = append(parts, + fmt.Sprintf(" # %s %s: %s", ep.ID, ep.Linter, ep.Why), + fmt.Sprintf(" - %s", color.YellowString(ep.Pattern)), + "", + ) + } + return strings.Join(parts, "\n") +} + +func getDefaultDirectoryExcludeHelp() string { + parts := []string{color.GreenString("Use or not use default excluded directories:")} + for _, dir := range packages.StdExcludeDirRegexps { + parts = append(parts, fmt.Sprintf(" - %s", color.YellowString(dir))) + } + parts = append(parts, "") + return strings.Join(parts, "\n") +} + +func wh(text string) string { + return color.GreenString(text) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/commands/version.go b/vendor/github.com/golangci/golangci-lint/pkg/commands/version.go index 8b48e515bf..10ab7c1bb1 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/commands/version.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/commands/version.go @@ -3,6 +3,9 @@ package commands import ( "encoding/json" "fmt" + "io" + "os" + "runtime/debug" "strings" "github.com/spf13/cobra" @@ -11,10 +14,9 @@ import ( "github.com/golangci/golangci-lint/pkg/config" ) -type jsonVersion struct { - Version string `json:"version"` - Commit string `json:"commit"` - Date string `json:"date"` +type versionInfo struct { + Info BuildInfo + BuildInfo *debug.BuildInfo } func (e *Executor) initVersionConfiguration(cmd *cobra.Command) { @@ -27,34 +29,55 @@ func initVersionFlagSet(fs *pflag.FlagSet, cfg *config.Config) { // Version config vc := &cfg.Version fs.StringVar(&vc.Format, 
"format", "", wh("The version's format can be: 'short', 'json'")) + fs.BoolVar(&vc.Debug, "debug", false, wh("Add build information")) } func (e *Executor) initVersion() { versionCmd := &cobra.Command{ - Use: "version", - Short: "Version", - RunE: func(cmd *cobra.Command, _ []string) error { + Use: "version", + Short: "Version", + Args: cobra.NoArgs, + ValidArgsFunction: cobra.NoFileCompletions, + RunE: func(_ *cobra.Command, _ []string) error { + if e.cfg.Version.Debug { + info, ok := debug.ReadBuildInfo() + if !ok { + return nil + } + + switch strings.ToLower(e.cfg.Version.Format) { + case "json": + return json.NewEncoder(os.Stdout).Encode(versionInfo{ + Info: e.buildInfo, + BuildInfo: info, + }) + + default: + fmt.Println(info.String()) + return printVersion(os.Stdout, e.buildInfo) + } + } + switch strings.ToLower(e.cfg.Version.Format) { case "short": - fmt.Println(e.version) + fmt.Println(e.buildInfo.Version) + return nil + case "json": - ver := jsonVersion{ - Version: e.version, - Commit: e.commit, - Date: e.date, - } - data, err := json.Marshal(&ver) - if err != nil { - return err - } - fmt.Println(string(data)) + return json.NewEncoder(os.Stdout).Encode(e.buildInfo) + default: - fmt.Printf("golangci-lint has version %s built from %s on %s\n", e.version, e.commit, e.date) + return printVersion(os.Stdout, e.buildInfo) } - return nil }, } e.rootCmd.AddCommand(versionCmd) e.initVersionConfiguration(versionCmd) } + +func printVersion(w io.Writer, buildInfo BuildInfo) error { + _, err := fmt.Fprintf(w, "golangci-lint has version %s built with %s from %s on %s\n", + buildInfo.Version, buildInfo.GoVersion, buildInfo.Commit, buildInfo.Date) + return err +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/config/config.go b/vendor/github.com/golangci/golangci-lint/pkg/config/config.go index 931ddbbbee..a5483a20e3 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/config/config.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/config/config.go @@ -1,7 +1,17 @@ package config +import ( + "os" + "strings" + + hcversion "github.com/hashicorp/go-version" + "github.com/ldez/gomoddirectives" +) + +// Config encapsulates the config data specified in the golangci yaml config file. type Config struct { - Run Run + cfgDir string // The directory containing the golangci config file. + Run Run Output Output @@ -15,6 +25,11 @@ type Config struct { InternalTest bool // Option is used only for testing golangci-lint code, don't use it } +// GetConfigDir returns the directory that contains golangci config file. 
+func (c *Config) GetConfigDir() string { + return c.cfgDir +} + func NewDefault() *Config { return &Config{ LintersSettings: defaultLintersSettings, @@ -23,4 +38,34 @@ func NewDefault() *Config { type Version struct { Format string `mapstructure:"format"` + Debug bool `mapstructure:"debug"` +} + +func IsGreaterThanOrEqualGo122(v string) bool { + v1, err := hcversion.NewVersion(strings.TrimPrefix(v, "go")) + if err != nil { + return false + } + + limit, err := hcversion.NewVersion("1.22") + if err != nil { + return false + } + + return v1.GreaterThanOrEqual(limit) +} + +func DetectGoVersion() string { + file, _ := gomoddirectives.GetModuleFile() + + if file != nil && file.Go != nil && file.Go.Version != "" { + return file.Go.Version + } + + v := os.Getenv("GOVERSION") + if v != "" { + return v + } + + return "1.17" } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/config/issues.go b/vendor/github.com/golangci/golangci-lint/pkg/config/issues.go index 71bf2a90ef..27dcf81056 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/config/issues.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/config/issues.go @@ -54,7 +54,7 @@ var DefaultExcludePatterns = []ExcludePattern{ }, { ID: "EXC0008", - Pattern: "(G104|G307)", + Pattern: "(G104)", Linter: "gosec", Why: "Duplicated errcheck checks", }, @@ -97,7 +97,7 @@ var DefaultExcludePatterns = []ExcludePattern{ }, { ID: "EXC0015", - Pattern: `should have a package comment, unless it's in another file for this package`, + Pattern: `should have a package comment`, Linter: "revive", Why: "Annoying issue about not having a comment. The rare codebase has such comments", }, @@ -115,6 +115,7 @@ type Issues struct { DiffFromRevision string `mapstructure:"new-from-rev"` DiffPatchFilePath string `mapstructure:"new-from-patch"` + WholeFiles bool `mapstructure:"whole-files"` Diff bool `mapstructure:"new"` NeedFix bool `mapstructure:"fix"` @@ -124,32 +125,39 @@ type ExcludeRule struct { BaseRule `mapstructure:",squash"` } -func (e ExcludeRule) Validate() error { +func (e *ExcludeRule) Validate() error { return e.BaseRule.Validate(excludeRuleMinConditionsCount) } type BaseRule struct { - Linters []string - Path string - Text string - Source string + Linters []string + Path string + PathExcept string `mapstructure:"path-except"` + Text string + Source string } -func (b BaseRule) Validate(minConditionsCount int) error { +func (b *BaseRule) Validate(minConditionsCount int) error { if err := validateOptionalRegex(b.Path); err != nil { - return fmt.Errorf("invalid path regex: %v", err) + return fmt.Errorf("invalid path regex: %w", err) + } + if err := validateOptionalRegex(b.PathExcept); err != nil { + return fmt.Errorf("invalid path-except regex: %w", err) } if err := validateOptionalRegex(b.Text); err != nil { - return fmt.Errorf("invalid text regex: %v", err) + return fmt.Errorf("invalid text regex: %w", err) } if err := validateOptionalRegex(b.Source); err != nil { - return fmt.Errorf("invalid source regex: %v", err) + return fmt.Errorf("invalid source regex: %w", err) } nonBlank := 0 if len(b.Linters) > 0 { nonBlank++ } - if b.Path != "" { + // Filtering by path counts as one condition, regardless how it is done (one or both). + // Otherwise, a rule with Path and PathExcept set would pass validation + // whereas before the introduction of path-except that wouldn't have been precise enough. 
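
The BaseRule changes above add a path-except condition, switch error wrapping to %w, and count path and path-except together as a single condition when validating. A hedged sketch of validating such a rule programmatically, assuming the vendored github.com/golangci/golangci-lint/pkg/config package is importable as laid out in this diff; the regular expressions are illustrative values, not taken from the diff.

package main

import (
	"fmt"

	"github.com/golangci/golangci-lint/pkg/config"
)

func main() {
	// Two conditions: a linter list plus a path filter (path and path-except
	// jointly count as one condition in BaseRule.Validate above).
	rule := config.ExcludeRule{
		BaseRule: config.BaseRule{
			Linters:    []string{"errcheck"},
			Path:       `_test\.go$`,
			PathExcept: `^integration/`,
		},
	}

	if err := rule.Validate(); err != nil {
		fmt.Println("invalid exclude rule:", err)
		return
	}
	fmt.Println("exclude rule is valid")
}
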
+ if b.Path != "" || b.PathExcept != "" { nonBlank++ } if b.Text != "" { @@ -159,7 +167,7 @@ func (b BaseRule) Validate(minConditionsCount int) error { nonBlank++ } if nonBlank < minConditionsCount { - return fmt.Errorf("at least %d of (text, source, path, linters) should be set", minConditionsCount) + return fmt.Errorf("at least %d of (text, source, path[-except], linters) should be set", minConditionsCount) } return nil } @@ -187,15 +195,16 @@ func GetDefaultExcludePatternsStrings() []string { return ret } +// TODO(ldez): this behavior must be changed in v2, because this is confusing. func GetExcludePatterns(include []string) []ExcludePattern { - includeMap := make(map[string]bool, len(include)) + includeMap := make(map[string]struct{}, len(include)) for _, inc := range include { - includeMap[inc] = true + includeMap[inc] = struct{}{} } var ret []ExcludePattern for _, p := range DefaultExcludePatterns { - if !includeMap[p.ID] { + if _, ok := includeMap[p.ID]; !ok { ret = append(ret, p) } } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/config/linters_settings.go b/vendor/github.com/golangci/golangci-lint/pkg/config/linters_settings.go index 3ee1854f6a..a3206f5978 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/config/linters_settings.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/config/linters_settings.go @@ -1,99 +1,206 @@ package config -import "github.com/pkg/errors" +import ( + "encoding" + "errors" + "runtime" + + "gopkg.in/yaml.v3" +) var defaultLintersSettings = LintersSettings{ + Asasalint: AsasalintSettings{ + UseBuiltinExclusions: true, + }, + Decorder: DecorderSettings{ + DecOrder: []string{"type", "const", "var", "func"}, + DisableDecNumCheck: true, + DisableDecOrderCheck: true, + DisableInitFuncFirstCheck: true, + }, + Dogsled: DogsledSettings{ + MaxBlankIdentifiers: 2, + }, + ErrorLint: ErrorLintSettings{ + Errorf: true, + ErrorfMulti: true, + Asserts: true, + Comparison: true, + }, + Exhaustive: ExhaustiveSettings{ + Check: []string{"switch"}, + CheckGenerated: false, + DefaultSignifiesExhaustive: false, + IgnoreEnumMembers: "", + PackageScopeOnly: false, + ExplicitExhaustiveMap: false, + ExplicitExhaustiveSwitch: false, + }, + Forbidigo: ForbidigoSettings{ + ExcludeGodocExamples: true, + }, + Gci: GciSettings{ + Sections: []string{"standard", "default"}, + SkipGenerated: true, + }, + Gocognit: GocognitSettings{ + MinComplexity: 30, + }, + Gocritic: GoCriticSettings{ + SettingsPerCheck: map[string]GoCriticCheckSettings{}, + }, + Godox: GodoxSettings{ + Keywords: []string{}, + }, + Godot: GodotSettings{ + Scope: "declarations", + Period: true, + }, + Gofumpt: GofumptSettings{ + LangVersion: "", + ModulePath: "", + ExtraRules: false, + }, + Gosec: GoSecSettings{ + Concurrency: runtime.NumCPU(), + }, + Gosmopolitan: GosmopolitanSettings{ + AllowTimeLocal: false, + EscapeHatches: []string{}, + IgnoreTests: true, + WatchForScripts: []string{"Han"}, + }, + Ifshort: IfshortSettings{ + MaxDeclLines: 1, + MaxDeclChars: 30, + }, + Inamedparam: INamedParamSettings{ + SkipSingleParam: false, + }, + InterfaceBloat: InterfaceBloatSettings{ + Max: 10, + }, Lll: LllSettings{ LineLength: 120, TabWidth: 1, }, - Unparam: UnparamSettings{ - Algo: "cha", + LoggerCheck: LoggerCheckSettings{ + Kitlog: true, + Klog: true, + Logr: true, + Zap: true, + RequireStringKey: false, + NoPrintfLike: false, + Rules: nil, + }, + MaintIdx: MaintIdxSettings{ + Under: 20, }, Nakedret: NakedretSettings{ MaxFuncLines: 30, }, + Nestif: NestifSettings{ + MinComplexity: 5, + }, + 
NoLintLint: NoLintLintSettings{ + RequireExplanation: false, + RequireSpecific: false, + AllowUnused: false, + }, + PerfSprint: PerfSprintSettings{ + IntConversion: true, + ErrError: false, + ErrorF: true, + SprintF1: true, + }, Prealloc: PreallocSettings{ Simple: true, RangeLoops: true, ForLoops: false, }, - Gocritic: GocriticSettings{ - SettingsPerCheck: map[string]GocriticCheckSettings{}, + Predeclared: PredeclaredSettings{ + Ignore: "", + Qualified: false, + }, + SlogLint: SlogLintSettings{ + NoMixedArgs: true, + KVOnly: false, + AttrOnly: false, + ContextOnly: false, + StaticMsg: false, + NoRawKeys: false, + KeyNamingCase: "", + ArgsOnSepLines: false, }, - Godox: GodoxSettings{ - Keywords: []string{}, + TagAlign: TagAlignSettings{ + Align: true, + Sort: true, + Order: nil, + Strict: false, }, - Dogsled: DogsledSettings{ - MaxBlankIdentifiers: 2, + Testpackage: TestpackageSettings{ + SkipRegexp: `(export|internal)_test\.go`, + AllowPackages: []string{"main"}, }, - Gocognit: GocognitSettings{ - MinComplexity: 30, + Unparam: UnparamSettings{ + Algo: "cha", + }, + Unused: UnusedSettings{ + FieldWritesAreUses: true, + PostStatementsAreReads: false, + ExportedIsUsed: true, + ExportedFieldsAreUsed: true, + ParametersAreUsed: true, + LocalVariablesAreUsed: true, + GeneratedIsUsed: true, + }, + UseStdlibVars: UseStdlibVarsSettings{ + HTTPMethod: true, + HTTPStatusCode: true, + }, + Varnamelen: VarnamelenSettings{ + MaxDistance: 5, + MinNameLength: 3, }, WSL: WSLSettings{ StrictAppend: true, AllowAssignAndCallCuddle: true, AllowAssignAndAnythingCuddle: false, AllowMultiLineAssignCuddle: true, - AllowCuddleDeclaration: false, + ForceCaseTrailingWhitespaceLimit: 0, AllowTrailingComment: false, AllowSeparatedLeadingComment: false, + AllowCuddleDeclaration: false, + AllowCuddleWithCalls: []string{"Lock", "RLock"}, + AllowCuddleWithRHS: []string{"Unlock", "RUnlock"}, ForceCuddleErrCheckAndAssign: false, + ErrorVariableNames: []string{"err"}, ForceExclusiveShortDeclarations: false, - ForceCaseTrailingWhitespaceLimit: 0, - }, - NoLintLint: NoLintLintSettings{ - RequireExplanation: false, - AllowLeadingSpace: true, - RequireSpecific: false, - AllowUnused: false, - }, - Testpackage: TestpackageSettings{ - SkipRegexp: `(export|internal)_test\.go`, - }, - Nestif: NestifSettings{ - MinComplexity: 5, - }, - Exhaustive: ExhaustiveSettings{ - CheckGenerated: false, - DefaultSignifiesExhaustive: false, - }, - Gofumpt: GofumptSettings{ - ExtraRules: false, - }, - ErrorLint: ErrorLintSettings{ - Errorf: true, - Asserts: true, - Comparison: true, - }, - Ifshort: IfshortSettings{ - MaxDeclLines: 1, - MaxDeclChars: 30, - }, - Predeclared: PredeclaredSettings{ - Ignore: "", - Qualified: false, - }, - Forbidigo: ForbidigoSettings{ - ExcludeGodocExamples: true, }, } type LintersSettings struct { + Asasalint AsasalintSettings + BiDiChk BiDiChkSettings Cyclop Cyclop + Decorder DecorderSettings Depguard DepGuardSettings Dogsled DogsledSettings Dupl DuplSettings + DupWord DupWordSettings Errcheck ErrcheckSettings + ErrChkJSON ErrChkJSONSettings ErrorLint ErrorLintSettings Exhaustive ExhaustiveSettings ExhaustiveStruct ExhaustiveStructSettings + Exhaustruct ExhaustructSettings Forbidigo ForbidigoSettings Funlen FunlenSettings Gci GciSettings + GinkgoLinter GinkgoLinterSettings Gocognit GocognitSettings Goconst GoConstSettings - Gocritic GocriticSettings + Gocritic GoCriticSettings Gocyclo GoCycloSettings Godot GodotSettings Godox GodoxSettings @@ -107,30 +214,52 @@ type LintersSettings struct { Gomodguard 
GoModGuardSettings Gosec GoSecSettings Gosimple StaticCheckSettings + Gosmopolitan GosmopolitanSettings Govet GovetSettings + Grouper GrouperSettings Ifshort IfshortSettings ImportAs ImportAsSettings + Inamedparam INamedParamSettings + InterfaceBloat InterfaceBloatSettings + Ireturn IreturnSettings Lll LllSettings + LoggerCheck LoggerCheckSettings + MaintIdx MaintIdxSettings Makezero MakezeroSettings Maligned MalignedSettings Misspell MisspellSettings + MustTag MustTagSettings Nakedret NakedretSettings Nestif NestifSettings + NilNil NilNilSettings + Nlreturn NlreturnSettings NoLintLint NoLintLintSettings + NoNamedReturns NoNamedReturnsSettings + ParallelTest ParallelTestSettings + PerfSprint PerfSprintSettings Prealloc PreallocSettings Predeclared PredeclaredSettings Promlinter PromlinterSettings + ProtoGetter ProtoGetterSettings + Reassign ReassignSettings Revive ReviveSettings RowsErrCheck RowsErrCheckSettings + SlogLint SlogLintSettings + Spancheck SpancheckSettings Staticcheck StaticCheckSettings Structcheck StructCheckSettings Stylecheck StaticCheckSettings + TagAlign TagAlignSettings Tagliatelle TagliatelleSettings + Tenv TenvSettings + Testifylint TestifylintSettings Testpackage TestpackageSettings Thelper ThelperSettings Unparam UnparamSettings - Unused StaticCheckSettings + Unused UnusedSettings + UseStdlibVars UseStdlibVarsSettings Varcheck VarCheckSettings + Varnamelen VarnamelenSettings Whitespace WhitespaceSettings Wrapcheck WrapcheckSettings WSL WSLSettings @@ -138,6 +267,24 @@ type LintersSettings struct { Custom map[string]CustomLinterSettings } +type AsasalintSettings struct { + Exclude []string `mapstructure:"exclude"` + UseBuiltinExclusions bool `mapstructure:"use-builtin-exclusions"` + IgnoreTest bool `mapstructure:"ignore-test"` +} + +type BiDiChkSettings struct { + LeftToRightEmbedding bool `mapstructure:"left-to-right-embedding"` + RightToLeftEmbedding bool `mapstructure:"right-to-left-embedding"` + PopDirectionalFormatting bool `mapstructure:"pop-directional-formatting"` + LeftToRightOverride bool `mapstructure:"left-to-right-override"` + RightToLeftOverride bool `mapstructure:"right-to-left-override"` + LeftToRightIsolate bool `mapstructure:"left-to-right-isolate"` + RightToLeftIsolate bool `mapstructure:"right-to-left-isolate"` + FirstStrongIsolate bool `mapstructure:"first-strong-isolate"` + PopDirectionalIsolate bool `mapstructure:"pop-directional-isolate"` +} + type Cyclop struct { MaxComplexity int `mapstructure:"max-complexity"` PackageAverage float64 `mapstructure:"package-average"` @@ -145,10 +292,30 @@ type Cyclop struct { } type DepGuardSettings struct { - ListType string `mapstructure:"list-type"` - Packages []string - IncludeGoRoot bool `mapstructure:"include-go-root"` - PackagesWithErrorMessage map[string]string `mapstructure:"packages-with-error-message"` + Rules map[string]*DepGuardList `mapstructure:"rules"` +} + +type DepGuardList struct { + ListMode string `mapstructure:"list-mode"` + Files []string `mapstructure:"files"` + Allow []string `mapstructure:"allow"` + Deny []DepGuardDeny `mapstructure:"deny"` +} + +type DepGuardDeny struct { + Pkg string `mapstructure:"pkg"` + Desc string `mapstructure:"desc"` +} + +type DecorderSettings struct { + DecOrder []string `mapstructure:"dec-order"` + IgnoreUnderscoreVars bool `mapstructure:"ignore-underscore-vars"` + DisableDecNumCheck bool `mapstructure:"disable-dec-num-check"` + DisableTypeDecNumCheck bool `mapstructure:"disable-type-dec-num-check"` + DisableConstDecNumCheck bool 
`mapstructure:"disable-const-dec-num-check"` + DisableVarDecNumCheck bool `mapstructure:"disable-var-dec-num-check"` + DisableDecOrderCheck bool `mapstructure:"disable-dec-order-check"` + DisableInitFuncFirstCheck bool `mapstructure:"disable-init-func-first-check"` } type DogsledSettings struct { @@ -159,40 +326,119 @@ type DuplSettings struct { Threshold int } +type DupWordSettings struct { + Keywords []string `mapstructure:"keywords"` + Ignore []string `mapstructure:"ignore"` +} + type ErrcheckSettings struct { - CheckTypeAssertions bool `mapstructure:"check-type-assertions"` - CheckAssignToBlank bool `mapstructure:"check-blank"` - Ignore string `mapstructure:"ignore"` - Exclude string `mapstructure:"exclude"` + DisableDefaultExclusions bool `mapstructure:"disable-default-exclusions"` + CheckTypeAssertions bool `mapstructure:"check-type-assertions"` + CheckAssignToBlank bool `mapstructure:"check-blank"` + Ignore string `mapstructure:"ignore"` + ExcludeFunctions []string `mapstructure:"exclude-functions"` + + // Deprecated: use ExcludeFunctions instead + Exclude string `mapstructure:"exclude"` +} + +type ErrChkJSONSettings struct { + CheckErrorFreeEncoding bool `mapstructure:"check-error-free-encoding"` + ReportNoExported bool `mapstructure:"report-no-exported"` } type ErrorLintSettings struct { - Errorf bool `mapstructure:"errorf"` - Asserts bool `mapstructure:"asserts"` - Comparison bool `mapstructure:"comparison"` + Errorf bool `mapstructure:"errorf"` + ErrorfMulti bool `mapstructure:"errorf-multi"` + Asserts bool `mapstructure:"asserts"` + Comparison bool `mapstructure:"comparison"` } type ExhaustiveSettings struct { - CheckGenerated bool `mapstructure:"check-generated"` - DefaultSignifiesExhaustive bool `mapstructure:"default-signifies-exhaustive"` + Check []string `mapstructure:"check"` + CheckGenerated bool `mapstructure:"check-generated"` + DefaultSignifiesExhaustive bool `mapstructure:"default-signifies-exhaustive"` + IgnoreEnumMembers string `mapstructure:"ignore-enum-members"` + IgnoreEnumTypes string `mapstructure:"ignore-enum-types"` + PackageScopeOnly bool `mapstructure:"package-scope-only"` + ExplicitExhaustiveMap bool `mapstructure:"explicit-exhaustive-map"` + ExplicitExhaustiveSwitch bool `mapstructure:"explicit-exhaustive-switch"` + DefaultCaseRequired bool `mapstructure:"default-case-required"` } type ExhaustiveStructSettings struct { StructPatterns []string `mapstructure:"struct-patterns"` } +type ExhaustructSettings struct { + Include []string `mapstructure:"include"` + Exclude []string `mapstructure:"exclude"` +} + type ForbidigoSettings struct { - Forbid []string `mapstructure:"forbid"` - ExcludeGodocExamples bool `mapstructure:"exclude-godoc-examples"` + Forbid []ForbidigoPattern `mapstructure:"forbid"` + ExcludeGodocExamples bool `mapstructure:"exclude-godoc-examples"` + AnalyzeTypes bool `mapstructure:"analyze-types"` +} + +var _ encoding.TextUnmarshaler = &ForbidigoPattern{} + +// ForbidigoPattern corresponds to forbidigo.pattern and adds mapstructure support. +// The YAML field names must match what forbidigo expects. +type ForbidigoPattern struct { + // patternString gets populated when the config contains a string as entry in ForbidigoSettings.Forbid[] + // because ForbidigoPattern implements encoding.TextUnmarshaler + // and the reader uses the mapstructure.TextUnmarshallerHookFunc as decoder hook. + // + // If the entry is a map, then the other fields are set as usual by mapstructure. 
+ patternString string + + Pattern string `yaml:"p" mapstructure:"p"` + Package string `yaml:"pkg,omitempty" mapstructure:"pkg,omitempty"` + Msg string `yaml:"msg,omitempty" mapstructure:"msg,omitempty"` +} + +func (p *ForbidigoPattern) UnmarshalText(text []byte) error { + // Validation happens when instantiating forbidigo. + p.patternString = string(text) + return nil +} + +// MarshalString converts the pattern into a string as needed by forbidigo.NewLinter. +// +// MarshalString is intentionally not called MarshalText, +// although it has the same signature +// because implementing encoding.TextMarshaler led to infinite recursion when yaml.Marshal called MarshalText. +func (p *ForbidigoPattern) MarshalString() ([]byte, error) { + if p.patternString != "" { + return []byte(p.patternString), nil + } + + return yaml.Marshal(p) } type FunlenSettings struct { - Lines int - Statements int + Lines int + Statements int + IgnoreComments bool `mapstructure:"ignore-comments"` } type GciSettings struct { - LocalPrefixes string `mapstructure:"local-prefixes"` + LocalPrefixes string `mapstructure:"local-prefixes"` // Deprecated + Sections []string `mapstructure:"sections"` + SkipGenerated bool `mapstructure:"skip-generated"` + CustomOrder bool `mapstructure:"custom-order"` +} + +type GinkgoLinterSettings struct { + SuppressLenAssertion bool `mapstructure:"suppress-len-assertion"` + SuppressNilAssertion bool `mapstructure:"suppress-nil-assertion"` + SuppressErrAssertion bool `mapstructure:"suppress-err-assertion"` + SuppressCompareAssertion bool `mapstructure:"suppress-compare-assertion"` + SuppressAsyncAssertion bool `mapstructure:"suppress-async-assertion"` + SuppressTypeCompareWarning bool `mapstructure:"suppress-type-compare-assertion"` + ForbidFocusContainer bool `mapstructure:"forbid-focus-container"` + AllowHaveLenZero bool `mapstructure:"allow-havelen-zero"` } type GocognitSettings struct { @@ -200,16 +446,28 @@ type GocognitSettings struct { } type GoConstSettings struct { - IgnoreTests bool `mapstructure:"ignore-tests"` - MatchWithConstants bool `mapstructure:"match-constant"` - MinStringLen int `mapstructure:"min-len"` - MinOccurrencesCount int `mapstructure:"min-occurrences"` - ParseNumbers bool `mapstructure:"numbers"` - NumberMin int `mapstructure:"min"` - NumberMax int `mapstructure:"max"` - IgnoreCalls bool `mapstructure:"ignore-calls"` + IgnoreStrings string `mapstructure:"ignore-strings"` + IgnoreTests bool `mapstructure:"ignore-tests"` + MatchWithConstants bool `mapstructure:"match-constant"` + MinStringLen int `mapstructure:"min-len"` + MinOccurrencesCount int `mapstructure:"min-occurrences"` + ParseNumbers bool `mapstructure:"numbers"` + NumberMin int `mapstructure:"min"` + NumberMax int `mapstructure:"max"` + IgnoreCalls bool `mapstructure:"ignore-calls"` } +type GoCriticSettings struct { + Go string `mapstructure:"-"` + EnabledChecks []string `mapstructure:"enabled-checks"` + DisabledChecks []string `mapstructure:"disabled-checks"` + EnabledTags []string `mapstructure:"enabled-tags"` + DisabledTags []string `mapstructure:"disabled-tags"` + SettingsPerCheck map[string]GoCriticCheckSettings `mapstructure:"settings"` +} + +type GoCriticCheckSettings map[string]any + type GoCycloSettings struct { MinComplexity int `mapstructure:"min-complexity"` } @@ -218,6 +476,7 @@ type GodotSettings struct { Scope string `mapstructure:"scope"` Exclude []string `mapstructure:"exclude"` Capital bool `mapstructure:"capital"` + Period bool `mapstructure:"period"` // Deprecated: use `Scope` instead 
CheckAll bool `mapstructure:"check-all"` @@ -228,11 +487,21 @@ type GodoxSettings struct { } type GoFmtSettings struct { - Simplify bool + Simplify bool + RewriteRules []GoFmtRewriteRule `mapstructure:"rewrite-rules"` +} + +type GoFmtRewriteRule struct { + Pattern string + Replacement string } type GofumptSettings struct { - ExtraRules bool `mapstructure:"extra-rules"` + ModulePath string `mapstructure:"module-path"` + ExtraRules bool `mapstructure:"extra-rules"` + + // Deprecated: use the global `run.go` instead. + LangVersion string `mapstructure:"lang-version"` } type GoHeaderSettings struct { @@ -250,7 +519,11 @@ type GoLintSettings struct { } type GoMndSettings struct { - Settings map[string]map[string]interface{} + Settings map[string]map[string]any // Deprecated + Checks []string `mapstructure:"checks"` + IgnoredNumbers []string `mapstructure:"ignored-numbers"` + IgnoredFiles []string `mapstructure:"ignored-files"` + IgnoredFunctions []string `mapstructure:"ignored-functions"` } type GoModDirectivesSettings struct { @@ -279,14 +552,26 @@ type GoModGuardSettings struct { } type GoSecSettings struct { - Includes []string - Excludes []string - Config map[string]interface{} `mapstructure:"config"` + Includes []string `mapstructure:"includes"` + Excludes []string `mapstructure:"excludes"` + Severity string `mapstructure:"severity"` + Confidence string `mapstructure:"confidence"` + ExcludeGenerated bool `mapstructure:"exclude-generated"` + Config map[string]any `mapstructure:"config"` + Concurrency int `mapstructure:"concurrency"` +} + +type GosmopolitanSettings struct { + AllowTimeLocal bool `mapstructure:"allow-time-local"` + EscapeHatches []string `mapstructure:"escape-hatches"` + IgnoreTests bool `mapstructure:"ignore-tests"` + WatchForScripts []string `mapstructure:"watch-for-scripts"` } type GovetSettings struct { - CheckShadowing bool `mapstructure:"check-shadowing"` - Settings map[string]map[string]interface{} + Go string `mapstructure:"-"` + CheckShadowing bool `mapstructure:"check-shadowing"` + Settings map[string]map[string]any Enable []string Disable []string @@ -294,7 +579,7 @@ type GovetSettings struct { DisableAll bool `mapstructure:"disable-all"` } -func (cfg GovetSettings) Validate() error { +func (cfg *GovetSettings) Validate() error { if cfg.EnableAll && cfg.DisableAll { return errors.New("enable-all and disable-all can't be combined") } @@ -307,14 +592,26 @@ func (cfg GovetSettings) Validate() error { return nil } +type GrouperSettings struct { + ConstRequireSingleConst bool `mapstructure:"const-require-single-const"` + ConstRequireGrouping bool `mapstructure:"const-require-grouping"` + ImportRequireSingleImport bool `mapstructure:"import-require-single-import"` + ImportRequireGrouping bool `mapstructure:"import-require-grouping"` + TypeRequireSingleType bool `mapstructure:"type-require-single-type"` + TypeRequireGrouping bool `mapstructure:"type-require-grouping"` + VarRequireSingleVar bool `mapstructure:"var-require-single-var"` + VarRequireGrouping bool `mapstructure:"var-require-grouping"` +} + type IfshortSettings struct { MaxDeclLines int `mapstructure:"max-decl-lines"` MaxDeclChars int `mapstructure:"max-decl-chars"` } type ImportAsSettings struct { - Alias []ImportAsAlias - NoUnaliased bool `mapstructure:"no-unaliased"` + Alias []ImportAsAlias + NoUnaliased bool `mapstructure:"no-unaliased"` + NoExtraAliases bool `mapstructure:"no-extra-aliases"` } type ImportAsAlias struct { @@ -322,11 +619,38 @@ type ImportAsAlias struct { Alias string } +type 
INamedParamSettings struct { + SkipSingleParam bool `mapstructure:"skip-single-param"` +} + +type InterfaceBloatSettings struct { + Max int `mapstructure:"max"` +} + +type IreturnSettings struct { + Allow []string `mapstructure:"allow"` + Reject []string `mapstructure:"reject"` +} + type LllSettings struct { LineLength int `mapstructure:"line-length"` TabWidth int `mapstructure:"tab-width"` } +type LoggerCheckSettings struct { + Kitlog bool `mapstructure:"kitlog"` + Klog bool `mapstructure:"klog"` + Logr bool `mapstructure:"logr"` + Zap bool `mapstructure:"zap"` + RequireStringKey bool `mapstructure:"require-string-key"` + NoPrintfLike bool `mapstructure:"no-printf-like"` + Rules []string `mapstructure:"rules"` +} + +type MaintIdxSettings struct { + Under int `mapstructure:"under"` +} + type MakezeroSettings struct { Always bool } @@ -336,10 +660,20 @@ type MalignedSettings struct { } type MisspellSettings struct { - Locale string + Mode string `mapstructure:"mode"` + Locale string + // TODO(ldez): v2 the option must be renamed to `IgnoredRules`. IgnoreWords []string `mapstructure:"ignore-words"` } +type MustTagSettings struct { + Functions []struct { + Name string `mapstructure:"name"` + Tag string `mapstructure:"tag"` + ArgPos int `mapstructure:"arg-pos"` + } `mapstructure:"functions"` +} + type NakedretSettings struct { MaxFuncLines int `mapstructure:"max-func-lines"` } @@ -348,14 +682,37 @@ type NestifSettings struct { MinComplexity int `mapstructure:"min-complexity"` } +type NilNilSettings struct { + CheckedTypes []string `mapstructure:"checked-types"` +} + +type NlreturnSettings struct { + BlockSize int `mapstructure:"block-size"` +} + type NoLintLintSettings struct { RequireExplanation bool `mapstructure:"require-explanation"` - AllowLeadingSpace bool `mapstructure:"allow-leading-space"` RequireSpecific bool `mapstructure:"require-specific"` AllowNoExplanation []string `mapstructure:"allow-no-explanation"` AllowUnused bool `mapstructure:"allow-unused"` } +type NoNamedReturnsSettings struct { + ReportErrorInDefer bool `mapstructure:"report-error-in-defer"` +} + +type ParallelTestSettings struct { + IgnoreMissing bool `mapstructure:"ignore-missing"` + IgnoreMissingSubtests bool `mapstructure:"ignore-missing-subtests"` +} + +type PerfSprintSettings struct { + IntConversion bool `mapstructure:"int-conversion"` + ErrError bool `mapstructure:"err-error"` + ErrorF bool `mapstructure:"errorf"` + SprintF1 bool `mapstructure:"sprintf1"` +} + type PreallocSettings struct { Simple bool RangeLoops bool `mapstructure:"range-loops"` @@ -372,13 +729,26 @@ type PromlinterSettings struct { DisabledLinters []string `mapstructure:"disabled-linters"` } +type ProtoGetterSettings struct { + SkipGeneratedBy []string `mapstructure:"skip-generated-by"` + SkipFiles []string `mapstructure:"skip-files"` + SkipAnyGenerated bool `mapstructure:"skip-any-generated"` + ReplaceFirstArgInAppend bool `mapstructure:"replace-first-arg-in-append"` +} + +type ReassignSettings struct { + Patterns []string `mapstructure:"patterns"` +} + type ReviveSettings struct { + MaxOpenFiles int `mapstructure:"max-open-files"` IgnoreGeneratedHeader bool `mapstructure:"ignore-generated-header"` Confidence float64 Severity string + EnableAllRules bool `mapstructure:"enable-all-rules"` Rules []struct { Name string - Arguments []interface{} + Arguments []any Severity string Disabled bool } @@ -394,7 +764,24 @@ type RowsErrCheckSettings struct { Packages []string } +type SlogLintSettings struct { + NoMixedArgs bool 
`mapstructure:"no-mixed-args"` + KVOnly bool `mapstructure:"kv-only"` + AttrOnly bool `mapstructure:"attr-only"` + ContextOnly bool `mapstructure:"context-only"` + StaticMsg bool `mapstructure:"static-msg"` + NoRawKeys bool `mapstructure:"no-raw-keys"` + KeyNamingCase string `mapstructure:"key-naming-case"` + ArgsOnSepLines bool `mapstructure:"args-on-sep-lines"` +} + +type SpancheckSettings struct { + Checks []string `mapstructure:"checks"` + IgnoreCheckSignatures []string `mapstructure:"ignore-check-signatures"` +} + type StaticCheckSettings struct { + // Deprecated: use the global `run.go` instead. GoVersion string `mapstructure:"go"` Checks []string `mapstructure:"checks"` @@ -411,6 +798,13 @@ type StructCheckSettings struct { CheckExportedFields bool `mapstructure:"exported-fields"` } +type TagAlignSettings struct { + Align bool `mapstructure:"align"` + Sort bool `mapstructure:"sort"` + Order []string `mapstructure:"order"` + Strict bool `mapstructure:"strict"` +} + type TagliatelleSettings struct { Case struct { Rules map[string]string @@ -418,26 +812,60 @@ type TagliatelleSettings struct { } } +type TestifylintSettings struct { + EnableAll bool `mapstructure:"enable-all"` + DisableAll bool `mapstructure:"disable-all"` + EnabledCheckers []string `mapstructure:"enable"` + DisabledCheckers []string `mapstructure:"disable"` + + ExpectedActual struct { + ExpVarPattern string `mapstructure:"pattern"` + } `mapstructure:"expected-actual"` + + RequireError struct { + FnPattern string `mapstructure:"fn-pattern"` + } `mapstructure:"require-error"` + + SuiteExtraAssertCall struct { + Mode string `mapstructure:"mode"` + } `mapstructure:"suite-extra-assert-call"` +} + type TestpackageSettings struct { - SkipRegexp string `mapstructure:"skip-regexp"` + SkipRegexp string `mapstructure:"skip-regexp"` + AllowPackages []string `mapstructure:"allow-packages"` } type ThelperSettings struct { - Test struct { - First bool `mapstructure:"first"` - Name bool `mapstructure:"name"` - Begin bool `mapstructure:"begin"` - } `mapstructure:"test"` - Benchmark struct { - First bool `mapstructure:"first"` - Name bool `mapstructure:"name"` - Begin bool `mapstructure:"begin"` - } `mapstructure:"benchmark"` - TB struct { - First bool `mapstructure:"first"` - Name bool `mapstructure:"name"` - Begin bool `mapstructure:"begin"` - } `mapstructure:"tb"` + Test ThelperOptions `mapstructure:"test"` + Fuzz ThelperOptions `mapstructure:"fuzz"` + Benchmark ThelperOptions `mapstructure:"benchmark"` + TB ThelperOptions `mapstructure:"tb"` +} + +type ThelperOptions struct { + First *bool `mapstructure:"first"` + Name *bool `mapstructure:"name"` + Begin *bool `mapstructure:"begin"` +} + +type TenvSettings struct { + All bool `mapstructure:"all"` +} + +type UseStdlibVarsSettings struct { + HTTPMethod bool `mapstructure:"http-method"` + HTTPStatusCode bool `mapstructure:"http-status-code"` + TimeWeekday bool `mapstructure:"time-weekday"` + TimeMonth bool `mapstructure:"time-month"` + TimeLayout bool `mapstructure:"time-layout"` + CryptoHash bool `mapstructure:"crypto-hash"` + DefaultRPCPath bool `mapstructure:"default-rpc-path"` + OSDevNull bool `mapstructure:"os-dev-null"` + SQLIsolationLevel bool `mapstructure:"sql-isolation-level"` + TLSSignatureScheme bool `mapstructure:"tls-signature-scheme"` + ConstantKind bool `mapstructure:"constant-kind"` + SyslogPriority bool `mapstructure:"syslog-priority"` } type UnparamSettings struct { @@ -445,34 +873,79 @@ type UnparamSettings struct { Algo string } +type UnusedSettings struct { + 
FieldWritesAreUses bool `mapstructure:"field-writes-are-uses"` + PostStatementsAreReads bool `mapstructure:"post-statements-are-reads"` + ExportedIsUsed bool `mapstructure:"exported-is-used"` + ExportedFieldsAreUsed bool `mapstructure:"exported-fields-are-used"` + ParametersAreUsed bool `mapstructure:"parameters-are-used"` + LocalVariablesAreUsed bool `mapstructure:"local-variables-are-used"` + GeneratedIsUsed bool `mapstructure:"generated-is-used"` +} + type VarCheckSettings struct { CheckExportedFields bool `mapstructure:"exported-fields"` } +type VarnamelenSettings struct { + MaxDistance int `mapstructure:"max-distance"` + MinNameLength int `mapstructure:"min-name-length"` + CheckReceiver bool `mapstructure:"check-receiver"` + CheckReturn bool `mapstructure:"check-return"` + CheckTypeParam bool `mapstructure:"check-type-param"` + IgnoreNames []string `mapstructure:"ignore-names"` + IgnoreTypeAssertOk bool `mapstructure:"ignore-type-assert-ok"` + IgnoreMapIndexOk bool `mapstructure:"ignore-map-index-ok"` + IgnoreChanRecvOk bool `mapstructure:"ignore-chan-recv-ok"` + IgnoreDecls []string `mapstructure:"ignore-decls"` +} + type WhitespaceSettings struct { MultiIf bool `mapstructure:"multi-if"` MultiFunc bool `mapstructure:"multi-func"` } type WrapcheckSettings struct { - IgnoreSigs []string `mapstructure:"ignoreSigs"` + // TODO(ldez): v2 the options must be renamed to use hyphen. + IgnoreSigs []string `mapstructure:"ignoreSigs"` + IgnoreSigRegexps []string `mapstructure:"ignoreSigRegexps"` + IgnorePackageGlobs []string `mapstructure:"ignorePackageGlobs"` + IgnoreInterfaceRegexps []string `mapstructure:"ignoreInterfaceRegexps"` } type WSLSettings struct { - StrictAppend bool `mapstructure:"strict-append"` - AllowAssignAndCallCuddle bool `mapstructure:"allow-assign-and-call"` - AllowAssignAndAnythingCuddle bool `mapstructure:"allow-assign-and-anything"` - AllowMultiLineAssignCuddle bool `mapstructure:"allow-multiline-assign"` - AllowCuddleDeclaration bool `mapstructure:"allow-cuddle-declarations"` - AllowTrailingComment bool `mapstructure:"allow-trailing-comment"` - AllowSeparatedLeadingComment bool `mapstructure:"allow-separated-leading-comment"` - ForceCuddleErrCheckAndAssign bool `mapstructure:"force-err-cuddling"` - ForceExclusiveShortDeclarations bool `mapstructure:"force-short-decl-cuddling"` - ForceCaseTrailingWhitespaceLimit int `mapstructure:"force-case-trailing-whitespace"` -} - + StrictAppend bool `mapstructure:"strict-append"` + AllowAssignAndCallCuddle bool `mapstructure:"allow-assign-and-call"` + AllowAssignAndAnythingCuddle bool `mapstructure:"allow-assign-and-anything"` + AllowMultiLineAssignCuddle bool `mapstructure:"allow-multiline-assign"` + ForceCaseTrailingWhitespaceLimit int `mapstructure:"force-case-trailing-whitespace"` + AllowTrailingComment bool `mapstructure:"allow-trailing-comment"` + AllowSeparatedLeadingComment bool `mapstructure:"allow-separated-leading-comment"` + AllowCuddleDeclaration bool `mapstructure:"allow-cuddle-declarations"` + AllowCuddleWithCalls []string `mapstructure:"allow-cuddle-with-calls"` + AllowCuddleWithRHS []string `mapstructure:"allow-cuddle-with-rhs"` + ForceCuddleErrCheckAndAssign bool `mapstructure:"force-err-cuddling"` + ErrorVariableNames []string `mapstructure:"error-variable-names"` + ForceExclusiveShortDeclarations bool `mapstructure:"force-short-decl-cuddling"` +} + +// CustomLinterSettings encapsulates the meta-data of a private linter. +// For example, a private linter may be added to the golangci config file as shown below. 
+// +// linters-settings: +// custom: +// example: +// path: /example.so +// description: The description of the linter +// original-url: github.com/golangci/example-linter type CustomLinterSettings struct { - Path string + // Path to a plugin *.so file that implements the private linter. + Path string + // Description describes the purpose of the private linter. Description string + // OriginalURL The URL containing the source code for the private linter. OriginalURL string `mapstructure:"original-url"` + + // Settings plugin settings only work with linterdb.PluginConstructor symbol. + Settings any } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/config/linters_settings_gocritic.go b/vendor/github.com/golangci/golangci-lint/pkg/config/linters_settings_gocritic.go deleted file mode 100644 index 34f8507581..0000000000 --- a/vendor/github.com/golangci/golangci-lint/pkg/config/linters_settings_gocritic.go +++ /dev/null @@ -1,365 +0,0 @@ -package config - -import ( - "fmt" - "sort" - "strings" - - _ "github.com/go-critic/go-critic/checkers" // this import register checkers - "github.com/go-critic/go-critic/framework/linter" - "github.com/pkg/errors" - - "github.com/golangci/golangci-lint/pkg/logutils" -) - -const gocriticDebugKey = "gocritic" - -var ( - gocriticDebugf = logutils.Debug(gocriticDebugKey) - isGocriticDebug = logutils.HaveDebugTag(gocriticDebugKey) - allGocriticCheckers = linter.GetCheckersInfo() - allGocriticCheckerMap = func() map[string]*linter.CheckerInfo { - checkInfoMap := make(map[string]*linter.CheckerInfo) - for _, checkInfo := range allGocriticCheckers { - checkInfoMap[checkInfo.Name] = checkInfo - } - return checkInfoMap - }() -) - -type GocriticCheckSettings map[string]interface{} - -type GocriticSettings struct { - EnabledChecks []string `mapstructure:"enabled-checks"` - DisabledChecks []string `mapstructure:"disabled-checks"` - EnabledTags []string `mapstructure:"enabled-tags"` - DisabledTags []string `mapstructure:"disabled-tags"` - SettingsPerCheck map[string]GocriticCheckSettings `mapstructure:"settings"` - - inferredEnabledChecks map[string]bool -} - -func debugChecksListf(checks []string, format string, args ...interface{}) { - if isGocriticDebug { - prefix := fmt.Sprintf(format, args...) 
- gocriticDebugf(prefix+" checks (%d): %s", len(checks), sprintStrings(checks)) - } -} - -func stringsSliceToSet(ss []string) map[string]bool { - ret := map[string]bool{} - for _, s := range ss { - ret[s] = true - } - - return ret -} - -func buildGocriticTagToCheckersMap() map[string][]string { - tagToCheckers := map[string][]string{} - for _, checker := range allGocriticCheckers { - for _, tag := range checker.Tags { - tagToCheckers[tag] = append(tagToCheckers[tag], checker.Name) - } - } - return tagToCheckers -} - -func gocriticCheckerTagsDebugf() { - if !isGocriticDebug { - return - } - - tagToCheckers := buildGocriticTagToCheckersMap() - - var allTags []string - for tag := range tagToCheckers { - allTags = append(allTags, tag) - } - sort.Strings(allTags) - - gocriticDebugf("All gocritic existing tags and checks:") - for _, tag := range allTags { - debugChecksListf(tagToCheckers[tag], " tag %q", tag) - } -} - -func (s *GocriticSettings) gocriticDisabledCheckersDebugf() { - if !isGocriticDebug { - return - } - - var disabledCheckers []string - for _, checker := range allGocriticCheckers { - if s.inferredEnabledChecks[strings.ToLower(checker.Name)] { - continue - } - - disabledCheckers = append(disabledCheckers, checker.Name) - } - - if len(disabledCheckers) == 0 { - gocriticDebugf("All checks are enabled") - } else { - debugChecksListf(disabledCheckers, "Final not used") - } -} - -func (s *GocriticSettings) InferEnabledChecks(log logutils.Log) { - gocriticCheckerTagsDebugf() - - enabledByDefaultChecks := getDefaultEnabledGocriticCheckersNames() - debugChecksListf(enabledByDefaultChecks, "Enabled by default") - - disabledByDefaultChecks := getDefaultDisabledGocriticCheckersNames() - debugChecksListf(disabledByDefaultChecks, "Disabled by default") - - var enabledChecks []string - - // EnabledTags - if len(s.EnabledTags) != 0 { - tagToCheckers := buildGocriticTagToCheckersMap() - for _, tag := range s.EnabledTags { - enabledChecks = append(enabledChecks, tagToCheckers[tag]...) - } - debugChecksListf(enabledChecks, "Enabled by config tags %s", sprintStrings(s.EnabledTags)) - } - - if !(len(s.EnabledTags) == 0 && len(s.EnabledChecks) != 0) { - // don't use default checks only if we have no enabled tags and enable some checks manually - enabledChecks = append(enabledChecks, enabledByDefaultChecks...) - } - - // DisabledTags - if len(s.DisabledTags) != 0 { - enabledChecks = filterByDisableTags(enabledChecks, s.DisabledTags, log) - } - - // EnabledChecks - if len(s.EnabledChecks) != 0 { - debugChecksListf(s.EnabledChecks, "Enabled by config") - - alreadyEnabledChecksSet := stringsSliceToSet(enabledChecks) - for _, enabledCheck := range s.EnabledChecks { - if alreadyEnabledChecksSet[enabledCheck] { - log.Warnf("No need to enable check %q: it's already enabled", enabledCheck) - continue - } - enabledChecks = append(enabledChecks, enabledCheck) - } - } - - // DisabledChecks - if len(s.DisabledChecks) != 0 { - debugChecksListf(s.DisabledChecks, "Disabled by config") - - enabledChecksSet := stringsSliceToSet(enabledChecks) - for _, disabledCheck := range s.DisabledChecks { - if !enabledChecksSet[disabledCheck] { - log.Warnf("Gocritic check %q was explicitly disabled via config. 
However, as this check"+ - "is disabled by default, there is no need to explicitly disable it via config.", disabledCheck) - continue - } - delete(enabledChecksSet, disabledCheck) - } - - enabledChecks = nil - for enabledCheck := range enabledChecksSet { - enabledChecks = append(enabledChecks, enabledCheck) - } - } - - s.inferredEnabledChecks = map[string]bool{} - for _, check := range enabledChecks { - s.inferredEnabledChecks[strings.ToLower(check)] = true - } - - debugChecksListf(enabledChecks, "Final used") - s.gocriticDisabledCheckersDebugf() -} - -func validateStringsUniq(ss []string) error { - set := map[string]bool{} - for _, s := range ss { - _, ok := set[s] - if ok { - return fmt.Errorf("%q occurs multiple times in list", s) - } - set[s] = true - } - - return nil -} - -func intersectStringSlice(s1, s2 []string) []string { - s1Map := make(map[string]struct{}) - for _, s := range s1 { - s1Map[s] = struct{}{} - } - - result := make([]string, 0) - for _, s := range s2 { - if _, exists := s1Map[s]; exists { - result = append(result, s) - } - } - - return result -} - -func (s *GocriticSettings) Validate(log logutils.Log) error { - if len(s.EnabledTags) == 0 { - if len(s.EnabledChecks) != 0 && len(s.DisabledChecks) != 0 { - return errors.New("both enabled and disabled check aren't allowed for gocritic") - } - } else { - if err := validateStringsUniq(s.EnabledTags); err != nil { - return errors.Wrap(err, "validate enabled tags") - } - - tagToCheckers := buildGocriticTagToCheckersMap() - for _, tag := range s.EnabledTags { - if _, ok := tagToCheckers[tag]; !ok { - return fmt.Errorf("gocritic [enabled]tag %q doesn't exist", tag) - } - } - } - - if len(s.DisabledTags) > 0 { - tagToCheckers := buildGocriticTagToCheckersMap() - for _, tag := range s.EnabledTags { - if _, ok := tagToCheckers[tag]; !ok { - return fmt.Errorf("gocritic [disabled]tag %q doesn't exist", tag) - } - } - } - - if err := validateStringsUniq(s.EnabledChecks); err != nil { - return errors.Wrap(err, "validate enabled checks") - } - if err := validateStringsUniq(s.DisabledChecks); err != nil { - return errors.Wrap(err, "validate disabled checks") - } - - if err := s.validateCheckerNames(log); err != nil { - return errors.Wrap(err, "validation failed") - } - - return nil -} - -func (s *GocriticSettings) IsCheckEnabled(name string) bool { - return s.inferredEnabledChecks[strings.ToLower(name)] -} - -func sprintAllowedCheckerNames(allowedNames map[string]bool) string { - var namesSlice []string - for name := range allowedNames { - namesSlice = append(namesSlice, name) - } - return sprintStrings(namesSlice) -} - -func sprintStrings(ss []string) string { - sort.Strings(ss) - return fmt.Sprint(ss) -} - -// getAllCheckerNames returns a map containing all checker names supported by gocritic. 
-func getAllCheckerNames() map[string]bool { - allCheckerNames := map[string]bool{} - for _, checker := range allGocriticCheckers { - allCheckerNames[strings.ToLower(checker.Name)] = true - } - - return allCheckerNames -} - -func isEnabledByDefaultGocriticCheck(info *linter.CheckerInfo) bool { - return !info.HasTag("experimental") && - !info.HasTag("opinionated") && - !info.HasTag("performance") -} - -func getDefaultEnabledGocriticCheckersNames() []string { - var enabled []string - for _, info := range allGocriticCheckers { - enable := isEnabledByDefaultGocriticCheck(info) - if enable { - enabled = append(enabled, info.Name) - } - } - - return enabled -} - -func getDefaultDisabledGocriticCheckersNames() []string { - var disabled []string - for _, info := range allGocriticCheckers { - enable := isEnabledByDefaultGocriticCheck(info) - if !enable { - disabled = append(disabled, info.Name) - } - } - - return disabled -} - -func (s *GocriticSettings) validateCheckerNames(log logutils.Log) error { - allowedNames := getAllCheckerNames() - - for _, name := range s.EnabledChecks { - if !allowedNames[strings.ToLower(name)] { - return fmt.Errorf("enabled checker %s doesn't exist, all existing checkers: %s", - name, sprintAllowedCheckerNames(allowedNames)) - } - } - - for _, name := range s.DisabledChecks { - if !allowedNames[strings.ToLower(name)] { - return fmt.Errorf("disabled checker %s doesn't exist, all existing checkers: %s", - name, sprintAllowedCheckerNames(allowedNames)) - } - } - - for checkName := range s.SettingsPerCheck { - if _, ok := allowedNames[checkName]; !ok { - return fmt.Errorf("invalid setting, checker %s doesn't exist, all existing checkers: %s", - checkName, sprintAllowedCheckerNames(allowedNames)) - } - if !s.IsCheckEnabled(checkName) { - log.Warnf("Gocritic settings were provided for not enabled check %q", checkName) - } - } - - return nil -} - -func (s *GocriticSettings) GetLowercasedParams() map[string]GocriticCheckSettings { - ret := map[string]GocriticCheckSettings{} - for checker, params := range s.SettingsPerCheck { - ret[strings.ToLower(checker)] = params - } - return ret -} - -func filterByDisableTags(enabledChecks, disableTags []string, log logutils.Log) []string { - enabledChecksSet := stringsSliceToSet(enabledChecks) - for _, enabledCheck := range enabledChecks { - checkInfo, checkInfoExists := allGocriticCheckerMap[enabledCheck] - if !checkInfoExists { - log.Warnf("Gocritic check %q was not exists via filtering disabled tags", enabledCheck) - continue - } - hitTags := intersectStringSlice(checkInfo.Tags, disableTags) - if len(hitTags) != 0 { - delete(enabledChecksSet, enabledCheck) - } - debugChecksListf(enabledChecks, "Disabled by config tags %s", sprintStrings(disableTags)) - } - enabledChecks = nil - for enabledCheck := range enabledChecksSet { - enabledChecks = append(enabledChecks, enabledCheck) - } - return enabledChecks -} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/config/output.go b/vendor/github.com/golangci/golangci-lint/pkg/config/output.go index d67f110f67..e872639205 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/config/output.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/config/output.go @@ -5,11 +5,13 @@ const ( OutFormatLineNumber = "line-number" OutFormatColoredLineNumber = "colored-line-number" OutFormatTab = "tab" + OutFormatColoredTab = "colored-tab" OutFormatCheckstyle = "checkstyle" OutFormatCodeClimate = "code-climate" OutFormatHTML = "html" OutFormatJunitXML = "junit-xml" OutFormatGithubActions = 
"github-actions" + OutFormatTeamCity = "teamcity" ) var OutFormats = []string{ @@ -22,15 +24,18 @@ var OutFormats = []string{ OutFormatHTML, OutFormatJunitXML, OutFormatGithubActions, + OutFormatTeamCity, } type Output struct { Format string - Color string PrintIssuedLine bool `mapstructure:"print-issued-lines"` PrintLinterName bool `mapstructure:"print-linter-name"` UniqByLine bool `mapstructure:"uniq-by-line"` SortResults bool `mapstructure:"sort-results"` PrintWelcomeMessage bool `mapstructure:"print-welcome"` PathPrefix string `mapstructure:"path-prefix"` + + // only work with CLI flags because the setup of logs is done before the config file parsing. + Color string } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/config/reader.go b/vendor/github.com/golangci/golangci-lint/pkg/config/reader.go index 6e97277daa..0c4fa13b06 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/config/reader.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/config/reader.go @@ -5,14 +5,16 @@ import ( "fmt" "os" "path/filepath" + "slices" "strings" + "github.com/go-viper/mapstructure/v2" "github.com/mitchellh/go-homedir" "github.com/spf13/viper" + "github.com/golangci/golangci-lint/pkg/exitcodes" "github.com/golangci/golangci-lint/pkg/fsutils" "github.com/golangci/golangci-lint/pkg/logutils" - "github.com/golangci/golangci-lint/pkg/sliceutil" ) type FileReader struct { @@ -36,15 +38,20 @@ func (r *FileReader) Read() error { configFile, err := r.parseConfigOption() if err != nil { - if err == errConfigDisabled { + if errors.Is(err, errConfigDisabled) { return nil } - return fmt.Errorf("can't parse --config option: %s", err) + return fmt.Errorf("can't parse --config option: %w", err) } if configFile != "" { viper.SetConfigFile(configFile) + + // Assume YAML if the file has no extension. + if filepath.Ext(configFile) == "" { + viper.SetConfigType("yaml") + } } else { r.setupConfigFileSearch() } @@ -58,7 +65,7 @@ func (r *FileReader) parseConfig() error { return nil } - return fmt.Errorf("can't read viper config: %s", err) + return fmt.Errorf("can't read viper config: %w", err) } usedConfigFile := viper.ConfigFileUsed() @@ -66,23 +73,43 @@ func (r *FileReader) parseConfig() error { return nil } - usedConfigFile, err := fsutils.ShortestRelPath(usedConfigFile, "") + if usedConfigFile == os.Stdin.Name() { + usedConfigFile = "" + r.log.Infof("Reading config file stdin") + } else { + var err error + usedConfigFile, err = fsutils.ShortestRelPath(usedConfigFile, "") + if err != nil { + r.log.Warnf("Can't pretty print config file path: %v", err) + } + + r.log.Infof("Used config file %s", usedConfigFile) + } + + usedConfigDir, err := filepath.Abs(filepath.Dir(usedConfigFile)) if err != nil { - r.log.Warnf("Can't pretty print config file path: %s", err) + return errors.New("can't get config directory") } - r.log.Infof("Used config file %s", usedConfigFile) + r.cfg.cfgDir = usedConfigDir + + if err := viper.Unmarshal(r.cfg, viper.DecodeHook(mapstructure.ComposeDecodeHookFunc( + // Default hooks (https://github.com/spf13/viper/blob/518241257478c557633ab36e474dfcaeb9a3c623/viper.go#L135-L138). + mapstructure.StringToTimeDurationHookFunc(), + mapstructure.StringToSliceHookFunc(","), - if err := viper.Unmarshal(r.cfg); err != nil { - return fmt.Errorf("can't unmarshal config by viper: %s", err) + // Needed for forbidigo. 
+ mapstructure.TextUnmarshallerHookFunc(), + ))); err != nil { + return fmt.Errorf("can't unmarshal config by viper: %w", err) } if err := r.validateConfig(); err != nil { - return fmt.Errorf("can't validate config: %s", err) + return fmt.Errorf("can't validate config: %w", err) } if r.cfg.InternalTest { // just for testing purposes: to detect config file usage fmt.Fprintln(logutils.StdOut, "test") - os.Exit(0) + os.Exit(exitcodes.Success) } return nil @@ -111,7 +138,7 @@ func (r *FileReader) validateConfig() error { } for i, rule := range c.Issues.ExcludeRules { if err := rule.Validate(); err != nil { - return fmt.Errorf("error in exclude rule #%d: %v", i, err) + return fmt.Errorf("error in exclude rule #%d: %w", i, err) } } if len(c.Severity.Rules) > 0 && c.Severity.Default == "" { @@ -119,11 +146,11 @@ func (r *FileReader) validateConfig() error { } for i, rule := range c.Severity.Rules { if err := rule.Validate(); err != nil { - return fmt.Errorf("error in severity rule #%d: %v", i, err) + return fmt.Errorf("error in severity rule #%d: %w", i, err) } } if err := c.LintersSettings.Govet.Validate(); err != nil { - return fmt.Errorf("error in govet config: %v", err) + return fmt.Errorf("error in govet config: %w", err) } return nil } @@ -184,7 +211,7 @@ func (r *FileReader) setupConfigFileSearch() { // find home directory for global config if home, err := homedir.Dir(); err != nil { r.log.Warnf("Can't get user's home directory: %s", err.Error()) - } else if !sliceutil.Contains(configSearchPaths, home) { + } else if !slices.Contains(configSearchPaths, home) { configSearchPaths = append(configSearchPaths, home) } @@ -205,7 +232,7 @@ func (r *FileReader) parseConfigOption() (string, error) { configFile := cfg.Run.Config if cfg.Run.NoConfig && configFile != "" { - return "", fmt.Errorf("can't combine option --config and --no-config") + return "", errors.New("can't combine option --config and --no-config") } if cfg.Run.NoConfig { @@ -214,7 +241,7 @@ func (r *FileReader) parseConfigOption() (string, error) { configFile, err := homedir.Expand(configFile) if err != nil { - return "", fmt.Errorf("failed to expand configuration path") + return "", errors.New("failed to expand configuration path") } return configFile, nil diff --git a/vendor/github.com/golangci/golangci-lint/pkg/config/run.go b/vendor/github.com/golangci/golangci-lint/pkg/config/run.go index ff6347945e..2bb21d9fa2 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/config/run.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/config/run.go @@ -2,6 +2,7 @@ package config import "time" +// Run encapsulates the config options for running the linter analysis. type Run struct { IsVerbose bool `mapstructure:"verbose"` Silent bool @@ -11,11 +12,13 @@ type Run struct { Concurrency int PrintResourcesUsage bool `mapstructure:"print-resources-usage"` - Config string + Config string // The path to the golangci config file, as specified with the --config argument. 
NoConfig bool Args []string + Go string `mapstructure:"go"` + BuildTags []string `mapstructure:"build-tags"` ModulesDownloadMode string `mapstructure:"modules-download-mode"` @@ -34,4 +37,6 @@ type Run struct { AllowParallelRunners bool `mapstructure:"allow-parallel-runners"` AllowSerialRunners bool `mapstructure:"allow-serial-runners"` + + ShowStats bool `mapstructure:"show-stats"` } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/exitcodes/exitcodes.go b/vendor/github.com/golangci/golangci-lint/pkg/exitcodes/exitcodes.go index 536f903614..83331dbe7b 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/exitcodes/exitcodes.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/exitcodes/exitcodes.go @@ -1,14 +1,14 @@ package exitcodes const ( - Success = 0 - IssuesFound = 1 - WarningInTest = 2 - Failure = 3 - Timeout = 4 - NoGoFiles = 5 - NoConfigFileDetected = 6 - ErrorWasLogged = 7 + Success = iota + IssuesFound + WarningInTest + Failure + Timeout + NoGoFiles + NoConfigFileDetected + ErrorWasLogged ) type ExitError struct { @@ -30,5 +30,3 @@ var ( Code: Failure, } ) - -// 1 diff --git a/vendor/github.com/golangci/golangci-lint/pkg/fsutils/filecache.go b/vendor/github.com/golangci/golangci-lint/pkg/fsutils/filecache.go index 2b17a03986..e8e5ba19b7 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/fsutils/filecache.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/fsutils/filecache.go @@ -2,11 +2,9 @@ package fsutils import ( "fmt" - "io/ioutil" + "os" "sync" - "github.com/pkg/errors" - "github.com/golangci/golangci-lint/pkg/logutils" ) @@ -24,9 +22,9 @@ func (fc *FileCache) GetFileBytes(filePath string) ([]byte, error) { return cachedBytes.([]byte), nil } - fileBytes, err := ioutil.ReadFile(filePath) + fileBytes, err := os.ReadFile(filePath) if err != nil { - return nil, errors.Wrapf(err, "can't read file %s", filePath) + return nil, fmt.Errorf("can't read file %s: %w", filePath, err) } fc.files.Store(filePath, fileBytes) @@ -56,7 +54,7 @@ func PrettifyBytesCount(n int64) string { func (fc *FileCache) PrintStats(log logutils.Log) { var size int64 var mapLen int - fc.files.Range(func(_, fileBytes interface{}) bool { + fc.files.Range(func(_, fileBytes any) bool { mapLen++ size += int64(len(fileBytes.([]byte))) diff --git a/vendor/github.com/golangci/golangci-lint/pkg/fsutils/files.go b/vendor/github.com/golangci/golangci-lint/pkg/fsutils/files.go new file mode 100644 index 0000000000..4398ab9fc1 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/fsutils/files.go @@ -0,0 +1,33 @@ +package fsutils + +import "path/filepath" + +// Files combines different operations related to handling file paths and content. +type Files struct { + *LineCache + pathPrefix string +} + +func NewFiles(lc *LineCache, pathPrefix string) *Files { + return &Files{ + LineCache: lc, + pathPrefix: pathPrefix, + } +} + +// WithPathPrefix takes a path that is relative to the current directory (as used in issues) +// and adds the configured path prefix, if there is one. +// The resulting path then can be shown to the user or compared against paths specified in the configuration. +func (f *Files) WithPathPrefix(relativePath string) string { + return WithPathPrefix(f.pathPrefix, relativePath) +} + +// WithPathPrefix takes a path that is relative to the current directory (as used in issues) +// and adds the configured path prefix, if there is one. +// The resulting path then can be shown to the user or compared against paths specified in the configuration. 
+func WithPathPrefix(pathPrefix, relativePath string) string { + if pathPrefix == "" { + return relativePath + } + return filepath.Join(pathPrefix, relativePath) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/fsutils/fsutils.go b/vendor/github.com/golangci/golangci-lint/pkg/fsutils/fsutils.go index a39c105e43..715a3a4af0 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/fsutils/fsutils.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/fsutils/fsutils.go @@ -34,7 +34,7 @@ func Getwd() (string, error) { evaledWd, err := EvalSymlinks(cachedWd) if err != nil { - cachedWd, cachedWdError = "", fmt.Errorf("can't eval symlinks on wd %s: %s", cachedWd, err) + cachedWd, cachedWdError = "", fmt.Errorf("can't eval symlinks on wd %s: %w", cachedWd, err) return } @@ -70,13 +70,13 @@ func ShortestRelPath(path, wd string) (string, error) { var err error wd, err = Getwd() if err != nil { - return "", fmt.Errorf("can't get working directory: %s", err) + return "", fmt.Errorf("can't get working directory: %w", err) } } evaledPath, err := EvalSymlinks(path) if err != nil { - return "", fmt.Errorf("can't eval symlinks for path %s: %s", path, err) + return "", fmt.Errorf("can't eval symlinks for path %s: %w", path, err) } path = evaledPath @@ -92,7 +92,7 @@ func ShortestRelPath(path, wd string) (string, error) { relPath, err := filepath.Rel(wd, absPath) if err != nil { - return "", fmt.Errorf("can't get relative path for path %s and root %s: %s", + return "", fmt.Errorf("can't get relative path for path %s and root %s: %w", absPath, wd, err) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/fsutils/linecache.go b/vendor/github.com/golangci/golangci-lint/pkg/fsutils/linecache.go index ab408e7d54..2e92264846 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/fsutils/linecache.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/fsutils/linecache.go @@ -4,8 +4,6 @@ import ( "bytes" "fmt" "sync" - - "github.com/pkg/errors" ) type fileLinesCache [][]byte @@ -21,7 +19,7 @@ func NewLineCache(fc *FileCache) *LineCache { } } -// GetLine returns a index1-th (1-based index) line from the file on filePath +// GetLine returns the index1-th (1-based index) line from the file on filePath func (lc *LineCache) GetLine(filePath string, index1 int) (string, error) { if index1 == 0 { // some linters, e.g. 
gosec can do it: it really means first line index1 = 1 @@ -39,7 +37,7 @@ func (lc *LineCache) GetLine(filePath string, index1 int) (string, error) { func (lc *LineCache) getRawLine(filePath string, index0 int) ([]byte, error) { fc, err := lc.getFileCache(filePath) if err != nil { - return nil, errors.Wrapf(err, "failed to get file %s lines cache", filePath) + return nil, fmt.Errorf("failed to get file %s lines cache: %w", filePath, err) } if index0 < 0 { @@ -61,7 +59,7 @@ func (lc *LineCache) getFileCache(filePath string) (fileLinesCache, error) { fileBytes, err := lc.fileCache.GetFileBytes(filePath) if err != nil { - return nil, errors.Wrapf(err, "can't get file %s bytes from cache", filePath) + return nil, fmt.Errorf("can't get file %s bytes from cache: %w", filePath, err) } fc := bytes.Split(fileBytes, []byte("\n")) diff --git a/vendor/github.com/golangci/golangci-lint/pkg/fsutils/path_unix.go b/vendor/github.com/golangci/golangci-lint/pkg/fsutils/path_unix.go new file mode 100644 index 0000000000..2a171ecc0c --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/fsutils/path_unix.go @@ -0,0 +1,8 @@ +//go:build !windows + +package fsutils + +// NormalizePathInRegex it's a noop function on Unix. +func NormalizePathInRegex(path string) string { + return path +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/fsutils/path_windows.go b/vendor/github.com/golangci/golangci-lint/pkg/fsutils/path_windows.go new file mode 100644 index 0000000000..650aae1e16 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/fsutils/path_windows.go @@ -0,0 +1,28 @@ +//go:build windows + +package fsutils + +import ( + "path/filepath" + "regexp" + "strings" +) + +var separatorToReplace = regexp.QuoteMeta(string(filepath.Separator)) + +// NormalizePathInRegex normalizes path in regular expressions. +// noop on Unix. +// This replacing should be safe because "/" are disallowed in Windows +// https://docs.microsoft.com/windows/win32/fileio/naming-a-file +func NormalizePathInRegex(path string) string { + // remove redundant character escape "\/" https://github.com/golangci/golangci-lint/issues/3277 + clean := regexp.MustCompile(`\\+/`). 
+ ReplaceAllStringFunc(path, func(s string) string { + if strings.Count(s, "\\")%2 == 0 { + return s + } + return s[1:] + }) + + return strings.ReplaceAll(clean, "/", separatorToReplace) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/asasalint.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/asasalint.go new file mode 100644 index 0000000000..67dde79918 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/asasalint.go @@ -0,0 +1,30 @@ +package golinters + +import ( + "github.com/alingse/asasalint" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewAsasalint(setting *config.AsasalintSettings) *goanalysis.Linter { + cfg := asasalint.LinterSetting{} + if setting != nil { + cfg.Exclude = setting.Exclude + cfg.NoBuiltinExclusions = !setting.UseBuiltinExclusions + cfg.IgnoreTest = setting.IgnoreTest + } + + a, err := asasalint.NewAnalyzer(cfg) + if err != nil { + linterLogger.Fatalf("asasalint: create analyzer: %v", err) + } + + return goanalysis.NewLinter( + a.Name, + a.Doc, + []*analysis.Analyzer{a}, + nil, + ).WithLoadMode(goanalysis.LoadModeTypesInfo) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/asciicheck.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/asciicheck.go index 1bf8c7b7da..7e7ee05e5c 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/asciicheck.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/asciicheck.go @@ -8,12 +8,12 @@ import ( ) func NewAsciicheck() *goanalysis.Linter { + a := asciicheck.NewAnalyzer() + return goanalysis.NewLinter( - "asciicheck", - "Simple linter to check that your code does not contain non-ASCII identifiers", - []*analysis.Analyzer{ - asciicheck.NewAnalyzer(), - }, + a.Name, + a.Doc, + []*analysis.Analyzer{a}, nil, ).WithLoadMode(goanalysis.LoadModeSyntax) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/bidichk.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/bidichk.go new file mode 100644 index 0000000000..ef266f55ca --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/bidichk.go @@ -0,0 +1,59 @@ +package golinters + +import ( + "strings" + + "github.com/breml/bidichk/pkg/bidichk" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewBiDiChkFuncName(cfg *config.BiDiChkSettings) *goanalysis.Linter { + a := bidichk.NewAnalyzer() + + cfgMap := map[string]map[string]any{} + if cfg != nil { + var opts []string + + if cfg.LeftToRightEmbedding { + opts = append(opts, "LEFT-TO-RIGHT-EMBEDDING") + } + if cfg.RightToLeftEmbedding { + opts = append(opts, "RIGHT-TO-LEFT-EMBEDDING") + } + if cfg.PopDirectionalFormatting { + opts = append(opts, "POP-DIRECTIONAL-FORMATTING") + } + if cfg.LeftToRightOverride { + opts = append(opts, "LEFT-TO-RIGHT-OVERRIDE") + } + if cfg.RightToLeftOverride { + opts = append(opts, "RIGHT-TO-LEFT-OVERRIDE") + } + if cfg.LeftToRightIsolate { + opts = append(opts, "LEFT-TO-RIGHT-ISOLATE") + } + if cfg.RightToLeftIsolate { + opts = append(opts, "RIGHT-TO-LEFT-ISOLATE") + } + if cfg.FirstStrongIsolate { + opts = append(opts, "FIRST-STRONG-ISOLATE") + } + if cfg.PopDirectionalIsolate { + opts = append(opts, "POP-DIRECTIONAL-ISOLATE") + } + + cfgMap[a.Name] = map[string]any{ + "disallowed-runes": strings.Join(opts, ","), + } + } + + return 
goanalysis.NewLinter( + a.Name, + "Checks for dangerous unicode character sequences", + []*analysis.Analyzer{a}, + cfgMap, + ).WithLoadMode(goanalysis.LoadModeSyntax) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/bodyclose.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/bodyclose.go index 0e03813d1a..e56bd83f28 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/bodyclose.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/bodyclose.go @@ -8,14 +8,10 @@ import ( ) func NewBodyclose() *goanalysis.Linter { - analyzers := []*analysis.Analyzer{ - bodyclose.Analyzer, - } - return goanalysis.NewLinter( "bodyclose", "checks whether HTTP response body is closed successfully", - analyzers, + []*analysis.Analyzer{bodyclose.Analyzer}, nil, ).WithLoadMode(goanalysis.LoadModeTypesInfo) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/commons.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/commons.go new file mode 100644 index 0000000000..3b40e59bfe --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/commons.go @@ -0,0 +1,6 @@ +package golinters + +import "github.com/golangci/golangci-lint/pkg/logutils" + +// linterLogger must be use only when the context logger is not available. +var linterLogger = logutils.NewStderrLog(logutils.DebugKeyLinter) diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/containedctx.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/containedctx.go new file mode 100644 index 0000000000..8f7859af7d --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/containedctx.go @@ -0,0 +1,19 @@ +package golinters + +import ( + "github.com/sivchari/containedctx" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewContainedCtx() *goanalysis.Linter { + a := containedctx.Analyzer + + return goanalysis.NewLinter( + a.Name, + a.Doc, + []*analysis.Analyzer{a}, + nil, + ).WithLoadMode(goanalysis.LoadModeTypesInfo) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/contextcheck.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/contextcheck.go new file mode 100644 index 0000000000..f54192a189 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/contextcheck.go @@ -0,0 +1,22 @@ +package golinters + +import ( + "github.com/kkHAIKE/contextcheck" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" + "github.com/golangci/golangci-lint/pkg/lint/linter" +) + +func NewContextCheck() *goanalysis.Linter { + analyzer := contextcheck.NewAnalyzer(contextcheck.Configuration{}) + + return goanalysis.NewLinter( + analyzer.Name, + analyzer.Doc, + []*analysis.Analyzer{analyzer}, + nil, + ).WithContextSetter(func(lintCtx *linter.Context) { + analyzer.Run = contextcheck.NewRun(lintCtx.Packages, false) + }).WithLoadMode(goanalysis.LoadModeTypesInfo) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/cyclop.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/cyclop.go index 6f55b27975..f76c55552c 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/cyclop.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/cyclop.go @@ -8,14 +8,12 @@ import ( "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" ) -const cyclopName = "cyclop" - func NewCyclop(settings *config.Cyclop) *goanalysis.Linter { a := analyzer.NewAnalyzer() - var cfg 
map[string]map[string]interface{} + var cfg map[string]map[string]any if settings != nil { - d := map[string]interface{}{ + d := map[string]any{ "skipTests": settings.SkipTests, } @@ -27,11 +25,11 @@ func NewCyclop(settings *config.Cyclop) *goanalysis.Linter { d["packageAverage"] = settings.PackageAverage } - cfg = map[string]map[string]interface{}{a.Name: d} + cfg = map[string]map[string]any{a.Name: d} } return goanalysis.NewLinter( - cyclopName, + a.Name, "checks function and package cyclomatic complexity", []*analysis.Analyzer{a}, cfg, diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/deadcode.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/deadcode.go index 6ff38909fb..4f563c3813 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/deadcode.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/deadcode.go @@ -12,28 +12,36 @@ import ( "github.com/golangci/golangci-lint/pkg/result" ) +const deadcodeName = "deadcode" + func NewDeadcode() *goanalysis.Linter { - const linterName = "deadcode" var mu sync.Mutex var resIssues []goanalysis.Issue analyzer := &analysis.Analyzer{ - Name: linterName, + Name: deadcodeName, Doc: goanalysis.TheOnlyanalyzerDoc, - Run: func(pass *analysis.Pass) (interface{}, error) { + Run: func(pass *analysis.Pass) (any, error) { prog := goanalysis.MakeFakeLoaderProgram(pass) + issues, err := deadcodeAPI.Run(prog) if err != nil { return nil, err } + res := make([]goanalysis.Issue, 0, len(issues)) for _, i := range issues { res = append(res, goanalysis.NewIssue(&result.Issue{ Pos: i.Pos, Text: fmt.Sprintf("%s is unused", formatCode(i.UnusedIdentName, nil)), - FromLinter: linterName, + FromLinter: deadcodeName, }, pass)) } + + if len(issues) == 0 { + return nil, nil + } + mu.Lock() resIssues = append(resIssues, res...) 
mu.Unlock() @@ -41,8 +49,9 @@ func NewDeadcode() *goanalysis.Linter { return nil, nil }, } + return goanalysis.NewLinter( - linterName, + deadcodeName, "Finds unused code", []*analysis.Analyzer{analyzer}, nil, diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/decorder.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/decorder.go new file mode 100644 index 0000000000..5202a03a4f --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/decorder.go @@ -0,0 +1,44 @@ +package golinters + +import ( + "strings" + + "gitlab.com/bosi/decorder" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewDecorder(settings *config.DecorderSettings) *goanalysis.Linter { + a := decorder.Analyzer + + // disable all rules/checks by default + cfg := map[string]any{ + "ignore-underscore-vars": false, + "disable-dec-num-check": true, + "disable-type-dec-num-check": false, + "disable-const-dec-num-check": false, + "disable-var-dec-num-check": false, + "disable-dec-order-check": true, + "disable-init-func-first-check": true, + } + + if settings != nil { + cfg["dec-order"] = strings.Join(settings.DecOrder, ",") + cfg["ignore-underscore-vars"] = settings.IgnoreUnderscoreVars + cfg["disable-dec-num-check"] = settings.DisableDecNumCheck + cfg["disable-type-dec-num-check"] = settings.DisableTypeDecNumCheck + cfg["disable-const-dec-num-check"] = settings.DisableConstDecNumCheck + cfg["disable-var-dec-num-check"] = settings.DisableVarDecNumCheck + cfg["disable-dec-order-check"] = settings.DisableDecOrderCheck + cfg["disable-init-func-first-check"] = settings.DisableInitFuncFirstCheck + } + + return goanalysis.NewLinter( + a.Name, + a.Doc, + []*analysis.Analyzer{a}, + map[string]map[string]any{a.Name: cfg}, + ).WithLoadMode(goanalysis.LoadModeSyntax) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/depguard.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/depguard.go index aa372e9568..49e471df82 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/depguard.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/depguard.go @@ -1,112 +1,50 @@ package golinters import ( - "fmt" - "strings" - "sync" - - "github.com/OpenPeeDeeP/depguard" + "github.com/OpenPeeDeeP/depguard/v2" "golang.org/x/tools/go/analysis" - "golang.org/x/tools/go/loader" //nolint:staticcheck // require changes in github.com/OpenPeeDeeP/depguard + "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" "github.com/golangci/golangci-lint/pkg/lint/linter" - "github.com/golangci/golangci-lint/pkg/result" ) -func setDepguardListType(dg *depguard.Depguard, lintCtx *linter.Context) error { - listType := lintCtx.Settings().Depguard.ListType - var found bool - dg.ListType, found = depguard.StringToListType[strings.ToLower(listType)] - if !found { - if listType != "" { - return fmt.Errorf("unsure what list type %s is", listType) - } - dg.ListType = depguard.LTBlacklist - } - - return nil -} +func NewDepguard(settings *config.DepGuardSettings) *goanalysis.Linter { + conf := depguard.LinterSettings{} -func setupDepguardPackages(dg *depguard.Depguard, lintCtx *linter.Context) { - if dg.ListType == depguard.LTBlacklist { - // if the list type was a blacklist the packages with error messages should - // be included in the blacklist package list + if settings != nil { + for s, rule := range settings.Rules { + list 
:= &depguard.List{ + ListMode: rule.ListMode, + Files: rule.Files, + Allow: rule.Allow, + } - noMessagePackages := make(map[string]bool) - for _, pkg := range dg.Packages { - noMessagePackages[pkg] = true - } + // because of bug with Viper parsing (split on dot) we use a list of struct instead of a map. + // https://github.com/spf13/viper/issues/324 + // https://github.com/golangci/golangci-lint/issues/3749#issuecomment-1492536630 - for pkg := range lintCtx.Settings().Depguard.PackagesWithErrorMessage { - if _, ok := noMessagePackages[pkg]; !ok { - dg.Packages = append(dg.Packages, pkg) + deny := map[string]string{} + for _, r := range rule.Deny { + deny[r.Pkg] = r.Desc } + list.Deny = deny + + conf[s] = list } } -} -func NewDepguard() *goanalysis.Linter { - const linterName = "depguard" - var mu sync.Mutex - var resIssues []goanalysis.Issue + a := depguard.NewUncompiledAnalyzer(&conf) - analyzer := &analysis.Analyzer{ - Name: linterName, - Doc: goanalysis.TheOnlyanalyzerDoc, - } return goanalysis.NewLinter( - linterName, - "Go linter that checks if package imports are in a list of acceptable packages", - []*analysis.Analyzer{analyzer}, + a.Analyzer.Name, + a.Analyzer.Doc, + []*analysis.Analyzer{a.Analyzer}, nil, ).WithContextSetter(func(lintCtx *linter.Context) { - dgSettings := &lintCtx.Settings().Depguard - analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { - prog := goanalysis.MakeFakeLoaderProgram(pass) - dg := &depguard.Depguard{ - Packages: dgSettings.Packages, - IncludeGoRoot: dgSettings.IncludeGoRoot, - } - if err := setDepguardListType(dg, lintCtx); err != nil { - return nil, err - } - setupDepguardPackages(dg, lintCtx) - - loadConfig := &loader.Config{ - Cwd: "", // fallbacked to os.Getcwd - Build: nil, // fallbacked to build.Default - } - issues, err := dg.Run(loadConfig, prog) - if err != nil { - return nil, err - } - if len(issues) == 0 { - return nil, nil - } - msgSuffix := "is in the blacklist" - if dg.ListType == depguard.LTWhitelist { - msgSuffix = "is not in the whitelist" - } - res := make([]goanalysis.Issue, 0, len(issues)) - for _, i := range issues { - userSuppliedMsgSuffix := dgSettings.PackagesWithErrorMessage[i.PackageName] - if userSuppliedMsgSuffix != "" { - userSuppliedMsgSuffix = ": " + userSuppliedMsgSuffix - } - res = append(res, goanalysis.NewIssue(&result.Issue{ - Pos: i.Position, - Text: fmt.Sprintf("%s %s%s", formatCode(i.PackageName, lintCtx.Cfg), msgSuffix, userSuppliedMsgSuffix), - FromLinter: linterName, - }, pass)) - } - mu.Lock() - resIssues = append(resIssues, res...) 
- mu.Unlock() - - return nil, nil + err := a.Compile() + if err != nil { + lintCtx.Log.Errorf("create analyzer: %v", err) } - }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { - return resIssues - }).WithLoadMode(goanalysis.LoadModeTypesInfo) + }).WithLoadMode(goanalysis.LoadModeSyntax) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/dogsled.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/dogsled.go index 8978ff913d..79502fe8be 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/dogsled.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/dogsled.go @@ -8,51 +8,65 @@ import ( "golang.org/x/tools/go/analysis" + "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" "github.com/golangci/golangci-lint/pkg/lint/linter" "github.com/golangci/golangci-lint/pkg/result" ) -const dogsledLinterName = "dogsled" +const dogsledName = "dogsled" -func NewDogsled() *goanalysis.Linter { +//nolint:dupl +func NewDogsled(settings *config.DogsledSettings) *goanalysis.Linter { var mu sync.Mutex var resIssues []goanalysis.Issue analyzer := &analysis.Analyzer{ - Name: dogsledLinterName, + Name: dogsledName, Doc: goanalysis.TheOnlyanalyzerDoc, - } - return goanalysis.NewLinter( - dogsledLinterName, - "Checks assignments with too many blank identifiers (e.g. x, _, _, _, := f())", - []*analysis.Analyzer{analyzer}, - nil, - ).WithContextSetter(func(lintCtx *linter.Context) { - analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { - var pkgIssues []goanalysis.Issue - for _, f := range pass.Files { - v := returnsVisitor{ - maxBlanks: lintCtx.Settings().Dogsled.MaxBlankIdentifiers, - f: pass.Fset, - } - ast.Walk(&v, f) - for i := range v.issues { - pkgIssues = append(pkgIssues, goanalysis.NewIssue(&v.issues[i], pass)) - } + Run: func(pass *analysis.Pass) (any, error) { + issues := runDogsled(pass, settings) + + if len(issues) == 0 { + return nil, nil } mu.Lock() - resIssues = append(resIssues, pkgIssues...) + resIssues = append(resIssues, issues...) mu.Unlock() return nil, nil - } - }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + }, + } + + return goanalysis.NewLinter( + dogsledName, + "Checks assignments with too many blank identifiers (e.g. 
x, _, _, _, := f())", + []*analysis.Analyzer{analyzer}, + nil, + ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { return resIssues }).WithLoadMode(goanalysis.LoadModeSyntax) } +func runDogsled(pass *analysis.Pass, settings *config.DogsledSettings) []goanalysis.Issue { + var reports []goanalysis.Issue + for _, f := range pass.Files { + v := &returnsVisitor{ + maxBlanks: settings.MaxBlankIdentifiers, + f: pass.Fset, + } + + ast.Walk(v, f) + + for i := range v.issues { + reports = append(reports, goanalysis.NewIssue(&v.issues[i], pass)) + } + } + + return reports +} + type returnsVisitor struct { f *token.FileSet maxBlanks int @@ -87,7 +101,7 @@ func (v *returnsVisitor) Visit(node ast.Node) ast.Visitor { if numBlank > v.maxBlanks { v.issues = append(v.issues, result.Issue{ - FromLinter: dogsledLinterName, + FromLinter: dogsledName, Text: fmt.Sprintf("declaration has %v blank identifiers", numBlank), Pos: v.f.Position(assgnStmt.Pos()), }) diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/dupl.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/dupl.go index ed1c4fcbdc..5d772a5f2f 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/dupl.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/dupl.go @@ -6,39 +6,27 @@ import ( "sync" duplAPI "github.com/golangci/dupl" - "github.com/pkg/errors" "golang.org/x/tools/go/analysis" + "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/fsutils" "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" "github.com/golangci/golangci-lint/pkg/lint/linter" "github.com/golangci/golangci-lint/pkg/result" ) -const duplLinterName = "dupl" +const duplName = "dupl" -func NewDupl() *goanalysis.Linter { +//nolint:dupl +func NewDupl(settings *config.DuplSettings) *goanalysis.Linter { var mu sync.Mutex var resIssues []goanalysis.Issue analyzer := &analysis.Analyzer{ - Name: duplLinterName, + Name: duplName, Doc: goanalysis.TheOnlyanalyzerDoc, - } - return goanalysis.NewLinter( - duplLinterName, - "Tool for code clone detection", - []*analysis.Analyzer{analyzer}, - nil, - ).WithContextSetter(func(lintCtx *linter.Context) { - analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { - var fileNames []string - for _, f := range pass.Files { - pos := pass.Fset.PositionFor(f.Pos(), false) - fileNames = append(fileNames, pos.Filename) - } - - issues, err := duplAPI.Run(fileNames, lintCtx.Settings().Dupl.Threshold) + Run: func(pass *analysis.Pass) (any, error) { + issues, err := runDupl(pass, settings) if err != nil { return nil, err } @@ -47,37 +35,62 @@ func NewDupl() *goanalysis.Linter { return nil, nil } - res := make([]goanalysis.Issue, 0, len(issues)) - for _, i := range issues { - toFilename, err := fsutils.ShortestRelPath(i.To.Filename(), "") - if err != nil { - return nil, errors.Wrapf(err, "failed to get shortest rel path for %q", i.To.Filename()) - } - dupl := fmt.Sprintf("%s:%d-%d", toFilename, i.To.LineStart(), i.To.LineEnd()) - text := fmt.Sprintf("%d-%d lines are duplicate of %s", - i.From.LineStart(), i.From.LineEnd(), - formatCode(dupl, lintCtx.Cfg)) - res = append(res, goanalysis.NewIssue(&result.Issue{ - Pos: token.Position{ - Filename: i.From.Filename(), - Line: i.From.LineStart(), - }, - LineRange: &result.Range{ - From: i.From.LineStart(), - To: i.From.LineEnd(), - }, - Text: text, - FromLinter: duplLinterName, - }, pass)) - } - mu.Lock() - resIssues = append(resIssues, res...) + resIssues = append(resIssues, issues...) 
mu.Unlock() return nil, nil - } - }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + }, + } + + return goanalysis.NewLinter( + duplName, + "Tool for code clone detection", + []*analysis.Analyzer{analyzer}, + nil, + ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { return resIssues }).WithLoadMode(goanalysis.LoadModeSyntax) } + +func runDupl(pass *analysis.Pass, settings *config.DuplSettings) ([]goanalysis.Issue, error) { + fileNames := getFileNames(pass) + + issues, err := duplAPI.Run(fileNames, settings.Threshold) + if err != nil { + return nil, err + } + + if len(issues) == 0 { + return nil, nil + } + + res := make([]goanalysis.Issue, 0, len(issues)) + + for _, i := range issues { + toFilename, err := fsutils.ShortestRelPath(i.To.Filename(), "") + if err != nil { + return nil, fmt.Errorf("failed to get shortest rel path for %q: %w", i.To.Filename(), err) + } + + dupl := fmt.Sprintf("%s:%d-%d", toFilename, i.To.LineStart(), i.To.LineEnd()) + text := fmt.Sprintf("%d-%d lines are duplicate of %s", + i.From.LineStart(), i.From.LineEnd(), + formatCode(dupl, nil)) + + res = append(res, goanalysis.NewIssue(&result.Issue{ + Pos: token.Position{ + Filename: i.From.Filename(), + Line: i.From.LineStart(), + }, + LineRange: &result.Range{ + From: i.From.LineStart(), + To: i.From.LineEnd(), + }, + Text: text, + FromLinter: duplName, + }, pass)) + } + + return res, nil +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/dupword.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/dupword.go new file mode 100644 index 0000000000..6f079ffc89 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/dupword.go @@ -0,0 +1,30 @@ +package golinters + +import ( + "strings" + + "github.com/Abirdcfly/dupword" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewDupWord(setting *config.DupWordSettings) *goanalysis.Linter { + a := dupword.NewAnalyzer() + + cfgMap := map[string]map[string]any{} + if setting != nil { + cfgMap[a.Name] = map[string]any{ + "keyword": strings.Join(setting.Keywords, ","), + "ignore": strings.Join(setting.Ignore, ","), + } + } + + return goanalysis.NewLinter( + a.Name, + "checks for duplicate words in the source code", + []*analysis.Analyzer{a}, + cfgMap, + ).WithLoadMode(goanalysis.LoadModeSyntax) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/durationcheck.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/durationcheck.go index 9c452af50e..880de5d420 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/durationcheck.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/durationcheck.go @@ -10,6 +10,10 @@ import ( func NewDurationCheck() *goanalysis.Linter { a := durationcheck.Analyzer - return goanalysis.NewLinter(a.Name, a.Doc, []*analysis.Analyzer{a}, nil). 
- WithLoadMode(goanalysis.LoadModeTypesInfo) + return goanalysis.NewLinter( + a.Name, + a.Doc, + []*analysis.Analyzer{a}, + nil, + ).WithLoadMode(goanalysis.LoadModeTypesInfo) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/errcheck.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/errcheck.go index 9aac7326a0..89b18519c9 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/errcheck.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/errcheck.go @@ -11,7 +11,6 @@ import ( "sync" "github.com/kisielk/errcheck/errcheck" - "github.com/pkg/errors" "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/packages" @@ -22,26 +21,27 @@ import ( "github.com/golangci/golangci-lint/pkg/result" ) -func NewErrcheck() *goanalysis.Linter { - const linterName = "errcheck" +const errcheckName = "errcheck" +func NewErrcheck(settings *config.ErrcheckSettings) *goanalysis.Linter { var mu sync.Mutex - var res []goanalysis.Issue + var resIssues []goanalysis.Issue analyzer := &analysis.Analyzer{ - Name: linterName, + Name: errcheckName, Doc: goanalysis.TheOnlyanalyzerDoc, + Run: goanalysis.DummyRun, } return goanalysis.NewLinter( - linterName, - "Errcheck is a program for checking for unchecked errors "+ - "in go programs. These unchecked errors can be critical bugs in some cases", + errcheckName, + "errcheck is a program for checking for unchecked errors in Go code. "+ + "These unchecked errors can be critical bugs in some cases", []*analysis.Analyzer{analyzer}, nil, ).WithContextSetter(func(lintCtx *linter.Context) { // copied from errcheck - checker, err := getChecker(&lintCtx.Settings().Errcheck) + checker, err := getChecker(settings) if err != nil { lintCtx.Log.Errorf("failed to get checker: %v", err) return @@ -49,52 +49,67 @@ func NewErrcheck() *goanalysis.Linter { checker.Tags = lintCtx.Cfg.Run.BuildTags - analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { - pkg := &packages.Package{ - Fset: pass.Fset, - Syntax: pass.Files, - Types: pass.Pkg, - TypesInfo: pass.TypesInfo, + analyzer.Run = func(pass *analysis.Pass) (any, error) { + issues := runErrCheck(lintCtx, pass, checker) + if err != nil { + return nil, err } - errcheckIssues := checker.CheckPackage(pkg).Unique() - if len(errcheckIssues.UncheckedErrors) == 0 { + if len(issues) == 0 { return nil, nil } - issues := make([]goanalysis.Issue, len(errcheckIssues.UncheckedErrors)) - for i, err := range errcheckIssues.UncheckedErrors { - var text string - if err.FuncName != "" { - text = fmt.Sprintf( - "Error return value of %s is not checked", - formatCode(err.SelectorName, lintCtx.Cfg), - ) - } else { - text = "Error return value is not checked" - } - - issues[i] = goanalysis.NewIssue( - &result.Issue{ - FromLinter: linterName, - Text: text, - Pos: err.Pos, - }, - pass, - ) - } - mu.Lock() - res = append(res, issues...) + resIssues = append(resIssues, issues...) 
mu.Unlock() return nil, nil } }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { - return res + return resIssues }).WithLoadMode(goanalysis.LoadModeTypesInfo) } +func runErrCheck(lintCtx *linter.Context, pass *analysis.Pass, checker *errcheck.Checker) []goanalysis.Issue { + pkg := &packages.Package{ + Fset: pass.Fset, + Syntax: pass.Files, + Types: pass.Pkg, + TypesInfo: pass.TypesInfo, + } + + lintIssues := checker.CheckPackage(pkg).Unique() + if len(lintIssues.UncheckedErrors) == 0 { + return nil + } + + issues := make([]goanalysis.Issue, len(lintIssues.UncheckedErrors)) + + for i, err := range lintIssues.UncheckedErrors { + text := "Error return value is not checked" + + if err.FuncName != "" { + code := err.SelectorName + if err.SelectorName == "" { + code = err.FuncName + } + + text = fmt.Sprintf("Error return value of %s is not checked", formatCode(code, lintCtx.Cfg)) + } + + issues[i] = goanalysis.NewIssue( + &result.Issue{ + FromLinter: errcheckName, + Text: text, + Pos: err.Pos, + }, + pass, + ) + } + + return issues +} + // parseIgnoreConfig was taken from errcheck in order to keep the API identical. // https://github.com/kisielk/errcheck/blob/1787c4bee836470bf45018cfbc783650db3c6501/main.go#L25-L60 func parseIgnoreConfig(s string) (map[string]*regexp.Regexp, error) { @@ -127,7 +142,7 @@ func parseIgnoreConfig(s string) (map[string]*regexp.Regexp, error) { func getChecker(errCfg *config.ErrcheckSettings) (*errcheck.Checker, error) { ignoreConfig, err := parseIgnoreConfig(errCfg.Ignore) if err != nil { - return nil, errors.Wrap(err, "failed to parse 'ignore' directive") + return nil, fmt.Errorf("failed to parse 'ignore' directive: %w", err) } checker := errcheck.Checker{ @@ -135,10 +150,13 @@ func getChecker(errCfg *config.ErrcheckSettings) (*errcheck.Checker, error) { BlankAssignments: !errCfg.CheckAssignToBlank, TypeAssertions: !errCfg.CheckTypeAssertions, SymbolRegexpsByPackage: map[string]*regexp.Regexp{}, - Symbols: append([]string{}, errcheck.DefaultExcludedSymbols...), }, } + if !errCfg.DisableDefaultExclusions { + checker.Exclusions.Symbols = append(checker.Exclusions.Symbols, errcheck.DefaultExcludedSymbols...) + } + for pkg, re := range ignoreConfig { checker.Exclusions.SymbolRegexpsByPackage[pkg] = re } @@ -152,6 +170,8 @@ func getChecker(errCfg *config.ErrcheckSettings) (*errcheck.Checker, error) { checker.Exclusions.Symbols = append(checker.Exclusions.Symbols, exclude...) } + checker.Exclusions.Symbols = append(checker.Exclusions.Symbols, errCfg.ExcludeFunctions...) 
+ return &checker, nil } @@ -231,7 +251,7 @@ func readExcludeFile(name string) ([]string, error) { } if fh == nil { - return nil, errors.Wrapf(err, "failed reading exclude file: %s", name) + return nil, fmt.Errorf("failed reading exclude file: %s: %w", name, err) } scanner := bufio.NewScanner(fh) @@ -242,7 +262,7 @@ func readExcludeFile(name string) ([]string, error) { } if err := scanner.Err(); err != nil { - return nil, errors.Wrapf(err, "failed scanning file: %s", name) + return nil, fmt.Errorf("failed scanning file: %s: %w", name, err) } return excludes, nil diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/errchkjson.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/errchkjson.go new file mode 100644 index 0000000000..1af4450b4e --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/errchkjson.go @@ -0,0 +1,31 @@ +package golinters + +import ( + "github.com/breml/errchkjson" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewErrChkJSONFuncName(cfg *config.ErrChkJSONSettings) *goanalysis.Linter { + a := errchkjson.NewAnalyzer() + + cfgMap := map[string]map[string]any{} + cfgMap[a.Name] = map[string]any{ + "omit-safe": true, + } + if cfg != nil { + cfgMap[a.Name] = map[string]any{ + "omit-safe": !cfg.CheckErrorFreeEncoding, + "report-no-exported": cfg.ReportNoExported, + } + } + + return goanalysis.NewLinter( + a.Name, + a.Doc, + []*analysis.Analyzer{a}, + cfgMap, + ).WithLoadMode(goanalysis.LoadModeTypesInfo) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/errname.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/errname.go new file mode 100644 index 0000000000..193a7aba75 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/errname.go @@ -0,0 +1,19 @@ +package golinters + +import ( + "github.com/Antonboom/errname/pkg/analyzer" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewErrName() *goanalysis.Linter { + a := analyzer.New() + + return goanalysis.NewLinter( + a.Name, + a.Doc, + []*analysis.Analyzer{a}, + nil, + ).WithLoadMode(goanalysis.LoadModeTypesInfo) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/errorlint.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/errorlint.go index dd9d901617..cac94159d6 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/errorlint.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/errorlint.go @@ -11,13 +11,14 @@ import ( func NewErrorLint(cfg *config.ErrorLintSettings) *goanalysis.Linter { a := errorlint.NewAnalyzer() - cfgMap := map[string]map[string]interface{}{} + cfgMap := map[string]map[string]any{} if cfg != nil { - cfgMap[a.Name] = map[string]interface{}{ - "errorf": cfg.Errorf, - "asserts": cfg.Asserts, - "comparison": cfg.Comparison, + cfgMap[a.Name] = map[string]any{ + "errorf": cfg.Errorf, + "errorf-multi": cfg.ErrorfMulti, + "asserts": cfg.Asserts, + "comparison": cfg.Comparison, } } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/execinquery.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/execinquery.go new file mode 100644 index 0000000000..9911d315e0 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/execinquery.go @@ -0,0 +1,19 @@ +package golinters + +import ( + "github.com/lufeee/execinquery" + "golang.org/x/tools/go/analysis" + + 
"github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewExecInQuery() *goanalysis.Linter { + a := execinquery.Analyzer + + return goanalysis.NewLinter( + a.Name, + a.Doc, + []*analysis.Analyzer{a}, + nil, + ).WithLoadMode(goanalysis.LoadModeTypesInfo) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/exhaustive.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/exhaustive.go index 85534d42cd..fe58e10f05 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/exhaustive.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/exhaustive.go @@ -11,16 +11,27 @@ import ( func NewExhaustive(settings *config.ExhaustiveSettings) *goanalysis.Linter { a := exhaustive.Analyzer - var cfg map[string]map[string]interface{} + var cfg map[string]map[string]any if settings != nil { - cfg = map[string]map[string]interface{}{ + cfg = map[string]map[string]any{ a.Name: { + exhaustive.CheckFlag: settings.Check, exhaustive.CheckGeneratedFlag: settings.CheckGenerated, exhaustive.DefaultSignifiesExhaustiveFlag: settings.DefaultSignifiesExhaustive, + exhaustive.IgnoreEnumMembersFlag: settings.IgnoreEnumMembers, + exhaustive.IgnoreEnumTypesFlag: settings.IgnoreEnumTypes, + exhaustive.PackageScopeOnlyFlag: settings.PackageScopeOnly, + exhaustive.ExplicitExhaustiveMapFlag: settings.ExplicitExhaustiveMap, + exhaustive.ExplicitExhaustiveSwitchFlag: settings.ExplicitExhaustiveSwitch, + exhaustive.DefaultCaseRequiredFlag: settings.DefaultCaseRequired, }, } } - return goanalysis.NewLinter(a.Name, a.Doc, []*analysis.Analyzer{a}, cfg). - WithLoadMode(goanalysis.LoadModeTypesInfo) + return goanalysis.NewLinter( + a.Name, + a.Doc, + []*analysis.Analyzer{a}, + cfg, + ).WithLoadMode(goanalysis.LoadModeTypesInfo) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/exhaustivestruct.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/exhaustivestruct.go index 6a1dbd71c5..9bc9bbfb0b 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/exhaustivestruct.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/exhaustivestruct.go @@ -13,9 +13,9 @@ import ( func NewExhaustiveStruct(settings *config.ExhaustiveStructSettings) *goanalysis.Linter { a := analyzer.Analyzer - var cfg map[string]map[string]interface{} + var cfg map[string]map[string]any if settings != nil { - cfg = map[string]map[string]interface{}{ + cfg = map[string]map[string]any{ a.Name: { "struct_patterns": strings.Join(settings.StructPatterns, ","), }, diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/exhaustruct.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/exhaustruct.go new file mode 100644 index 0000000000..5272879e1d --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/exhaustruct.go @@ -0,0 +1,29 @@ +package golinters + +import ( + "github.com/GaijinEntertainment/go-exhaustruct/v3/analyzer" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewExhaustruct(settings *config.ExhaustructSettings) *goanalysis.Linter { + var include, exclude []string + if settings != nil { + include = settings.Include + exclude = settings.Exclude + } + + a, err := analyzer.NewAnalyzer(include, exclude) + if err != nil { + linterLogger.Fatalf("exhaustruct configuration: %v", err) + } + + return goanalysis.NewLinter( + a.Name, + a.Doc, + []*analysis.Analyzer{a}, + nil, + ).WithLoadMode(goanalysis.LoadModeTypesInfo) 
+} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/forbidigo.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/forbidigo.go index 2fa9d51833..6aced29226 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/forbidigo.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/forbidigo.go @@ -1,70 +1,104 @@ package golinters import ( + "fmt" "sync" "github.com/ashanbrown/forbidigo/forbidigo" - "github.com/pkg/errors" "golang.org/x/tools/go/analysis" + "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" "github.com/golangci/golangci-lint/pkg/lint/linter" + "github.com/golangci/golangci-lint/pkg/logutils" "github.com/golangci/golangci-lint/pkg/result" ) -func NewForbidigo() *goanalysis.Linter { - const linterName = "forbidigo" +const forbidigoName = "forbidigo" + +//nolint:dupl +func NewForbidigo(settings *config.ForbidigoSettings) *goanalysis.Linter { var mu sync.Mutex var resIssues []goanalysis.Issue analyzer := &analysis.Analyzer{ - Name: linterName, + Name: forbidigoName, Doc: goanalysis.TheOnlyanalyzerDoc, - } - return goanalysis.NewLinter( - linterName, - "Forbids identifiers", - []*analysis.Analyzer{analyzer}, - nil, - ).WithContextSetter(func(lintCtx *linter.Context) { - s := &lintCtx.Settings().Forbidigo - - analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { - var res []goanalysis.Issue - options := []forbidigo.Option{ - forbidigo.OptionExcludeGodocExamples(s.ExcludeGodocExamples), - // disable "//permit" directives so only "//nolint" directives matters within golangci lint - forbidigo.OptionIgnorePermitDirectives(true), - } - forbid, err := forbidigo.NewLinter(s.Forbid, options...) + Run: func(pass *analysis.Pass) (any, error) { + issues, err := runForbidigo(pass, settings) if err != nil { - return nil, errors.Wrapf(err, "failed to create linter %q", linterName) + return nil, err } - for _, file := range pass.Files { - hints, err := forbid.Run(pass.Fset, file) - if err != nil { - return nil, errors.Wrapf(err, "forbidigo linter failed on file %q", file.Name.String()) - } - for _, hint := range hints { - res = append(res, goanalysis.NewIssue(&result.Issue{ - Pos: hint.Position(), - Text: hint.Details(), - FromLinter: linterName, - }, pass)) - } - } - - if len(res) == 0 { + if len(issues) == 0 { return nil, nil } mu.Lock() - resIssues = append(resIssues, res...) + resIssues = append(resIssues, issues...) mu.Unlock() return nil, nil - } - }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + }, + } + + // Without AnalyzeTypes, LoadModeSyntax is enough. + // But we cannot make this depend on the settings and have to mirror the mode chosen in GetAllSupportedLinterConfigs, + // therefore we have to use LoadModeTypesInfo in all cases. 
+ return goanalysis.NewLinter( + forbidigoName, + "Forbids identifiers", + []*analysis.Analyzer{analyzer}, + nil, + ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { return resIssues - }).WithLoadMode(goanalysis.LoadModeSyntax) + }).WithLoadMode(goanalysis.LoadModeTypesInfo) +} + +func runForbidigo(pass *analysis.Pass, settings *config.ForbidigoSettings) ([]goanalysis.Issue, error) { + options := []forbidigo.Option{ + forbidigo.OptionExcludeGodocExamples(settings.ExcludeGodocExamples), + // disable "//permit" directives so only "//nolint" directives matters within golangci-lint + forbidigo.OptionIgnorePermitDirectives(true), + forbidigo.OptionAnalyzeTypes(settings.AnalyzeTypes), + } + + // Convert patterns back to strings because that is what NewLinter accepts. + var patterns []string + for _, pattern := range settings.Forbid { + buffer, err := pattern.MarshalString() + if err != nil { + return nil, err + } + patterns = append(patterns, string(buffer)) + } + + forbid, err := forbidigo.NewLinter(patterns, options...) + if err != nil { + return nil, fmt.Errorf("failed to create linter %q: %w", forbidigoName, err) + } + + var issues []goanalysis.Issue + for _, file := range pass.Files { + runConfig := forbidigo.RunConfig{ + Fset: pass.Fset, + DebugLog: logutils.Debug(logutils.DebugKeyForbidigo), + } + if settings != nil && settings.AnalyzeTypes { + runConfig.TypesInfo = pass.TypesInfo + } + hints, err := forbid.RunWithConfig(runConfig, file) + if err != nil { + return nil, fmt.Errorf("forbidigo linter failed on file %q: %w", file.Name.String(), err) + } + + for _, hint := range hints { + issues = append(issues, goanalysis.NewIssue(&result.Issue{ + Pos: hint.Position(), + Text: hint.Details(), + FromLinter: forbidigoName, + }, pass)) + } + } + + return issues, nil } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/funlen.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/funlen.go index 29cb6b7ef7..8def9c1f67 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/funlen.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/funlen.go @@ -8,57 +8,69 @@ import ( "github.com/ultraware/funlen" "golang.org/x/tools/go/analysis" + "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" "github.com/golangci/golangci-lint/pkg/lint/linter" "github.com/golangci/golangci-lint/pkg/result" ) -const funlenLinterName = "funlen" +const funlenName = "funlen" -func NewFunlen() *goanalysis.Linter { +//nolint:dupl +func NewFunlen(settings *config.FunlenSettings) *goanalysis.Linter { var mu sync.Mutex var resIssues []goanalysis.Issue analyzer := &analysis.Analyzer{ - Name: funlenLinterName, + Name: funlenName, Doc: goanalysis.TheOnlyanalyzerDoc, - } - return goanalysis.NewLinter( - funlenLinterName, - "Tool for detection of long functions", - []*analysis.Analyzer{analyzer}, - nil, - ).WithContextSetter(func(lintCtx *linter.Context) { - analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { - var issues []funlen.Message - for _, file := range pass.Files { - fileIssues := funlen.Run(file, pass.Fset, lintCtx.Settings().Funlen.Lines, lintCtx.Settings().Funlen.Statements) - issues = append(issues, fileIssues...) 
- } + Run: func(pass *analysis.Pass) (any, error) { + issues := runFunlen(pass, settings) if len(issues) == 0 { return nil, nil } - res := make([]goanalysis.Issue, len(issues)) - for k, i := range issues { - res[k] = goanalysis.NewIssue(&result.Issue{ - Pos: token.Position{ - Filename: i.Pos.Filename, - Line: i.Pos.Line, - }, - Text: strings.TrimRight(i.Message, "\n"), - FromLinter: funlenLinterName, - }, pass) - } - mu.Lock() - resIssues = append(resIssues, res...) + resIssues = append(resIssues, issues...) mu.Unlock() return nil, nil - } - }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + }, + } + + return goanalysis.NewLinter( + funlenName, + "Tool for detection of long functions", + []*analysis.Analyzer{analyzer}, + nil, + ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { return resIssues }).WithLoadMode(goanalysis.LoadModeSyntax) } + +func runFunlen(pass *analysis.Pass, settings *config.FunlenSettings) []goanalysis.Issue { + var lintIssues []funlen.Message + for _, file := range pass.Files { + fileIssues := funlen.Run(file, pass.Fset, settings.Lines, settings.Statements, settings.IgnoreComments) + lintIssues = append(lintIssues, fileIssues...) + } + + if len(lintIssues) == 0 { + return nil + } + + issues := make([]goanalysis.Issue, len(lintIssues)) + for k, i := range lintIssues { + issues[k] = goanalysis.NewIssue(&result.Issue{ + Pos: token.Position{ + Filename: i.Pos.Filename, + Line: i.Pos.Line, + }, + Text: strings.TrimRight(i.Message, "\n"), + FromLinter: funlenName, + }, pass) + } + + return issues +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gci.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gci.go index 6fa43544e6..3862267692 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gci.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gci.go @@ -1,79 +1,69 @@ package golinters import ( - "bytes" "fmt" "sync" + gcicfg "github.com/daixiang0/gci/pkg/config" "github.com/daixiang0/gci/pkg/gci" - "github.com/pkg/errors" - "github.com/shazow/go-diff/difflib" + "github.com/daixiang0/gci/pkg/io" + "github.com/daixiang0/gci/pkg/log" + "github.com/hexops/gotextdiff" + "github.com/hexops/gotextdiff/myers" + "github.com/hexops/gotextdiff/span" "golang.org/x/tools/go/analysis" + "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" "github.com/golangci/golangci-lint/pkg/lint/linter" ) const gciName = "gci" -func NewGci() *goanalysis.Linter { +func NewGci(settings *config.GciSettings) *goanalysis.Linter { var mu sync.Mutex var resIssues []goanalysis.Issue - differ := difflib.New() analyzer := &analysis.Analyzer{ Name: gciName, Doc: goanalysis.TheOnlyanalyzerDoc, + Run: goanalysis.DummyRun, } + + var cfg *gcicfg.Config + if settings != nil { + rawCfg := gcicfg.YamlConfig{ + Cfg: gcicfg.BoolConfig{ + SkipGenerated: settings.SkipGenerated, + CustomOrder: settings.CustomOrder, + }, + SectionStrings: settings.Sections, + } + + if settings.LocalPrefixes != "" { + prefix := []string{"standard", "default", fmt.Sprintf("prefix(%s)", settings.LocalPrefixes)} + rawCfg.SectionStrings = prefix + } + + var err error + cfg, err = rawCfg.Parse() + if err != nil { + linterLogger.Fatalf("gci: configuration parsing: %v", err) + } + } + + var lock sync.Mutex + return goanalysis.NewLinter( gciName, - "Gci control golang package import order and make it always deterministic.", + "Gci controls Go package import order and makes it always deterministic.", 
[]*analysis.Analyzer{analyzer}, nil, ).WithContextSetter(func(lintCtx *linter.Context) { - localFlag := lintCtx.Settings().Gci.LocalPrefixes - goimportsFlag := lintCtx.Settings().Goimports.LocalPrefixes - if localFlag == "" && goimportsFlag != "" { - localFlag = goimportsFlag - } - - analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { - var fileNames []string - for _, f := range pass.Files { - pos := pass.Fset.PositionFor(f.Pos(), false) - fileNames = append(fileNames, pos.Filename) - } - - var issues []goanalysis.Issue - - for _, f := range fileNames { - source, result, err := gci.Run(f, &gci.FlagSet{LocalFlag: localFlag}) - if err != nil { - return nil, err - } - if result == nil { - continue - } - - diff := bytes.Buffer{} - _, err = diff.WriteString(fmt.Sprintf("--- %[1]s\n+++ %[1]s\n", f)) - if err != nil { - return nil, fmt.Errorf("can't write diff header: %v", err) - } - - err = differ.Diff(&diff, bytes.NewReader(source), bytes.NewReader(result)) - if err != nil { - return nil, fmt.Errorf("can't get gci diff output: %v", err) - } - - is, err := extractIssuesFromPatch(diff.String(), lintCtx.Log, lintCtx, gciName) - if err != nil { - return nil, errors.Wrapf(err, "can't extract issues from gci diff output %q", diff.String()) - } - - for i := range is { - issues = append(issues, goanalysis.NewIssue(&is[i], pass)) - } + analyzer.Run = func(pass *analysis.Pass) (any, error) { + issues, err := runGci(pass, lintCtx, cfg, &lock) + if err != nil { + return nil, err } if len(issues) == 0 { @@ -90,3 +80,78 @@ func NewGci() *goanalysis.Linter { return resIssues }).WithLoadMode(goanalysis.LoadModeSyntax) } + +func runGci(pass *analysis.Pass, lintCtx *linter.Context, cfg *gcicfg.Config, lock *sync.Mutex) ([]goanalysis.Issue, error) { + fileNames := getFileNames(pass) + + var diffs []string + err := diffFormattedFilesToArray(fileNames, *cfg, &diffs, lock) + if err != nil { + return nil, err + } + + var issues []goanalysis.Issue + + for _, diff := range diffs { + if diff == "" { + continue + } + + is, err := extractIssuesFromPatch(diff, lintCtx, gciName) + if err != nil { + return nil, fmt.Errorf("can't extract issues from gci diff output %s: %w", diff, err) + } + + for i := range is { + issues = append(issues, goanalysis.NewIssue(&is[i], pass)) + } + } + + return issues, nil +} + +// diffFormattedFilesToArray is a copy of gci.DiffFormattedFilesToArray without io.StdInGenerator. +// gci.DiffFormattedFilesToArray uses gci.processStdInAndGoFilesInPaths that uses io.StdInGenerator but stdin is not active on CI. 
+// https://github.com/daixiang0/gci/blob/6f5cb16718ba07f0342a58de9b830ec5a6d58790/pkg/gci/gci.go#L63-L75 +// https://github.com/daixiang0/gci/blob/6f5cb16718ba07f0342a58de9b830ec5a6d58790/pkg/gci/gci.go#L80 +func diffFormattedFilesToArray(paths []string, cfg gcicfg.Config, diffs *[]string, lock *sync.Mutex) error { + log.InitLogger() + defer func() { _ = log.L().Sync() }() + + return gci.ProcessFiles(io.GoFilesInPathsGenerator(paths, true), cfg, func(filePath string, unmodifiedFile, formattedFile []byte) error { + fileURI := span.URIFromPath(filePath) + edits := myers.ComputeEdits(fileURI, string(unmodifiedFile), string(formattedFile)) + unifiedEdits := gotextdiff.ToUnified(filePath, filePath, string(unmodifiedFile), edits) + lock.Lock() + *diffs = append(*diffs, fmt.Sprint(unifiedEdits)) + lock.Unlock() + return nil + }) +} + +func getErrorTextForGci(settings config.GciSettings) string { + text := "File is not `gci`-ed" + + hasOptions := settings.SkipGenerated || len(settings.Sections) > 0 + if !hasOptions { + return text + } + + text += " with" + + if settings.SkipGenerated { + text += " --skip-generated" + } + + if len(settings.Sections) > 0 { + for _, section := range settings.Sections { + text += " -s " + section + } + } + + if settings.CustomOrder { + text += " --custom-order" + } + + return text +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/ginkgolinter.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/ginkgolinter.go new file mode 100644 index 0000000000..8919de15bf --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/ginkgolinter.go @@ -0,0 +1,34 @@ +package golinters + +import ( + "github.com/nunnatsa/ginkgolinter" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewGinkgoLinter(cfg *config.GinkgoLinterSettings) *goanalysis.Linter { + a := ginkgolinter.NewAnalyzer() + + cfgMap := make(map[string]map[string]any) + if cfg != nil { + cfgMap[a.Name] = map[string]any{ + "suppress-len-assertion": cfg.SuppressLenAssertion, + "suppress-nil-assertion": cfg.SuppressNilAssertion, + "suppress-err-assertion": cfg.SuppressErrAssertion, + "suppress-compare-assertion": cfg.SuppressCompareAssertion, + "suppress-async-assertion": cfg.SuppressAsyncAssertion, + "suppress-type-compare-assertion": cfg.SuppressTypeCompareWarning, + "forbid-focus-container": cfg.ForbidFocusContainer, + "allow-havelen-0": cfg.AllowHaveLenZero, + } + } + + return goanalysis.NewLinter( + a.Name, + "enforces standards of using ginkgo and gomega", + []*analysis.Analyzer{a}, + cfgMap, + ).WithLoadMode(goanalysis.LoadModeTypesInfo) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/adapters.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/adapters.go index b702d1660b..284215dfc6 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/adapters.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/adapters.go @@ -8,17 +8,22 @@ import ( ) func MakeFakeLoaderProgram(pass *analysis.Pass) *loader.Program { + var info types.Info + if pass.TypesInfo != nil { + info = *pass.TypesInfo + } + prog := &loader.Program{ Fset: pass.Fset, Created: []*loader.PackageInfo{ { Pkg: pass.Pkg, Importable: true, // not used - TransitivelyErrorFree: true, // TODO + TransitivelyErrorFree: true, // TODO ??? 
Files: pass.Files, Errors: nil, - Info: *pass.TypesInfo, + Info: info, }, }, AllPackages: map[*types.Package]*loader.PackageInfo{ @@ -28,7 +33,7 @@ func MakeFakeLoaderProgram(pass *analysis.Pass) *loader.Program { TransitivelyErrorFree: true, Files: pass.Files, Errors: nil, - Info: *pass.TypesInfo, + Info: info, }, }, } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/errors.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/errors.go index 13b9ccf0af..f59e02cc64 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/errors.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/errors.go @@ -1,9 +1,9 @@ package goanalysis import ( + "errors" "fmt" - "github.com/pkg/errors" "golang.org/x/tools/go/packages" "github.com/golangci/golangci-lint/pkg/lint/linter" diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/linter.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/linter.go index ef49e4284a..f8ca2e7553 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/linter.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/linter.go @@ -2,11 +2,11 @@ package goanalysis import ( "context" + "errors" "flag" "fmt" "strings" - "github.com/pkg/errors" "golang.org/x/tools/go/analysis" "github.com/golangci/golangci-lint/pkg/lint/linter" @@ -44,14 +44,14 @@ const ( type Linter struct { name, desc string analyzers []*analysis.Analyzer - cfg map[string]map[string]interface{} + cfg map[string]map[string]any issuesReporter func(*linter.Context) []Issue contextSetter func(*linter.Context) loadMode LoadMode needUseOriginalPackages bool } -func NewLinter(name, desc string, analyzers []*analysis.Analyzer, cfg map[string]map[string]interface{}) *Linter { +func NewLinter(name, desc string, analyzers []*analysis.Analyzer, cfg map[string]map[string]any) *Linter { return &Linter{name: name, desc: desc, analyzers: analyzers, cfg: cfg} } @@ -102,13 +102,13 @@ func (lnt *Linter) allAnalyzerNames() []string { return ret } -func (lnt *Linter) configureAnalyzer(a *analysis.Analyzer, cfg map[string]interface{}) error { +func (lnt *Linter) configureAnalyzer(a *analysis.Analyzer, cfg map[string]any) error { for k, v := range cfg { f := a.Flags.Lookup(k) if f == nil { validFlagNames := allFlagNames(&a.Flags) if len(validFlagNames) == 0 { - return fmt.Errorf("analyzer doesn't have settings") + return errors.New("analyzer doesn't have settings") } return fmt.Errorf("analyzer doesn't have setting %q, valid settings: %v", @@ -116,7 +116,7 @@ func (lnt *Linter) configureAnalyzer(a *analysis.Analyzer, cfg map[string]interf } if err := f.Value.Set(valueToString(v)); err != nil { - return errors.Wrapf(err, "failed to set analyzer setting %q with value %v", k, v) + return fmt.Errorf("failed to set analyzer setting %q with value %v: %w", k, v, err) } } @@ -137,7 +137,7 @@ func (lnt *Linter) configure() error { } if err := lnt.configureAnalyzer(a, analyzerSettings); err != nil { - return errors.Wrapf(err, "failed to configure analyzer %s", analyzerName) + return fmt.Errorf("failed to configure analyzer %s: %w", analyzerName, err) } } @@ -146,11 +146,11 @@ func (lnt *Linter) configure() error { func (lnt *Linter) preRun(lintCtx *linter.Context) error { if err := analysis.Validate(lnt.analyzers); err != nil { - return errors.Wrap(err, "failed to validate analyzers") + return fmt.Errorf("failed to validate analyzers: %w", err) } if err := lnt.configure(); 
err != nil { - return errors.Wrap(err, "failed to configure analyzers") + return fmt.Errorf("failed to configure analyzers: %w", err) } if lnt.contextSetter != nil { @@ -195,12 +195,12 @@ func allFlagNames(fs *flag.FlagSet) []string { return ret } -func valueToString(v interface{}) string { +func valueToString(v any) string { if ss, ok := v.([]string); ok { return strings.Join(ss, ",") } - if is, ok := v.([]interface{}); ok { + if is, ok := v.([]any); ok { var ss []string for _, i := range is { ss = append(ss, fmt.Sprint(i)) @@ -211,3 +211,7 @@ func valueToString(v interface{}) string { return fmt.Sprint(v) } + +func DummyRun(_ *analysis.Pass) (any, error) { + return nil, nil +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/metalinter.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/metalinter.go index 5c24d10964..333ab20f1f 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/metalinter.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/metalinter.go @@ -2,8 +2,8 @@ package goanalysis import ( "context" + "fmt" - "github.com/pkg/errors" "golang.org/x/tools/go/analysis" "github.com/golangci/golangci-lint/pkg/lint/linter" @@ -24,7 +24,7 @@ func NewMetaLinter(linters []*Linter) *MetaLinter { func (ml MetaLinter) Run(_ context.Context, lintCtx *linter.Context) ([]result.Issue, error) { for _, l := range ml.linters { if err := l.preRun(lintCtx); err != nil { - return nil, errors.Wrapf(err, "failed to pre-run %s", l.Name()) + return nil, fmt.Errorf("failed to pre-run %s: %w", l.Name(), err) } } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/runner.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/runner.go index 8b460d16b4..a8660b6121 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/runner.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/runner.go @@ -11,12 +11,13 @@ package goanalysis import ( "encoding/gob" + "fmt" "go/token" "runtime" "sort" "sync" - "github.com/pkg/errors" + "golang.org/x/exp/maps" "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/packages" @@ -28,17 +29,17 @@ import ( ) var ( - debugf = logutils.Debug("goanalysis") + debugf = logutils.Debug(logutils.DebugKeyGoAnalysis) - analyzeDebugf = logutils.Debug("goanalysis/analyze") - isMemoryDebug = logutils.HaveDebugTag("goanalysis/memory") - issuesCacheDebugf = logutils.Debug("goanalysis/issues/cache") + analyzeDebugf = logutils.Debug(logutils.DebugKeyGoAnalysisAnalyze) + isMemoryDebug = logutils.HaveDebugTag(logutils.DebugKeyGoAnalysisMemory) + issuesCacheDebugf = logutils.Debug(logutils.DebugKeyGoAnalysisIssuesCache) - factsDebugf = logutils.Debug("goanalysis/facts") - factsCacheDebugf = logutils.Debug("goanalysis/facts/cache") - factsInheritDebugf = logutils.Debug("goanalysis/facts/inherit") - factsExportDebugf = logutils.Debug("goanalysis/facts") - isFactsExportDebug = logutils.HaveDebugTag("goanalysis/facts/export") + factsDebugf = logutils.Debug(logutils.DebugKeyGoAnalysisFacts) + factsCacheDebugf = logutils.Debug(logutils.DebugKeyGoAnalysisFactsCache) + factsInheritDebugf = logutils.Debug(logutils.DebugKeyGoAnalysisFactsInherit) + factsExportDebugf = logutils.Debug(logutils.DebugKeyGoAnalysisFacts) + isFactsExportDebug = logutils.HaveDebugTag(logutils.DebugKeyGoAnalysisFactsExport) ) type Diagnostic struct { @@ -159,10 +160,7 @@ func (r *runner) buildActionFactDeps(act *action, a *analysis.Analyzer, pkg 
*pac act.objectFacts = make(map[objectFactKey]analysis.Fact) act.packageFacts = make(map[packageFactKey]analysis.Fact) - paths := make([]string, 0, len(pkg.Imports)) - for path := range pkg.Imports { - paths = append(paths, path) - } + paths := maps.Keys(pkg.Imports) sort.Strings(paths) // for determinism for _, path := range paths { dep := r.makeAction(a, pkg.Imports[path], initialPkgs, actions, actAlloc) @@ -185,7 +183,7 @@ func (r *runner) prepareAnalysis(pkgs []*packages.Package, // and analysis-to-analysis (horizontal) dependencies. // This place is memory-intensive: e.g. Istio project has 120k total actions. - // Therefore optimize it carefully. + // Therefore, optimize it carefully. markedActions := make(map[actKey]struct{}, len(analyzers)*len(pkgs)) for _, a := range analyzers { for _, pkg := range pkgs { @@ -212,10 +210,7 @@ func (r *runner) prepareAnalysis(pkgs []*packages.Package, } } - allActions := make([]*action, 0, len(actions)) - for _, act := range actions { - allActions = append(allActions, act) - } + allActions := maps.Values(actions) debugf("Built %d actions", len(actions)) @@ -311,7 +306,7 @@ func extractDiagnostics(roots []*action) (retDiags []Diagnostic, retErrors []err if pe, ok := act.err.(*errorutil.PanicError); ok { panic(pe) } - retErrors = append(retErrors, errors.Wrap(act.err, act.a.Name)) + retErrors = append(retErrors, fmt.Errorf("%s: %w", act.a.Name, act.err)) return } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/runner_action.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/runner_action.go index 96c613e83e..6b57cb0c9a 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/runner_action.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/runner_action.go @@ -1,14 +1,14 @@ package goanalysis import ( + "errors" "fmt" "go/types" + "io" "reflect" "runtime/debug" "time" - "github.com/hashicorp/go-multierror" - "github.com/pkg/errors" "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/packages" "golang.org/x/tools/go/types/objectpath" @@ -49,7 +49,7 @@ type action struct { deps []*action objectFacts map[objectFactKey]analysis.Fact packageFacts map[packageFactKey]analysis.Fact - result interface{} + result any diagnostics []analysis.Diagnostic err error r *runner @@ -97,6 +97,13 @@ func (act *action) waitUntilDependingAnalyzersWorked() { func (act *action) analyzeSafe() { defer func() { if p := recover(); p != nil { + if !act.isroot { + // This line allows to display "hidden" panic with analyzers like buildssa. + // Some linters are dependent of sub-analyzers but when a sub-analyzer fails the linter is not aware of that, + // this results to another panic (ex: "interface conversion: interface {} is nil, not *buildssa.SSA"). + act.r.log.Errorf("%s: panic during analysis: %v, %s", act.a.Name, p, string(debug.Stack())) + } + act.err = errorutil.NewPanicError(fmt.Sprintf("%s: package %q (isInitialPkg: %t, needAnalyzeSource: %t): %s", act.a.Name, act.pkg.Name, act.isInitialPkg, act.needAnalyzeSource, p), debug.Stack()) } @@ -118,26 +125,22 @@ func (act *action) analyze() { }(time.Now()) // Report an error if any dependency failures. 
- var depErrors *multierror.Error + var depErrors error for _, dep := range act.deps { if dep.err == nil { continue } - depErrors = multierror.Append(depErrors, errors.Cause(dep.err)) + depErrors = errors.Join(depErrors, errors.Unwrap(dep.err)) } if depErrors != nil { - depErrors.ErrorFormat = func(e []error) string { - return fmt.Sprintf("failed prerequisites: %v", e) - } - - act.err = depErrors + act.err = fmt.Errorf("failed prerequisites: %w", depErrors) return } // Plumb the output values of the dependencies // into the inputs of this action. Also facts. - inputs := make(map[*analysis.Analyzer]interface{}) + inputs := make(map[*analysis.Analyzer]any) startedAt := time.Now() for _, dep := range act.deps { if dep.pkg == act.pkg { @@ -179,9 +182,9 @@ func (act *action) analyze() { if act.pkg.IllTyped { // It looks like there should be !pass.Analyzer.RunDespiteErrors - // but govet's cgocall crashes on it. Govet itself contains !pass.Analyzer.RunDespiteErrors condition here - // but it exit before it if packages.Load have failed. - act.err = errors.Wrap(&IllTypedError{Pkg: act.pkg}, "analysis skipped") + // but govet's cgocall crashes on it. Govet itself contains !pass.Analyzer.RunDespiteErrors condition here, + // but it exits before it if packages.Load have failed. + act.err = fmt.Errorf("analysis skipped: %w", &IllTypedError{Pkg: act.pkg}) } else { startedAt = time.Now() act.result, act.err = pass.Analyzer.Run(pass) @@ -331,7 +334,7 @@ func (act *action) loadPersistedFacts() bool { var facts []Fact key := fmt.Sprintf("%s/facts", act.a.Name) if err := act.r.pkgCache.Get(act.pkg, pkgcache.HashModeNeedAllDeps, key, &facts); err != nil { - if err != pkgcache.ErrMissing { + if !errors.Is(err, pkgcache.ErrMissing) && !errors.Is(err, io.EOF) { act.r.log.Warnf("Failed to get persisted facts: %s", err) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/runner_loadingpackage.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/runner_loadingpackage.go index 9fa396854e..e39e2212c3 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/runner_loadingpackage.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/runner_loadingpackage.go @@ -1,6 +1,7 @@ package goanalysis import ( + "errors" "fmt" "go/ast" "go/parser" @@ -11,7 +12,6 @@ import ( "sync" "sync/atomic" - "github.com/pkg/errors" "golang.org/x/tools/go/gcexportdata" "golang.org/x/tools/go/packages" @@ -59,9 +59,9 @@ func (lp *loadingPackage) analyze(loadMode LoadMode, loadSem chan struct{}) { defer lp.decUse(loadMode < LoadModeWholeProgram) if err := lp.loadWithFacts(loadMode); err != nil { - werr := errors.Wrapf(err, "failed to load package %s", lp.pkg.Name) + werr := fmt.Errorf("failed to load package %s: %w", lp.pkg.Name, err) // Don't need to write error to errCh, it will be extracted and reported on another layer. - // Unblock depending actions and propagate error. + // Unblock depending on actions and propagate error. 
for _, act := range lp.actions { close(act.analysisDoneCh) act.err = werr @@ -123,6 +123,7 @@ func (lp *loadingPackage) loadFromSource(loadMode LoadMode) error { pkg.TypesInfo = &types.Info{ Types: make(map[ast.Expr]types.TypeAndValue), + Instances: make(map[*ast.Ident]types.Instance), Defs: make(map[*ast.Ident]types.Object), Uses: make(map[*ast.Ident]types.Object), Implicits: make(map[ast.Node]types.Object), @@ -269,16 +270,16 @@ func (lp *loadingPackage) loadImportedPackageWithFacts(loadMode LoadMode) error // Load package from export data if loadMode >= LoadModeTypesInfo { if err := lp.loadFromExportData(); err != nil { - // We asked Go to give us up to date export data, yet + // We asked Go to give us up-to-date export data, yet // we can't load it. There must be something wrong. // // Attempt loading from source. This should fail (because // otherwise there would be export data); we just want to // get the compile errors. If loading from source succeeds - // we discard the result, anyway. Otherwise we'll fail + // we discard the result, anyway. Otherwise, we'll fail // when trying to reload from export data later. - // Otherwise it panics because uses already existing (from exported data) types. + // Otherwise, it panics because uses already existing (from exported data) types. pkg.Types = types.NewPackage(pkg.PkgPath, pkg.Name) if srcErr := lp.loadFromSource(loadMode); srcErr != nil { return srcErr @@ -289,7 +290,7 @@ func (lp *loadingPackage) loadImportedPackageWithFacts(loadMode LoadMode) error Msg: fmt.Sprintf("could not load export data: %s", err), Kind: packages.ParseError, }) - return errors.Wrap(err, "could not load export data") + return fmt.Errorf("could not load export data: %w", err) } } @@ -311,7 +312,7 @@ func (lp *loadingPackage) loadImportedPackageWithFacts(loadMode LoadMode) error // Cached facts loading failed: analyze later the action from source. To perform // the analysis we need to load the package from source code. - // Otherwise it panics because uses already existing (from exported data) types. + // Otherwise, it panics because uses already existing (from exported data) types. if loadMode >= LoadModeTypesInfo { pkg.Types = types.NewPackage(pkg.PkgPath, pkg.Name) } @@ -433,6 +434,7 @@ func (lp *loadingPackage) convertError(err error) []packages.Error { // If you see this error message, please file a bug. 
lp.log.Warnf("Internal error: error %q (%T) without position", err, err) } + return errs } @@ -444,7 +446,7 @@ type importerFunc func(path string) (*types.Package, error) func (f importerFunc) Import(path string) (*types.Package, error) { return f(path) } -func sizeOfValueTreeBytes(v interface{}) int { +func sizeOfValueTreeBytes(v any) int { return sizeOfReflectValueTreeBytes(reflect.ValueOf(v), map[uintptr]struct{}{}) } @@ -492,6 +494,6 @@ func sizeOfReflectValueTreeBytes(rv reflect.Value, visitedPtrs map[uintptr]struc case reflect.Invalid: return 0 default: - panic("unknown rv of type " + fmt.Sprint(rv)) + panic("unknown rv of type " + rv.String()) } } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/runners.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/runners.go index 7e4cf902e7..559fb1392b 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/runners.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/runners.go @@ -14,6 +14,7 @@ import ( "github.com/golangci/golangci-lint/internal/pkgcache" "github.com/golangci/golangci-lint/pkg/lint/linter" + "github.com/golangci/golangci-lint/pkg/logutils" "github.com/golangci/golangci-lint/pkg/result" "github.com/golangci/golangci-lint/pkg/timeutils" ) @@ -28,7 +29,7 @@ type runAnalyzersConfig interface { } func runAnalyzers(cfg runAnalyzersConfig, lintCtx *linter.Context) ([]result.Issue, error) { - log := lintCtx.Log.Child("goanalysis") + log := lintCtx.Log.Child(logutils.DebugKeyGoAnalysis) sw := timeutils.NewStopwatch("analyzers", log) const stagesToPrint = 10 diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gocheckcompilerdirectives.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gocheckcompilerdirectives.go new file mode 100644 index 0000000000..2592c89946 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gocheckcompilerdirectives.go @@ -0,0 +1,19 @@ +package golinters + +import ( + "4d63.com/gocheckcompilerdirectives/checkcompilerdirectives" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewGoCheckCompilerDirectives() *goanalysis.Linter { + a := checkcompilerdirectives.Analyzer() + + return goanalysis.NewLinter( + a.Name, + a.Doc, + []*analysis.Analyzer{a}, + nil, + ).WithLoadMode(goanalysis.LoadModeSyntax) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gochecknoglobals.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gochecknoglobals.go index 804865cfcb..6e18aeb27d 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gochecknoglobals.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gochecknoglobals.go @@ -10,11 +10,11 @@ import ( func NewGochecknoglobals() *goanalysis.Linter { gochecknoglobals := checknoglobals.Analyzer() - // gochecknoglobals only lints test files if the `-t` flag is passed so we + // gochecknoglobals only lints test files if the `-t` flag is passed, so we // pass the `t` flag as true to the analyzer before running it. This can be - // turned of by using the regular golangci-lint flags such as `--tests` or + // turned off by using the regular golangci-lint flags such as `--tests` or // `--skip-files`. 
- linterConfig := map[string]map[string]interface{}{ + linterConfig := map[string]map[string]any{ gochecknoglobals.Name: { "t": true, }, @@ -25,5 +25,5 @@ func NewGochecknoglobals() *goanalysis.Linter { gochecknoglobals.Doc, []*analysis.Analyzer{gochecknoglobals}, linterConfig, - ).WithLoadMode(goanalysis.LoadModeSyntax) + ).WithLoadMode(goanalysis.LoadModeTypesInfo) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gochecknoinits.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gochecknoinits.go index f9715bda86..a51b531b94 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gochecknoinits.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gochecknoinits.go @@ -22,7 +22,7 @@ func NewGochecknoinits() *goanalysis.Linter { analyzer := &analysis.Analyzer{ Name: gochecknoinitsName, Doc: goanalysis.TheOnlyanalyzerDoc, - Run: func(pass *analysis.Pass) (interface{}, error) { + Run: func(pass *analysis.Pass) (any, error) { var res []goanalysis.Issue for _, file := range pass.Files { fileIssues := checkFileForInits(file, pass.Fset) @@ -41,6 +41,7 @@ func NewGochecknoinits() *goanalysis.Linter { return nil, nil }, } + return goanalysis.NewLinter( gochecknoinitsName, "Checks that no init functions are present in Go code", diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gochecksumtype.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gochecksumtype.go new file mode 100644 index 0000000000..5516179dfc --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gochecksumtype.go @@ -0,0 +1,80 @@ +package golinters + +import ( + "strings" + "sync" + + gochecksumtype "github.com/alecthomas/go-check-sumtype" + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/packages" + + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" + "github.com/golangci/golangci-lint/pkg/lint/linter" + "github.com/golangci/golangci-lint/pkg/result" +) + +const goCheckSumTypeName = "gochecksumtype" + +func NewGoCheckSumType() *goanalysis.Linter { + var mu sync.Mutex + var resIssues []goanalysis.Issue + + analyzer := &analysis.Analyzer{ + Name: goCheckSumTypeName, + Doc: goanalysis.TheOnlyanalyzerDoc, + Run: func(pass *analysis.Pass) (any, error) { + issues, err := runGoCheckSumType(pass) + if err != nil { + return nil, err + } + + if len(issues) == 0 { + return nil, nil + } + + mu.Lock() + resIssues = append(resIssues, issues...) 
+ mu.Unlock() + + return nil, nil + }, + } + + return goanalysis.NewLinter( + goCheckSumTypeName, + `Run exhaustiveness checks on Go "sum types"`, + []*analysis.Analyzer{analyzer}, + nil, + ).WithIssuesReporter(func(_ *linter.Context) []goanalysis.Issue { + return resIssues + }).WithLoadMode(goanalysis.LoadModeTypesInfo) +} + +func runGoCheckSumType(pass *analysis.Pass) ([]goanalysis.Issue, error) { + var resIssues []goanalysis.Issue + + pkg := &packages.Package{ + Fset: pass.Fset, + Syntax: pass.Files, + Types: pass.Pkg, + TypesInfo: pass.TypesInfo, + } + + var unknownError error + errors := gochecksumtype.Run([]*packages.Package{pkg}) + for _, err := range errors { + err, ok := err.(gochecksumtype.Error) + if !ok { + unknownError = err + continue + } + + resIssues = append(resIssues, goanalysis.NewIssue(&result.Issue{ + FromLinter: goCheckSumTypeName, + Text: strings.TrimPrefix(err.Error(), err.Pos().String()+": "), + Pos: err.Pos(), + }, pass)) + } + + return resIssues, unknownError +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gocognit.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gocognit.go index eb42dd149c..406d34ed6c 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gocognit.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gocognit.go @@ -8,6 +8,7 @@ import ( "github.com/uudashr/gocognit" "golang.org/x/tools/go/analysis" + "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" "github.com/golangci/golangci-lint/pkg/lint/linter" "github.com/golangci/golangci-lint/pkg/result" @@ -15,54 +16,65 @@ import ( const gocognitName = "gocognit" -func NewGocognit() *goanalysis.Linter { +//nolint:dupl +func NewGocognit(settings *config.GocognitSettings) *goanalysis.Linter { var mu sync.Mutex var resIssues []goanalysis.Issue analyzer := &analysis.Analyzer{ Name: goanalysis.TheOnlyAnalyzerName, Doc: goanalysis.TheOnlyanalyzerDoc, + Run: func(pass *analysis.Pass) (any, error) { + issues := runGocognit(pass, settings) + + if len(issues) == 0 { + return nil, nil + } + + mu.Lock() + resIssues = append(resIssues, issues...) 
+ mu.Unlock() + + return nil, nil + }, } + return goanalysis.NewLinter( gocognitName, "Computes and checks the cognitive complexity of functions", []*analysis.Analyzer{analyzer}, nil, - ).WithContextSetter(func(lintCtx *linter.Context) { - analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { - var stats []gocognit.Stat - for _, f := range pass.Files { - stats = gocognit.ComplexityStats(f, pass.Fset, stats) - } - if len(stats) == 0 { - return nil, nil - } + ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + return resIssues + }).WithLoadMode(goanalysis.LoadModeSyntax) +} - sort.SliceStable(stats, func(i, j int) bool { - return stats[i].Complexity > stats[j].Complexity - }) +func runGocognit(pass *analysis.Pass, settings *config.GocognitSettings) []goanalysis.Issue { + var stats []gocognit.Stat + for _, f := range pass.Files { + stats = gocognit.ComplexityStats(f, pass.Fset, stats) + } + if len(stats) == 0 { + return nil + } - res := make([]goanalysis.Issue, 0, len(stats)) - for _, s := range stats { - if s.Complexity <= lintCtx.Settings().Gocognit.MinComplexity { - break // Break as the stats is already sorted from greatest to least - } + sort.SliceStable(stats, func(i, j int) bool { + return stats[i].Complexity > stats[j].Complexity + }) - res = append(res, goanalysis.NewIssue(&result.Issue{ - Pos: s.Pos, - Text: fmt.Sprintf("cognitive complexity %d of func %s is high (> %d)", - s.Complexity, formatCode(s.FuncName, lintCtx.Cfg), lintCtx.Settings().Gocognit.MinComplexity), - FromLinter: gocognitName, - }, pass)) - } + issues := make([]goanalysis.Issue, 0, len(stats)) + for _, s := range stats { + if s.Complexity <= settings.MinComplexity { + break // Break as the stats is already sorted from greatest to least + } - mu.Lock() - resIssues = append(resIssues, res...) 
- mu.Unlock() + issues = append(issues, goanalysis.NewIssue(&result.Issue{ + Pos: s.Pos, + Text: fmt.Sprintf("cognitive complexity %d of func %s is high (> %d)", + s.Complexity, formatCode(s.FuncName, nil), settings.MinComplexity), + FromLinter: gocognitName, + }, pass)) + } - return nil, nil - } - }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { - return resIssues - }).WithLoadMode(goanalysis.LoadModeSyntax) + return issues } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goconst.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goconst.go index 0801ee15d8..b518852754 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goconst.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goconst.go @@ -7,6 +7,7 @@ import ( goconstAPI "github.com/jgautheron/goconst" "golang.org/x/tools/go/analysis" + "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" "github.com/golangci/golangci-lint/pkg/lint/linter" "github.com/golangci/golangci-lint/pkg/result" @@ -14,72 +15,81 @@ import ( const goconstName = "goconst" -func NewGoconst() *goanalysis.Linter { +//nolint:dupl +func NewGoconst(settings *config.GoConstSettings) *goanalysis.Linter { var mu sync.Mutex var resIssues []goanalysis.Issue analyzer := &analysis.Analyzer{ Name: goconstName, Doc: goanalysis.TheOnlyanalyzerDoc, - } - return goanalysis.NewLinter( - goconstName, - "Finds repeated strings that could be replaced by a constant", - []*analysis.Analyzer{analyzer}, - nil, - ).WithContextSetter(func(lintCtx *linter.Context) { - analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { - issues, err := checkConstants(pass, lintCtx) - if err != nil || len(issues) == 0 { + Run: func(pass *analysis.Pass) (any, error) { + issues, err := runGoconst(pass, settings) + if err != nil { return nil, err } + if len(issues) == 0 { + return nil, nil + } + mu.Lock() resIssues = append(resIssues, issues...) 
mu.Unlock() return nil, nil - } - }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + }, + } + + return goanalysis.NewLinter( + goconstName, + "Finds repeated strings that could be replaced by a constant", + []*analysis.Analyzer{analyzer}, + nil, + ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { return resIssues }).WithLoadMode(goanalysis.LoadModeSyntax) } -func checkConstants(pass *analysis.Pass, lintCtx *linter.Context) ([]goanalysis.Issue, error) { +func runGoconst(pass *analysis.Pass, settings *config.GoConstSettings) ([]goanalysis.Issue, error) { cfg := goconstAPI.Config{ - IgnoreTests: lintCtx.Settings().Goconst.IgnoreTests, - MatchWithConstants: lintCtx.Settings().Goconst.MatchWithConstants, - MinStringLength: lintCtx.Settings().Goconst.MinStringLen, - MinOccurrences: lintCtx.Settings().Goconst.MinOccurrencesCount, - ParseNumbers: lintCtx.Settings().Goconst.ParseNumbers, - NumberMin: lintCtx.Settings().Goconst.NumberMin, - NumberMax: lintCtx.Settings().Goconst.NumberMax, + IgnoreStrings: settings.IgnoreStrings, + IgnoreTests: settings.IgnoreTests, + MatchWithConstants: settings.MatchWithConstants, + MinStringLength: settings.MinStringLen, + MinOccurrences: settings.MinOccurrencesCount, + ParseNumbers: settings.ParseNumbers, + NumberMin: settings.NumberMin, + NumberMax: settings.NumberMax, ExcludeTypes: map[goconstAPI.Type]bool{}, } - if lintCtx.Settings().Goconst.IgnoreCalls { + + if settings.IgnoreCalls { cfg.ExcludeTypes[goconstAPI.Call] = true } - goconstIssues, err := goconstAPI.Run(pass.Files, pass.Fset, &cfg) + + lintIssues, err := goconstAPI.Run(pass.Files, pass.Fset, &cfg) if err != nil { return nil, err } - if len(goconstIssues) == 0 { + if len(lintIssues) == 0 { return nil, nil } - res := make([]goanalysis.Issue, 0, len(goconstIssues)) - for _, i := range goconstIssues { - textBegin := fmt.Sprintf("string %s has %d occurrences", formatCode(i.Str, lintCtx.Cfg), i.OccurrencesCount) - var textEnd string + res := make([]goanalysis.Issue, 0, len(lintIssues)) + for _, i := range lintIssues { + text := fmt.Sprintf("string %s has %d occurrences", formatCode(i.Str, nil), i.OccurrencesCount) + if i.MatchingConst == "" { - textEnd = ", make it a constant" + text += ", make it a constant" } else { - textEnd = fmt.Sprintf(", but such constant %s already exists", formatCode(i.MatchingConst, lintCtx.Cfg)) + text += fmt.Sprintf(", but such constant %s already exists", formatCode(i.MatchingConst, nil)) } + res = append(res, goanalysis.NewIssue(&result.Issue{ Pos: i.Pos, - Text: textBegin + textEnd, + Text: text, FromLinter: goconstName, }, pass)) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gocritic.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gocritic.go index 75eb7d3076..3cf43afc62 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gocritic.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gocritic.go @@ -1,126 +1,148 @@ package golinters import ( + "errors" "fmt" "go/ast" "go/types" "path/filepath" + "reflect" "runtime" "sort" "strings" "sync" - gocriticlinter "github.com/go-critic/go-critic/framework/linter" + "github.com/go-critic/go-critic/checkers" + gocriticlinter "github.com/go-critic/go-critic/linter" + "golang.org/x/exp/maps" "golang.org/x/tools/go/analysis" "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" "github.com/golangci/golangci-lint/pkg/lint/linter" + "github.com/golangci/golangci-lint/pkg/logutils" 
"github.com/golangci/golangci-lint/pkg/result" ) -const gocriticName = "gocritic" +const goCriticName = "gocritic" -func NewGocritic() *goanalysis.Linter { +var ( + goCriticDebugf = logutils.Debug(logutils.DebugKeyGoCritic) + isGoCriticDebug = logutils.HaveDebugTag(logutils.DebugKeyGoCritic) +) + +func NewGoCritic(settings *config.GoCriticSettings, cfg *config.Config) *goanalysis.Linter { var mu sync.Mutex var resIssues []goanalysis.Issue - sizes := types.SizesFor("gc", runtime.GOARCH) + wrapper := &goCriticWrapper{ + cfg: cfg, + sizes: types.SizesFor("gc", runtime.GOARCH), + } analyzer := &analysis.Analyzer{ - Name: gocriticName, + Name: goCriticName, Doc: goanalysis.TheOnlyanalyzerDoc, - } - return goanalysis.NewLinter( - gocriticName, - `Provides many diagnostics that check for bugs, performance and style issues. -Extensible without recompilation through dynamic rules. -Dynamic rules are written declaratively with AST patterns, filters, report message and optional suggestion.`, - []*analysis.Analyzer{analyzer}, - nil, - ).WithContextSetter(func(lintCtx *linter.Context) { - analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { - linterCtx := gocriticlinter.NewContext(pass.Fset, sizes) - enabledCheckers, err := buildEnabledCheckers(lintCtx, linterCtx) + Run: func(pass *analysis.Pass) (any, error) { + issues, err := wrapper.run(pass) if err != nil { return nil, err } - linterCtx.SetPackageInfo(pass.TypesInfo, pass.Pkg) - var res []goanalysis.Issue - pkgIssues := runGocriticOnPackage(linterCtx, enabledCheckers, pass.Files) - for i := range pkgIssues { - res = append(res, goanalysis.NewIssue(&pkgIssues[i], pass)) - } - if len(res) == 0 { + if len(issues) == 0 { return nil, nil } mu.Lock() - resIssues = append(resIssues, res...) + resIssues = append(resIssues, issues...) mu.Unlock() return nil, nil - } - }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { - return resIssues - }).WithLoadMode(goanalysis.LoadModeTypesInfo) + }, + } + + return goanalysis.NewLinter( + goCriticName, + `Provides diagnostics that check for bugs, performance and style issues. +Extensible without recompilation through dynamic rules. +Dynamic rules are written declaratively with AST patterns, filters, report message and optional suggestion.`, + []*analysis.Analyzer{analyzer}, + nil, + ). + WithContextSetter(func(context *linter.Context) { + wrapper.init(settings, context.Log) + }). 
+ WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + return resIssues + }).WithLoadMode(goanalysis.LoadModeTypesInfo) } -func normalizeCheckerInfoParams(info *gocriticlinter.CheckerInfo) gocriticlinter.CheckerParams { - // lowercase info param keys here because golangci-lint's config parser lowercases all strings - ret := gocriticlinter.CheckerParams{} - for k, v := range info.Params { - ret[strings.ToLower(k)] = v +type goCriticWrapper struct { + settingsWrapper *goCriticSettingsWrapper + cfg *config.Config + sizes types.Sizes + once sync.Once +} + +func (w *goCriticWrapper) init(settings *config.GoCriticSettings, logger logutils.Log) { + if settings == nil { + return } - return ret + w.once.Do(func() { + err := checkers.InitEmbeddedRules() + if err != nil { + logger.Fatalf("%s: %v: setting an explicit GOROOT can fix this problem.", goCriticName, err) + } + }) + + settingsWrapper := newGoCriticSettingsWrapper(settings, logger) + + settingsWrapper.inferEnabledChecks() + + if err := settingsWrapper.validate(); err != nil { + logger.Fatalf("%s: invalid settings: %s", goCriticName, err) + } + + w.settingsWrapper = settingsWrapper } -func configureCheckerInfo(info *gocriticlinter.CheckerInfo, allParams map[string]config.GocriticCheckSettings) error { - params := allParams[strings.ToLower(info.Name)] - if params == nil { // no config for this checker - return nil +func (w *goCriticWrapper) run(pass *analysis.Pass) ([]goanalysis.Issue, error) { + if w.settingsWrapper == nil { + return nil, fmt.Errorf("the settings wrapper is nil") } - infoParams := normalizeCheckerInfoParams(info) - for k, p := range params { - v, ok := infoParams[k] - if ok { - v.Value = p - continue - } + linterCtx := gocriticlinter.NewContext(pass.Fset, w.sizes) - // param `k` isn't supported - if len(info.Params) == 0 { - return fmt.Errorf("checker %s config param %s doesn't exist: checker doesn't have params", - info.Name, k) - } + linterCtx.SetGoVersion(w.settingsWrapper.Go) - var supportedKeys []string - for sk := range info.Params { - supportedKeys = append(supportedKeys, sk) - } - sort.Strings(supportedKeys) + enabledCheckers, err := w.buildEnabledCheckers(linterCtx) + if err != nil { + return nil, err + } - return fmt.Errorf("checker %s config param %s doesn't exist, all existing: %s", - info.Name, k, supportedKeys) + linterCtx.SetPackageInfo(pass.TypesInfo, pass.Pkg) + + pkgIssues := runGocriticOnPackage(linterCtx, enabledCheckers, pass.Files) + + issues := make([]goanalysis.Issue, 0, len(pkgIssues)) + for i := range pkgIssues { + issues = append(issues, goanalysis.NewIssue(&pkgIssues[i], pass)) } - return nil + return issues, nil } -func buildEnabledCheckers(lintCtx *linter.Context, linterCtx *gocriticlinter.Context) ([]*gocriticlinter.Checker, error) { - s := lintCtx.Settings().Gocritic - allParams := s.GetLowercasedParams() +func (w *goCriticWrapper) buildEnabledCheckers(linterCtx *gocriticlinter.Context) ([]*gocriticlinter.Checker, error) { + allParams := w.settingsWrapper.getLowerCasedParams() var enabledCheckers []*gocriticlinter.Checker for _, info := range gocriticlinter.GetCheckersInfo() { - if !s.IsCheckEnabled(info.Name) { + if !w.settingsWrapper.isCheckEnabled(info.Name) { continue } - if err := configureCheckerInfo(info, allParams); err != nil { + if err := w.configureCheckerInfo(info, allParams); err != nil { return nil, err } @@ -134,34 +156,475 @@ func buildEnabledCheckers(lintCtx *linter.Context, linterCtx *gocriticlinter.Con return enabledCheckers, nil } -func 
runGocriticOnPackage(linterCtx *gocriticlinter.Context, checkers []*gocriticlinter.Checker, +func runGocriticOnPackage(linterCtx *gocriticlinter.Context, checks []*gocriticlinter.Checker, files []*ast.File) []result.Issue { var res []result.Issue for _, f := range files { filename := filepath.Base(linterCtx.FileSet.Position(f.Pos()).Filename) linterCtx.SetFileInfo(filename, f) - issues := runGocriticOnFile(linterCtx, f, checkers) + issues := runGocriticOnFile(linterCtx, f, checks) res = append(res, issues...) } return res } -func runGocriticOnFile(ctx *gocriticlinter.Context, f *ast.File, checkers []*gocriticlinter.Checker) []result.Issue { +func runGocriticOnFile(linterCtx *gocriticlinter.Context, f *ast.File, checks []*gocriticlinter.Checker) []result.Issue { var res []result.Issue - for _, c := range checkers { + for _, c := range checks { // All checkers are expected to use *lint.Context // as read-only structure, so no copying is required. for _, warn := range c.Check(f) { - pos := ctx.FileSet.Position(warn.Node.Pos()) - res = append(res, result.Issue{ + pos := linterCtx.FileSet.Position(warn.Pos) + issue := result.Issue{ Pos: pos, Text: fmt.Sprintf("%s: %s", c.Info.Name, warn.Text), - FromLinter: gocriticName, - }) + FromLinter: goCriticName, + } + + if warn.HasQuickFix() { + issue.Replacement = &result.Replacement{ + Inline: &result.InlineFix{ + StartCol: pos.Column - 1, + Length: int(warn.Suggestion.To - warn.Suggestion.From), + NewString: string(warn.Suggestion.Replacement), + }, + } + } + + res = append(res, issue) } } return res } + +func (w *goCriticWrapper) configureCheckerInfo(info *gocriticlinter.CheckerInfo, allParams map[string]config.GoCriticCheckSettings) error { + params := allParams[strings.ToLower(info.Name)] + if params == nil { // no config for this checker + return nil + } + + infoParams := normalizeCheckerInfoParams(info) + for k, p := range params { + v, ok := infoParams[k] + if ok { + v.Value = w.normalizeCheckerParamsValue(p) + continue + } + + // param `k` isn't supported + if len(info.Params) == 0 { + return fmt.Errorf("checker %s config param %s doesn't exist: checker doesn't have params", + info.Name, k) + } + + supportedKeys := maps.Keys(info.Params) + sort.Strings(supportedKeys) + + return fmt.Errorf("checker %s config param %s doesn't exist, all existing: %s", + info.Name, k, supportedKeys) + } + + return nil +} + +func normalizeCheckerInfoParams(info *gocriticlinter.CheckerInfo) gocriticlinter.CheckerParams { + // lowercase info param keys here because golangci-lint's config parser lowercases all strings + ret := gocriticlinter.CheckerParams{} + for k, v := range info.Params { + ret[strings.ToLower(k)] = v + } + + return ret +} + +// normalizeCheckerParamsValue normalizes value types. +// go-critic asserts that CheckerParam.Value has some specific types, +// but the file parsers (TOML, YAML, JSON) don't create the same representation for raw type. +// then we have to convert value types into the expected value types. +// Maybe in the future, this kind of conversion will be done in go-critic itself. +func (w *goCriticWrapper) normalizeCheckerParamsValue(p any) any { + rv := reflect.ValueOf(p) + switch rv.Type().Kind() { + case reflect.Int64, reflect.Int32, reflect.Int16, reflect.Int8, reflect.Int: + return int(rv.Int()) + case reflect.Bool: + return rv.Bool() + case reflect.String: + // Perform variable substitution. 
+ return strings.ReplaceAll(rv.String(), "${configDir}", w.cfg.GetConfigDir()) + default: + return p + } +} + +// TODO(ldez): rewrite and simplify goCriticSettingsWrapper. + +type goCriticSettingsWrapper struct { + *config.GoCriticSettings + + logger logutils.Log + + allCheckers []*gocriticlinter.CheckerInfo + allCheckerMap map[string]*gocriticlinter.CheckerInfo + + inferredEnabledChecks map[string]bool +} + +func newGoCriticSettingsWrapper(settings *config.GoCriticSettings, logger logutils.Log) *goCriticSettingsWrapper { + allCheckers := gocriticlinter.GetCheckersInfo() + + allCheckerMap := make(map[string]*gocriticlinter.CheckerInfo) + for _, checkInfo := range allCheckers { + allCheckerMap[checkInfo.Name] = checkInfo + } + + return &goCriticSettingsWrapper{ + GoCriticSettings: settings, + logger: logger, + allCheckers: allCheckers, + allCheckerMap: allCheckerMap, + inferredEnabledChecks: map[string]bool{}, + } +} + +func (s *goCriticSettingsWrapper) buildTagToCheckersMap() map[string][]string { + tagToCheckers := map[string][]string{} + + for _, checker := range s.allCheckers { + for _, tag := range checker.Tags { + tagToCheckers[tag] = append(tagToCheckers[tag], checker.Name) + } + } + + return tagToCheckers +} + +func (s *goCriticSettingsWrapper) checkerTagsDebugf() { + if !isGoCriticDebug { + return + } + + tagToCheckers := s.buildTagToCheckersMap() + + allTags := maps.Keys(tagToCheckers) + sort.Strings(allTags) + + goCriticDebugf("All gocritic existing tags and checks:") + for _, tag := range allTags { + debugChecksListf(tagToCheckers[tag], " tag %q", tag) + } +} + +func (s *goCriticSettingsWrapper) disabledCheckersDebugf() { + if !isGoCriticDebug { + return + } + + var disabledCheckers []string + for _, checker := range s.allCheckers { + if s.inferredEnabledChecks[strings.ToLower(checker.Name)] { + continue + } + + disabledCheckers = append(disabledCheckers, checker.Name) + } + + if len(disabledCheckers) == 0 { + goCriticDebugf("All checks are enabled") + } else { + debugChecksListf(disabledCheckers, "Final not used") + } +} + +func (s *goCriticSettingsWrapper) inferEnabledChecks() { + s.checkerTagsDebugf() + + enabledByDefaultChecks := s.getDefaultEnabledCheckersNames() + debugChecksListf(enabledByDefaultChecks, "Enabled by default") + + disabledByDefaultChecks := s.getDefaultDisabledCheckersNames() + debugChecksListf(disabledByDefaultChecks, "Disabled by default") + + enabledChecks := make([]string, 0, len(s.EnabledTags)+len(enabledByDefaultChecks)) + + // EnabledTags + if len(s.EnabledTags) != 0 { + tagToCheckers := s.buildTagToCheckersMap() + for _, tag := range s.EnabledTags { + enabledChecks = append(enabledChecks, tagToCheckers[tag]...) + } + + debugChecksListf(enabledChecks, "Enabled by config tags %s", sprintStrings(s.EnabledTags)) + } + + if !(len(s.EnabledTags) == 0 && len(s.EnabledChecks) != 0) { + // don't use default checks only if we have no enabled tags and enable some checks manually + enabledChecks = append(enabledChecks, enabledByDefaultChecks...) 
+ } + + // DisabledTags + if len(s.DisabledTags) != 0 { + enabledChecks = s.filterByDisableTags(enabledChecks, s.DisabledTags) + } + + // EnabledChecks + if len(s.EnabledChecks) != 0 { + debugChecksListf(s.EnabledChecks, "Enabled by config") + + alreadyEnabledChecksSet := stringsSliceToSet(enabledChecks) + for _, enabledCheck := range s.EnabledChecks { + if alreadyEnabledChecksSet[enabledCheck] { + s.logger.Warnf("%s: no need to enable check %q: it's already enabled", goCriticName, enabledCheck) + continue + } + enabledChecks = append(enabledChecks, enabledCheck) + } + } + + // DisabledChecks + if len(s.DisabledChecks) != 0 { + debugChecksListf(s.DisabledChecks, "Disabled by config") + + enabledChecksSet := stringsSliceToSet(enabledChecks) + for _, disabledCheck := range s.DisabledChecks { + if !enabledChecksSet[disabledCheck] { + s.logger.Warnf("%s: check %q was explicitly disabled via config. However, as this check "+ + "is disabled by default, there is no need to explicitly disable it via config.", goCriticName, disabledCheck) + continue + } + delete(enabledChecksSet, disabledCheck) + } + + enabledChecks = nil + for enabledCheck := range enabledChecksSet { + enabledChecks = append(enabledChecks, enabledCheck) + } + } + + s.inferredEnabledChecks = map[string]bool{} + for _, check := range enabledChecks { + s.inferredEnabledChecks[strings.ToLower(check)] = true + } + + debugChecksListf(enabledChecks, "Final used") + + s.disabledCheckersDebugf() +} + +func (s *goCriticSettingsWrapper) validate() error { + if len(s.EnabledTags) == 0 { + if len(s.EnabledChecks) != 0 && len(s.DisabledChecks) != 0 { + return errors.New("both enabled and disabled check aren't allowed for gocritic") + } + } else { + if err := validateStringsUniq(s.EnabledTags); err != nil { + return fmt.Errorf("validate enabled tags: %w", err) + } + + tagToCheckers := s.buildTagToCheckersMap() + + for _, tag := range s.EnabledTags { + if _, ok := tagToCheckers[tag]; !ok { + return fmt.Errorf("gocritic [enabled]tag %q doesn't exist", tag) + } + } + } + + if len(s.DisabledTags) > 0 { + tagToCheckers := s.buildTagToCheckersMap() + for _, tag := range s.EnabledTags { + if _, ok := tagToCheckers[tag]; !ok { + return fmt.Errorf("gocritic [disabled]tag %q doesn't exist", tag) + } + } + } + + if err := validateStringsUniq(s.EnabledChecks); err != nil { + return fmt.Errorf("validate enabled checks: %w", err) + } + + if err := validateStringsUniq(s.DisabledChecks); err != nil { + return fmt.Errorf("validate disabled checks: %w", err) + } + + if err := s.validateCheckerNames(); err != nil { + return fmt.Errorf("validation failed: %w", err) + } + + return nil +} + +func (s *goCriticSettingsWrapper) isCheckEnabled(name string) bool { + return s.inferredEnabledChecks[strings.ToLower(name)] +} + +// getAllCheckerNames returns a map containing all checker names supported by gocritic. 
+func (s *goCriticSettingsWrapper) getAllCheckerNames() map[string]bool { + allCheckerNames := make(map[string]bool, len(s.allCheckers)) + + for _, checker := range s.allCheckers { + allCheckerNames[strings.ToLower(checker.Name)] = true + } + + return allCheckerNames +} + +func (s *goCriticSettingsWrapper) getDefaultEnabledCheckersNames() []string { + var enabled []string + + for _, info := range s.allCheckers { + enable := s.isEnabledByDefaultCheck(info) + if enable { + enabled = append(enabled, info.Name) + } + } + + return enabled +} + +func (s *goCriticSettingsWrapper) getDefaultDisabledCheckersNames() []string { + var disabled []string + + for _, info := range s.allCheckers { + enable := s.isEnabledByDefaultCheck(info) + if !enable { + disabled = append(disabled, info.Name) + } + } + + return disabled +} + +func (s *goCriticSettingsWrapper) validateCheckerNames() error { + allowedNames := s.getAllCheckerNames() + + for _, name := range s.EnabledChecks { + if !allowedNames[strings.ToLower(name)] { + return fmt.Errorf("enabled checker %s doesn't exist, all existing checkers: %s", + name, sprintAllowedCheckerNames(allowedNames)) + } + } + + for _, name := range s.DisabledChecks { + if !allowedNames[strings.ToLower(name)] { + return fmt.Errorf("disabled checker %s doesn't exist, all existing checkers: %s", + name, sprintAllowedCheckerNames(allowedNames)) + } + } + + for checkName := range s.SettingsPerCheck { + if _, ok := allowedNames[checkName]; !ok { + return fmt.Errorf("invalid setting, checker %s doesn't exist, all existing checkers: %s", + checkName, sprintAllowedCheckerNames(allowedNames)) + } + + if !s.isCheckEnabled(checkName) { + s.logger.Warnf("%s: settings were provided for not enabled check %q", goCriticName, checkName) + } + } + + return nil +} + +func (s *goCriticSettingsWrapper) getLowerCasedParams() map[string]config.GoCriticCheckSettings { + ret := make(map[string]config.GoCriticCheckSettings, len(s.SettingsPerCheck)) + + for checker, params := range s.SettingsPerCheck { + ret[strings.ToLower(checker)] = params + } + + return ret +} + +func (s *goCriticSettingsWrapper) filterByDisableTags(enabledChecks, disableTags []string) []string { + enabledChecksSet := stringsSliceToSet(enabledChecks) + + for _, enabledCheck := range enabledChecks { + checkInfo, checkInfoExists := s.allCheckerMap[enabledCheck] + if !checkInfoExists { + s.logger.Warnf("%s: check %q was not exists via filtering disabled tags", goCriticName, enabledCheck) + continue + } + + hitTags := intersectStringSlice(checkInfo.Tags, disableTags) + if len(hitTags) != 0 { + delete(enabledChecksSet, enabledCheck) + } + } + + debugChecksListf(enabledChecks, "Disabled by config tags %s", sprintStrings(disableTags)) + + enabledChecks = nil + for enabledCheck := range enabledChecksSet { + enabledChecks = append(enabledChecks, enabledCheck) + } + + return enabledChecks +} + +func (s *goCriticSettingsWrapper) isEnabledByDefaultCheck(info *gocriticlinter.CheckerInfo) bool { + return !info.HasTag("experimental") && + !info.HasTag("opinionated") && + !info.HasTag("performance") +} + +func validateStringsUniq(ss []string) error { + set := map[string]bool{} + + for _, s := range ss { + _, ok := set[s] + if ok { + return fmt.Errorf("%q occurs multiple times in list", s) + } + set[s] = true + } + + return nil +} + +func intersectStringSlice(s1, s2 []string) []string { + s1Map := make(map[string]struct{}, len(s1)) + + for _, s := range s1 { + s1Map[s] = struct{}{} + } + + results := make([]string, 0) + for _, s := range s2 { + if 
_, exists := s1Map[s]; exists { + results = append(results, s) + } + } + + return results +} + +func sprintAllowedCheckerNames(allowedNames map[string]bool) string { + namesSlice := maps.Keys(allowedNames) + return sprintStrings(namesSlice) +} + +func sprintStrings(ss []string) string { + sort.Strings(ss) + return fmt.Sprint(ss) +} + +func debugChecksListf(checks []string, format string, args ...any) { + if !isGoCriticDebug { + return + } + + goCriticDebugf("%s checks (%d): %s", fmt.Sprintf(format, args...), len(checks), sprintStrings(checks)) +} + +func stringsSliceToSet(ss []string) map[string]bool { + ret := make(map[string]bool, len(ss)) + for _, s := range ss { + ret[s] = true + } + + return ret +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gocyclo.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gocyclo.go index 5c61fec72c..b502623ba6 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gocyclo.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gocyclo.go @@ -7,6 +7,7 @@ import ( "github.com/fzipp/gocyclo" "golang.org/x/tools/go/analysis" + "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" "github.com/golangci/golangci-lint/pkg/lint/linter" "github.com/golangci/golangci-lint/pkg/result" @@ -14,48 +15,62 @@ import ( const gocycloName = "gocyclo" -func NewGocyclo() *goanalysis.Linter { +//nolint:dupl +func NewGocyclo(settings *config.GoCycloSettings) *goanalysis.Linter { var mu sync.Mutex var resIssues []goanalysis.Issue analyzer := &analysis.Analyzer{ Name: gocycloName, Doc: goanalysis.TheOnlyanalyzerDoc, + Run: func(pass *analysis.Pass) (any, error) { + issues := runGoCyclo(pass, settings) + + if len(issues) == 0 { + return nil, nil + } + + mu.Lock() + resIssues = append(resIssues, issues...) + mu.Unlock() + + return nil, nil + }, } + return goanalysis.NewLinter( gocycloName, "Computes and checks the cyclomatic complexity of functions", []*analysis.Analyzer{analyzer}, nil, - ).WithContextSetter(func(lintCtx *linter.Context) { - analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { - var stats gocyclo.Stats - for _, f := range pass.Files { - stats = gocyclo.AnalyzeASTFile(f, pass.Fset, stats) - } - if len(stats) == 0 { - return nil, nil - } + ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + return resIssues + }).WithLoadMode(goanalysis.LoadModeSyntax) +} - stats = stats.SortAndFilter(-1, lintCtx.Settings().Gocyclo.MinComplexity) +func runGoCyclo(pass *analysis.Pass, settings *config.GoCycloSettings) []goanalysis.Issue { + var stats gocyclo.Stats + for _, f := range pass.Files { + stats = gocyclo.AnalyzeASTFile(f, pass.Fset, stats) + } + if len(stats) == 0 { + return nil + } - res := make([]goanalysis.Issue, 0, len(stats)) - for _, s := range stats { - res = append(res, goanalysis.NewIssue(&result.Issue{ - Pos: s.Pos, - Text: fmt.Sprintf("cyclomatic complexity %d of func %s is high (> %d)", - s.Complexity, formatCode(s.FuncName, lintCtx.Cfg), lintCtx.Settings().Gocyclo.MinComplexity), - FromLinter: gocycloName, - }, pass)) - } + stats = stats.SortAndFilter(-1, settings.MinComplexity) - mu.Lock() - resIssues = append(resIssues, res...) 
- mu.Unlock() + issues := make([]goanalysis.Issue, 0, len(stats)) - return nil, nil - } - }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { - return resIssues - }).WithLoadMode(goanalysis.LoadModeSyntax) + for _, s := range stats { + text := fmt.Sprintf("cyclomatic complexity %d of func %s is high (> %d)", + s.Complexity, formatCode(s.FuncName, nil), settings.MinComplexity) + + issues = append(issues, goanalysis.NewIssue(&result.Issue{ + Pos: s.Pos, + Text: text, + FromLinter: gocycloName, + }, pass)) + } + + return issues } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/godot.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/godot.go index 6252458909..b0ee644349 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/godot.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/godot.go @@ -6,6 +6,7 @@ import ( "github.com/tetafro/godot" "golang.org/x/tools/go/analysis" + "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" "github.com/golangci/golangci-lint/pkg/lint/linter" "github.com/golangci/golangci-lint/pkg/result" @@ -13,72 +14,89 @@ import ( const godotName = "godot" -func NewGodot() *goanalysis.Linter { +func NewGodot(settings *config.GodotSettings) *goanalysis.Linter { var mu sync.Mutex var resIssues []goanalysis.Issue - analyzer := &analysis.Analyzer{ - Name: godotName, - Doc: goanalysis.TheOnlyanalyzerDoc, - } - return goanalysis.NewLinter( - godotName, - "Check if comments end in a period", - []*analysis.Analyzer{analyzer}, - nil, - ).WithContextSetter(func(lintCtx *linter.Context) { - cfg := lintCtx.Cfg.LintersSettings.Godot - settings := godot.Settings{ - Scope: godot.Scope(cfg.Scope), - Exclude: cfg.Exclude, - Period: true, - Capital: cfg.Capital, + var dotSettings godot.Settings + + if settings != nil { + dotSettings = godot.Settings{ + Scope: godot.Scope(settings.Scope), + Exclude: settings.Exclude, + Period: settings.Period, + Capital: settings.Capital, } // Convert deprecated setting - if cfg.CheckAll { // nolint: staticcheck - settings.Scope = godot.TopLevelScope + // todo(butuzov): remove on v2 release + if settings.CheckAll { //nolint:staticcheck // Keep for retro-compatibility. + dotSettings.Scope = godot.AllScope } + } - if settings.Scope == "" { - settings.Scope = godot.DeclScope - } + if dotSettings.Scope == "" { + dotSettings.Scope = godot.DeclScope + } - analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { - var issues []godot.Issue - for _, file := range pass.Files { - iss, err := godot.Run(file, pass.Fset, settings) - if err != nil { - return nil, err - } - issues = append(issues, iss...) + analyzer := &analysis.Analyzer{ + Name: godotName, + Doc: goanalysis.TheOnlyanalyzerDoc, + Run: func(pass *analysis.Pass) (any, error) { + issues, err := runGodot(pass, dotSettings) + if err != nil { + return nil, err } if len(issues) == 0 { return nil, nil } - res := make([]goanalysis.Issue, len(issues)) - for k, i := range issues { - issue := result.Issue{ - Pos: i.Pos, - Text: i.Message, - FromLinter: godotName, - Replacement: &result.Replacement{ - NewLines: []string{i.Replacement}, - }, - } - - res[k] = goanalysis.NewIssue(&issue, pass) - } - mu.Lock() - resIssues = append(resIssues, res...) + resIssues = append(resIssues, issues...) 
mu.Unlock() return nil, nil - } - }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + }, + } + + return goanalysis.NewLinter( + godotName, + "Check if comments end in a period", + []*analysis.Analyzer{analyzer}, + nil, + ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { return resIssues }).WithLoadMode(goanalysis.LoadModeSyntax) } + +func runGodot(pass *analysis.Pass, settings godot.Settings) ([]goanalysis.Issue, error) { + var lintIssues []godot.Issue + for _, file := range pass.Files { + iss, err := godot.Run(file, pass.Fset, settings) + if err != nil { + return nil, err + } + lintIssues = append(lintIssues, iss...) + } + + if len(lintIssues) == 0 { + return nil, nil + } + + issues := make([]goanalysis.Issue, len(lintIssues)) + for k, i := range lintIssues { + issue := result.Issue{ + Pos: i.Pos, + Text: i.Message, + FromLinter: godotName, + Replacement: &result.Replacement{ + NewLines: []string{i.Replacement}, + }, + } + + issues[k] = goanalysis.NewIssue(&issue, pass) + } + + return issues, nil +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/godox.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/godox.go index 2a4dd9fafb..955810417d 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/godox.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/godox.go @@ -8,6 +8,7 @@ import ( "github.com/matoous/godox" "golang.org/x/tools/go/analysis" + "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" "github.com/golangci/golangci-lint/pkg/lint/linter" "github.com/golangci/golangci-lint/pkg/result" @@ -15,49 +16,61 @@ import ( const godoxName = "godox" -func NewGodox() *goanalysis.Linter { +//nolint:dupl +func NewGodox(settings *config.GodoxSettings) *goanalysis.Linter { var mu sync.Mutex var resIssues []goanalysis.Issue analyzer := &analysis.Analyzer{ Name: godoxName, Doc: goanalysis.TheOnlyanalyzerDoc, - } - return goanalysis.NewLinter( - godoxName, - "Tool for detection of FIXME, TODO and other comment keywords", - []*analysis.Analyzer{analyzer}, - nil, - ).WithContextSetter(func(lintCtx *linter.Context) { - analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { - var issues []godox.Message - for _, file := range pass.Files { - issues = append(issues, godox.Run(file, pass.Fset, lintCtx.Settings().Godox.Keywords...)...) - } + Run: func(pass *analysis.Pass) (any, error) { + issues := runGodox(pass, settings) if len(issues) == 0 { return nil, nil } - res := make([]goanalysis.Issue, len(issues)) - for k, i := range issues { - res[k] = goanalysis.NewIssue(&result.Issue{ - Pos: token.Position{ - Filename: i.Pos.Filename, - Line: i.Pos.Line, - }, - Text: strings.TrimRight(i.Message, "\n"), - FromLinter: godoxName, - }, pass) - } - mu.Lock() - resIssues = append(resIssues, res...) + resIssues = append(resIssues, issues...) mu.Unlock() return nil, nil - } - }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + }, + } + + return goanalysis.NewLinter( + godoxName, + "Tool for detection of FIXME, TODO and other comment keywords", + []*analysis.Analyzer{analyzer}, + nil, + ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { return resIssues }).WithLoadMode(goanalysis.LoadModeSyntax) } + +func runGodox(pass *analysis.Pass, settings *config.GodoxSettings) []goanalysis.Issue { + var messages []godox.Message + for _, file := range pass.Files { + messages = append(messages, godox.Run(file, pass.Fset, settings.Keywords...)...) 
+ } + + if len(messages) == 0 { + return nil + } + + issues := make([]goanalysis.Issue, len(messages)) + + for k, i := range messages { + issues[k] = goanalysis.NewIssue(&result.Issue{ + Pos: token.Position{ + Filename: i.Pos.Filename, + Line: i.Pos.Line, + }, + Text: strings.TrimRight(i.Message, "\n"), + FromLinter: godoxName, + }, pass) + } + + return issues +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goerr113.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goerr113.go index 0c10005a08..10addc57c2 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goerr113.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goerr113.go @@ -10,10 +10,8 @@ import ( func NewGoerr113() *goanalysis.Linter { return goanalysis.NewLinter( "goerr113", - "Golang linter to check the errors handling expressions", - []*analysis.Analyzer{ - err113.NewAnalyzer(), - }, + "Go linter to check the errors handling expressions", + []*analysis.Analyzer{err113.NewAnalyzer()}, nil, ).WithLoadMode(goanalysis.LoadModeTypesInfo) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gofmt.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gofmt.go index aa340dcf3c..d2d0d3ccc5 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gofmt.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gofmt.go @@ -1,26 +1,29 @@ package golinters import ( + "fmt" "sync" gofmtAPI "github.com/golangci/gofmt/gofmt" - "github.com/pkg/errors" "golang.org/x/tools/go/analysis" + "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" "github.com/golangci/golangci-lint/pkg/lint/linter" ) const gofmtName = "gofmt" -func NewGofmt() *goanalysis.Linter { +func NewGofmt(settings *config.GoFmtSettings) *goanalysis.Linter { var mu sync.Mutex var resIssues []goanalysis.Issue analyzer := &analysis.Analyzer{ Name: gofmtName, Doc: goanalysis.TheOnlyanalyzerDoc, + Run: goanalysis.DummyRun, } + return goanalysis.NewLinter( gofmtName, "Gofmt checks whether code was gofmt-ed. 
By default "+ @@ -28,32 +31,10 @@ func NewGofmt() *goanalysis.Linter { []*analysis.Analyzer{analyzer}, nil, ).WithContextSetter(func(lintCtx *linter.Context) { - analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { - var fileNames []string - for _, f := range pass.Files { - pos := pass.Fset.PositionFor(f.Pos(), false) - fileNames = append(fileNames, pos.Filename) - } - - var issues []goanalysis.Issue - - for _, f := range fileNames { - diff, err := gofmtAPI.Run(f, lintCtx.Settings().Gofmt.Simplify) - if err != nil { // TODO: skip - return nil, err - } - if diff == nil { - continue - } - - is, err := extractIssuesFromPatch(string(diff), lintCtx.Log, lintCtx, gofmtName) - if err != nil { - return nil, errors.Wrapf(err, "can't extract issues from gofmt diff output %q", string(diff)) - } - - for i := range is { - issues = append(issues, goanalysis.NewIssue(&is[i], pass)) - } + analyzer.Run = func(pass *analysis.Pass) (any, error) { + issues, err := runGofmt(lintCtx, pass, settings) + if err != nil { + return nil, err } if len(issues) == 0 { @@ -70,3 +51,35 @@ func NewGofmt() *goanalysis.Linter { return resIssues }).WithLoadMode(goanalysis.LoadModeSyntax) } + +func runGofmt(lintCtx *linter.Context, pass *analysis.Pass, settings *config.GoFmtSettings) ([]goanalysis.Issue, error) { + fileNames := getFileNames(pass) + + var rewriteRules []gofmtAPI.RewriteRule + for _, rule := range settings.RewriteRules { + rewriteRules = append(rewriteRules, gofmtAPI.RewriteRule(rule)) + } + + var issues []goanalysis.Issue + + for _, f := range fileNames { + diff, err := gofmtAPI.RunRewrite(f, settings.Simplify, rewriteRules) + if err != nil { // TODO: skip + return nil, err + } + if diff == nil { + continue + } + + is, err := extractIssuesFromPatch(string(diff), lintCtx, gofmtName) + if err != nil { + return nil, fmt.Errorf("can't extract issues from gofmt diff output %q: %w", string(diff), err) + } + + for i := range is { + issues = append(issues, goanalysis.NewIssue(&is[i], pass)) + } + } + + return issues, nil +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gofmt_common.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gofmt_common.go index 39e8092e97..6b7184d65b 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gofmt_common.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gofmt_common.go @@ -6,9 +6,9 @@ import ( "go/token" "strings" - "github.com/pkg/errors" diffpkg "github.com/sourcegraph/go-diff/diff" + "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/lint/linter" "github.com/golangci/golangci-lint/pkg/logutils" "github.com/golangci/golangci-lint/pkg/result" @@ -49,12 +49,12 @@ type hunkChangesParser struct { func (p *hunkChangesParser) parseDiffLines(h *diffpkg.Hunk) { lines := bytes.Split(h.Body, []byte{'\n'}) - currentOriginalLineNumer := int(h.OrigStartLine) + currentOriginalLineNumber := int(h.OrigStartLine) var ret []diffLine for i, line := range lines { dl := diffLine{ - originalNumber: currentOriginalLineNumer, + originalNumber: currentOriginalLineNumber, } lineStr := string(line) @@ -62,7 +62,7 @@ func (p *hunkChangesParser) parseDiffLines(h *diffpkg.Hunk) { if strings.HasPrefix(lineStr, "-") { dl.typ = diffLineDeleted dl.data = strings.TrimPrefix(lineStr, "-") - currentOriginalLineNumer++ + currentOriginalLineNumber++ } else if strings.HasPrefix(lineStr, "+") { dl.typ = diffLineAdded dl.data = strings.TrimPrefix(lineStr, "+") @@ -74,12 +74,22 @@ func (p *hunkChangesParser) 
parseDiffLines(h *diffpkg.Hunk) { dl.typ = diffLineOriginal dl.data = strings.TrimPrefix(lineStr, " ") - currentOriginalLineNumer++ + currentOriginalLineNumber++ } ret = append(ret, dl) } + // if > 0, then the original file had a 'No newline at end of file' mark + if h.OrigNoNewlineAt > 0 { + dl := diffLine{ + originalNumber: currentOriginalLineNumber + 1, + typ: diffLineAdded, + data: "", + } + ret = append(ret, dl) + } + p.lines = ret } @@ -207,61 +217,55 @@ func (p *hunkChangesParser) parse(h *diffpkg.Hunk) []Change { return p.ret } -func getErrorTextForLinter(lintCtx *linter.Context, linterName string) string { +func getErrorTextForLinter(settings *config.LintersSettings, linterName string) string { text := "File is not formatted" switch linterName { + case gciName: + text = getErrorTextForGci(settings.Gci) case gofumptName: text = "File is not `gofumpt`-ed" - if lintCtx.Settings().Gofumpt.ExtraRules { + if settings.Gofumpt.ExtraRules { text += " with `-extra`" } case gofmtName: text = "File is not `gofmt`-ed" - if lintCtx.Settings().Gofmt.Simplify { + if settings.Gofmt.Simplify { text += " with `-s`" } + for _, rule := range settings.Gofmt.RewriteRules { + text += fmt.Sprintf(" `-r '%s -> %s'`", rule.Pattern, rule.Replacement) + } case goimportsName: text = "File is not `goimports`-ed" - if lintCtx.Settings().Goimports.LocalPrefixes != "" { - text += " with -local " + lintCtx.Settings().Goimports.LocalPrefixes - } - case gciName: - text = "File is not `gci`-ed" - localPrefixes := lintCtx.Settings().Gci.LocalPrefixes - goimportsFlag := lintCtx.Settings().Goimports.LocalPrefixes - if localPrefixes == "" && goimportsFlag != "" { - localPrefixes = goimportsFlag - } - - if localPrefixes != "" { - text += " with -local " + localPrefixes + if settings.Goimports.LocalPrefixes != "" { + text += " with -local " + settings.Goimports.LocalPrefixes } } return text } -func extractIssuesFromPatch(patch string, log logutils.Log, lintCtx *linter.Context, linterName string) ([]result.Issue, error) { +func extractIssuesFromPatch(patch string, lintCtx *linter.Context, linterName string) ([]result.Issue, error) { diffs, err := diffpkg.ParseMultiFileDiff([]byte(patch)) if err != nil { - return nil, errors.Wrap(err, "can't parse patch") + return nil, fmt.Errorf("can't parse patch: %w", err) } if len(diffs) == 0 { - return nil, fmt.Errorf("got no diffs from patch parser: %v", diffs) + return nil, fmt.Errorf("got no diffs from patch parser: %v", patch) } - issues := []result.Issue{} + var issues []result.Issue for _, d := range diffs { if len(d.Hunks) == 0 { - log.Warnf("Got no hunks in diff %+v", d) + lintCtx.Log.Warnf("Got no hunks in diff %+v", d) continue } for _, hunk := range d.Hunks { - p := hunkChangesParser{ - log: log, - } + p := hunkChangesParser{log: lintCtx.Log} + changes := p.parse(hunk) + for _, change := range changes { change := change // fix scope i := result.Issue{ @@ -270,7 +274,7 @@ func extractIssuesFromPatch(patch string, log logutils.Log, lintCtx *linter.Cont Filename: d.NewName, Line: change.LineRange.From, }, - Text: getErrorTextForLinter(lintCtx, linterName), + Text: getErrorTextForLinter(lintCtx.Settings(), linterName), Replacement: &change.Replacement, } if change.LineRange.From != change.LineRange.To { diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gofumpt.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gofumpt.go index e91e54eeaf..c2aaf121de 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gofumpt.go +++ 
b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gofumpt.go @@ -3,77 +3,57 @@ package golinters import ( "bytes" "fmt" - "io/ioutil" + "io" + "os" "sync" - "github.com/pkg/errors" "github.com/shazow/go-diff/difflib" "golang.org/x/tools/go/analysis" "mvdan.cc/gofumpt/format" + "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" "github.com/golangci/golangci-lint/pkg/lint/linter" ) const gofumptName = "gofumpt" -func NewGofumpt() *goanalysis.Linter { +type differ interface { + Diff(out io.Writer, a io.ReadSeeker, b io.ReadSeeker) error +} + +func NewGofumpt(settings *config.GofumptSettings) *goanalysis.Linter { var mu sync.Mutex var resIssues []goanalysis.Issue - differ := difflib.New() + + diff := difflib.New() + + var options format.Options + + if settings != nil { + options = format.Options{ + LangVersion: getLangVersion(settings), + ModulePath: settings.ModulePath, + ExtraRules: settings.ExtraRules, + } + } analyzer := &analysis.Analyzer{ Name: gofumptName, Doc: goanalysis.TheOnlyanalyzerDoc, + Run: goanalysis.DummyRun, } + return goanalysis.NewLinter( gofumptName, "Gofumpt checks whether code was gofumpt-ed.", []*analysis.Analyzer{analyzer}, nil, ).WithContextSetter(func(lintCtx *linter.Context) { - analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { - var fileNames []string - for _, f := range pass.Files { - pos := pass.Fset.PositionFor(f.Pos(), false) - fileNames = append(fileNames, pos.Filename) - } - - var issues []goanalysis.Issue - - for _, f := range fileNames { - input, err := ioutil.ReadFile(f) - if err != nil { - return nil, fmt.Errorf("unable to open file %s: %w", f, err) - } - output, err := format.Source(input, format.Options{ - ExtraRules: lintCtx.Settings().Gofumpt.ExtraRules, - }) - if err != nil { - return nil, fmt.Errorf("error while running gofumpt: %w", err) - } - if !bytes.Equal(input, output) { - out := bytes.Buffer{} - _, err = out.WriteString(fmt.Sprintf("--- %[1]s\n+++ %[1]s\n", f)) - if err != nil { - return nil, fmt.Errorf("error while running gofumpt: %w", err) - } - - err = differ.Diff(&out, bytes.NewReader(input), bytes.NewReader(output)) - if err != nil { - return nil, fmt.Errorf("error while running gofumpt: %w", err) - } - - diff := out.String() - is, err := extractIssuesFromPatch(diff, lintCtx.Log, lintCtx, gofumptName) - if err != nil { - return nil, errors.Wrapf(err, "can't extract issues from gofumpt diff output %q", diff) - } - - for i := range is { - issues = append(issues, goanalysis.NewIssue(&is[i], pass)) - } - } + analyzer.Run = func(pass *analysis.Pass) (any, error) { + issues, err := runGofumpt(lintCtx, pass, diff, options) + if err != nil { + return nil, err } if len(issues) == 0 { @@ -90,3 +70,50 @@ func NewGofumpt() *goanalysis.Linter { return resIssues }).WithLoadMode(goanalysis.LoadModeSyntax) } + +func runGofumpt(lintCtx *linter.Context, pass *analysis.Pass, diff differ, options format.Options) ([]goanalysis.Issue, error) { + fileNames := getFileNames(pass) + + var issues []goanalysis.Issue + + for _, f := range fileNames { + input, err := os.ReadFile(f) + if err != nil { + return nil, fmt.Errorf("unable to open file %s: %w", f, err) + } + + output, err := format.Source(input, options) + if err != nil { + return nil, fmt.Errorf("error while running gofumpt: %w", err) + } + + if !bytes.Equal(input, output) { + out := bytes.NewBufferString(fmt.Sprintf("--- %[1]s\n+++ %[1]s\n", f)) + + err := diff.Diff(out, bytes.NewReader(input), bytes.NewReader(output)) + if err 
!= nil { + return nil, fmt.Errorf("error while running gofumpt: %w", err) + } + + diff := out.String() + is, err := extractIssuesFromPatch(diff, lintCtx, gofumptName) + if err != nil { + return nil, fmt.Errorf("can't extract issues from gofumpt diff output %q: %w", diff, err) + } + + for i := range is { + issues = append(issues, goanalysis.NewIssue(&is[i], pass)) + } + } + } + + return issues, nil +} + +func getLangVersion(settings *config.GofumptSettings) string { + if settings == nil || settings.LangVersion == "" { + // TODO: defaults to "1.15", in the future (v2) must be set by using build.Default.ReleaseTags like staticcheck. + return "1.15" + } + return settings.LangVersion +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goheader.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goheader.go index 2ff587b0d1..d3cfefa90b 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goheader.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goheader.go @@ -4,9 +4,10 @@ import ( "go/token" "sync" - goheader "github.com/denis-tingajkin/go-header" + goheader "github.com/denis-tingaikin/go-header" "golang.org/x/tools/go/analysis" + "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" "github.com/golangci/golangci-lint/pkg/lint/linter" "github.com/golangci/golangci-lint/pkg/result" @@ -14,72 +15,90 @@ import ( const goHeaderName = "goheader" -func NewGoHeader() *goanalysis.Linter { +func NewGoHeader(settings *config.GoHeaderSettings) *goanalysis.Linter { var mu sync.Mutex - var issues []goanalysis.Issue + var resIssues []goanalysis.Issue + + conf := &goheader.Configuration{} + if settings != nil { + conf = &goheader.Configuration{ + Values: settings.Values, + Template: settings.Template, + TemplatePath: settings.TemplatePath, + } + } analyzer := &analysis.Analyzer{ Name: goHeaderName, Doc: goanalysis.TheOnlyanalyzerDoc, - } - return goanalysis.NewLinter( - goHeaderName, - "Checks is file header matches to pattern", - []*analysis.Analyzer{analyzer}, - nil, - ).WithContextSetter(func(lintCtx *linter.Context) { - cfg := lintCtx.Cfg.LintersSettings.Goheader - c := &goheader.Configuration{ - Values: cfg.Values, - Template: cfg.Template, - TemplatePath: cfg.TemplatePath, - } - analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { - if c.TemplatePath == "" && c.Template == "" { - // User did not pass template, so then do not run go-header linter - return nil, nil - } - template, err := c.GetTemplate() - if err != nil { - return nil, err - } - values, err := c.GetValues() + Run: func(pass *analysis.Pass) (any, error) { + issues, err := runGoHeader(pass, conf) if err != nil { return nil, err } - a := goheader.New(goheader.WithTemplate(template), goheader.WithValues(values)) - var res []goanalysis.Issue - for _, file := range pass.Files { - path := pass.Fset.Position(file.Pos()).Filename - i := a.Analyze(&goheader.Target{ - File: file, - Path: path, - }) - if i == nil { - continue - } - issue := result.Issue{ - Pos: token.Position{ - Line: i.Location().Line + 1, - Column: i.Location().Position, - Filename: path, - }, - Text: i.Message(), - FromLinter: goHeaderName, - } - res = append(res, goanalysis.NewIssue(&issue, pass)) - } - if len(res) == 0 { + + if len(issues) == 0 { return nil, nil } mu.Lock() - issues = append(issues, res...) + resIssues = append(resIssues, issues...) 
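Stripped of the diff/patch plumbing, the per-file check in runGofumpt is: read the source, run mvdan.cc/gofumpt's format.Source with the configured options, and treat any difference from the input as "file is not gofumpt-ed". A standalone sketch under those assumptions; the path is illustrative and the LangVersion mirrors the fallback in getLangVersion above:

package main

import (
	"bytes"
	"fmt"
	"log"
	"os"

	"mvdan.cc/gofumpt/format"
)

func main() {
	const path = "example.go" // placeholder file name

	input, err := os.ReadFile(path)
	if err != nil {
		log.Fatal(err)
	}

	output, err := format.Source(input, format.Options{
		LangVersion: "1.15", // linter fallback default, see getLangVersion above
		ExtraRules:  false,
	})
	if err != nil {
		log.Fatal(err)
	}

	if bytes.Equal(input, output) {
		fmt.Println("file is gofumpt-ed")
		return
	}
	fmt.Println("file is not gofumpt-ed")
}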
mu.Unlock() return nil, nil - } - }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { - return issues + }, + } + + return goanalysis.NewLinter( + goHeaderName, + "Checks is file header matches to pattern", + []*analysis.Analyzer{analyzer}, + nil, + ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + return resIssues }).WithLoadMode(goanalysis.LoadModeSyntax) } + +func runGoHeader(pass *analysis.Pass, conf *goheader.Configuration) ([]goanalysis.Issue, error) { + if conf.TemplatePath == "" && conf.Template == "" { + // User did not pass template, so then do not run go-header linter + return nil, nil + } + + template, err := conf.GetTemplate() + if err != nil { + return nil, err + } + + values, err := conf.GetValues() + if err != nil { + return nil, err + } + + a := goheader.New(goheader.WithTemplate(template), goheader.WithValues(values)) + + var issues []goanalysis.Issue + for _, file := range pass.Files { + path := pass.Fset.Position(file.Pos()).Filename + + i := a.Analyze(&goheader.Target{File: file, Path: path}) + + if i == nil { + continue + } + + issue := result.Issue{ + Pos: token.Position{ + Line: i.Location().Line + 1, + Column: i.Location().Position, + Filename: path, + }, + Text: i.Message(), + FromLinter: goHeaderName, + } + + issues = append(issues, goanalysis.NewIssue(&issue, pass)) + } + + return issues, nil +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goimports.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goimports.go index 9ea4558f40..aac27f38e5 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goimports.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goimports.go @@ -1,60 +1,43 @@ package golinters import ( + "fmt" "sync" goimportsAPI "github.com/golangci/gofmt/goimports" - "github.com/pkg/errors" "golang.org/x/tools/go/analysis" "golang.org/x/tools/imports" + "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" "github.com/golangci/golangci-lint/pkg/lint/linter" ) const goimportsName = "goimports" -func NewGoimports() *goanalysis.Linter { +func NewGoimports(settings *config.GoImportsSettings) *goanalysis.Linter { var mu sync.Mutex var resIssues []goanalysis.Issue analyzer := &analysis.Analyzer{ Name: goimportsName, Doc: goanalysis.TheOnlyanalyzerDoc, + Run: goanalysis.DummyRun, } + return goanalysis.NewLinter( goimportsName, - "Goimports does everything that gofmt does. Additionally it checks unused imports", + "Check import statements are formatted according to the 'goimport' command. 
"+ + "Reformat imports in autofix mode.", []*analysis.Analyzer{analyzer}, nil, ).WithContextSetter(func(lintCtx *linter.Context) { - imports.LocalPrefix = lintCtx.Settings().Goimports.LocalPrefixes - analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { - var fileNames []string - for _, f := range pass.Files { - pos := pass.Fset.PositionFor(f.Pos(), false) - fileNames = append(fileNames, pos.Filename) - } + imports.LocalPrefix = settings.LocalPrefixes - var issues []goanalysis.Issue - - for _, f := range fileNames { - diff, err := goimportsAPI.Run(f) - if err != nil { // TODO: skip - return nil, err - } - if diff == nil { - continue - } - - is, err := extractIssuesFromPatch(string(diff), lintCtx.Log, lintCtx, goimportsName) - if err != nil { - return nil, errors.Wrapf(err, "can't extract issues from gofmt diff output %q", string(diff)) - } - - for i := range is { - issues = append(issues, goanalysis.NewIssue(&is[i], pass)) - } + analyzer.Run = func(pass *analysis.Pass) (any, error) { + issues, err := runGoImports(lintCtx, pass) + if err != nil { + return nil, err } if len(issues) == 0 { @@ -71,3 +54,30 @@ func NewGoimports() *goanalysis.Linter { return resIssues }).WithLoadMode(goanalysis.LoadModeSyntax) } + +func runGoImports(lintCtx *linter.Context, pass *analysis.Pass) ([]goanalysis.Issue, error) { + fileNames := getFileNames(pass) + + var issues []goanalysis.Issue + + for _, f := range fileNames { + diff, err := goimportsAPI.Run(f) + if err != nil { // TODO: skip + return nil, err + } + if diff == nil { + continue + } + + is, err := extractIssuesFromPatch(string(diff), lintCtx, goimportsName) + if err != nil { + return nil, fmt.Errorf("can't extract issues from gofmt diff output %q: %w", string(diff), err) + } + + for i := range is { + issues = append(issues, goanalysis.NewIssue(&is[i], pass)) + } + } + + return issues, nil +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/golint.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/golint.go index 3b1b1b66f1..22ca590481 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/golint.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/golint.go @@ -2,35 +2,71 @@ package golinters import ( "fmt" - "go/ast" - "go/token" - "go/types" "sync" lintAPI "github.com/golangci/lint-1" "golang.org/x/tools/go/analysis" + "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" "github.com/golangci/golangci-lint/pkg/lint/linter" "github.com/golangci/golangci-lint/pkg/result" ) -func golintProcessPkg(minConfidence float64, files []*ast.File, fset *token.FileSet, - typesPkg *types.Package, typesInfo *types.Info) ([]result.Issue, error) { +const golintName = "golint" + +//nolint:dupl +func NewGolint(settings *config.GoLintSettings) *goanalysis.Linter { + var mu sync.Mutex + var resIssues []goanalysis.Issue + + analyzer := &analysis.Analyzer{ + Name: golintName, + Doc: goanalysis.TheOnlyanalyzerDoc, + Run: func(pass *analysis.Pass) (any, error) { + issues, err := runGoLint(pass, settings) + if err != nil { + return nil, err + } + + if len(issues) == 0 { + return nil, nil + } + + mu.Lock() + resIssues = append(resIssues, issues...) + mu.Unlock() + + return nil, nil + }, + } + + return goanalysis.NewLinter( + golintName, + "Golint differs from gofmt. 
Gofmt reformats Go source code, whereas golint prints out style mistakes", + []*analysis.Analyzer{analyzer}, + nil, + ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + return resIssues + }).WithLoadMode(goanalysis.LoadModeTypesInfo) +} + +func runGoLint(pass *analysis.Pass, settings *config.GoLintSettings) ([]goanalysis.Issue, error) { l := new(lintAPI.Linter) - ps, err := l.LintPkg(files, fset, typesPkg, typesInfo) + + ps, err := l.LintPkg(pass.Files, pass.Fset, pass.Pkg, pass.TypesInfo) if err != nil { - return nil, fmt.Errorf("can't lint %d files: %s", len(files), err) + return nil, fmt.Errorf("can't lint %d files: %w", len(pass.Files), err) } if len(ps) == 0 { return nil, nil } - issues := make([]result.Issue, 0, len(ps)) // This is worst case + lintIssues := make([]*result.Issue, 0, len(ps)) // This is worst case for idx := range ps { - if ps[idx].Confidence >= minConfidence { - issues = append(issues, result.Issue{ + if ps[idx].Confidence >= settings.MinConfidence { + lintIssues = append(lintIssues, &result.Issue{ Pos: ps[idx].Position, Text: ps[idx].Text, FromLinter: golintName, @@ -39,40 +75,10 @@ func golintProcessPkg(minConfidence float64, files []*ast.File, fset *token.File } } - return issues, nil -} - -const golintName = "golint" - -func NewGolint() *goanalysis.Linter { - var mu sync.Mutex - var resIssues []goanalysis.Issue - - analyzer := &analysis.Analyzer{ - Name: golintName, - Doc: goanalysis.TheOnlyanalyzerDoc, + issues := make([]goanalysis.Issue, 0, len(lintIssues)) + for _, issue := range lintIssues { + issues = append(issues, goanalysis.NewIssue(issue, pass)) } - return goanalysis.NewLinter( - golintName, - "Golint differs from gofmt. Gofmt reformats Go source code, whereas golint prints out style mistakes", - []*analysis.Analyzer{analyzer}, - nil, - ).WithContextSetter(func(lintCtx *linter.Context) { - analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { - res, err := golintProcessPkg(lintCtx.Settings().Golint.MinConfidence, pass.Files, pass.Fset, pass.Pkg, pass.TypesInfo) - if err != nil || len(res) == 0 { - return nil, err - } - - mu.Lock() - for i := range res { - resIssues = append(resIssues, goanalysis.NewIssue(&res[i], pass)) - } - mu.Unlock() - return nil, nil - } - }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { - return resIssues - }).WithLoadMode(goanalysis.LoadModeTypesInfo) + return issues, nil } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gomnd.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gomnd.go index f7e71b7dae..2e6d77a801 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gomnd.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gomnd.go @@ -8,20 +8,38 @@ import ( "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" ) -func NewGoMND(cfg *config.Config) *goanalysis.Linter { - analyzers := []*analysis.Analyzer{ - mnd.Analyzer, - } +func NewGoMND(settings *config.GoMndSettings) *goanalysis.Linter { + var linterCfg map[string]map[string]any + + if settings != nil { + // TODO(ldez) For compatibility only, must be drop in v2. 
+ if len(settings.Settings) > 0 { + linterCfg = settings.Settings + } else { + cfg := make(map[string]any) + if len(settings.Checks) > 0 { + cfg["checks"] = settings.Checks + } + if len(settings.IgnoredNumbers) > 0 { + cfg["ignored-numbers"] = settings.IgnoredNumbers + } + if len(settings.IgnoredFiles) > 0 { + cfg["ignored-files"] = settings.IgnoredFiles + } + if len(settings.IgnoredFunctions) > 0 { + cfg["ignored-functions"] = settings.IgnoredFunctions + } - var linterCfg map[string]map[string]interface{} - if cfg != nil { - linterCfg = cfg.LintersSettings.Gomnd.Settings + linterCfg = map[string]map[string]any{ + "mnd": cfg, + } + } } return goanalysis.NewLinter( "gomnd", "An analyzer to detect magic numbers.", - analyzers, + []*analysis.Analyzer{mnd.Analyzer}, linterCfg, ).WithLoadMode(goanalysis.LoadModeSyntax) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gomoddirectives.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gomoddirectives.go index 40d3bf786e..56afcd465f 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gomoddirectives.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gomoddirectives.go @@ -30,6 +30,7 @@ func NewGoModDirectives(settings *config.GoModDirectivesSettings) *goanalysis.Li analyzer := &analysis.Analyzer{ Name: goanalysis.TheOnlyAnalyzerName, Doc: goanalysis.TheOnlyanalyzerDoc, + Run: goanalysis.DummyRun, } return goanalysis.NewLinter( @@ -38,7 +39,7 @@ func NewGoModDirectives(settings *config.GoModDirectivesSettings) *goanalysis.Li []*analysis.Analyzer{analyzer}, nil, ).WithContextSetter(func(lintCtx *linter.Context) { - analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { + analyzer.Run = func(pass *analysis.Pass) (any, error) { once.Do(func() { results, err := gomoddirectives.Analyze(opts) if err != nil { diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gomodguard.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gomodguard.go index 30ca6cc3de..157bf56c35 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gomodguard.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gomodguard.go @@ -6,6 +6,7 @@ import ( "github.com/ryancurrah/gomodguard" "golang.org/x/tools/go/analysis" + "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" "github.com/golangci/golangci-lint/pkg/lint/linter" "github.com/golangci/golangci-lint/pkg/result" @@ -19,31 +20,18 @@ const ( ) // NewGomodguard returns a new Gomodguard linter. 
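The gomnd hunk above illustrates the config convention used by several linters in this patch: discrete settings fields are packed into a map[string]map[string]any keyed by the analyzer name, which goanalysis.NewLinter then applies as analyzer flags. A sketch of that conversion pulled out into a helper (the helper name is hypothetical; the field names come from the hunk):

package golinters

import "github.com/golangci/golangci-lint/pkg/config"

// gomndConfigFromSettings mirrors the fallback logic in NewGoMND: the
// deprecated raw `settings:` block wins, otherwise the individual options
// are collected under the "mnd" analyzer name.
func gomndConfigFromSettings(settings *config.GoMndSettings) map[string]map[string]any {
	if settings == nil {
		return nil
	}

	if len(settings.Settings) > 0 {
		return settings.Settings
	}

	cfg := make(map[string]any)
	if len(settings.Checks) > 0 {
		cfg["checks"] = settings.Checks
	}
	if len(settings.IgnoredNumbers) > 0 {
		cfg["ignored-numbers"] = settings.IgnoredNumbers
	}
	if len(settings.IgnoredFiles) > 0 {
		cfg["ignored-files"] = settings.IgnoredFiles
	}
	if len(settings.IgnoredFunctions) > 0 {
		cfg["ignored-functions"] = settings.IgnoredFunctions
	}

	return map[string]map[string]any{"mnd": cfg}
}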
-func NewGomodguard() *goanalysis.Linter { - var ( - issues []goanalysis.Issue - mu = sync.Mutex{} - analyzer = &analysis.Analyzer{ - Name: goanalysis.TheOnlyAnalyzerName, - Doc: goanalysis.TheOnlyanalyzerDoc, - } - ) - - return goanalysis.NewLinter( - gomodguardName, - gomodguardDesc, - []*analysis.Analyzer{analyzer}, - nil, - ).WithContextSetter(func(lintCtx *linter.Context) { - linterCfg := lintCtx.Cfg.LintersSettings.Gomodguard - - processorCfg := &gomodguard.Configuration{} - processorCfg.Allowed.Modules = linterCfg.Allowed.Modules - processorCfg.Allowed.Domains = linterCfg.Allowed.Domains - processorCfg.Blocked.LocalReplaceDirectives = linterCfg.Blocked.LocalReplaceDirectives - - for n := range linterCfg.Blocked.Modules { - for k, v := range linterCfg.Blocked.Modules[n] { +func NewGomodguard(settings *config.GoModGuardSettings) *goanalysis.Linter { + var issues []goanalysis.Issue + var mu sync.Mutex + + processorCfg := &gomodguard.Configuration{} + if settings != nil { + processorCfg.Allowed.Modules = settings.Allowed.Modules + processorCfg.Allowed.Domains = settings.Allowed.Domains + processorCfg.Blocked.LocalReplaceDirectives = settings.Blocked.LocalReplaceDirectives + + for n := range settings.Blocked.Modules { + for k, v := range settings.Blocked.Modules[n] { m := map[string]gomodguard.BlockedModule{k: { Recommendations: v.Recommendations, Reason: v.Reason, @@ -53,8 +41,8 @@ func NewGomodguard() *goanalysis.Linter { } } - for n := range linterCfg.Blocked.Versions { - for k, v := range linterCfg.Blocked.Versions[n] { + for n := range settings.Blocked.Versions { + for k, v := range settings.Blocked.Versions[n] { m := map[string]gomodguard.BlockedVersion{k: { Version: v.Version, Reason: v.Reason, @@ -63,7 +51,20 @@ func NewGomodguard() *goanalysis.Linter { break } } + } + analyzer := &analysis.Analyzer{ + Name: goanalysis.TheOnlyAnalyzerName, + Doc: goanalysis.TheOnlyanalyzerDoc, + Run: goanalysis.DummyRun, + } + + return goanalysis.NewLinter( + gomodguardName, + gomodguardDesc, + []*analysis.Analyzer{analyzer}, + nil, + ).WithContextSetter(func(lintCtx *linter.Context) { processor, err := gomodguard.NewProcessor(processorCfg) if err != nil { lintCtx.Log.Warnf("running gomodguard failed: %s: if you are not using go modules "+ @@ -71,14 +72,8 @@ func NewGomodguard() *goanalysis.Linter { return } - analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { - var files []string - - for _, file := range pass.Files { - files = append(files, pass.Fset.PositionFor(file.Pos(), false).Filename) - } - - gomodguardIssues := processor.ProcessFiles(files) + analyzer.Run = func(pass *analysis.Pass) (any, error) { + gomodguardIssues := processor.ProcessFiles(getFileNames(pass)) mu.Lock() defer mu.Unlock() diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goprintffuncname.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goprintffuncname.go index c5516dc7f9..e513718ba8 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goprintffuncname.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goprintffuncname.go @@ -8,10 +8,12 @@ import ( ) func NewGoPrintfFuncName() *goanalysis.Linter { + a := analyzer.Analyzer + return goanalysis.NewLinter( - "goprintffuncname", - "Checks that printf-like functions are named with `f` at the end", - []*analysis.Analyzer{analyzer.Analyzer}, + a.Name, + a.Doc, + []*analysis.Analyzer{a}, nil, ).WithLoadMode(goanalysis.LoadModeSyntax) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gosec.go 
b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gosec.go index 328ba5ccc7..235f0e9141 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gosec.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gosec.go @@ -3,13 +3,14 @@ package golinters import ( "fmt" "go/token" - "io/ioutil" + "io" "log" "strconv" "strings" "sync" "github.com/securego/gosec/v2" + "github.com/securego/gosec/v2/issue" "github.com/securego/gosec/v2/rules" "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/packages" @@ -26,83 +27,38 @@ func NewGosec(settings *config.GoSecSettings) *goanalysis.Linter { var mu sync.Mutex var resIssues []goanalysis.Issue - gasConfig := gosec.NewConfig() - var filters []rules.RuleFilter + conf := gosec.NewConfig() if settings != nil { filters = gosecRuleFilters(settings.Includes, settings.Excludes) - - for k, v := range settings.Config { - // Uses ToUpper because the parsing of the map's key change the key to lowercase. - // The value is not impacted by that: the case is respected. - gasConfig.Set(strings.ToUpper(k), v) - } + conf = toGosecConfig(settings) } - ruleDefinitions := rules.Generate(filters...) + logger := log.New(io.Discard, "", 0) - logger := log.New(ioutil.Discard, "", 0) + ruleDefinitions := rules.Generate(false, filters...) analyzer := &analysis.Analyzer{ Name: gosecName, Doc: goanalysis.TheOnlyanalyzerDoc, + Run: goanalysis.DummyRun, } + return goanalysis.NewLinter( gosecName, "Inspects source code for security problems", []*analysis.Analyzer{analyzer}, nil, ).WithContextSetter(func(lintCtx *linter.Context) { - analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { - gosecAnalyzer := gosec.NewAnalyzer(gasConfig, true, logger) - gosecAnalyzer.LoadRules(ruleDefinitions.Builders()) - - pkg := &packages.Package{ - Fset: pass.Fset, - Syntax: pass.Files, - Types: pass.Pkg, - TypesInfo: pass.TypesInfo, - } - gosecAnalyzer.Check(pkg) - issues, _, _ := gosecAnalyzer.Report() - if len(issues) == 0 { - return nil, nil - } + analyzer.Run = func(pass *analysis.Pass) (any, error) { + // The `gosecAnalyzer` is here because of concurrency issue. + gosecAnalyzer := gosec.NewAnalyzer(conf, true, settings.ExcludeGenerated, false, settings.Concurrency, logger) + gosecAnalyzer.LoadRules(ruleDefinitions.RulesInfo()) - res := make([]goanalysis.Issue, 0, len(issues)) - for _, i := range issues { - text := fmt.Sprintf("%s: %s", i.RuleID, i.What) // TODO: use severity and confidence - var r *result.Range - line, err := strconv.Atoi(i.Line) - if err != nil { - r = &result.Range{} - if n, rerr := fmt.Sscanf(i.Line, "%d-%d", &r.From, &r.To); rerr != nil || n != 2 { - lintCtx.Log.Warnf("Can't convert gosec line number %q of %v to int: %s", i.Line, i, err) - continue - } - line = r.From - } - - column, err := strconv.Atoi(i.Col) - if err != nil { - lintCtx.Log.Warnf("Can't convert gosec column number %q of %v to int: %s", i.Col, i, err) - continue - } - - res = append(res, goanalysis.NewIssue(&result.Issue{ - Pos: token.Position{ - Filename: i.File, - Line: line, - Column: column, - }, - Text: text, - LineRange: r, - FromLinter: gosecName, - }, pass)) - } + issues := runGoSec(lintCtx, pass, settings, gosecAnalyzer) mu.Lock() - resIssues = append(resIssues, res...) + resIssues = append(resIssues, issues...) 
mu.Unlock() return nil, nil @@ -112,6 +68,99 @@ func NewGosec(settings *config.GoSecSettings) *goanalysis.Linter { }).WithLoadMode(goanalysis.LoadModeTypesInfo) } +func runGoSec(lintCtx *linter.Context, pass *analysis.Pass, settings *config.GoSecSettings, analyzer *gosec.Analyzer) []goanalysis.Issue { + pkg := &packages.Package{ + Fset: pass.Fset, + Syntax: pass.Files, + Types: pass.Pkg, + TypesInfo: pass.TypesInfo, + } + + analyzer.CheckRules(pkg) + + secIssues, _, _ := analyzer.Report() + if len(secIssues) == 0 { + return nil + } + + severity, err := convertToScore(settings.Severity) + if err != nil { + lintCtx.Log.Warnf("The provided severity %v", err) + } + + confidence, err := convertToScore(settings.Confidence) + if err != nil { + lintCtx.Log.Warnf("The provided confidence %v", err) + } + + secIssues = filterIssues(secIssues, severity, confidence) + + issues := make([]goanalysis.Issue, 0, len(secIssues)) + for _, i := range secIssues { + text := fmt.Sprintf("%s: %s", i.RuleID, i.What) // TODO: use severity and confidence + + var r *result.Range + + line, err := strconv.Atoi(i.Line) + if err != nil { + r = &result.Range{} + if n, rerr := fmt.Sscanf(i.Line, "%d-%d", &r.From, &r.To); rerr != nil || n != 2 { + lintCtx.Log.Warnf("Can't convert gosec line number %q of %v to int: %s", i.Line, i, err) + continue + } + line = r.From + } + + column, err := strconv.Atoi(i.Col) + if err != nil { + lintCtx.Log.Warnf("Can't convert gosec column number %q of %v to int: %s", i.Col, i, err) + continue + } + + issues = append(issues, goanalysis.NewIssue(&result.Issue{ + Pos: token.Position{ + Filename: i.File, + Line: line, + Column: column, + }, + Text: text, + LineRange: r, + FromLinter: gosecName, + }, pass)) + } + + return issues +} + +func toGosecConfig(settings *config.GoSecSettings) gosec.Config { + conf := gosec.NewConfig() + + for k, v := range settings.Config { + if k == gosec.Globals { + convertGosecGlobals(v, conf) + continue + } + + // Uses ToUpper because the parsing of the map's key change the key to lowercase. + // The value is not impacted by that: the case is respected. + conf.Set(strings.ToUpper(k), v) + } + + return conf +} + +// based on https://github.com/securego/gosec/blob/47bfd4eb6fc7395940933388550b547538b4c946/config.go#L52-L62 +func convertGosecGlobals(globalOptionFromConfig any, conf gosec.Config) { + globalOptionMap, ok := globalOptionFromConfig.(map[string]any) + if !ok { + return + } + + for k, v := range globalOptionMap { + conf.SetGlobal(gosec.GlobalOption(k), fmt.Sprintf("%v", v)) + } +} + // based on https://github.com/securego/gosec/blob/569328eade2ccbad4ce2d0f21ee158ab5356a5cf/cmd/gosec/main.go#L170-L188 func gosecRuleFilters(includes, excludes []string) []rules.RuleFilter { var filters []rules.RuleFilter @@ -126,3 +175,31 @@ func gosecRuleFilters(includes, excludes []string) []rules.RuleFilter { return filters } + +// code borrowed from https://github.com/securego/gosec/blob/69213955dacfd560562e780f723486ef1ca6d486/cmd/gosec/main.go#L250-L262 +func convertToScore(str string) (issue.Score, error) { + str = strings.ToLower(str) + switch str { + case "", "low": + return issue.Low, nil + case "medium": + return issue.Medium, nil + case "high": + return issue.High, nil + default: + return issue.Low, fmt.Errorf("'%s' is invalid, use low instead. 
Valid options: low, medium, high", str) + } +} + +// code borrowed from https://github.com/securego/gosec/blob/69213955dacfd560562e780f723486ef1ca6d486/cmd/gosec/main.go#L264-L276 +func filterIssues(issues []*issue.Issue, severity, confidence issue.Score) []*issue.Issue { + res := make([]*issue.Issue, 0) + + for _, i := range issues { + if i.Severity >= severity && i.Confidence >= confidence { + res = append(res, i) + } + } + + return res +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gosimple.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gosimple.go index fa14f1a966..de60ded73e 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gosimple.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gosimple.go @@ -14,7 +14,7 @@ func NewGosimple(settings *config.StaticCheckSettings) *goanalysis.Linter { return goanalysis.NewLinter( "gosimple", - "Linter for Go source code that specializes in simplifying a code", + "Linter for Go source code that specializes in simplifying code", analyzers, nil, ).WithLoadMode(goanalysis.LoadModeTypesInfo) diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gosmopolitan.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gosmopolitan.go new file mode 100644 index 0000000000..2e01fcc70d --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gosmopolitan.go @@ -0,0 +1,32 @@ +package golinters + +import ( + "strings" + + "github.com/xen0n/gosmopolitan" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewGosmopolitan(s *config.GosmopolitanSettings) *goanalysis.Linter { + a := gosmopolitan.NewAnalyzer() + + cfgMap := map[string]map[string]any{} + if s != nil { + cfgMap[a.Name] = map[string]any{ + "allowtimelocal": s.AllowTimeLocal, + "escapehatches": strings.Join(s.EscapeHatches, ","), + "lookattests": !s.IgnoreTests, + "watchforscripts": strings.Join(s.WatchForScripts, ","), + } + } + + return goanalysis.NewLinter( + a.Name, + a.Doc, + []*analysis.Analyzer{a}, + cfgMap, + ).WithLoadMode(goanalysis.LoadModeTypesInfo) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/govet.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/govet.go index b3860e0170..4b1ea6255f 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/govet.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/govet.go @@ -2,6 +2,7 @@ package golinters import ( "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/appends" "golang.org/x/tools/go/analysis/passes/asmdecl" "golang.org/x/tools/go/analysis/passes/assign" "golang.org/x/tools/go/analysis/passes/atomic" @@ -14,6 +15,8 @@ import ( "golang.org/x/tools/go/analysis/passes/copylock" _ "golang.org/x/tools/go/analysis/passes/ctrlflow" // unused, internal analyzer "golang.org/x/tools/go/analysis/passes/deepequalerrors" + "golang.org/x/tools/go/analysis/passes/defers" + "golang.org/x/tools/go/analysis/passes/directive" "golang.org/x/tools/go/analysis/passes/errorsas" "golang.org/x/tools/go/analysis/passes/fieldalignment" "golang.org/x/tools/go/analysis/passes/findcall" @@ -31,12 +34,14 @@ import ( "golang.org/x/tools/go/analysis/passes/shadow" "golang.org/x/tools/go/analysis/passes/shift" "golang.org/x/tools/go/analysis/passes/sigchanyzer" + "golang.org/x/tools/go/analysis/passes/slog" "golang.org/x/tools/go/analysis/passes/sortslice" 
"golang.org/x/tools/go/analysis/passes/stdmethods" "golang.org/x/tools/go/analysis/passes/stringintconv" "golang.org/x/tools/go/analysis/passes/structtag" "golang.org/x/tools/go/analysis/passes/testinggoroutine" "golang.org/x/tools/go/analysis/passes/tests" + "golang.org/x/tools/go/analysis/passes/timeformat" "golang.org/x/tools/go/analysis/passes/unmarshal" "golang.org/x/tools/go/analysis/passes/unreachable" "golang.org/x/tools/go/analysis/passes/unsafeptr" @@ -49,6 +54,7 @@ import ( var ( allAnalyzers = []*analysis.Analyzer{ + appends.Analyzer, asmdecl.Analyzer, assign.Analyzer, atomic.Analyzer, @@ -59,6 +65,8 @@ var ( composite.Analyzer, copylock.Analyzer, deepequalerrors.Analyzer, + defers.Analyzer, + directive.Analyzer, errorsas.Analyzer, fieldalignment.Analyzer, findcall.Analyzer, @@ -74,12 +82,14 @@ var ( shadow.Analyzer, shift.Analyzer, sigchanyzer.Analyzer, + slog.Analyzer, sortslice.Analyzer, stdmethods.Analyzer, stringintconv.Analyzer, structtag.Analyzer, testinggoroutine.Analyzer, tests.Analyzer, + timeformat.Analyzer, unmarshal.Analyzer, unreachable.Analyzer, unsafeptr.Analyzer, @@ -87,8 +97,9 @@ var ( unusedwrite.Analyzer, } - // https://github.com/golang/go/blob/879db69ce2de814bc3203c39b45617ba51cc5366/src/cmd/vet/main.go#L40-L68 + // https://github.com/golang/go/blob/b56645a87b28840a180d64077877cb46570b4176/src/cmd/vet/main.go#L49-L81 defaultAnalyzers = []*analysis.Analyzer{ + appends.Analyzer, asmdecl.Analyzer, assign.Analyzer, atomic.Analyzer, @@ -97,6 +108,8 @@ var ( cgocall.Analyzer, composite.Analyzer, copylock.Analyzer, + defers.Analyzer, + directive.Analyzer, errorsas.Analyzer, framepointer.Analyzer, httpresponse.Analyzer, @@ -107,11 +120,13 @@ var ( printf.Analyzer, shift.Analyzer, sigchanyzer.Analyzer, + slog.Analyzer, stdmethods.Analyzer, stringintconv.Analyzer, structtag.Analyzer, testinggoroutine.Analyzer, tests.Analyzer, + timeformat.Analyzer, unmarshal.Analyzer, unreachable.Analyzer, unsafeptr.Analyzer, @@ -119,7 +134,46 @@ var ( } ) +func NewGovet(settings *config.GovetSettings) *goanalysis.Linter { + var conf map[string]map[string]any + if settings != nil { + conf = settings.Settings + } + + return goanalysis.NewLinter( + "govet", + "Vet examines Go source code and reports suspicious constructs, "+ + "such as Printf calls whose arguments do not align with the format string", + analyzersFromConfig(settings), + conf, + ).WithLoadMode(goanalysis.LoadModeTypesInfo) +} + +func analyzersFromConfig(settings *config.GovetSettings) []*analysis.Analyzer { + if settings == nil { + return defaultAnalyzers + } + + if settings.CheckShadowing { + // Keeping for backward compatibility. + settings.Enable = append(settings.Enable, shadow.Analyzer.Name) + } + + var enabledAnalyzers []*analysis.Analyzer + for _, a := range allAnalyzers { + if isAnalyzerEnabled(a.Name, settings, defaultAnalyzers) { + enabledAnalyzers = append(enabledAnalyzers, a) + } + } + + return enabledAnalyzers +} + func isAnalyzerEnabled(name string, cfg *config.GovetSettings, defaultAnalyzers []*analysis.Analyzer) bool { + if name == loopclosure.Analyzer.Name && config.IsGreaterThanOrEqualGo122(cfg.Go) { + return false + } + if cfg.EnableAll { for _, n := range cfg.Disable { if n == name { @@ -128,20 +182,24 @@ func isAnalyzerEnabled(name string, cfg *config.GovetSettings, defaultAnalyzers } return true } + // Raw for loops should be OK on small slice lengths. 
for _, n := range cfg.Enable { if n == name { return true } } + for _, n := range cfg.Disable { if n == name { return false } } + if cfg.DisableAll { return false } + for _, a := range defaultAnalyzers { if a.Name == name { return true @@ -149,37 +207,3 @@ func isAnalyzerEnabled(name string, cfg *config.GovetSettings, defaultAnalyzers } return false } - -func analyzersFromConfig(cfg *config.GovetSettings) []*analysis.Analyzer { - if cfg == nil { - return defaultAnalyzers - } - - if cfg.CheckShadowing { - // Keeping for backward compatibility. - cfg.Enable = append(cfg.Enable, shadow.Analyzer.Name) - } - - var enabledAnalyzers []*analysis.Analyzer - for _, a := range allAnalyzers { - if isAnalyzerEnabled(a.Name, cfg, defaultAnalyzers) { - enabledAnalyzers = append(enabledAnalyzers, a) - } - } - - return enabledAnalyzers -} - -func NewGovet(cfg *config.GovetSettings) *goanalysis.Linter { - var settings map[string]map[string]interface{} - if cfg != nil { - settings = cfg.Settings - } - return goanalysis.NewLinter( - "govet", - "Vet examines Go source code and reports suspicious constructs, "+ - "such as Printf calls whose arguments do not align with the format string", - analyzersFromConfig(cfg), - settings, - ).WithLoadMode(goanalysis.LoadModeTypesInfo) -} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/grouper.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/grouper.go new file mode 100644 index 0000000000..41761f2ae0 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/grouper.go @@ -0,0 +1,32 @@ +package golinters + +import ( + grouper "github.com/leonklingele/grouper/pkg/analyzer" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewGrouper(settings *config.GrouperSettings) *goanalysis.Linter { + linterCfg := map[string]map[string]any{} + if settings != nil { + linterCfg["grouper"] = map[string]any{ + "const-require-single-const": settings.ConstRequireSingleConst, + "const-require-grouping": settings.ConstRequireGrouping, + "import-require-single-import": settings.ImportRequireSingleImport, + "import-require-grouping": settings.ImportRequireGrouping, + "type-require-single-type": settings.TypeRequireSingleType, + "type-require-grouping": settings.TypeRequireGrouping, + "var-require-single-var": settings.VarRequireSingleVar, + "var-require-grouping": settings.VarRequireGrouping, + } + } + + return goanalysis.NewLinter( + "grouper", + "Analyze expression groups.", + []*analysis.Analyzer{grouper.New()}, + linterCfg, + ).WithLoadMode(goanalysis.LoadModeSyntax) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/ifshort.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/ifshort.go index c26f08e403..50e2c172ef 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/ifshort.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/ifshort.go @@ -9,9 +9,9 @@ import ( ) func NewIfshort(settings *config.IfshortSettings) *goanalysis.Linter { - var cfg map[string]map[string]interface{} + var cfg map[string]map[string]any if settings != nil { - cfg = map[string]map[string]interface{}{ + cfg = map[string]map[string]any{ analyzer.Analyzer.Name: { "max-decl-lines": settings.MaxDeclLines, "max-decl-chars": settings.MaxDeclChars, @@ -19,10 +19,12 @@ func NewIfshort(settings *config.IfshortSettings) *goanalysis.Linter { } } + a := analyzer.Analyzer + return goanalysis.NewLinter( - "ifshort", - "Checks 
that your code uses short syntax for if-statements whenever possible", - []*analysis.Analyzer{analyzer.Analyzer}, + a.Name, + a.Doc, + []*analysis.Analyzer{a}, cfg, ).WithLoadMode(goanalysis.LoadModeSyntax) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/importas.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/importas.go index 523aa257b4..b06aec7a3b 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/importas.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/importas.go @@ -3,8 +3,9 @@ package golinters import ( "fmt" "strconv" + "strings" - "github.com/julz/importas" // nolint: misspell + "github.com/julz/importas" //nolint:misspell "golang.org/x/tools/go/analysis" "github.com/golangci/golangci-lint/pkg/config" @@ -25,20 +26,38 @@ func NewImportAs(settings *config.ImportAsSettings) *goanalysis.Linter { return } if len(settings.Alias) == 0 { - lintCtx.Log.Infof("importas settings found, but no aliases listed. List aliases under alias: key.") // nolint: misspell + lintCtx.Log.Infof("importas settings found, but no aliases listed. List aliases under alias: key.") //nolint:misspell } - err := analyzer.Flags.Set("no-unaliased", strconv.FormatBool(settings.NoUnaliased)) - if err != nil { + if err := analyzer.Flags.Set("no-unaliased", strconv.FormatBool(settings.NoUnaliased)); err != nil { lintCtx.Log.Errorf("failed to parse configuration: %v", err) } + if err := analyzer.Flags.Set("no-extra-aliases", strconv.FormatBool(settings.NoExtraAliases)); err != nil { + lintCtx.Log.Errorf("failed to parse configuration: %v", err) + } + + uniqPackages := make(map[string]config.ImportAsAlias) + uniqAliases := make(map[string]config.ImportAsAlias) for _, a := range settings.Alias { if a.Pkg == "" { lintCtx.Log.Errorf("invalid configuration, empty package: pkg=%s alias=%s", a.Pkg, a.Alias) continue } + if v, ok := uniqPackages[a.Pkg]; ok { + lintCtx.Log.Errorf("invalid configuration, multiple aliases for the same package: pkg=%s aliases=[%s,%s]", a.Pkg, a.Alias, v.Alias) + } else { + uniqPackages[a.Pkg] = a + } + + // skip the duplication check when the alias is a regular expression replacement pattern (ie. contains `$`). 
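The importas hunk around this point adds duplicate detection on top of the alias flags: each package may have only one alias, and each alias may map to only one package unless the alias is a regex replacement pattern (contains "$"). A toy version of that validation, where the Alias struct stands in for config.ImportAsAlias and the reported strings are illustrative rather than the linter's exact log messages:

package main

import (
	"fmt"
	"strings"
)

type Alias struct{ Pkg, Alias string }

func validate(aliases []Alias) []string {
	var problems []string

	uniqPackages := make(map[string]Alias)
	uniqAliases := make(map[string]Alias)

	for _, a := range aliases {
		if a.Pkg == "" {
			problems = append(problems, fmt.Sprintf("empty package: alias=%s", a.Alias))
			continue
		}

		if prev, ok := uniqPackages[a.Pkg]; ok {
			problems = append(problems, fmt.Sprintf("multiple aliases for %s: [%s,%s]", a.Pkg, a.Alias, prev.Alias))
		} else {
			uniqPackages[a.Pkg] = a
		}

		// A "$" means the alias is a replacement pattern, so collisions are expected.
		if prev, ok := uniqAliases[a.Alias]; ok && !strings.Contains(a.Alias, "$") {
			problems = append(problems, fmt.Sprintf("alias %s used for [%s,%s]", a.Alias, a.Pkg, prev.Pkg))
		} else {
			uniqAliases[a.Alias] = a
		}
	}

	return problems
}

func main() {
	fmt.Println(validate([]Alias{
		{Pkg: "k8s.io/api/core/v1", Alias: "corev1"},
		{Pkg: "k8s.io/api/apps/v1", Alias: "corev1"}, // duplicate alias
	}))
}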
+ if v, ok := uniqAliases[a.Alias]; ok && !strings.Contains(a.Alias, "$") { + lintCtx.Log.Errorf("invalid configuration, multiple packages with the same alias: alias=%s packages=[%s,%s]", a.Alias, a.Pkg, v.Pkg) + } else { + uniqAliases[a.Alias] = a + } + err := analyzer.Flags.Set("alias", fmt.Sprintf("%s:%s", a.Pkg, a.Alias)) if err != nil { lintCtx.Log.Errorf("failed to parse configuration: %v", err) diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/inamedparam.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/inamedparam.go new file mode 100644 index 0000000000..887f3db2ad --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/inamedparam.go @@ -0,0 +1,30 @@ +package golinters + +import ( + "github.com/macabu/inamedparam" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewINamedParam(settings *config.INamedParamSettings) *goanalysis.Linter { + a := inamedparam.Analyzer + + var cfg map[string]map[string]any + + if settings != nil { + cfg = map[string]map[string]any{ + a.Name: { + "skip-single-param": settings.SkipSingleParam, + }, + } + } + + return goanalysis.NewLinter( + a.Name, + a.Doc, + []*analysis.Analyzer{a}, + cfg, + ).WithLoadMode(goanalysis.LoadModeSyntax) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/ineffassign.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/ineffassign.go index c87bb2fa51..ac5eb20adb 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/ineffassign.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/ineffassign.go @@ -8,10 +8,12 @@ import ( ) func NewIneffassign() *goanalysis.Linter { + a := ineffassign.Analyzer + return goanalysis.NewLinter( - "ineffassign", + a.Name, "Detects when assignments to existing variables are not used", - []*analysis.Analyzer{ineffassign.Analyzer}, + []*analysis.Analyzer{a}, nil, ).WithLoadMode(goanalysis.LoadModeSyntax) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/interfacebloat.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/interfacebloat.go new file mode 100644 index 0000000000..a6dbfe178f --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/interfacebloat.go @@ -0,0 +1,29 @@ +package golinters + +import ( + "github.com/sashamelentyev/interfacebloat/pkg/analyzer" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewInterfaceBloat(settings *config.InterfaceBloatSettings) *goanalysis.Linter { + a := analyzer.New() + + var cfg map[string]map[string]any + if settings != nil { + cfg = map[string]map[string]any{ + a.Name: { + analyzer.InterfaceMaxMethodsFlag: settings.Max, + }, + } + } + + return goanalysis.NewLinter( + a.Name, + a.Doc, + []*analysis.Analyzer{a}, + cfg, + ).WithLoadMode(goanalysis.LoadModeSyntax) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/interfacer.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/interfacer.go index 1edbe894cc..71bdfddbe8 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/interfacer.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/interfacer.go @@ -22,46 +22,61 @@ func NewInterfacer() *goanalysis.Linter { Name: interfacerName, Doc: goanalysis.TheOnlyanalyzerDoc, Requires: []*analysis.Analyzer{buildssa.Analyzer}, - } - return 
goanalysis.NewLinter( - interfacerName, - "Linter that suggests narrower interface types", - []*analysis.Analyzer{analyzer}, - nil, - ).WithContextSetter(func(lintCtx *linter.Context) { - analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { - ssa := pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA) - ssaPkg := ssa.Pkg - c := &check.Checker{} - prog := goanalysis.MakeFakeLoaderProgram(pass) - c.Program(prog) - c.ProgramSSA(ssaPkg.Prog) - - issues, err := c.Check() + Run: func(pass *analysis.Pass) (any, error) { + issues, err := runInterfacer(pass) if err != nil { return nil, err } + if len(issues) == 0 { return nil, nil } - res := make([]goanalysis.Issue, 0, len(issues)) - for _, i := range issues { - pos := pass.Fset.Position(i.Pos()) - res = append(res, goanalysis.NewIssue(&result.Issue{ - Pos: pos, - Text: i.Message(), - FromLinter: interfacerName, - }, pass)) - } - mu.Lock() - resIssues = append(resIssues, res...) + resIssues = append(resIssues, issues...) mu.Unlock() return nil, nil - } - }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + }, + } + + return goanalysis.NewLinter( + interfacerName, + "Linter that suggests narrower interface types", + []*analysis.Analyzer{analyzer}, + nil, + ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { return resIssues }).WithLoadMode(goanalysis.LoadModeTypesInfo) } + +func runInterfacer(pass *analysis.Pass) ([]goanalysis.Issue, error) { + c := &check.Checker{} + + prog := goanalysis.MakeFakeLoaderProgram(pass) + c.Program(prog) + + ssa := pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA) + ssaPkg := ssa.Pkg + c.ProgramSSA(ssaPkg.Prog) + + lintIssues, err := c.Check() + if err != nil { + return nil, err + } + if len(lintIssues) == 0 { + return nil, nil + } + + issues := make([]goanalysis.Issue, 0, len(lintIssues)) + for _, i := range lintIssues { + pos := pass.Fset.Position(i.Pos()) + issues = append(issues, goanalysis.NewIssue(&result.Issue{ + Pos: pos, + Text: i.Message(), + FromLinter: interfacerName, + }, pass)) + } + + return issues, nil +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/ireturn.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/ireturn.go new file mode 100644 index 0000000000..dc09dad0ee --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/ireturn.go @@ -0,0 +1,31 @@ +package golinters + +import ( + "strings" + + "github.com/butuzov/ireturn/analyzer" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewIreturn(settings *config.IreturnSettings) *goanalysis.Linter { + a := analyzer.NewAnalyzer() + + cfg := map[string]map[string]any{} + if settings != nil { + cfg[a.Name] = map[string]any{ + "allow": strings.Join(settings.Allow, ","), + "reject": strings.Join(settings.Reject, ","), + "nonolint": true, + } + } + + return goanalysis.NewLinter( + a.Name, + a.Doc, + []*analysis.Analyzer{a}, + cfg, + ).WithLoadMode(goanalysis.LoadModeTypesInfo) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/lll.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/lll.go index 5f26e91ddb..61498087a9 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/lll.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/lll.go @@ -2,6 +2,7 @@ package golinters import ( "bufio" + "errors" "fmt" "go/token" "os" @@ -11,25 +12,108 @@ import ( "golang.org/x/tools/go/analysis" + 
"github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" "github.com/golangci/golangci-lint/pkg/lint/linter" "github.com/golangci/golangci-lint/pkg/result" ) +const lllName = "lll" + +const goCommentDirectivePrefix = "//go:" + +//nolint:dupl +func NewLLL(settings *config.LllSettings) *goanalysis.Linter { + var mu sync.Mutex + var resIssues []goanalysis.Issue + + analyzer := &analysis.Analyzer{ + Name: lllName, + Doc: goanalysis.TheOnlyanalyzerDoc, + Run: func(pass *analysis.Pass) (any, error) { + issues, err := runLll(pass, settings) + if err != nil { + return nil, err + } + + if len(issues) == 0 { + return nil, nil + } + + mu.Lock() + resIssues = append(resIssues, issues...) + mu.Unlock() + + return nil, nil + }, + } + + return goanalysis.NewLinter( + lllName, + "Reports long lines", + []*analysis.Analyzer{analyzer}, + nil, + ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + return resIssues + }).WithLoadMode(goanalysis.LoadModeSyntax) +} + +func runLll(pass *analysis.Pass, settings *config.LllSettings) ([]goanalysis.Issue, error) { + fileNames := getFileNames(pass) + + spaces := strings.Repeat(" ", settings.TabWidth) + + var issues []goanalysis.Issue + for _, f := range fileNames { + lintIssues, err := getLLLIssuesForFile(f, settings.LineLength, spaces) + if err != nil { + return nil, err + } + + for i := range lintIssues { + issues = append(issues, goanalysis.NewIssue(&lintIssues[i], pass)) + } + } + + return issues, nil +} + func getLLLIssuesForFile(filename string, maxLineLen int, tabSpaces string) ([]result.Issue, error) { var res []result.Issue f, err := os.Open(filename) if err != nil { - return nil, fmt.Errorf("can't open file %s: %s", filename, err) + return nil, fmt.Errorf("can't open file %s: %w", filename, err) } defer f.Close() - lineNumber := 1 + lineNumber := 0 + multiImportEnabled := false + scanner := bufio.NewScanner(f) for scanner.Scan() { + lineNumber++ + line := scanner.Text() - line = strings.Replace(line, "\t", tabSpaces, -1) + line = strings.ReplaceAll(line, "\t", tabSpaces) + + if strings.HasPrefix(line, goCommentDirectivePrefix) { + continue + } + + if strings.HasPrefix(line, "import") { + multiImportEnabled = strings.HasSuffix(line, "(") + continue + } + + if multiImportEnabled { + if line == ")" { + multiImportEnabled = false + } + + continue + } + lineLen := utf8.RuneCountInString(line) if lineLen > maxLineLen { res = append(res, result.Issue{ @@ -41,21 +125,20 @@ func getLLLIssuesForFile(filename string, maxLineLen int, tabSpaces string) ([]r FromLinter: lllName, }) } - lineNumber++ } if err := scanner.Err(); err != nil { - if err == bufio.ErrTooLong && maxLineLen < bufio.MaxScanTokenSize { + if errors.Is(err, bufio.ErrTooLong) && maxLineLen < bufio.MaxScanTokenSize { // scanner.Scan() might fail if the line is longer than bufio.MaxScanTokenSize // In the case where the specified maxLineLen is smaller than bufio.MaxScanTokenSize // we can return this line as a long line instead of returning an error. // The reason for this change is that this case might happen with autogenerated files // The go-bindata tool for instance might generate a file with a very long line. - // In this case, as it's a auto generated file, the warning returned by lll will + // In this case, as it's an auto generated file, the warning returned by lll will // be ignored. 
// But if we return a linter error here, and this error happens for an autogenerated // file the error will be discarded (fine), but all the subsequent errors for lll will - // be discarded for other files and we'll miss legit error. + // be discarded for other files, and we'll miss legit error. res = append(res, result.Issue{ Pos: token.Position{ Filename: filename, @@ -66,59 +149,9 @@ func getLLLIssuesForFile(filename string, maxLineLen int, tabSpaces string) ([]r FromLinter: lllName, }) } else { - return nil, fmt.Errorf("can't scan file %s: %s", filename, err) + return nil, fmt.Errorf("can't scan file %s: %w", filename, err) } } return res, nil } - -const lllName = "lll" - -func NewLLL() *goanalysis.Linter { - var mu sync.Mutex - var resIssues []goanalysis.Issue - - analyzer := &analysis.Analyzer{ - Name: lllName, - Doc: goanalysis.TheOnlyanalyzerDoc, - } - return goanalysis.NewLinter( - lllName, - "Reports long lines", - []*analysis.Analyzer{analyzer}, - nil, - ).WithContextSetter(func(lintCtx *linter.Context) { - analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { - var fileNames []string - for _, f := range pass.Files { - pos := pass.Fset.PositionFor(f.Pos(), false) - fileNames = append(fileNames, pos.Filename) - } - - var res []goanalysis.Issue - spaces := strings.Repeat(" ", lintCtx.Settings().Lll.TabWidth) - for _, f := range fileNames { - issues, err := getLLLIssuesForFile(f, lintCtx.Settings().Lll.LineLength, spaces) - if err != nil { - return nil, err - } - for i := range issues { - res = append(res, goanalysis.NewIssue(&issues[i], pass)) - } - } - - if len(res) == 0 { - return nil, nil - } - - mu.Lock() - resIssues = append(resIssues, res...) - mu.Unlock() - - return nil, nil - } - }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { - return resIssues - }).WithLoadMode(goanalysis.LoadModeSyntax) -} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/loggercheck.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/loggercheck.go new file mode 100644 index 0000000000..fc29127c3b --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/loggercheck.go @@ -0,0 +1,44 @@ +package golinters + +import ( + "github.com/timonwong/loggercheck" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewLoggerCheck(settings *config.LoggerCheckSettings) *goanalysis.Linter { + var opts []loggercheck.Option + + if settings != nil { + var disable []string + if !settings.Kitlog { + disable = append(disable, "kitlog") + } + if !settings.Klog { + disable = append(disable, "klog") + } + if !settings.Logr { + disable = append(disable, "logr") + } + if !settings.Zap { + disable = append(disable, "zap") + } + + opts = []loggercheck.Option{ + loggercheck.WithDisable(disable), + loggercheck.WithRequireStringKey(settings.RequireStringKey), + loggercheck.WithRules(settings.Rules), + loggercheck.WithNoPrintfLike(settings.NoPrintfLike), + } + } + + analyzer := loggercheck.NewAnalyzer(opts...) 
+ return goanalysis.NewLinter( + analyzer.Name, + analyzer.Doc, + []*analysis.Analyzer{analyzer}, + nil, + ).WithLoadMode(goanalysis.LoadModeTypesInfo) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/maintidx.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/maintidx.go new file mode 100644 index 0000000000..55509d970c --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/maintidx.go @@ -0,0 +1,30 @@ +package golinters + +import ( + "github.com/yagipy/maintidx" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewMaintIdx(cfg *config.MaintIdxSettings) *goanalysis.Linter { + analyzer := maintidx.Analyzer + + cfgMap := map[string]map[string]any{ + analyzer.Name: {"under": 20}, + } + + if cfg != nil { + cfgMap[analyzer.Name] = map[string]any{ + "under": cfg.Under, + } + } + + return goanalysis.NewLinter( + analyzer.Name, + analyzer.Doc, + []*analysis.Analyzer{analyzer}, + cfgMap, + ).WithLoadMode(goanalysis.LoadModeSyntax) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/makezero.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/makezero.go index cdde09291d..a9828629a2 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/makezero.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/makezero.go @@ -1,12 +1,13 @@ package golinters import ( + "fmt" "sync" "github.com/ashanbrown/makezero/makezero" - "github.com/pkg/errors" "golang.org/x/tools/go/analysis" + "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" "github.com/golangci/golangci-lint/pkg/lint/linter" "github.com/golangci/golangci-lint/pkg/result" @@ -14,47 +15,61 @@ import ( const makezeroName = "makezero" -func NewMakezero() *goanalysis.Linter { +//nolint:dupl +func NewMakezero(settings *config.MakezeroSettings) *goanalysis.Linter { var mu sync.Mutex var resIssues []goanalysis.Issue analyzer := &analysis.Analyzer{ Name: makezeroName, Doc: goanalysis.TheOnlyanalyzerDoc, - } - return goanalysis.NewLinter( - makezeroName, - "Finds slice declarations with non-zero initial length", - []*analysis.Analyzer{analyzer}, - nil, - ).WithContextSetter(func(lintCtx *linter.Context) { - s := &lintCtx.Settings().Makezero - - analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { - var res []goanalysis.Issue - linter := makezero.NewLinter(s.Always) - for _, file := range pass.Files { - hints, err := linter.Run(pass.Fset, pass.TypesInfo, file) - if err != nil { - return nil, errors.Wrapf(err, "makezero linter failed on file %q", file.Name.String()) - } - for _, hint := range hints { - res = append(res, goanalysis.NewIssue(&result.Issue{ - Pos: hint.Position(), - Text: hint.Details(), - FromLinter: makezeroName, - }, pass)) - } + Run: func(pass *analysis.Pass) (any, error) { + issues, err := runMakeZero(pass, settings) + if err != nil { + return nil, err } - if len(res) == 0 { + + if len(issues) == 0 { return nil, nil } + mu.Lock() - resIssues = append(resIssues, res...) + resIssues = append(resIssues, issues...) 
mu.Unlock() + return nil, nil - } - }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + }, + } + + return goanalysis.NewLinter( + makezeroName, + "Finds slice declarations with non-zero initial length", + []*analysis.Analyzer{analyzer}, + nil, + ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { return resIssues }).WithLoadMode(goanalysis.LoadModeTypesInfo) } + +func runMakeZero(pass *analysis.Pass, settings *config.MakezeroSettings) ([]goanalysis.Issue, error) { + zero := makezero.NewLinter(settings.Always) + + var issues []goanalysis.Issue + + for _, file := range pass.Files { + hints, err := zero.Run(pass.Fset, pass.TypesInfo, file) + if err != nil { + return nil, fmt.Errorf("makezero linter failed on file %q: %w", file.Name.String(), err) + } + + for _, hint := range hints { + issues = append(issues, goanalysis.NewIssue(&result.Issue{ + Pos: hint.Position(), + Text: hint.Details(), + FromLinter: makezeroName, + }, pass)) + } + } + + return issues, nil +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/maligned.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/maligned.go index 22422b8c6a..0455be76aa 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/maligned.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/maligned.go @@ -7,52 +7,68 @@ import ( malignedAPI "github.com/golangci/maligned" "golang.org/x/tools/go/analysis" + "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" "github.com/golangci/golangci-lint/pkg/lint/linter" "github.com/golangci/golangci-lint/pkg/result" ) -func NewMaligned() *goanalysis.Linter { - const linterName = "maligned" +const malignedName = "maligned" + +//nolint:dupl +func NewMaligned(settings *config.MalignedSettings) *goanalysis.Linter { var mu sync.Mutex var res []goanalysis.Issue + analyzer := &analysis.Analyzer{ - Name: linterName, + Name: malignedName, Doc: goanalysis.TheOnlyanalyzerDoc, - } - return goanalysis.NewLinter( - linterName, - "Tool to detect Go structs that would take less memory if their fields were sorted", - []*analysis.Analyzer{analyzer}, - nil, - ).WithContextSetter(func(lintCtx *linter.Context) { - analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { - prog := goanalysis.MakeFakeLoaderProgram(pass) + Run: func(pass *analysis.Pass) (any, error) { + issues := runMaligned(pass, settings) - malignedIssues := malignedAPI.Run(prog) - if len(malignedIssues) == 0 { + if len(issues) == 0 { return nil, nil } - issues := make([]goanalysis.Issue, 0, len(malignedIssues)) - for _, i := range malignedIssues { - text := fmt.Sprintf("struct of size %d bytes could be of size %d bytes", i.OldSize, i.NewSize) - if lintCtx.Settings().Maligned.SuggestNewOrder { - text += fmt.Sprintf(":\n%s", formatCodeBlock(i.NewStructDef, lintCtx.Cfg)) - } - issues = append(issues, goanalysis.NewIssue(&result.Issue{ - Pos: i.Pos, - Text: text, - FromLinter: linterName, - }, pass)) - } - mu.Lock() res = append(res, issues...) 
mu.Unlock() + return nil, nil - } - }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + }, + } + + return goanalysis.NewLinter( + malignedName, + "Tool to detect Go structs that would take less memory if their fields were sorted", + []*analysis.Analyzer{analyzer}, + nil, + ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { return res }).WithLoadMode(goanalysis.LoadModeTypesInfo) } + +func runMaligned(pass *analysis.Pass, settings *config.MalignedSettings) []goanalysis.Issue { + prog := goanalysis.MakeFakeLoaderProgram(pass) + + malignedIssues := malignedAPI.Run(prog) + if len(malignedIssues) == 0 { + return nil + } + + issues := make([]goanalysis.Issue, 0, len(malignedIssues)) + for _, i := range malignedIssues { + text := fmt.Sprintf("struct of size %d bytes could be of size %d bytes", i.OldSize, i.NewSize) + if settings.SuggestNewOrder { + text += fmt.Sprintf(":\n%s", formatCodeBlock(i.NewStructDef, nil)) + } + + issues = append(issues, goanalysis.NewIssue(&result.Issue{ + Pos: i.Pos, + Text: text, + FromLinter: malignedName, + }, pass)) + } + + return issues +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/mirror.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/mirror.go new file mode 100644 index 0000000000..d6e2bb06ac --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/mirror.go @@ -0,0 +1,70 @@ +package golinters + +import ( + "sync" + + "github.com/butuzov/mirror" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" + "github.com/golangci/golangci-lint/pkg/lint/linter" + "github.com/golangci/golangci-lint/pkg/result" +) + +func NewMirror() *goanalysis.Linter { + var ( + mu sync.Mutex + issues []goanalysis.Issue + ) + + a := mirror.NewAnalyzer() + a.Run = func(pass *analysis.Pass) (any, error) { + // mirror only lints test files if the `--with-tests` flag is passed, + // so we pass the `with-tests` flag as true to the analyzer before running it. + // This can be turned off by using the regular golangci-lint flags such as `--tests` or `--skip-files` + // or can be disabled per linter via exclude rules. 
+ // (see https://github.com/golangci/golangci-lint/issues/2527#issuecomment-1023707262) + violations := mirror.Run(pass, true) + + if len(violations) == 0 { + return nil, nil + } + + for index := range violations { + i := violations[index].Issue(pass.Fset) + + issue := result.Issue{ + FromLinter: a.Name, + Text: i.Message, + Pos: i.Start, + } + + if i.InlineFix != "" { + issue.Replacement = &result.Replacement{ + Inline: &result.InlineFix{ + StartCol: i.Start.Column - 1, + Length: len(i.Original), + NewString: i.InlineFix, + }, + } + } + + mu.Lock() + issues = append(issues, goanalysis.NewIssue(&issue, pass)) + mu.Unlock() + } + + return nil, nil + } + + analyzer := goanalysis.NewLinter( + a.Name, + a.Doc, + []*analysis.Analyzer{a}, + nil, + ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + return issues + }).WithLoadMode(goanalysis.LoadModeTypesInfo) + + return analyzer +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/misspell.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/misspell.go index 80ecf9bb66..0f69cdb870 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/misspell.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/misspell.go @@ -9,119 +9,48 @@ import ( "github.com/golangci/misspell" "golang.org/x/tools/go/analysis" + "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" "github.com/golangci/golangci-lint/pkg/lint/linter" "github.com/golangci/golangci-lint/pkg/result" ) -func runMisspellOnFile(fileName string, r *misspell.Replacer, lintCtx *linter.Context) ([]result.Issue, error) { - var res []result.Issue - fileContent, err := lintCtx.FileCache.GetFileBytes(fileName) - if err != nil { - return nil, fmt.Errorf("can't get file %s contents: %s", fileName, err) - } - - // use r.Replace, not r.ReplaceGo because r.ReplaceGo doesn't find - // issues inside strings: it searches only inside comments. r.Replace - // searches all words: it treats input as a plain text. A standalone misspell - // tool uses r.Replace by default. 
- _, diffs := r.Replace(string(fileContent)) - for _, diff := range diffs { - text := fmt.Sprintf("`%s` is a misspelling of `%s`", diff.Original, diff.Corrected) - pos := token.Position{ - Filename: fileName, - Line: diff.Line, - Column: diff.Column + 1, - } - replacement := &result.Replacement{ - Inline: &result.InlineFix{ - StartCol: diff.Column, - Length: len(diff.Original), - NewString: diff.Corrected, - }, - } - - res = append(res, result.Issue{ - Pos: pos, - Text: text, - FromLinter: misspellName, - Replacement: replacement, - }) - } - - return res, nil -} - const misspellName = "misspell" -func NewMisspell() *goanalysis.Linter { +func NewMisspell(settings *config.MisspellSettings) *goanalysis.Linter { var mu sync.Mutex var resIssues []goanalysis.Issue - var ruleErr error analyzer := &analysis.Analyzer{ Name: misspellName, Doc: goanalysis.TheOnlyanalyzerDoc, + Run: goanalysis.DummyRun, } + return goanalysis.NewLinter( misspellName, - "Finds commonly misspelled English words in comments", + "Finds commonly misspelled English words", []*analysis.Analyzer{analyzer}, nil, ).WithContextSetter(func(lintCtx *linter.Context) { - r := misspell.Replacer{ - Replacements: misspell.DictMain, - } - - // Figure out regional variations - settings := lintCtx.Settings().Misspell - locale := settings.Locale - switch strings.ToUpper(locale) { - case "": - // nothing - case "US": - r.AddRuleList(misspell.DictAmerican) - case "UK", "GB": - r.AddRuleList(misspell.DictBritish) - case "NZ", "AU", "CA": - ruleErr = fmt.Errorf("unknown locale: %q", locale) - } - - if ruleErr == nil { - if len(settings.IgnoreWords) != 0 { - r.RemoveRule(settings.IgnoreWords) - } - - r.Compile() - } + replacer, ruleErr := createMisspellReplacer(settings) - analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { + analyzer.Run = func(pass *analysis.Pass) (any, error) { if ruleErr != nil { return nil, ruleErr } - var fileNames []string - for _, f := range pass.Files { - pos := pass.Fset.PositionFor(f.Pos(), false) - fileNames = append(fileNames, pos.Filename) + issues, err := runMisspell(lintCtx, pass, replacer, settings.Mode) + if err != nil { + return nil, err } - var res []goanalysis.Issue - for _, f := range fileNames { - issues, err := runMisspellOnFile(f, &r, lintCtx) - if err != nil { - return nil, err - } - for i := range issues { - res = append(res, goanalysis.NewIssue(&issues[i], pass)) - } - } - if len(res) == 0 { + if len(issues) == 0 { return nil, nil } mu.Lock() - resIssues = append(resIssues, res...) + resIssues = append(resIssues, issues...) 
mu.Unlock() return nil, nil @@ -130,3 +59,97 @@ func NewMisspell() *goanalysis.Linter { return resIssues }).WithLoadMode(goanalysis.LoadModeSyntax) } + +func runMisspell(lintCtx *linter.Context, pass *analysis.Pass, replacer *misspell.Replacer, mode string) ([]goanalysis.Issue, error) { + fileNames := getFileNames(pass) + + var issues []goanalysis.Issue + for _, filename := range fileNames { + lintIssues, err := runMisspellOnFile(lintCtx, filename, replacer, mode) + if err != nil { + return nil, err + } + + for i := range lintIssues { + issues = append(issues, goanalysis.NewIssue(&lintIssues[i], pass)) + } + } + + return issues, nil +} + +func createMisspellReplacer(settings *config.MisspellSettings) (*misspell.Replacer, error) { + replacer := &misspell.Replacer{ + Replacements: misspell.DictMain, + } + + // Figure out regional variations + switch strings.ToUpper(settings.Locale) { + case "": + // nothing + case "US": + replacer.AddRuleList(misspell.DictAmerican) + case "UK", "GB": + replacer.AddRuleList(misspell.DictBritish) + case "NZ", "AU", "CA": + return nil, fmt.Errorf("unknown locale: %q", settings.Locale) + } + + if len(settings.IgnoreWords) != 0 { + replacer.RemoveRule(settings.IgnoreWords) + } + + // It can panic. + replacer.Compile() + + return replacer, nil +} + +func runMisspellOnFile(lintCtx *linter.Context, filename string, replacer *misspell.Replacer, mode string) ([]result.Issue, error) { + fileContent, err := lintCtx.FileCache.GetFileBytes(filename) + if err != nil { + return nil, fmt.Errorf("can't get file %s contents: %w", filename, err) + } + + // `r.ReplaceGo` doesn't find issues inside strings: it searches only inside comments. + // `r.Replace` searches all words: it treats input as a plain text. + // The standalone misspell tool uses `r.Replace` by default. + var replace func(input string) (string, []misspell.Diff) + switch strings.ToLower(mode) { + case "restricted": + replace = replacer.ReplaceGo + default: + replace = replacer.Replace + } + + _, diffs := replace(string(fileContent)) + + var res []result.Issue + + for _, diff := range diffs { + text := fmt.Sprintf("`%s` is a misspelling of `%s`", diff.Original, diff.Corrected) + + pos := token.Position{ + Filename: filename, + Line: diff.Line, + Column: diff.Column + 1, + } + + replacement := &result.Replacement{ + Inline: &result.InlineFix{ + StartCol: diff.Column, + Length: len(diff.Original), + NewString: diff.Corrected, + }, + } + + res = append(res, result.Issue{ + Pos: pos, + Text: text, + FromLinter: misspellName, + Replacement: replacement, + }) + } + + return res, nil +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/musttag.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/musttag.go new file mode 100644 index 0000000000..72d9195828 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/musttag.go @@ -0,0 +1,29 @@ +package golinters + +import ( + "go-simpler.org/musttag" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewMustTag(setting *config.MustTagSettings) *goanalysis.Linter { + var funcs []musttag.Func + + if setting != nil { + for _, fn := range setting.Functions { + funcs = append(funcs, musttag.Func{ + Name: fn.Name, + Tag: fn.Tag, + ArgPos: fn.ArgPos, + }) + } + } + + a := musttag.New(funcs...) + + return goanalysis. + NewLinter(a.Name, a.Doc, []*analysis.Analyzer{a}, nil). 
+ WithLoadMode(goanalysis.LoadModeTypesInfo) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/nakedret.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nakedret.go index 86735a51ad..6153860fb4 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/nakedret.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nakedret.go @@ -1,123 +1,25 @@ package golinters import ( - "fmt" - "go/ast" - "go/token" - "sync" - + "github.com/alexkohler/nakedret/v2" "golang.org/x/tools/go/analysis" + "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" - "github.com/golangci/golangci-lint/pkg/lint/linter" - "github.com/golangci/golangci-lint/pkg/result" ) -type nakedretVisitor struct { - maxLength int - f *token.FileSet - issues []result.Issue -} - -func (v *nakedretVisitor) processFuncDecl(funcDecl *ast.FuncDecl) { - file := v.f.File(funcDecl.Pos()) - functionLineLength := file.Position(funcDecl.End()).Line - file.Position(funcDecl.Pos()).Line - - // Scan the body for usage of the named returns - for _, stmt := range funcDecl.Body.List { - s, ok := stmt.(*ast.ReturnStmt) - if !ok { - continue - } - - if len(s.Results) != 0 { - continue - } - - file := v.f.File(s.Pos()) - if file == nil || functionLineLength <= v.maxLength { - continue - } - if funcDecl.Name == nil { - continue - } - - v.issues = append(v.issues, result.Issue{ - FromLinter: nakedretName, - Text: fmt.Sprintf("naked return in func `%s` with %d lines of code", - funcDecl.Name.Name, functionLineLength), - Pos: v.f.Position(s.Pos()), - }) - } -} - -func (v *nakedretVisitor) Visit(node ast.Node) ast.Visitor { - funcDecl, ok := node.(*ast.FuncDecl) - if !ok { - return v - } - - var namedReturns []*ast.Ident - - // We've found a function - if funcDecl.Type != nil && funcDecl.Type.Results != nil { - for _, field := range funcDecl.Type.Results.List { - for _, ident := range field.Names { - if ident != nil { - namedReturns = append(namedReturns, ident) - } - } - } - } - - if len(namedReturns) == 0 || funcDecl.Body == nil { - return v +func NewNakedret(settings *config.NakedretSettings) *goanalysis.Linter { + var maxLines int + if settings != nil { + maxLines = settings.MaxFuncLines } - v.processFuncDecl(funcDecl) - return v -} - -const nakedretName = "nakedret" + a := nakedret.NakedReturnAnalyzer(uint(maxLines)) -func NewNakedret() *goanalysis.Linter { - var mu sync.Mutex - var resIssues []goanalysis.Issue - - analyzer := &analysis.Analyzer{ - Name: nakedretName, - Doc: goanalysis.TheOnlyanalyzerDoc, - } return goanalysis.NewLinter( - nakedretName, - "Finds naked returns in functions greater than a specified function length", - []*analysis.Analyzer{analyzer}, + a.Name, + a.Doc, + []*analysis.Analyzer{a}, nil, - ).WithContextSetter(func(lintCtx *linter.Context) { - analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { - var res []goanalysis.Issue - for _, file := range pass.Files { - v := nakedretVisitor{ - maxLength: lintCtx.Settings().Nakedret.MaxFuncLines, - f: pass.Fset, - } - ast.Walk(&v, file) - for i := range v.issues { - res = append(res, goanalysis.NewIssue(&v.issues[i], pass)) - } - } - - if len(res) == 0 { - return nil, nil - } - - mu.Lock() - resIssues = append(resIssues, res...) 
- mu.Unlock() - - return nil, nil - } - }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { - return resIssues - }).WithLoadMode(goanalysis.LoadModeSyntax) + ).WithLoadMode(goanalysis.LoadModeSyntax) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/nestif.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nestif.go index 0998a8ce2f..12ad69eceb 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/nestif.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nestif.go @@ -7,6 +7,7 @@ import ( "github.com/nakabonne/nestif" "golang.org/x/tools/go/analysis" + "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" "github.com/golangci/golangci-lint/pkg/lint/linter" "github.com/golangci/golangci-lint/pkg/result" @@ -14,52 +15,65 @@ import ( const nestifName = "nestif" -func NewNestif() *goanalysis.Linter { +//nolint:dupl +func NewNestif(settings *config.NestifSettings) *goanalysis.Linter { var mu sync.Mutex var resIssues []goanalysis.Issue analyzer := &analysis.Analyzer{ Name: goanalysis.TheOnlyAnalyzerName, Doc: goanalysis.TheOnlyanalyzerDoc, - } - return goanalysis.NewLinter( - nestifName, - "Reports deeply nested if statements", - []*analysis.Analyzer{analyzer}, - nil, - ).WithContextSetter(func(lintCtx *linter.Context) { - analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { - checker := &nestif.Checker{ - MinComplexity: lintCtx.Settings().Nestif.MinComplexity, - } - var issues []nestif.Issue - for _, f := range pass.Files { - issues = append(issues, checker.Check(f, pass.Fset)...) - } + Run: func(pass *analysis.Pass) (any, error) { + issues := runNestIf(pass, settings) + if len(issues) == 0 { return nil, nil } - sort.SliceStable(issues, func(i, j int) bool { - return issues[i].Complexity > issues[j].Complexity - }) - - res := make([]goanalysis.Issue, 0, len(issues)) - for _, i := range issues { - res = append(res, goanalysis.NewIssue(&result.Issue{ - Pos: i.Pos, - Text: i.Message, - FromLinter: nestifName, - }, pass)) - } - mu.Lock() - resIssues = append(resIssues, res...) + resIssues = append(resIssues, issues...) mu.Unlock() return nil, nil - } - }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + }, + } + + return goanalysis.NewLinter( + nestifName, + "Reports deeply nested if statements", + []*analysis.Analyzer{analyzer}, + nil, + ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { return resIssues }).WithLoadMode(goanalysis.LoadModeSyntax) } + +func runNestIf(pass *analysis.Pass, settings *config.NestifSettings) []goanalysis.Issue { + checker := &nestif.Checker{ + MinComplexity: settings.MinComplexity, + } + + var lintIssues []nestif.Issue + for _, f := range pass.Files { + lintIssues = append(lintIssues, checker.Check(f, pass.Fset)...) 
+ } + + if len(lintIssues) == 0 { + return nil + } + + sort.SliceStable(lintIssues, func(i, j int) bool { + return lintIssues[i].Complexity > lintIssues[j].Complexity + }) + + issues := make([]goanalysis.Issue, 0, len(lintIssues)) + for _, i := range lintIssues { + issues = append(issues, goanalysis.NewIssue(&result.Issue{ + Pos: i.Pos, + Text: i.Message, + FromLinter: nestifName, + }, pass)) + } + + return issues +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/nilerr.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nilerr.go index d8a9a613ef..2ea16f2f39 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/nilerr.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nilerr.go @@ -9,6 +9,7 @@ import ( func NewNilErr() *goanalysis.Linter { a := nilerr.Analyzer + return goanalysis.NewLinter( a.Name, "Finds the code that returns nil even if it checks that the error is not nil.", diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/nilnil.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nilnil.go new file mode 100644 index 0000000000..804557b76d --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nilnil.go @@ -0,0 +1,30 @@ +package golinters + +import ( + "strings" + + "github.com/Antonboom/nilnil/pkg/analyzer" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewNilNil(cfg *config.NilNilSettings) *goanalysis.Linter { + a := analyzer.New() + + cfgMap := make(map[string]map[string]any) + if cfg != nil && len(cfg.CheckedTypes) != 0 { + cfgMap[a.Name] = map[string]any{ + "checked-types": strings.Join(cfg.CheckedTypes, ","), + } + } + + return goanalysis.NewLinter( + a.Name, + a.Doc, + []*analysis.Analyzer{a}, + cfgMap, + ). 
+ WithLoadMode(goanalysis.LoadModeTypesInfo) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/nlreturn.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nlreturn.go index 3b661c64c8..a359548f42 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/nlreturn.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nlreturn.go @@ -4,16 +4,24 @@ import ( "github.com/ssgreg/nlreturn/v2/pkg/nlreturn" "golang.org/x/tools/go/analysis" + "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" ) -func NewNLReturn() *goanalysis.Linter { +func NewNLReturn(settings *config.NlreturnSettings) *goanalysis.Linter { + a := nlreturn.NewAnalyzer() + + cfg := map[string]map[string]any{} + if settings != nil { + cfg[a.Name] = map[string]any{ + "block-size": settings.BlockSize, + } + } + return goanalysis.NewLinter( - "nlreturn", + a.Name, "nlreturn checks for a new line before return and branch statements to increase code clarity", - []*analysis.Analyzer{ - nlreturn.NewAnalyzer(), - }, - nil, + []*analysis.Analyzer{a}, + cfg, ).WithLoadMode(goanalysis.LoadModeSyntax) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/noctx.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/noctx.go index b5c4a4be24..cff9c97dcf 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/noctx.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/noctx.go @@ -8,14 +8,12 @@ import ( ) func NewNoctx() *goanalysis.Linter { - analyzers := []*analysis.Analyzer{ - noctx.Analyzer, - } + a := noctx.Analyzer return goanalysis.NewLinter( - "noctx", - "noctx finds sending http request without context.Context", - analyzers, + a.Name, + "Finds sending http request without context.Context", + []*analysis.Analyzer{a}, nil, ).WithLoadMode(goanalysis.LoadModeTypesInfo) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/nolintlint.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nolintlint.go index 889cff864c..ae372ab795 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/nolintlint.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nolintlint.go @@ -7,87 +7,99 @@ import ( "golang.org/x/tools/go/analysis" + "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" "github.com/golangci/golangci-lint/pkg/golinters/nolintlint" "github.com/golangci/golangci-lint/pkg/lint/linter" "github.com/golangci/golangci-lint/pkg/result" ) -const NolintlintName = "nolintlint" +const NoLintLintName = "nolintlint" -func NewNoLintLint() *goanalysis.Linter { +//nolint:dupl +func NewNoLintLint(settings *config.NoLintLintSettings) *goanalysis.Linter { var mu sync.Mutex var resIssues []goanalysis.Issue analyzer := &analysis.Analyzer{ - Name: NolintlintName, + Name: NoLintLintName, Doc: goanalysis.TheOnlyanalyzerDoc, - } - return goanalysis.NewLinter( - NolintlintName, - "Reports ill-formed or insufficient nolint directives", - []*analysis.Analyzer{analyzer}, - nil, - ).WithContextSetter(func(lintCtx *linter.Context) { - analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { - var needs nolintlint.Needs - settings := lintCtx.Settings().NoLintLint - if settings.RequireExplanation { - needs |= nolintlint.NeedsExplanation - } - if !settings.AllowLeadingSpace { - needs |= nolintlint.NeedsMachineOnly - } - if settings.RequireSpecific { - needs |= nolintlint.NeedsSpecific - } - if !settings.AllowUnused { - needs 
|= nolintlint.NeedsUnused - } - - lnt, err := nolintlint.NewLinter(needs, settings.AllowNoExplanation) + Run: func(pass *analysis.Pass) (any, error) { + issues, err := runNoLintLint(pass, settings) if err != nil { return nil, err } - nodes := make([]ast.Node, 0, len(pass.Files)) - for _, n := range pass.Files { - nodes = append(nodes, n) - } - issues, err := lnt.Run(pass.Fset, nodes...) - if err != nil { - return nil, fmt.Errorf("linter failed to run: %s", err) - } - var res []goanalysis.Issue - for _, i := range issues { - expectNoLint := false - var expectedNolintLinter string - if ii, ok := i.(nolintlint.UnusedCandidate); ok { - expectedNolintLinter = ii.ExpectedLinter - expectNoLint = true - } - issue := &result.Issue{ - FromLinter: NolintlintName, - Text: i.Details(), - Pos: i.Position(), - ExpectNoLint: expectNoLint, - ExpectedNoLintLinter: expectedNolintLinter, - Replacement: i.Replacement(), - } - res = append(res, goanalysis.NewIssue(issue, pass)) - } - - if len(res) == 0 { + if len(issues) == 0 { return nil, nil } mu.Lock() - resIssues = append(resIssues, res...) + resIssues = append(resIssues, issues...) mu.Unlock() return nil, nil - } - }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + }, + } + + return goanalysis.NewLinter( + NoLintLintName, + "Reports ill-formed or insufficient nolint directives", + []*analysis.Analyzer{analyzer}, + nil, + ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { return resIssues }).WithLoadMode(goanalysis.LoadModeSyntax) } + +func runNoLintLint(pass *analysis.Pass, settings *config.NoLintLintSettings) ([]goanalysis.Issue, error) { + var needs nolintlint.Needs + if settings.RequireExplanation { + needs |= nolintlint.NeedsExplanation + } + if settings.RequireSpecific { + needs |= nolintlint.NeedsSpecific + } + if !settings.AllowUnused { + needs |= nolintlint.NeedsUnused + } + + lnt, err := nolintlint.NewLinter(needs, settings.AllowNoExplanation) + if err != nil { + return nil, err + } + + nodes := make([]ast.Node, 0, len(pass.Files)) + for _, n := range pass.Files { + nodes = append(nodes, n) + } + + lintIssues, err := lnt.Run(pass.Fset, nodes...) 
+ if err != nil { + return nil, fmt.Errorf("linter failed to run: %w", err) + } + + var issues []goanalysis.Issue + + for _, i := range lintIssues { + expectNoLint := false + var expectedNolintLinter string + if ii, ok := i.(nolintlint.UnusedCandidate); ok { + expectedNolintLinter = ii.ExpectedLinter + expectNoLint = true + } + + issue := &result.Issue{ + FromLinter: NoLintLintName, + Text: i.Details(), + Pos: i.Position(), + ExpectNoLint: expectNoLint, + ExpectedNoLintLinter: expectedNolintLinter, + Replacement: i.Replacement(), + } + + issues = append(issues, goanalysis.NewIssue(issue, pass)) + } + + return issues, nil +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/nolintlint/README.md b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nolintlint/README.md index 3d440d5a5b..1643df7a57 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/nolintlint/README.md +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nolintlint/README.md @@ -1,17 +1,17 @@ # nolintlint -nolintlint is a Go static analysis tool to find ill-formed or insufficiently explained `// nolint` directives for golangci -(or any other linter, using th ) +nolintlint is a Go static analysis tool to find ill-formed or insufficiently explained `//nolint` directives for golangci-lint +(or any other linter, using this package) ## Purpose To ensure that lint exceptions have explanations. Consider the case below: ```Go -import "crypto/md5" //nolint +import "crypto/md5" //nolint:all func hash(data []byte) []byte { - return md5.New().Sum(data) //nolint + return md5.New().Sum(data) //nolint:all } ``` @@ -27,5 +27,5 @@ func hash(data []byte) []byte { ``` `nolintlint` can also identify cases where you may have written `// nolint`. Finally `nolintlint`, can also enforce that you -use the machine-readable nolint directive format `//nolint` and that you mention what linter is being suppressed, as shown above when we write `//nolint:gosec`. +use the machine-readable nolint directive format `//nolint:all` and that you mention what linter is being suppressed, as shown above when we write `//nolint:gosec`. diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/nolintlint/nolintlint.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nolintlint/nolintlint.go index 4466cab41f..639a5b822c 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/nolintlint/nolintlint.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nolintlint/nolintlint.go @@ -19,12 +19,10 @@ type BaseIssue struct { replacement *result.Replacement } -//nolint:gocritic // TODO must be change in the future. func (b BaseIssue) Position() token.Position { return b.position } -//nolint:gocritic // TODO must be change in the future. func (b BaseIssue) Replacement() *result.Replacement { return b.replacement } @@ -33,53 +31,45 @@ type ExtraLeadingSpace struct { BaseIssue } -//nolint:gocritic // TODO must be change in the future. func (i ExtraLeadingSpace) Details() string { return fmt.Sprintf("directive `%s` should not have more than one leading space", i.fullDirective) } -//nolint:gocritic // TODO must be change in the future. func (i ExtraLeadingSpace) String() string { return toString(i) } type NotMachine struct { BaseIssue } -//nolint:gocritic // TODO must be change in the future. 
func (i NotMachine) Details() string { expected := i.fullDirective[:2] + strings.TrimLeftFunc(i.fullDirective[2:], unicode.IsSpace) return fmt.Sprintf("directive `%s` should be written without leading space as `%s`", i.fullDirective, expected) } -//nolint:gocritic // TODO must be change in the future. func (i NotMachine) String() string { return toString(i) } type NotSpecific struct { BaseIssue } -//nolint:gocritic // TODO must be change in the future. func (i NotSpecific) Details() string { return fmt.Sprintf("directive `%s` should mention specific linter such as `%s:my-linter`", i.fullDirective, i.directiveWithOptionalLeadingSpace) } -//nolint:gocritic // TODO must be change in the future. func (i NotSpecific) String() string { return toString(i) } type ParseError struct { BaseIssue } -//nolint:gocritic // TODO must be change in the future. func (i ParseError) Details() string { return fmt.Sprintf("directive `%s` should match `%s[:] [// ]`", i.fullDirective, i.directiveWithOptionalLeadingSpace) } -//nolint:gocritic // TODO must be change in the future. func (i ParseError) String() string { return toString(i) } type NoExplanation struct { @@ -87,13 +77,11 @@ type NoExplanation struct { fullDirectiveWithoutExplanation string } -//nolint:gocritic // TODO must be change in the future. func (i NoExplanation) Details() string { return fmt.Sprintf("directive `%s` should provide explanation such as `%s // this is why`", i.fullDirective, i.fullDirectiveWithoutExplanation) } -//nolint:gocritic // TODO must be change in the future. func (i NoExplanation) String() string { return toString(i) } type UnusedCandidate struct { @@ -101,7 +89,6 @@ type UnusedCandidate struct { ExpectedLinter string } -//nolint:gocritic // TODO must be change in the future. func (i UnusedCandidate) Details() string { details := fmt.Sprintf("directive `%s` is unused", i.fullDirective) if i.ExpectedLinter != "" { @@ -110,7 +97,6 @@ func (i UnusedCandidate) Details() string { return details } -//nolint:gocritic // TODO must be change in the future. 
func (i UnusedCandidate) String() string { return toString(i) } func toString(i Issue) string { @@ -152,7 +138,7 @@ func NewLinter(needs Needs, excludes []string) (*Linter, error) { } return &Linter{ - needs: needs, + needs: needs | NeedsMachineOnly, excludeByLinter: excludeByName, }, nil } @@ -184,12 +170,14 @@ func (l Linter) Run(fset *token.FileSet, nodes ...ast.Node) ([]Issue, error) { leadingSpace = leadingSpaceMatches[1] } - directiveWithOptionalLeadingSpace := comment.Text - if len(leadingSpace) > 0 { - split := strings.Split(strings.SplitN(comment.Text, ":", 2)[0], "//") - directiveWithOptionalLeadingSpace = "// " + strings.TrimSpace(split[1]) + directiveWithOptionalLeadingSpace := "//" + if leadingSpace != "" { + directiveWithOptionalLeadingSpace += " " } + split := strings.Split(strings.SplitN(comment.Text, ":", 2)[0], "//") + directiveWithOptionalLeadingSpace += strings.TrimSpace(split[1]) + pos := fset.Position(comment.Pos()) end := fset.Position(comment.End()) @@ -199,8 +187,8 @@ func (l Linter) Run(fset *token.FileSet, nodes ...ast.Node) ([]Issue, error) { position: pos, } - // check for, report and eliminate leading spaces so we can check for other issues - if len(leadingSpace) > 0 { + // check for, report and eliminate leading spaces, so we can check for other issues + if leadingSpace != "" { removeWhitespace := &result.Replacement{ Inline: &result.InlineFix{ StartCol: pos.Column + 1, @@ -227,8 +215,9 @@ func (l Linter) Run(fset *token.FileSet, nodes ...ast.Node) ([]Issue, error) { } lintersText, explanation := fullMatches[1], fullMatches[2] + var linters []string - if len(lintersText) > 0 { + if lintersText != "" && !strings.HasPrefix(lintersText, "all") { lls := strings.Split(lintersText, ",") linters = make([]string, 0, len(lls)) rangeStart := (pos.Column - 1) + len("//") + len(leadingSpace) + len("nolint:") @@ -281,7 +270,7 @@ func (l Linter) Run(fset *token.FileSet, nodes ...ast.Node) ([]Issue, error) { if (l.needs&NeedsExplanation) != 0 && (explanation == "" || strings.TrimSpace(explanation) == "//") { needsExplanation := len(linters) == 0 // if no linters are mentioned, we must have explanation - // otherwise, check if we are excluding all of the mentioned linters + // otherwise, check if we are excluding all the mentioned linters for _, ll := range linters { if !l.excludeByLinter[ll] { // if a linter does require explanation needsExplanation = true diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/nonamedreturns.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nonamedreturns.go new file mode 100644 index 0000000000..7856f6d613 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nonamedreturns.go @@ -0,0 +1,29 @@ +package golinters + +import ( + "github.com/firefart/nonamedreturns/analyzer" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewNoNamedReturns(settings *config.NoNamedReturnsSettings) *goanalysis.Linter { + a := analyzer.Analyzer + + var cfg map[string]map[string]any + if settings != nil { + cfg = map[string]map[string]any{ + a.Name: { + analyzer.FlagReportErrorInDefer: settings.ReportErrorInDefer, + }, + } + } + + return goanalysis.NewLinter( + a.Name, + a.Doc, + []*analysis.Analyzer{a}, + cfg, + ).WithLoadMode(goanalysis.LoadModeTypesInfo) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/nosnakecase.go 
b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nosnakecase.go new file mode 100644 index 0000000000..26d5d6d4c8 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nosnakecase.go @@ -0,0 +1,19 @@ +package golinters + +import ( + "github.com/sivchari/nosnakecase" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewNoSnakeCase() *goanalysis.Linter { + a := nosnakecase.Analyzer + + return goanalysis.NewLinter( + a.Name, + a.Doc, + []*analysis.Analyzer{a}, + nil, + ).WithLoadMode(goanalysis.LoadModeSyntax) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/nosprintfhostport.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nosprintfhostport.go new file mode 100644 index 0000000000..a63b9bb5f5 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nosprintfhostport.go @@ -0,0 +1,19 @@ +package golinters + +import ( + "github.com/stbenjam/no-sprintf-host-port/pkg/analyzer" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewNoSprintfHostPort() *goanalysis.Linter { + a := analyzer.Analyzer + + return goanalysis.NewLinter( + a.Name, + a.Doc, + []*analysis.Analyzer{a}, + nil, + ).WithLoadMode(goanalysis.LoadModeSyntax) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/paralleltest.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/paralleltest.go index 3b784baf5a..c49f74aa2f 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/paralleltest.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/paralleltest.go @@ -4,18 +4,27 @@ import ( "github.com/kunwardeep/paralleltest/pkg/paralleltest" "golang.org/x/tools/go/analysis" + "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" ) -func NewParallelTest() *goanalysis.Linter { - analyzers := []*analysis.Analyzer{ - paralleltest.NewAnalyzer(), +func NewParallelTest(settings *config.ParallelTestSettings) *goanalysis.Linter { + a := paralleltest.NewAnalyzer() + + var cfg map[string]map[string]any + if settings != nil { + cfg = map[string]map[string]any{ + a.Name: { + "i": settings.IgnoreMissing, + "ignoremissingsubtests": settings.IgnoreMissingSubtests, + }, + } } return goanalysis.NewLinter( - "paralleltest", - "paralleltest detects missing usage of t.Parallel() method in your Go test", - analyzers, - nil, - ).WithLoadMode(goanalysis.LoadModeSyntax) + a.Name, + "Detects missing usage of t.Parallel() method in your Go test", + []*analysis.Analyzer{a}, + cfg, + ).WithLoadMode(goanalysis.LoadModeTypesInfo) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/perfsprint.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/perfsprint.go new file mode 100644 index 0000000000..acaa3a5225 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/perfsprint.go @@ -0,0 +1,31 @@ +package golinters + +import ( + "github.com/catenacyber/perfsprint/analyzer" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewPerfSprint(settings *config.PerfSprintSettings) *goanalysis.Linter { + a := analyzer.New() + + cfg := map[string]map[string]any{ + a.Name: {"fiximports": false}, + } + + if settings != nil { + cfg[a.Name]["int-conversion"] = settings.IntConversion + cfg[a.Name]["err-error"] = settings.ErrError + 
cfg[a.Name]["errorf"] = settings.ErrorF + cfg[a.Name]["sprintf1"] = settings.SprintF1 + } + + return goanalysis.NewLinter( + a.Name, + a.Doc, + []*analysis.Analyzer{a}, + cfg, + ).WithLoadMode(goanalysis.LoadModeTypesInfo) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/prealloc.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/prealloc.go index 3d06cf1472..f48d57562e 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/prealloc.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/prealloc.go @@ -7,6 +7,7 @@ import ( "github.com/alexkohler/prealloc/pkg" "golang.org/x/tools/go/analysis" + "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" "github.com/golangci/golangci-lint/pkg/lint/linter" "github.com/golangci/golangci-lint/pkg/result" @@ -14,44 +15,51 @@ import ( const preallocName = "prealloc" -func NewPrealloc() *goanalysis.Linter { +//nolint:dupl +func NewPreAlloc(settings *config.PreallocSettings) *goanalysis.Linter { var mu sync.Mutex var resIssues []goanalysis.Issue analyzer := &analysis.Analyzer{ Name: preallocName, Doc: goanalysis.TheOnlyanalyzerDoc, - } - return goanalysis.NewLinter( - preallocName, - "Finds slice declarations that could potentially be preallocated", - []*analysis.Analyzer{analyzer}, - nil, - ).WithContextSetter(func(lintCtx *linter.Context) { - s := &lintCtx.Settings().Prealloc - - analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { - var res []goanalysis.Issue - hints := pkg.Check(pass.Files, s.Simple, s.RangeLoops, s.ForLoops) - for _, hint := range hints { - res = append(res, goanalysis.NewIssue(&result.Issue{ - Pos: pass.Fset.Position(hint.Pos), - Text: fmt.Sprintf("Consider preallocating %s", formatCode(hint.DeclaredSliceName, lintCtx.Cfg)), - FromLinter: preallocName, - }, pass)) - } + Run: func(pass *analysis.Pass) (any, error) { + issues := runPreAlloc(pass, settings) - if len(res) == 0 { + if len(issues) == 0 { return nil, nil } mu.Lock() - resIssues = append(resIssues, res...) + resIssues = append(resIssues, issues...) 
mu.Unlock() return nil, nil - } - }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + }, + } + + return goanalysis.NewLinter( + preallocName, + "Finds slice declarations that could potentially be pre-allocated", + []*analysis.Analyzer{analyzer}, + nil, + ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { return resIssues }).WithLoadMode(goanalysis.LoadModeSyntax) } + +func runPreAlloc(pass *analysis.Pass, settings *config.PreallocSettings) []goanalysis.Issue { + var issues []goanalysis.Issue + + hints := pkg.Check(pass.Files, settings.Simple, settings.RangeLoops, settings.ForLoops) + + for _, hint := range hints { + issues = append(issues, goanalysis.NewIssue(&result.Issue{ + Pos: pass.Fset.Position(hint.Pos), + Text: fmt.Sprintf("Consider pre-allocating %s", formatCode(hint.DeclaredSliceName, nil)), + FromLinter: preallocName, + }, pass)) + } + + return issues +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/predeclared.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/predeclared.go index caccd48239..d3c25e274b 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/predeclared.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/predeclared.go @@ -11,9 +11,9 @@ import ( func NewPredeclared(settings *config.PredeclaredSettings) *goanalysis.Linter { a := predeclared.Analyzer - var cfg map[string]map[string]interface{} + var cfg map[string]map[string]any if settings != nil { - cfg = map[string]map[string]interface{}{ + cfg = map[string]map[string]any{ a.Name: { predeclared.IgnoreFlag: settings.Ignore, predeclared.QualifiedFlag: settings.Qualified, diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/promlinter.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/promlinter.go index 4fba3d2747..381c57489d 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/promlinter.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/promlinter.go @@ -7,57 +7,71 @@ import ( "github.com/yeya24/promlinter" "golang.org/x/tools/go/analysis" + "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" "github.com/golangci/golangci-lint/pkg/lint/linter" "github.com/golangci/golangci-lint/pkg/result" ) -func NewPromlinter() *goanalysis.Linter { +const promlinterName = "promlinter" + +func NewPromlinter(settings *config.PromlinterSettings) *goanalysis.Linter { var mu sync.Mutex var resIssues []goanalysis.Issue - const linterName = "promlinter" - analyzer := &analysis.Analyzer{ - Name: linterName, - Doc: goanalysis.TheOnlyanalyzerDoc, + var promSettings promlinter.Setting + if settings != nil { + promSettings = promlinter.Setting{ + Strict: settings.Strict, + DisabledLintFuncs: settings.DisabledLinters, + } } - return goanalysis.NewLinter( - linterName, - "Check Prometheus metrics naming via promlint", - []*analysis.Analyzer{analyzer}, - nil, - ).WithContextSetter(func(lintCtx *linter.Context) { - strict := lintCtx.Cfg.LintersSettings.Promlinter.Strict - disabledLinters := lintCtx.Cfg.LintersSettings.Promlinter.DisabledLinters - analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { - issues := promlinter.RunLint(pass.Fset, pass.Files, promlinter.Setting{ - Strict: strict, - DisabledLintFuncs: disabledLinters, - }) + analyzer := &analysis.Analyzer{ + Name: promlinterName, + Doc: goanalysis.TheOnlyanalyzerDoc, + Run: func(pass *analysis.Pass) (any, error) { + issues := runPromLinter(pass, promSettings) if len(issues) == 0 { 
return nil, nil } - res := make([]goanalysis.Issue, len(issues)) - for k, i := range issues { - issue := result.Issue{ - Pos: i.Pos, - Text: fmt.Sprintf("Metric: %s Error: %s", i.Metric, i.Text), - FromLinter: linterName, - } - - res[k] = goanalysis.NewIssue(&issue, pass) - } - mu.Lock() - resIssues = append(resIssues, res...) + resIssues = append(resIssues, issues...) mu.Unlock() return nil, nil - } - }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + }, + } + + return goanalysis.NewLinter( + promlinterName, + "Check Prometheus metrics naming via promlint", + []*analysis.Analyzer{analyzer}, + nil, + ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { return resIssues }).WithLoadMode(goanalysis.LoadModeSyntax) } + +func runPromLinter(pass *analysis.Pass, promSettings promlinter.Setting) []goanalysis.Issue { + lintIssues := promlinter.RunLint(pass.Fset, pass.Files, promSettings) + + if len(lintIssues) == 0 { + return nil + } + + issues := make([]goanalysis.Issue, len(lintIssues)) + for k, i := range lintIssues { + issue := result.Issue{ + Pos: i.Pos, + Text: fmt.Sprintf("Metric: %s Error: %s", i.Metric, i.Text), + FromLinter: promlinterName, + } + + issues[k] = goanalysis.NewIssue(&issue, pass) + } + + return issues +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/protogetter.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/protogetter.go new file mode 100644 index 0000000000..9a5e7b4db8 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/protogetter.go @@ -0,0 +1,74 @@ +package golinters + +import ( + "sync" + + "github.com/ghostiam/protogetter" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" + "github.com/golangci/golangci-lint/pkg/lint/linter" + "github.com/golangci/golangci-lint/pkg/result" +) + +func NewProtoGetter(settings *config.ProtoGetterSettings) *goanalysis.Linter { + var mu sync.Mutex + var resIssues []goanalysis.Issue + + var cfg protogetter.Config + if settings != nil { + cfg = protogetter.Config{ + SkipGeneratedBy: settings.SkipGeneratedBy, + SkipFiles: settings.SkipFiles, + SkipAnyGenerated: settings.SkipAnyGenerated, + ReplaceFirstArgInAppend: settings.ReplaceFirstArgInAppend, + } + } + cfg.Mode = protogetter.GolangciLintMode + + a := protogetter.NewAnalyzer(&cfg) + a.Run = func(pass *analysis.Pass) (any, error) { + pgIssues, err := protogetter.Run(pass, &cfg) + if err != nil { + return nil, err + } + + issues := make([]goanalysis.Issue, len(pgIssues)) + for i, issue := range pgIssues { + report := &result.Issue{ + FromLinter: a.Name, + Pos: issue.Pos, + Text: issue.Message, + Replacement: &result.Replacement{ + Inline: &result.InlineFix{ + StartCol: issue.InlineFix.StartCol, + Length: issue.InlineFix.Length, + NewString: issue.InlineFix.NewString, + }, + }, + } + + issues[i] = goanalysis.NewIssue(report, pass) + } + + if len(issues) == 0 { + return nil, nil + } + + mu.Lock() + resIssues = append(resIssues, issues...) 
+ mu.Unlock() + + return nil, nil + } + + return goanalysis.NewLinter( + a.Name, + a.Doc, + []*analysis.Analyzer{a}, + nil, + ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + return resIssues + }).WithLoadMode(goanalysis.LoadModeTypesInfo) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/reassign.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/reassign.go new file mode 100644 index 0000000000..a6dd670530 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/reassign.go @@ -0,0 +1,32 @@ +package golinters + +import ( + "fmt" + "strings" + + "github.com/curioswitch/go-reassign" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewReassign(settings *config.ReassignSettings) *goanalysis.Linter { + a := reassign.NewAnalyzer() + + var cfg map[string]map[string]any + if settings != nil && len(settings.Patterns) > 0 { + cfg = map[string]map[string]any{ + a.Name: { + reassign.FlagPattern: fmt.Sprintf("^(%s)$", strings.Join(settings.Patterns, "|")), + }, + } + } + + return goanalysis.NewLinter( + a.Name, + a.Doc, + []*analysis.Analyzer{a}, + cfg, + ).WithLoadMode(goanalysis.LoadModeTypesInfo) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/revive.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/revive.go index 182013c826..46d8b1c9ca 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/revive.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/revive.go @@ -5,15 +5,14 @@ import ( "encoding/json" "fmt" "go/token" - "io/ioutil" + "os" "reflect" + "sync" "github.com/BurntSushi/toml" - "github.com/mgechev/dots" reviveConfig "github.com/mgechev/revive/config" "github.com/mgechev/revive/lint" "github.com/mgechev/revive/rule" - "github.com/pkg/errors" "golang.org/x/tools/go/analysis" "github.com/golangci/golangci-lint/pkg/config" @@ -25,21 +24,25 @@ import ( const reviveName = "revive" -var reviveDebugf = logutils.Debug("revive") +var reviveDebugf = logutils.Debug(logutils.DebugKeyRevive) -// jsonObject defines a JSON object of an failure +// jsonObject defines a JSON object of a failure type jsonObject struct { Severity lint.Severity lint.Failure `json:",inline"` } // NewRevive returns a new Revive linter. -func NewRevive(cfg *config.ReviveSettings) *goanalysis.Linter { - var issues []goanalysis.Issue +// + +func NewRevive(settings *config.ReviveSettings) *goanalysis.Linter { + var mu sync.Mutex + var resIssues []goanalysis.Issue analyzer := &analysis.Analyzer{ Name: goanalysis.TheOnlyAnalyzerName, Doc: goanalysis.TheOnlyanalyzerDoc, + Run: goanalysis.DummyRun, } return goanalysis.NewLinter( @@ -48,78 +51,87 @@ func NewRevive(cfg *config.ReviveSettings) *goanalysis.Linter { []*analysis.Analyzer{analyzer}, nil, ).WithContextSetter(func(lintCtx *linter.Context) { - analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { - var files []string - - for _, file := range pass.Files { - files = append(files, pass.Fset.PositionFor(file.Pos(), false).Filename) - } - - conf, err := getReviveConfig(cfg) + analyzer.Run = func(pass *analysis.Pass) (any, error) { + issues, err := runRevive(lintCtx, pass, settings) if err != nil { return nil, err } - formatter, err := reviveConfig.GetFormatter("json") - if err != nil { - return nil, err + if len(issues) == 0 { + return nil, nil } - revive := lint.New(ioutil.ReadFile) + mu.Lock() + resIssues = append(resIssues, issues...) 
+ mu.Unlock() - lintingRules, err := reviveConfig.GetLintingRules(conf) - if err != nil { - return nil, err - } - - packages, err := dots.ResolvePackages(files, []string{}) - if err != nil { - return nil, err - } + return nil, nil + } + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + return resIssues + }).WithLoadMode(goanalysis.LoadModeSyntax) +} - failures, err := revive.Lint(packages, lintingRules, *conf) - if err != nil { - return nil, err - } +func runRevive(lintCtx *linter.Context, pass *analysis.Pass, settings *config.ReviveSettings) ([]goanalysis.Issue, error) { + packages := [][]string{getFileNames(pass)} - formatChan := make(chan lint.Failure) - exitChan := make(chan bool) + conf, err := getReviveConfig(settings) + if err != nil { + return nil, err + } - var output string - go func() { - output, err = formatter.Format(formatChan, *conf) - if err != nil { - lintCtx.Log.Errorf("Format error: %v", err) - } - exitChan <- true - }() + formatter, err := reviveConfig.GetFormatter("json") + if err != nil { + return nil, err + } - for f := range failures { - if f.Confidence < conf.Confidence { - continue - } + revive := lint.New(os.ReadFile, settings.MaxOpenFiles) - formatChan <- f - } + lintingRules, err := reviveConfig.GetLintingRules(conf, []lint.Rule{}) + if err != nil { + return nil, err + } - close(formatChan) - <-exitChan + failures, err := revive.Lint(packages, lintingRules, *conf) + if err != nil { + return nil, err + } - var results []jsonObject - err = json.Unmarshal([]byte(output), &results) - if err != nil { - return nil, err - } + formatChan := make(chan lint.Failure) + exitChan := make(chan bool) - for i := range results { - issues = append(issues, reviveToIssue(pass, &results[i])) - } + var output string + go func() { + output, err = formatter.Format(formatChan, *conf) + if err != nil { + lintCtx.Log.Errorf("Format error: %v", err) + } + exitChan <- true + }() - return nil, nil + for f := range failures { + if f.Confidence < conf.Confidence { + continue } - }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { - return issues - }).WithLoadMode(goanalysis.LoadModeSyntax) + + formatChan <- f + } + + close(formatChan) + <-exitChan + + var results []jsonObject + err = json.Unmarshal([]byte(output), &results) + if err != nil { + return nil, err + } + + var issues []goanalysis.Issue + for i := range results { + issues = append(issues, reviveToIssue(pass, &results[i])) + } + + return issues, nil } func reviveToIssue(pass *analysis.Pass, object *jsonObject) goanalysis.Issue { @@ -146,9 +158,9 @@ func reviveToIssue(pass *analysis.Pass, object *jsonObject) goanalysis.Issue { } // This function mimics the GetConfig function of revive. -// This allow to get default values and right types. +// This allows to get default values and right types. 
// https://github.com/golangci/golangci-lint/issues/1745 -// https://github.com/mgechev/revive/blob/389ba853b0b3587f0c3b71b5f0c61ea4e23928ec/config/config.go#L155 +// https://github.com/mgechev/revive/blob/v1.1.4/config/config.go#L182 func getReviveConfig(cfg *config.ReviveSettings) (*lint.Config, error) { conf := defaultConfig() @@ -158,13 +170,13 @@ func getReviveConfig(cfg *config.ReviveSettings) (*lint.Config, error) { err := toml.NewEncoder(buf).Encode(rawRoot) if err != nil { - return nil, errors.Wrap(err, "failed to encode configuration") + return nil, fmt.Errorf("failed to encode configuration: %w", err) } conf = &lint.Config{} - _, err = toml.DecodeReader(buf, conf) + _, err = toml.NewDecoder(buf).Decode(conf) if err != nil { - return nil, errors.Wrap(err, "failed to decode configuration") + return nil, fmt.Errorf("failed to decode configuration: %w", err) } } @@ -175,18 +187,19 @@ func getReviveConfig(cfg *config.ReviveSettings) (*lint.Config, error) { return conf, nil } -func createConfigMap(cfg *config.ReviveSettings) map[string]interface{} { - rawRoot := map[string]interface{}{ +func createConfigMap(cfg *config.ReviveSettings) map[string]any { + rawRoot := map[string]any{ "ignoreGeneratedHeader": cfg.IgnoreGeneratedHeader, "confidence": cfg.Confidence, "severity": cfg.Severity, "errorCode": cfg.ErrorCode, "warningCode": cfg.WarningCode, + "enableAllRules": cfg.EnableAllRules, } - rawDirectives := map[string]map[string]interface{}{} + rawDirectives := map[string]map[string]any{} for _, directive := range cfg.Directives { - rawDirectives[directive.Name] = map[string]interface{}{ + rawDirectives[directive.Name] = map[string]any{ "severity": directive.Severity, } } @@ -195,9 +208,9 @@ func createConfigMap(cfg *config.ReviveSettings) map[string]interface{} { rawRoot["directive"] = rawDirectives } - rawRules := map[string]map[string]interface{}{} + rawRules := map[string]map[string]any{} for _, s := range cfg.Rules { - rawRules[s.Name] = map[string]interface{}{ + rawRules[s.Name] = map[string]any{ "severity": s.Severity, "arguments": safeTomlSlice(s.Arguments), "disabled": s.Disabled, @@ -211,19 +224,19 @@ func createConfigMap(cfg *config.ReviveSettings) map[string]interface{} { return rawRoot } -func safeTomlSlice(r []interface{}) []interface{} { +func safeTomlSlice(r []any) []any { if len(r) == 0 { return nil } - if _, ok := r[0].(map[interface{}]interface{}); !ok { + if _, ok := r[0].(map[any]any); !ok { return r } - var typed []interface{} + var typed []any for _, elt := range r { - item := map[string]interface{}{} - for k, v := range elt.(map[interface{}]interface{}) { + item := map[string]any{} + for k, v := range elt.(map[any]any) { item[k.(string)] = v } @@ -234,7 +247,7 @@ func safeTomlSlice(r []interface{}) []interface{} { } // This element is not exported by revive, so we need copy the code. 
-// Extracted from https://github.com/mgechev/revive/blob/389ba853b0b3587f0c3b71b5f0c61ea4e23928ec/config/config.go#L15 +// Extracted from https://github.com/mgechev/revive/blob/v1.3.7/config/config.go#L15 var defaultRules = []lint.Rule{ &rule.VarDeclarationsRule{}, &rule.PackageCommentsRule{}, @@ -243,7 +256,6 @@ var defaultRules = []lint.Rule{ &rule.ExportedRule{}, &rule.VarNamingRule{}, &rule.IndentErrorFlowRule{}, - &rule.IfReturnRule{}, &rule.RangeRule{}, &rule.ErrorfRule{}, &rule.ErrorNamingRule{}, @@ -255,14 +267,102 @@ var defaultRules = []lint.Rule{ &rule.TimeNamingRule{}, &rule.ContextKeysType{}, &rule.ContextAsArgumentRule{}, + &rule.EmptyBlockRule{}, + &rule.SuperfluousElseRule{}, + &rule.UnusedParamRule{}, + &rule.UnreachableCodeRule{}, + &rule.RedefinesBuiltinIDRule{}, } +var allRules = append([]lint.Rule{ + &rule.ArgumentsLimitRule{}, + &rule.CyclomaticRule{}, + &rule.FileHeaderRule{}, + &rule.ConfusingNamingRule{}, + &rule.GetReturnRule{}, + &rule.ModifiesParamRule{}, + &rule.ConfusingResultsRule{}, + &rule.DeepExitRule{}, + &rule.AddConstantRule{}, + &rule.FlagParamRule{}, + &rule.UnnecessaryStmtRule{}, + &rule.StructTagRule{}, + &rule.ModifiesValRecRule{}, + &rule.ConstantLogicalExprRule{}, + &rule.BoolLiteralRule{}, + &rule.ImportsBlocklistRule{}, + &rule.FunctionResultsLimitRule{}, + &rule.MaxPublicStructsRule{}, + &rule.RangeValInClosureRule{}, + &rule.RangeValAddress{}, + &rule.WaitGroupByValueRule{}, + &rule.AtomicRule{}, + &rule.EmptyLinesRule{}, + &rule.LineLengthLimitRule{}, + &rule.CallToGCRule{}, + &rule.DuplicatedImportsRule{}, + &rule.ImportShadowingRule{}, + &rule.BareReturnRule{}, + &rule.UnusedReceiverRule{}, + &rule.UnhandledErrorRule{}, + &rule.CognitiveComplexityRule{}, + &rule.StringOfIntRule{}, + &rule.StringFormatRule{}, + &rule.EarlyReturnRule{}, + &rule.UnconditionalRecursionRule{}, + &rule.IdenticalBranchesRule{}, + &rule.DeferRule{}, + &rule.UnexportedNamingRule{}, + &rule.FunctionLength{}, + &rule.NestedStructs{}, + &rule.UselessBreak{}, + &rule.UncheckedTypeAssertionRule{}, + &rule.TimeEqualRule{}, + &rule.BannedCharsRule{}, + &rule.OptimizeOperandsOrderRule{}, + &rule.UseAnyRule{}, + &rule.DataRaceRule{}, + &rule.CommentSpacingsRule{}, + &rule.IfReturnRule{}, + &rule.RedundantImportAlias{}, + &rule.ImportAliasNamingRule{}, + &rule.EnforceMapStyleRule{}, + &rule.EnforceRepeatedArgTypeStyleRule{}, + &rule.EnforceSliceStyleRule{}, + &rule.MaxControlNestingRule{}, +}, defaultRules...) + +const defaultConfidence = 0.8 + // This element is not exported by revive, so we need copy the code. -// Extracted from https://github.com/mgechev/revive/blob/389ba853b0b3587f0c3b71b5f0c61ea4e23928ec/config/config.go#L133 +// Extracted from https://github.com/mgechev/revive/blob/v1.1.4/config/config.go#L145 func normalizeConfig(cfg *lint.Config) { + // NOTE(ldez): this custom section for golangci-lint should be kept. 
+ // --- if cfg.Confidence == 0 { - cfg.Confidence = 0.8 + cfg.Confidence = defaultConfidence } + if cfg.Severity == "" { + cfg.Severity = lint.SeverityWarning + } + // --- + + if len(cfg.Rules) == 0 { + cfg.Rules = map[string]lint.RuleConfig{} + } + if cfg.EnableAllRules { + // Add to the configuration all rules not yet present in it + for _, r := range allRules { + ruleName := r.Name() + _, alreadyInConf := cfg.Rules[ruleName] + if alreadyInConf { + continue + } + // Add the rule with an empty conf for + cfg.Rules[ruleName] = lint.RuleConfig{} + } + } + severity := cfg.Severity if severity != "" { for k, v := range cfg.Rules { @@ -281,10 +381,10 @@ func normalizeConfig(cfg *lint.Config) { } // This element is not exported by revive, so we need copy the code. -// Extracted from https://github.com/mgechev/revive/blob/389ba853b0b3587f0c3b71b5f0c61ea4e23928ec/config/config.go#L182 +// Extracted from https://github.com/mgechev/revive/blob/v1.1.4/config/config.go#L214 func defaultConfig() *lint.Config { defaultConfig := lint.Config{ - Confidence: 0.0, + Confidence: defaultConfidence, Severity: lint.SeverityWarning, Rules: map[string]lint.RuleConfig{}, } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/rowerrcheck.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/rowerrcheck.go deleted file mode 100644 index d4c89d3829..0000000000 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/rowerrcheck.go +++ /dev/null @@ -1,23 +0,0 @@ -package golinters - -import ( - "github.com/jingyugao/rowserrcheck/passes/rowserr" - "golang.org/x/tools/go/analysis" - - "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" - "github.com/golangci/golangci-lint/pkg/lint/linter" -) - -func NewRowsErrCheck() *goanalysis.Linter { - analyzer := rowserr.NewAnalyzer() - return goanalysis.NewLinter( - "rowserrcheck", - "checks whether Err of rows is checked successfully", - []*analysis.Analyzer{analyzer}, - nil, - ).WithLoadMode(goanalysis.LoadModeTypesInfo). - WithContextSetter(func(lintCtx *linter.Context) { - pkgs := lintCtx.Settings().RowsErrCheck.Packages - analyzer.Run = rowserr.NewRun(pkgs...) - }) -} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/rowserrcheck.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/rowserrcheck.go new file mode 100644 index 0000000000..d67efd0692 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/rowserrcheck.go @@ -0,0 +1,25 @@ +package golinters + +import ( + "github.com/jingyugao/rowserrcheck/passes/rowserr" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewRowsErrCheck(settings *config.RowsErrCheckSettings) *goanalysis.Linter { + var pkgs []string + if settings != nil { + pkgs = settings.Packages + } + + a := rowserr.NewAnalyzer(pkgs...) 
+ + return goanalysis.NewLinter( + a.Name, + "checks whether Rows.Err of rows is checked successfully", + []*analysis.Analyzer{a}, + nil, + ).WithLoadMode(goanalysis.LoadModeTypesInfo) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/scopelint.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/scopelint.go index ba3921e196..e6ef15ede0 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/scopelint.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/scopelint.go @@ -15,6 +15,7 @@ import ( const scopelintName = "scopelint" +//nolint:dupl func NewScopelint() *goanalysis.Linter { var mu sync.Mutex var resIssues []goanalysis.Issue @@ -22,43 +23,53 @@ func NewScopelint() *goanalysis.Linter { analyzer := &analysis.Analyzer{ Name: scopelintName, Doc: goanalysis.TheOnlyanalyzerDoc, - } - return goanalysis.NewLinter( - scopelintName, - "Scopelint checks for unpinned variables in go programs", - []*analysis.Analyzer{analyzer}, - nil, - ).WithContextSetter(func(lintCtx *linter.Context) { - analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { - var res []result.Issue - for _, file := range pass.Files { - n := Node{ - fset: pass.Fset, - DangerObjects: map[*ast.Object]int{}, - UnsafeObjects: map[*ast.Object]int{}, - SkipFuncs: map[*ast.FuncLit]int{}, - issues: &res, - } - ast.Walk(&n, file) - } + Run: func(pass *analysis.Pass) (any, error) { + issues := runScopeLint(pass) - if len(res) == 0 { + if len(issues) == 0 { return nil, nil } mu.Lock() - for i := range res { - resIssues = append(resIssues, goanalysis.NewIssue(&res[i], pass)) - } + resIssues = append(resIssues, issues...) mu.Unlock() return nil, nil - } - }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + }, + } + + return goanalysis.NewLinter( + scopelintName, + "Scopelint checks for unpinned variables in go programs", + []*analysis.Analyzer{analyzer}, + nil, + ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { return resIssues }).WithLoadMode(goanalysis.LoadModeSyntax) } +func runScopeLint(pass *analysis.Pass) []goanalysis.Issue { + var lintIssues []result.Issue + + for _, file := range pass.Files { + n := Node{ + fset: pass.Fset, + DangerObjects: map[*ast.Object]int{}, + UnsafeObjects: map[*ast.Object]int{}, + SkipFuncs: map[*ast.FuncLit]int{}, + issues: &lintIssues, + } + ast.Walk(&n, file) + } + + var issues []goanalysis.Issue + for i := range lintIssues { + issues = append(issues, goanalysis.NewIssue(&lintIssues[i], pass)) + } + + return issues +} + // The code below is copy-pasted from https://github.com/kyoh86/scopelint 92cbe2cc9276abda0e309f52cc9e309d407f174e // Node represents a Node being linted. @@ -73,6 +84,7 @@ type Node struct { // Visit method is invoked for each node encountered by Walk. // If the result visitor w is not nil, Walk visits each of the children // of node with the visitor w, followed by a call of w.Visit(nil). +// //nolint:gocyclo,gocritic func (f *Node) Visit(node ast.Node) ast.Visitor { switch typedNode := node.(type) { @@ -162,13 +174,14 @@ func (f *Node) Visit(node ast.Node) ast.Visitor { // The variadic arguments may start with link and category types, // and must end with a format string and any arguments. +// //nolint:interfacer -func (f *Node) errorf(n ast.Node, format string, args ...interface{}) { +func (f *Node) errorf(n ast.Node, format string, args ...any) { pos := f.fset.Position(n.Pos()) f.errorAtf(pos, format, args...) 
} -func (f *Node) errorAtf(pos token.Position, format string, args ...interface{}) { +func (f *Node) errorAtf(pos token.Position, format string, args ...any) { *f.issues = append(*f.issues, result.Issue{ Pos: pos, Text: fmt.Sprintf(format, args...), diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/sloglint.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/sloglint.go new file mode 100644 index 0000000000..acea90d536 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/sloglint.go @@ -0,0 +1,31 @@ +package golinters + +import ( + "go-simpler.org/sloglint" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewSlogLint(settings *config.SlogLintSettings) *goanalysis.Linter { + var opts *sloglint.Options + if settings != nil { + opts = &sloglint.Options{ + NoMixedArgs: settings.NoMixedArgs, + KVOnly: settings.KVOnly, + AttrOnly: settings.AttrOnly, + ContextOnly: settings.ContextOnly, + StaticMsg: settings.StaticMsg, + NoRawKeys: settings.NoRawKeys, + KeyNamingCase: settings.KeyNamingCase, + ArgsOnSepLines: settings.ArgsOnSepLines, + } + } + + a := sloglint.New(opts) + + return goanalysis. + NewLinter(a.Name, a.Doc, []*analysis.Analyzer{a}, nil). + WithLoadMode(goanalysis.LoadModeTypesInfo) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/spancheck.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/spancheck.go new file mode 100644 index 0000000000..9341244777 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/spancheck.go @@ -0,0 +1,29 @@ +package golinters + +import ( + "github.com/jjti/go-spancheck" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewSpancheck(settings *config.SpancheckSettings) *goanalysis.Linter { + cfg := spancheck.NewDefaultConfig() + + if settings != nil { + if settings.Checks != nil { + cfg.EnabledChecks = settings.Checks + } + + if settings.IgnoreCheckSignatures != nil { + cfg.IgnoreChecksSignaturesSlice = settings.IgnoreCheckSignatures + } + } + + a := spancheck.NewAnalyzerWithConfig(cfg) + + return goanalysis. + NewLinter(a.Name, a.Doc, []*analysis.Analyzer{a}, nil). 
+ WithLoadMode(goanalysis.LoadModeTypesInfo) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/sqlclosecheck.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/sqlclosecheck.go index 48ca246e70..e63b292a20 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/sqlclosecheck.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/sqlclosecheck.go @@ -8,14 +8,12 @@ import ( ) func NewSQLCloseCheck() *goanalysis.Linter { - analyzers := []*analysis.Analyzer{ - analyzer.NewAnalyzer(), - } + a := analyzer.NewAnalyzer() return goanalysis.NewLinter( - "sqlclosecheck", - "Checks that sql.Rows and sql.Stmt are closed.", - analyzers, + a.Name, + a.Doc, + []*analysis.Analyzer{a}, nil, ).WithLoadMode(goanalysis.LoadModeTypesInfo) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/staticcheck.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/staticcheck.go index 2226eabb4e..673484630a 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/staticcheck.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/staticcheck.go @@ -9,12 +9,12 @@ import ( func NewStaticcheck(settings *config.StaticCheckSettings) *goanalysis.Linter { cfg := staticCheckConfig(settings) - analyzers := setupStaticCheckAnalyzers(staticcheck.Analyzers, getGoVersion(settings), cfg.Checks) return goanalysis.NewLinter( "staticcheck", - "Staticcheck is a go vet on steroids, applying a ton of static analysis checks", + "It's a set of rules from staticcheck. It's not the same thing as the staticcheck binary."+ + " The author of staticcheck doesn't support or approve the use of staticcheck as a library inside golangci-lint.", analyzers, nil, ).WithLoadMode(goanalysis.LoadModeTypesInfo) diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/staticcheck_common.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/staticcheck_common.go index dc6360d7e0..0eb21ec9cf 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/staticcheck_common.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/staticcheck_common.go @@ -12,7 +12,7 @@ import ( "github.com/golangci/golangci-lint/pkg/logutils" ) -var debugf = logutils.Debug("megacheck") +var debugf = logutils.Debug(logutils.DebugKeyMegacheck) func getGoVersion(settings *config.StaticCheckSettings) string { var goVersion string @@ -24,8 +24,7 @@ func getGoVersion(settings *config.StaticCheckSettings) string { return goVersion } - // TODO: uses "1.13" for backward compatibility, but in the future (v2) must be set by using build.Default.ReleaseTags like staticcheck. - return "1.13" + return "1.17" } func setupStaticCheckAnalyzers(src []*lint.Analyzer, goVersion string, checks []string) []*analysis.Analyzer { @@ -99,7 +98,8 @@ func staticCheckConfig(settings *config.StaticCheckSettings) *scconfig.Config { } // https://github.com/dominikh/go-tools/blob/9bf17c0388a65710524ba04c2d821469e639fdc2/lintcmd/lint.go#L437-L477 -// nolint // Keep the original source code. +// +//nolint:gocritic // Keep the original source code. 
func filterAnalyzerNames(analyzers []string, checks []string) map[string]bool { allowedChecks := map[string]bool{} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/structcheck.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/structcheck.go index 7c16f8ec36..f3df0c2f35 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/structcheck.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/structcheck.go @@ -1,4 +1,4 @@ -package golinters // nolint:dupl +package golinters import ( "fmt" @@ -7,49 +7,65 @@ import ( structcheckAPI "github.com/golangci/check/cmd/structcheck" "golang.org/x/tools/go/analysis" + "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" "github.com/golangci/golangci-lint/pkg/lint/linter" "github.com/golangci/golangci-lint/pkg/result" ) -func NewStructcheck() *goanalysis.Linter { - const linterName = "structcheck" +const structcheckName = "structcheck" + +//nolint:dupl +func NewStructcheck(settings *config.StructCheckSettings) *goanalysis.Linter { var mu sync.Mutex - var res []goanalysis.Issue + var resIssues []goanalysis.Issue + analyzer := &analysis.Analyzer{ - Name: linterName, + Name: structcheckName, Doc: goanalysis.TheOnlyanalyzerDoc, - } - return goanalysis.NewLinter( - linterName, - "Finds unused struct fields", - []*analysis.Analyzer{analyzer}, - nil, - ).WithContextSetter(func(lintCtx *linter.Context) { - checkExported := lintCtx.Settings().Structcheck.CheckExportedFields - analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { - prog := goanalysis.MakeFakeLoaderProgram(pass) + Run: func(pass *analysis.Pass) (any, error) { + issues := runStructCheck(pass, settings) - structcheckIssues := structcheckAPI.Run(prog, checkExported) - if len(structcheckIssues) == 0 { + if len(issues) == 0 { return nil, nil } - issues := make([]goanalysis.Issue, 0, len(structcheckIssues)) - for _, i := range structcheckIssues { - issues = append(issues, goanalysis.NewIssue(&result.Issue{ - Pos: i.Pos, - Text: fmt.Sprintf("%s is unused", formatCode(i.FieldName, lintCtx.Cfg)), - FromLinter: linterName, - }, pass)) - } - mu.Lock() - res = append(res, issues...) + resIssues = append(resIssues, issues...) 
mu.Unlock() + return nil, nil - } - }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { - return res + }, + } + + return goanalysis.NewLinter( + structcheckName, + "Finds unused struct fields", + []*analysis.Analyzer{analyzer}, + nil, + ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + return resIssues }).WithLoadMode(goanalysis.LoadModeTypesInfo) } + +//nolint:dupl +func runStructCheck(pass *analysis.Pass, settings *config.StructCheckSettings) []goanalysis.Issue { + prog := goanalysis.MakeFakeLoaderProgram(pass) + + lintIssues := structcheckAPI.Run(prog, settings.CheckExportedFields) + if len(lintIssues) == 0 { + return nil + } + + issues := make([]goanalysis.Issue, 0, len(lintIssues)) + + for _, i := range lintIssues { + issues = append(issues, goanalysis.NewIssue(&result.Issue{ + Pos: i.Pos, + Text: fmt.Sprintf("%s is unused", formatCode(i.FieldName, nil)), + FromLinter: structcheckName, + }, pass)) + } + + return issues +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/stylecheck.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/stylecheck.go index 899f6ff582..d9b0f87c87 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/stylecheck.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/stylecheck.go @@ -15,7 +15,7 @@ func NewStylecheck(settings *config.StaticCheckSettings) *goanalysis.Linter { // `scconfig.Analyzer` is a singleton, then it's not possible to have more than one instance for all staticcheck "sub-linters". // When we will merge the 4 "sub-linters", the problem will disappear: https://github.com/golangci/golangci-lint/issues/357 // Currently only stylecheck analyzer has a configuration in staticcheck. - scconfig.Analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { + scconfig.Analyzer.Run = func(_ *analysis.Pass) (any, error) { return cfg, nil } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/tagalign.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/tagalign.go new file mode 100644 index 0000000000..c23838f702 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/tagalign.go @@ -0,0 +1,75 @@ +package golinters + +import ( + "sync" + + "github.com/4meepo/tagalign" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" + "github.com/golangci/golangci-lint/pkg/lint/linter" + "github.com/golangci/golangci-lint/pkg/result" +) + +func NewTagAlign(settings *config.TagAlignSettings) *goanalysis.Linter { + var mu sync.Mutex + var resIssues []goanalysis.Issue + + options := []tagalign.Option{tagalign.WithMode(tagalign.GolangciLintMode)} + + if settings != nil { + options = append(options, tagalign.WithAlign(settings.Align)) + + if settings.Sort || len(settings.Order) > 0 { + options = append(options, tagalign.WithSort(settings.Order...)) + } + + // Strict style will be applied only if Align and Sort are enabled together. + if settings.Strict && settings.Align && settings.Sort { + options = append(options, tagalign.WithStrictStyle()) + } + } + + analyzer := tagalign.NewAnalyzer(options...) + analyzer.Run = func(pass *analysis.Pass) (any, error) { + taIssues := tagalign.Run(pass, options...) 
+ + issues := make([]goanalysis.Issue, len(taIssues)) + for i, issue := range taIssues { + report := &result.Issue{ + FromLinter: analyzer.Name, + Pos: issue.Pos, + Text: issue.Message, + Replacement: &result.Replacement{ + Inline: &result.InlineFix{ + StartCol: issue.InlineFix.StartCol, + Length: issue.InlineFix.Length, + NewString: issue.InlineFix.NewString, + }, + }, + } + + issues[i] = goanalysis.NewIssue(report, pass) + } + + if len(issues) == 0 { + return nil, nil + } + + mu.Lock() + resIssues = append(resIssues, issues...) + mu.Unlock() + + return nil, nil + } + + return goanalysis.NewLinter( + analyzer.Name, + analyzer.Doc, + []*analysis.Analyzer{analyzer}, + nil, + ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + return resIssues + }).WithLoadMode(goanalysis.LoadModeSyntax) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/tagliatelle.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/tagliatelle.go index 5f58fc1d3b..67c14cbd48 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/tagliatelle.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/tagliatelle.go @@ -11,8 +11,9 @@ import ( func NewTagliatelle(settings *config.TagliatelleSettings) *goanalysis.Linter { cfg := tagliatelle.Config{ Rules: map[string]string{ - "json": "camel", - "yaml": "camel", + "json": "camel", + "yaml": "camel", + "header": "header", }, } @@ -25,6 +26,10 @@ func NewTagliatelle(settings *config.TagliatelleSettings) *goanalysis.Linter { a := tagliatelle.New(cfg) - return goanalysis.NewLinter(a.Name, a.Doc, []*analysis.Analyzer{a}, nil). - WithLoadMode(goanalysis.LoadModeSyntax) + return goanalysis.NewLinter( + a.Name, + a.Doc, + []*analysis.Analyzer{a}, + nil, + ).WithLoadMode(goanalysis.LoadModeSyntax) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/tenv.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/tenv.go new file mode 100644 index 0000000000..6c6bd3186f --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/tenv.go @@ -0,0 +1,29 @@ +package golinters + +import ( + "github.com/sivchari/tenv" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewTenv(settings *config.TenvSettings) *goanalysis.Linter { + a := tenv.Analyzer + + var cfg map[string]map[string]any + if settings != nil { + cfg = map[string]map[string]any{ + a.Name: { + tenv.A: settings.All, + }, + } + } + + return goanalysis.NewLinter( + a.Name, + a.Doc, + []*analysis.Analyzer{a}, + cfg, + ).WithLoadMode(goanalysis.LoadModeSyntax) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/testableexamples.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/testableexamples.go new file mode 100644 index 0000000000..3333593a62 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/testableexamples.go @@ -0,0 +1,19 @@ +package golinters + +import ( + "github.com/maratori/testableexamples/pkg/testableexamples" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewTestableexamples() *goanalysis.Linter { + a := testableexamples.NewAnalyzer() + + return goanalysis.NewLinter( + a.Name, + a.Doc, + []*analysis.Analyzer{a}, + nil, + ).WithLoadMode(goanalysis.LoadModeSyntax) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/testifylint.go 
b/vendor/github.com/golangci/golangci-lint/pkg/golinters/testifylint.go new file mode 100644 index 0000000000..d9cfc04f6e --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/testifylint.go @@ -0,0 +1,44 @@ +package golinters + +import ( + "github.com/Antonboom/testifylint/analyzer" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewTestifylint(settings *config.TestifylintSettings) *goanalysis.Linter { + a := analyzer.New() + + cfg := make(map[string]map[string]any) + if settings != nil { + cfg[a.Name] = map[string]any{ + "enable-all": settings.EnableAll, + "disable-all": settings.DisableAll, + } + if len(settings.EnabledCheckers) > 0 { + cfg[a.Name]["enable"] = settings.EnabledCheckers + } + if len(settings.DisabledCheckers) > 0 { + cfg[a.Name]["disable"] = settings.DisabledCheckers + } + + if p := settings.ExpectedActual.ExpVarPattern; p != "" { + cfg[a.Name]["expected-actual.pattern"] = p + } + if p := settings.RequireError.FnPattern; p != "" { + cfg[a.Name]["require-error.fn-pattern"] = p + } + if m := settings.SuiteExtraAssertCall.Mode; m != "" { + cfg[a.Name]["suite-extra-assert-call.mode"] = m + } + } + + return goanalysis.NewLinter( + a.Name, + a.Doc, + []*analysis.Analyzer{a}, + cfg, + ).WithLoadMode(goanalysis.LoadModeTypesInfo) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/testpackage.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/testpackage.go index 1248e78fda..db1ead9661 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/testpackage.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/testpackage.go @@ -1,6 +1,8 @@ package golinters import ( + "strings" + "github.com/maratori/testpackage/pkg/testpackage" "golang.org/x/tools/go/analysis" @@ -10,14 +12,17 @@ import ( func NewTestpackage(cfg *config.TestpackageSettings) *goanalysis.Linter { var a = testpackage.NewAnalyzer() - var settings map[string]map[string]interface{} + + var settings map[string]map[string]any if cfg != nil { - settings = map[string]map[string]interface{}{ + settings = map[string]map[string]any{ a.Name: { - testpackage.SkipRegexpFlagName: cfg.SkipRegexp, + testpackage.SkipRegexpFlagName: cfg.SkipRegexp, + testpackage.AllowPackagesFlagName: strings.Join(cfg.AllowPackages, ","), }, } } + return goanalysis.NewLinter(a.Name, a.Doc, []*analysis.Analyzer{a}, settings). 
WithLoadMode(goanalysis.LoadModeSyntax) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/thelper.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/thelper.go index 1d92f2fbfa..1ae85ef42e 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/thelper.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/thelper.go @@ -4,6 +4,7 @@ import ( "strings" "github.com/kulti/thelper/pkg/analyzer" + "golang.org/x/exp/maps" "golang.org/x/tools/go/analysis" "github.com/golangci/golangci-lint/pkg/config" @@ -13,49 +14,67 @@ import ( func NewThelper(cfg *config.ThelperSettings) *goanalysis.Linter { a := analyzer.NewAnalyzer() - cfgMap := map[string]map[string]interface{}{} - if cfg != nil { - var opts []string + opts := map[string]struct{}{ + "t_name": {}, + "t_begin": {}, + "t_first": {}, - if cfg.Test.Name { - opts = append(opts, "t_name") - } - if cfg.Test.Begin { - opts = append(opts, "t_begin") - } - if cfg.Test.First { - opts = append(opts, "t_first") - } + "f_name": {}, + "f_begin": {}, + "f_first": {}, - if cfg.Benchmark.Name { - opts = append(opts, "b_name") - } - if cfg.Benchmark.Begin { - opts = append(opts, "b_begin") - } - if cfg.Benchmark.First { - opts = append(opts, "b_first") - } + "b_name": {}, + "b_begin": {}, + "b_first": {}, - if cfg.TB.Name { - opts = append(opts, "tb_name") - } - if cfg.TB.Begin { - opts = append(opts, "tb_begin") - } - if cfg.TB.First { - opts = append(opts, "tb_first") - } + "tb_name": {}, + "tb_begin": {}, + "tb_first": {}, + } - cfgMap[a.Name] = map[string]interface{}{ - "checks": strings.Join(opts, ","), - } + if cfg != nil { + applyTHelperOptions(cfg.Test, "t_", opts) + applyTHelperOptions(cfg.Fuzz, "f_", opts) + applyTHelperOptions(cfg.Benchmark, "b_", opts) + applyTHelperOptions(cfg.TB, "tb_", opts) + } + + if len(opts) == 0 { + linterLogger.Fatalf("thelper: at least one option must be enabled") + } + + args := maps.Keys(opts) + + cfgMap := map[string]map[string]any{ + a.Name: { + "checks": strings.Join(args, ","), + }, } return goanalysis.NewLinter( - "thelper", - "thelper detects golang test helpers without t.Helper() call and checks the consistency of test helpers", + a.Name, + a.Doc, []*analysis.Analyzer{a}, cfgMap, ).WithLoadMode(goanalysis.LoadModeTypesInfo) } + +func applyTHelperOptions(o config.ThelperOptions, prefix string, opts map[string]struct{}) { + if o.Name != nil { + if !*o.Name { + delete(opts, prefix+"name") + } + } + + if o.Begin != nil { + if !*o.Begin { + delete(opts, prefix+"begin") + } + } + + if o.First != nil { + if !*o.First { + delete(opts, prefix+"first") + } + } +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/tparallel.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/tparallel.go index a4b96eb735..643f2c2710 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/tparallel.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/tparallel.go @@ -8,14 +8,11 @@ import ( ) func NewTparallel() *goanalysis.Linter { - analyzers := []*analysis.Analyzer{ - tparallel.Analyzer, - } - + a := tparallel.Analyzer return goanalysis.NewLinter( - "tparallel", - "tparallel detects inappropriate usage of t.Parallel() method in your Go test codes", - analyzers, + a.Name, + a.Doc, + []*analysis.Analyzer{a}, nil, ).WithLoadMode(goanalysis.LoadModeTypesInfo) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/typecheck.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/typecheck.go index 24f4339fbd..e9b26ef485 100644 
--- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/typecheck.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/typecheck.go @@ -12,17 +12,13 @@ func NewTypecheck() *goanalysis.Linter { analyzer := &analysis.Analyzer{ Name: linterName, Doc: goanalysis.TheOnlyanalyzerDoc, - Run: func(pass *analysis.Pass) (interface{}, error) { - return nil, nil - }, + Run: goanalysis.DummyRun, } - linter := goanalysis.NewLinter( + return goanalysis.NewLinter( linterName, "Like the front-end of a Go compiler, parses and type-checks Go code", []*analysis.Analyzer{analyzer}, nil, ).WithLoadMode(goanalysis.LoadModeTypesInfo) - - return linter } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/unconvert.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/unconvert.go index 456f6836cc..aad858dfd6 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/unconvert.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/unconvert.go @@ -11,43 +11,57 @@ import ( "github.com/golangci/golangci-lint/pkg/result" ) +const unconvertName = "unconvert" + +//nolint:dupl func NewUnconvert() *goanalysis.Linter { - const linterName = "unconvert" var mu sync.Mutex - var res []goanalysis.Issue + var resIssues []goanalysis.Issue + analyzer := &analysis.Analyzer{ - Name: linterName, + Name: unconvertName, Doc: goanalysis.TheOnlyanalyzerDoc, - } - return goanalysis.NewLinter( - linterName, - "Remove unnecessary type conversions", - []*analysis.Analyzer{analyzer}, - nil, - ).WithContextSetter(func(lintCtx *linter.Context) { - analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { - prog := goanalysis.MakeFakeLoaderProgram(pass) + Run: func(pass *analysis.Pass) (any, error) { + issues := runUnconvert(pass) - positions := unconvertAPI.Run(prog) - if len(positions) == 0 { + if len(issues) == 0 { return nil, nil } - issues := make([]goanalysis.Issue, 0, len(positions)) - for _, pos := range positions { - issues = append(issues, goanalysis.NewIssue(&result.Issue{ - Pos: pos, - Text: "unnecessary conversion", - FromLinter: linterName, - }, pass)) - } - mu.Lock() - res = append(res, issues...) + resIssues = append(resIssues, issues...) 
mu.Unlock() + return nil, nil - } - }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { - return res + }, + } + + return goanalysis.NewLinter( + unconvertName, + "Remove unnecessary type conversions", + []*analysis.Analyzer{analyzer}, + nil, + ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { + return resIssues }).WithLoadMode(goanalysis.LoadModeTypesInfo) } + +func runUnconvert(pass *analysis.Pass) []goanalysis.Issue { + prog := goanalysis.MakeFakeLoaderProgram(pass) + + positions := unconvertAPI.Run(prog) + if len(positions) == 0 { + return nil + } + + issues := make([]goanalysis.Issue, 0, len(positions)) + for _, pos := range positions { + issues = append(issues, goanalysis.NewIssue(&result.Issue{ + Pos: pos, + Text: "unnecessary conversion", + FromLinter: unconvertName, + }, pass)) + } + + return issues +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/unparam.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/unparam.go index 33dd55c9b2..4078d94988 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/unparam.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/unparam.go @@ -8,69 +8,83 @@ import ( "golang.org/x/tools/go/packages" "mvdan.cc/unparam/check" + "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" "github.com/golangci/golangci-lint/pkg/lint/linter" "github.com/golangci/golangci-lint/pkg/result" ) -func NewUnparam() *goanalysis.Linter { - const linterName = "unparam" +const unparamName = "unparam" + +func NewUnparam(settings *config.UnparamSettings) *goanalysis.Linter { var mu sync.Mutex var resIssues []goanalysis.Issue analyzer := &analysis.Analyzer{ - Name: linterName, + Name: unparamName, Doc: goanalysis.TheOnlyanalyzerDoc, Requires: []*analysis.Analyzer{buildssa.Analyzer}, - } - return goanalysis.NewLinter( - linterName, - "Reports unused function parameters", - []*analysis.Analyzer{analyzer}, - nil, - ).WithContextSetter(func(lintCtx *linter.Context) { - us := &lintCtx.Settings().Unparam - if us.Algo != "cha" { - lintCtx.Log.Warnf("`linters-settings.unparam.algo` isn't supported by the newest `unparam`") - } - - analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { - ssa := pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA) - ssaPkg := ssa.Pkg - - pkg := &packages.Package{ - Fset: pass.Fset, - Syntax: pass.Files, - Types: pass.Pkg, - TypesInfo: pass.TypesInfo, - } - - c := &check.Checker{} - c.CheckExportedFuncs(us.CheckExported) - c.Packages([]*packages.Package{pkg}) - c.ProgramSSA(ssaPkg.Prog) - - unparamIssues, err := c.Check() + Run: func(pass *analysis.Pass) (any, error) { + issues, err := runUnparam(pass, settings) if err != nil { return nil, err } - var res []goanalysis.Issue - for _, i := range unparamIssues { - res = append(res, goanalysis.NewIssue(&result.Issue{ - Pos: pass.Fset.Position(i.Pos()), - Text: i.Message(), - FromLinter: linterName, - }, pass)) + if len(issues) == 0 { + return nil, nil } mu.Lock() - resIssues = append(resIssues, res...) + resIssues = append(resIssues, issues...) 
mu.Unlock() return nil, nil + }, + } + + return goanalysis.NewLinter( + unparamName, + "Reports unused function parameters", + []*analysis.Analyzer{analyzer}, + nil, + ).WithContextSetter(func(lintCtx *linter.Context) { + if settings.Algo != "cha" { + lintCtx.Log.Warnf("`linters-settings.unparam.algo` isn't supported by the newest `unparam`") } }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { return resIssues }).WithLoadMode(goanalysis.LoadModeTypesInfo) } + +func runUnparam(pass *analysis.Pass, settings *config.UnparamSettings) ([]goanalysis.Issue, error) { + ssa := pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA) + ssaPkg := ssa.Pkg + + pkg := &packages.Package{ + Fset: pass.Fset, + Syntax: pass.Files, + Types: pass.Pkg, + TypesInfo: pass.TypesInfo, + } + + c := &check.Checker{} + c.CheckExportedFuncs(settings.CheckExported) + c.Packages([]*packages.Package{pkg}) + c.ProgramSSA(ssaPkg.Prog) + + unparamIssues, err := c.Check() + if err != nil { + return nil, err + } + + var issues []goanalysis.Issue + for _, i := range unparamIssues { + issues = append(issues, goanalysis.NewIssue(&result.Issue{ + Pos: pass.Fset.Position(i.Pos()), + Text: i.Message(), + FromLinter: unparamName, + }, pass)) + } + + return issues, nil +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/unused.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/unused.go index cfdf1f2ca7..a93061c96c 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/unused.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/unused.go @@ -5,6 +5,9 @@ import ( "sync" "golang.org/x/tools/go/analysis" + "honnef.co/go/tools/analysis/facts/directives" + "honnef.co/go/tools/analysis/facts/generated" + "honnef.co/go/tools/analysis/lint" "honnef.co/go/tools/unused" "github.com/golangci/golangci-lint/pkg/config" @@ -13,37 +16,20 @@ import ( "github.com/golangci/golangci-lint/pkg/result" ) -type UnusedSettings struct { - GoVersion string -} - -func NewUnused(settings *config.StaticCheckSettings) *goanalysis.Linter { - const name = "unused" +const unusedName = "unused" +func NewUnused(settings *config.UnusedSettings, scSettings *config.StaticCheckSettings) *goanalysis.Linter { var mu sync.Mutex var resIssues []goanalysis.Issue analyzer := &analysis.Analyzer{ - Name: name, + Name: unusedName, Doc: unused.Analyzer.Analyzer.Doc, Requires: unused.Analyzer.Analyzer.Requires, - Run: func(pass *analysis.Pass) (interface{}, error) { - res, err := unused.Analyzer.Analyzer.Run(pass) - if err != nil { - return nil, err - } - - sr := unused.Serialize(pass, res.(unused.Result), pass.Fset) - - var issues []goanalysis.Issue - for _, object := range sr.Unused { - issue := goanalysis.NewIssue(&result.Issue{ - FromLinter: name, - Text: fmt.Sprintf("%s %s is unused", object.Kind, object.Name), - Pos: object.Position, - }, pass) - - issues = append(issues, issue) + Run: func(pass *analysis.Pass) (any, error) { + issues := runUnused(pass, settings) + if len(issues) == 0 { + return nil, nil } mu.Lock() @@ -54,16 +40,73 @@ func NewUnused(settings *config.StaticCheckSettings) *goanalysis.Linter { }, } - setAnalyzerGoVersion(analyzer, getGoVersion(settings)) + setAnalyzerGoVersion(analyzer, getGoVersion(scSettings)) - lnt := goanalysis.NewLinter( - name, + return goanalysis.NewLinter( + unusedName, "Checks Go code for unused constants, variables, functions and types", []*analysis.Analyzer{analyzer}, nil, - ).WithIssuesReporter(func(lintCtx *linter.Context) []goanalysis.Issue { + ).WithIssuesReporter(func(_ 
*linter.Context) []goanalysis.Issue { return resIssues }).WithLoadMode(goanalysis.LoadModeTypesInfo) +} + +func runUnused(pass *analysis.Pass, cfg *config.UnusedSettings) []goanalysis.Issue { + res := getUnusedResults(pass, cfg) + + used := make(map[string]bool) + for _, obj := range res.Used { + used[fmt.Sprintf("%s %d %s", obj.Position.Filename, obj.Position.Line, obj.Name)] = true + } + + var issues []goanalysis.Issue + + // Inspired by https://github.com/dominikh/go-tools/blob/d694aadcb1f50c2d8ac0a1dd06217ebb9f654764/lintcmd/lint.go#L177-L197 + for _, object := range res.Unused { + if object.Kind == "type param" { + continue + } + + key := fmt.Sprintf("%s %d %s", object.Position.Filename, object.Position.Line, object.Name) + if used[key] { + continue + } + + issue := goanalysis.NewIssue(&result.Issue{ + FromLinter: unusedName, + Text: fmt.Sprintf("%s %s is unused", object.Kind, object.Name), + Pos: object.Position, + }, pass) + + issues = append(issues, issue) + } + + return issues +} + +func getUnusedResults(pass *analysis.Pass, settings *config.UnusedSettings) unused.Result { + opts := unused.Options{ + FieldWritesAreUses: settings.FieldWritesAreUses, + PostStatementsAreReads: settings.PostStatementsAreReads, + ExportedIsUsed: settings.ExportedIsUsed, + ExportedFieldsAreUsed: settings.ExportedFieldsAreUsed, + ParametersAreUsed: settings.ParametersAreUsed, + LocalVariablesAreUsed: settings.LocalVariablesAreUsed, + GeneratedIsUsed: settings.GeneratedIsUsed, + } + + // ref: https://github.com/dominikh/go-tools/blob/4ec1f474ca6c0feb8e10a8fcca4ab95f5b5b9881/internal/cmd/unused/unused.go#L68 + nodes := unused.Graph(pass.Fset, + pass.Files, + pass.Pkg, + pass.TypesInfo, + pass.ResultOf[directives.Analyzer].([]lint.Directive), + pass.ResultOf[generated.Analyzer].(map[string]generated.Generator), + opts, + ) - return lnt + sg := unused.SerializedGraph{} + sg.Merge(nodes) + return sg.Results() } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/usestdlibvars.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/usestdlibvars.go new file mode 100644 index 0000000000..663a841ac7 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/usestdlibvars.go @@ -0,0 +1,38 @@ +package golinters + +import ( + "github.com/sashamelentyev/usestdlibvars/pkg/analyzer" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewUseStdlibVars(cfg *config.UseStdlibVarsSettings) *goanalysis.Linter { + a := analyzer.New() + + cfgMap := make(map[string]map[string]any) + if cfg != nil { + cfgMap[a.Name] = map[string]any{ + analyzer.ConstantKindFlag: cfg.ConstantKind, + analyzer.CryptoHashFlag: cfg.CryptoHash, + analyzer.HTTPMethodFlag: cfg.HTTPMethod, + analyzer.HTTPStatusCodeFlag: cfg.HTTPStatusCode, + analyzer.OSDevNullFlag: cfg.OSDevNull, + analyzer.RPCDefaultPathFlag: cfg.DefaultRPCPath, + analyzer.SQLIsolationLevelFlag: cfg.SQLIsolationLevel, + analyzer.SyslogPriorityFlag: cfg.SyslogPriority, + analyzer.TimeLayoutFlag: cfg.TimeLayout, + analyzer.TimeMonthFlag: cfg.TimeMonth, + analyzer.TimeWeekdayFlag: cfg.TimeWeekday, + analyzer.TLSSignatureSchemeFlag: cfg.TLSSignatureScheme, + } + } + + return goanalysis.NewLinter( + a.Name, + a.Doc, + []*analysis.Analyzer{a}, + cfgMap, + ).WithLoadMode(goanalysis.LoadModeSyntax) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/util.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/util.go index 
1940f30e3f..1044567a95 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/util.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/util.go @@ -2,8 +2,11 @@ package golinters import ( "fmt" + "path/filepath" "strings" + "golang.org/x/tools/go/analysis" + "github.com/golangci/golangci-lint/pkg/config" ) @@ -22,3 +25,17 @@ func formatCodeBlock(code string, _ *config.Config) string { return fmt.Sprintf("```\n%s\n```", code) } + +func getFileNames(pass *analysis.Pass) []string { + var fileNames []string + for _, f := range pass.Files { + fileName := pass.Fset.PositionFor(f.Pos(), true).Filename + ext := filepath.Ext(fileName) + if ext != "" && ext != ".go" { + // position has been adjusted to a non-go file, revert to original file + fileName = pass.Fset.PositionFor(f.Pos(), false).Filename + } + fileNames = append(fileNames, fileName) + } + return fileNames +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/varcheck.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/varcheck.go index dcf2e7de8e..ea735672f6 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/varcheck.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/varcheck.go @@ -1,4 +1,4 @@ -package golinters // nolint:dupl +package golinters import ( "fmt" @@ -7,49 +7,66 @@ import ( varcheckAPI "github.com/golangci/check/cmd/varcheck" "golang.org/x/tools/go/analysis" + "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" "github.com/golangci/golangci-lint/pkg/lint/linter" "github.com/golangci/golangci-lint/pkg/result" ) -func NewVarcheck() *goanalysis.Linter { - const linterName = "varcheck" +const varcheckName = "varcheck" + +func NewVarcheck(settings *config.VarCheckSettings) *goanalysis.Linter { var mu sync.Mutex - var res []goanalysis.Issue + var resIssues []goanalysis.Issue + analyzer := &analysis.Analyzer{ - Name: linterName, + Name: varcheckName, Doc: goanalysis.TheOnlyanalyzerDoc, + Run: goanalysis.DummyRun, } + return goanalysis.NewLinter( - linterName, + varcheckName, "Finds unused global variables and constants", []*analysis.Analyzer{analyzer}, nil, - ).WithContextSetter(func(lintCtx *linter.Context) { - checkExported := lintCtx.Settings().Varcheck.CheckExportedFields - analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { - prog := goanalysis.MakeFakeLoaderProgram(pass) + ).WithContextSetter(func(_ *linter.Context) { + analyzer.Run = func(pass *analysis.Pass) (any, error) { + issues := runVarCheck(pass, settings) - varcheckIssues := varcheckAPI.Run(prog, checkExported) - if len(varcheckIssues) == 0 { + if len(issues) == 0 { return nil, nil } - issues := make([]goanalysis.Issue, 0, len(varcheckIssues)) - for _, i := range varcheckIssues { - issues = append(issues, goanalysis.NewIssue(&result.Issue{ - Pos: i.Pos, - Text: fmt.Sprintf("%s is unused", formatCode(i.VarName, lintCtx.Cfg)), - FromLinter: linterName, - }, pass)) - } - mu.Lock() - res = append(res, issues...) + resIssues = append(resIssues, issues...) 
mu.Unlock() + return nil, nil } }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { - return res + return resIssues }).WithLoadMode(goanalysis.LoadModeTypesInfo) } + +//nolint:dupl +func runVarCheck(pass *analysis.Pass, settings *config.VarCheckSettings) []goanalysis.Issue { + prog := goanalysis.MakeFakeLoaderProgram(pass) + + lintIssues := varcheckAPI.Run(prog, settings.CheckExportedFields) + if len(lintIssues) == 0 { + return nil + } + + issues := make([]goanalysis.Issue, 0, len(lintIssues)) + + for _, i := range lintIssues { + issues = append(issues, goanalysis.NewIssue(&result.Issue{ + Pos: i.Pos, + Text: fmt.Sprintf("%s is unused", formatCode(i.VarName, nil)), + FromLinter: varcheckName, + }, pass)) + } + + return issues +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/varnamelen.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/varnamelen.go new file mode 100644 index 0000000000..688dfa8046 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/varnamelen.go @@ -0,0 +1,46 @@ +package golinters + +import ( + "strconv" + "strings" + + "github.com/blizzy78/varnamelen" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewVarnamelen(settings *config.VarnamelenSettings) *goanalysis.Linter { + analyzer := varnamelen.NewAnalyzer() + cfg := map[string]map[string]any{} + + if settings != nil { + vnlCfg := map[string]any{ + "checkReceiver": strconv.FormatBool(settings.CheckReceiver), + "checkReturn": strconv.FormatBool(settings.CheckReturn), + "checkTypeParam": strconv.FormatBool(settings.CheckTypeParam), + "ignoreNames": strings.Join(settings.IgnoreNames, ","), + "ignoreTypeAssertOk": strconv.FormatBool(settings.IgnoreTypeAssertOk), + "ignoreMapIndexOk": strconv.FormatBool(settings.IgnoreMapIndexOk), + "ignoreChanRecvOk": strconv.FormatBool(settings.IgnoreChanRecvOk), + "ignoreDecls": strings.Join(settings.IgnoreDecls, ","), + } + + if settings.MaxDistance > 0 { + vnlCfg["maxDistance"] = strconv.Itoa(settings.MaxDistance) + } + if settings.MinNameLength > 0 { + vnlCfg["minNameLength"] = strconv.Itoa(settings.MinNameLength) + } + + cfg[analyzer.Name] = vnlCfg + } + + return goanalysis.NewLinter( + analyzer.Name, + "checks that the length of a variable's name matches its scope", + []*analysis.Analyzer{analyzer}, + cfg, + ).WithLoadMode(goanalysis.LoadModeTypesInfo) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/wastedassign.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/wastedassign.go index d359fb0195..9038c827dc 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/wastedassign.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/wastedassign.go @@ -8,14 +8,12 @@ import ( ) func NewWastedAssign() *goanalysis.Linter { - analyzers := []*analysis.Analyzer{ - wastedassign.Analyzer, - } + a := wastedassign.Analyzer return goanalysis.NewLinter( - "wastedassign", - "wastedassign finds wasted assignment statements.", - analyzers, + a.Name, + "Finds wasted assignment statements", + []*analysis.Analyzer{a}, nil, ).WithLoadMode(goanalysis.LoadModeTypesInfo) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/whitespace.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/whitespace.go index d475465a21..5487b1016a 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/whitespace.go +++ 
b/vendor/github.com/golangci/golangci-lint/pkg/golinters/whitespace.go @@ -1,80 +1,53 @@ package golinters import ( - "go/token" + "fmt" "sync" - "github.com/pkg/errors" "github.com/ultraware/whitespace" "golang.org/x/tools/go/analysis" + "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" "github.com/golangci/golangci-lint/pkg/lint/linter" "github.com/golangci/golangci-lint/pkg/result" ) -func NewWhitespace() *goanalysis.Linter { - const linterName = "whitespace" +const whitespaceName = "whitespace" + +func NewWhitespace(settings *config.WhitespaceSettings) *goanalysis.Linter { var mu sync.Mutex var resIssues []goanalysis.Issue - analyzer := &analysis.Analyzer{ - Name: linterName, - Doc: goanalysis.TheOnlyanalyzerDoc, + var wsSettings whitespace.Settings + if settings != nil { + wsSettings = whitespace.Settings{ + Mode: whitespace.RunningModeGolangCI, + MultiIf: settings.MultiIf, + MultiFunc: settings.MultiFunc, + } } + + a := whitespace.NewAnalyzer(&wsSettings) + return goanalysis.NewLinter( - linterName, - "Tool for detection of leading and trailing whitespace", - []*analysis.Analyzer{analyzer}, + a.Name, + a.Doc, + []*analysis.Analyzer{a}, nil, - ).WithContextSetter(func(lintCtx *linter.Context) { - cfg := lintCtx.Cfg.LintersSettings.Whitespace - settings := whitespace.Settings{MultiIf: cfg.MultiIf, MultiFunc: cfg.MultiFunc} - - analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { - var issues []whitespace.Message - for _, file := range pass.Files { - issues = append(issues, whitespace.Run(file, pass.Fset, settings)...) + ).WithContextSetter(func(_ *linter.Context) { + a.Run = func(pass *analysis.Pass) (any, error) { + issues, err := runWhitespace(pass, wsSettings) + if err != nil { + return nil, err } if len(issues) == 0 { return nil, nil } - res := make([]goanalysis.Issue, len(issues)) - for k, i := range issues { - issue := result.Issue{ - Pos: token.Position{ - Filename: i.Pos.Filename, - Line: i.Pos.Line, - }, - LineRange: &result.Range{From: i.Pos.Line, To: i.Pos.Line}, - Text: i.Message, - FromLinter: linterName, - Replacement: &result.Replacement{}, - } - - bracketLine, err := lintCtx.LineCache.GetLine(issue.Pos.Filename, issue.Pos.Line) - if err != nil { - return nil, errors.Wrapf(err, "failed to get line %s:%d", issue.Pos.Filename, issue.Pos.Line) - } - - switch i.Type { - case whitespace.MessageTypeLeading: - issue.LineRange.To++ // cover two lines by the issue: opening bracket "{" (issue.Pos.Line) and following empty line - case whitespace.MessageTypeTrailing: - issue.LineRange.From-- // cover two lines by the issue: closing bracket "}" (issue.Pos.Line) and preceding empty line - issue.Pos.Line-- // set in sync with LineRange.From to not break fixer and other code features - case whitespace.MessageTypeAddAfter: - bracketLine += "\n" - } - issue.Replacement.NewLines = []string{bracketLine} - - res[k] = goanalysis.NewIssue(&issue, pass) - } - mu.Lock() - resIssues = append(resIssues, res...) + resIssues = append(resIssues, issues...) 
mu.Unlock() return nil, nil @@ -83,3 +56,47 @@ func NewWhitespace() *goanalysis.Linter { return resIssues }).WithLoadMode(goanalysis.LoadModeSyntax) } + +func runWhitespace(pass *analysis.Pass, wsSettings whitespace.Settings) ([]goanalysis.Issue, error) { + lintIssues := whitespace.Run(pass, &wsSettings) + + issues := make([]goanalysis.Issue, len(lintIssues)) + for i, issue := range lintIssues { + report := &result.Issue{ + FromLinter: whitespaceName, + Pos: pass.Fset.PositionFor(issue.Diagnostic, false), + Text: issue.Message, + } + + switch issue.MessageType { + case whitespace.MessageTypeRemove: + if len(issue.LineNumbers) == 0 { + continue + } + + report.LineRange = &result.Range{ + From: issue.LineNumbers[0], + To: issue.LineNumbers[len(issue.LineNumbers)-1], + } + + report.Replacement = &result.Replacement{NeedOnlyDelete: true} + + case whitespace.MessageTypeAdd: + report.Pos = pass.Fset.PositionFor(issue.FixStart, false) + report.Replacement = &result.Replacement{ + Inline: &result.InlineFix{ + StartCol: 0, + Length: 1, + NewString: "\n\t", + }, + } + + default: + return nil, fmt.Errorf("unknown message type: %v", issue.MessageType) + } + + issues[i] = goanalysis.NewIssue(report, pass) + } + + return issues, nil +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/wrapcheck.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/wrapcheck.go index 7717d188ac..6d25db4277 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/wrapcheck.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/wrapcheck.go @@ -8,20 +8,27 @@ import ( "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" ) -const wrapcheckName = "wrapcheck" - func NewWrapcheck(settings *config.WrapcheckSettings) *goanalysis.Linter { cfg := wrapcheck.NewDefaultConfig() if settings != nil { if len(settings.IgnoreSigs) != 0 { cfg.IgnoreSigs = settings.IgnoreSigs } + if len(settings.IgnoreSigRegexps) != 0 { + cfg.IgnoreSigRegexps = settings.IgnoreSigRegexps + } + if len(settings.IgnorePackageGlobs) != 0 { + cfg.IgnorePackageGlobs = settings.IgnorePackageGlobs + } + if len(settings.IgnoreInterfaceRegexps) != 0 { + cfg.IgnoreInterfaceRegexps = settings.IgnoreInterfaceRegexps + } } a := wrapcheck.NewAnalyzer(cfg) return goanalysis.NewLinter( - wrapcheckName, + a.Name, a.Doc, []*analysis.Analyzer{a}, nil, diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/wsl.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/wsl.go index 29d00faea5..3b090a686e 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/wsl.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/wsl.go @@ -1,83 +1,39 @@ package golinters import ( - "sync" - - "github.com/bombsimon/wsl/v3" + "github.com/bombsimon/wsl/v4" "golang.org/x/tools/go/analysis" + "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" - "github.com/golangci/golangci-lint/pkg/lint/linter" - "github.com/golangci/golangci-lint/pkg/result" -) - -const ( - name = "wsl" ) -// NewWSL returns a new WSL linter. 
-func NewWSL() *goanalysis.Linter { - var ( - issues []goanalysis.Issue - mu = sync.Mutex{} - analyzer = &analysis.Analyzer{ - Name: goanalysis.TheOnlyAnalyzerName, - Doc: goanalysis.TheOnlyanalyzerDoc, +func NewWSL(settings *config.WSLSettings) *goanalysis.Linter { + var conf *wsl.Configuration + if settings != nil { + conf = &wsl.Configuration{ + StrictAppend: settings.StrictAppend, + AllowAssignAndCallCuddle: settings.AllowAssignAndCallCuddle, + AllowAssignAndAnythingCuddle: settings.AllowAssignAndAnythingCuddle, + AllowMultiLineAssignCuddle: settings.AllowMultiLineAssignCuddle, + ForceCaseTrailingWhitespaceLimit: settings.ForceCaseTrailingWhitespaceLimit, + AllowTrailingComment: settings.AllowTrailingComment, + AllowSeparatedLeadingComment: settings.AllowSeparatedLeadingComment, + AllowCuddleDeclaration: settings.AllowCuddleDeclaration, + AllowCuddleWithCalls: settings.AllowCuddleWithCalls, + AllowCuddleWithRHS: settings.AllowCuddleWithRHS, + ForceCuddleErrCheckAndAssign: settings.ForceCuddleErrCheckAndAssign, + ErrorVariableNames: settings.ErrorVariableNames, + ForceExclusiveShortDeclarations: settings.ForceExclusiveShortDeclarations, } - ) + } + + a := wsl.NewAnalyzer(conf) return goanalysis.NewLinter( - name, - "Whitespace Linter - Forces you to use empty lines!", - []*analysis.Analyzer{analyzer}, + a.Name, + a.Doc, + []*analysis.Analyzer{a}, nil, - ).WithContextSetter(func(lintCtx *linter.Context) { - analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { - var ( - files = []string{} - linterCfg = lintCtx.Cfg.LintersSettings.WSL - processorCfg = wsl.Configuration{ - StrictAppend: linterCfg.StrictAppend, - AllowAssignAndCallCuddle: linterCfg.AllowAssignAndCallCuddle, - AllowAssignAndAnythingCuddle: linterCfg.AllowAssignAndAnythingCuddle, - AllowMultiLineAssignCuddle: linterCfg.AllowMultiLineAssignCuddle, - AllowCuddleDeclaration: linterCfg.AllowCuddleDeclaration, - AllowTrailingComment: linterCfg.AllowTrailingComment, - AllowSeparatedLeadingComment: linterCfg.AllowSeparatedLeadingComment, - ForceCuddleErrCheckAndAssign: linterCfg.ForceCuddleErrCheckAndAssign, - ForceCaseTrailingWhitespaceLimit: linterCfg.ForceCaseTrailingWhitespaceLimit, - ForceExclusiveShortDeclarations: linterCfg.ForceExclusiveShortDeclarations, - AllowCuddleWithCalls: []string{"Lock", "RLock"}, - AllowCuddleWithRHS: []string{"Unlock", "RUnlock"}, - ErrorVariableNames: []string{"err"}, - } - ) - - for _, file := range pass.Files { - files = append(files, pass.Fset.PositionFor(file.Pos(), false).Filename) - } - - wslErrors, _ := wsl.NewProcessorWithConfig(processorCfg). 
- ProcessFiles(files) - - if len(wslErrors) == 0 { - return nil, nil - } - - mu.Lock() - defer mu.Unlock() - - for _, err := range wslErrors { - issues = append(issues, goanalysis.NewIssue(&result.Issue{ - FromLinter: name, - Pos: err.Position, - Text: err.Reason, - }, pass)) - } - - return nil, nil - } - }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { - return issues - }).WithLoadMode(goanalysis.LoadModeSyntax) + ).WithLoadMode(goanalysis.LoadModeSyntax) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/zerologlint.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/zerologlint.go new file mode 100644 index 0000000000..edde726655 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/zerologlint.go @@ -0,0 +1,19 @@ +package golinters + +import ( + "github.com/ykadowak/zerologlint" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" +) + +func NewZerologLint() *goanalysis.Linter { + a := zerologlint.Analyzer + + return goanalysis.NewLinter( + a.Name, + a.Doc, + []*analysis.Analyzer{a}, + nil, + ).WithLoadMode(goanalysis.LoadModeTypesInfo) +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/goutil/env.go b/vendor/github.com/golangci/golangci-lint/pkg/goutil/env.go index 1c05b98050..93922f85a7 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/goutil/env.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/goutil/env.go @@ -3,13 +3,12 @@ package goutil import ( "context" "encoding/json" + "fmt" "os" "os/exec" "strings" "time" - "github.com/pkg/errors" - "github.com/golangci/golangci-lint/pkg/logutils" ) @@ -30,7 +29,7 @@ func NewEnv(log logutils.Log) *Env { return &Env{ vars: map[string]string{}, log: log, - debugf: logutils.Debug("env"), + debugf: logutils.Debug(logutils.DebugKeyEnv), } } @@ -40,11 +39,11 @@ func (e *Env) Discover(ctx context.Context) error { args = append(args, string(EnvGoCache), string(EnvGoRoot)) out, err := exec.CommandContext(ctx, "go", args...).Output() if err != nil { - return errors.Wrap(err, "failed to run 'go env'") + return fmt.Errorf("failed to run 'go env': %w", err) } if err = json.Unmarshal(out, &e.vars); err != nil { - return errors.Wrapf(err, "failed to parse 'go %s' json", strings.Join(args, " ")) + return fmt.Errorf("failed to parse 'go %s' json: %w", strings.Join(args, " "), err) } e.debugf("Read go env for %s: %#v", time.Since(startedAt), e.vars) diff --git a/vendor/github.com/golangci/golangci-lint/pkg/lint/linter/config.go b/vendor/github.com/golangci/golangci-lint/pkg/lint/linter/config.go index 2372a011e3..ed5e5508ce 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/lint/linter/config.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/lint/linter/config.go @@ -1,7 +1,10 @@ package linter import ( + "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/packages" + + "github.com/golangci/golangci-lint/pkg/config" ) const ( @@ -39,6 +42,7 @@ type Config struct { AlternativeNames []string OriginalURL string // URL of original (not forked) repo, needed for autogenerated README + Internal bool // Internal linters cannot be disabled (ex: typecheck). 
CanAutoFix bool IsSlow bool DoesChangeTypes bool @@ -47,6 +51,16 @@ type Config struct { Deprecation *Deprecation } +func (lc *Config) WithEnabledByDefault() *Config { + lc.EnabledByDefault = true + return lc +} + +func (lc *Config) WithInternal() *Config { + lc.Internal = true + return lc +} + func (lc *Config) ConsiderSlow() *Config { lc.IsSlow = true return lc @@ -63,7 +77,7 @@ func (lc *Config) WithLoadFiles() *Config { func (lc *Config) WithLoadForGoAnalysis() *Config { lc = lc.WithLoadFiles() - lc.LoadMode |= packages.NeedImports | packages.NeedDeps | packages.NeedExportsFile | packages.NeedTypesSizes + lc.LoadMode |= packages.NeedImports | packages.NeedDeps | packages.NeedExportFile | packages.NeedTypesSizes lc.IsSlow = true return lc } @@ -119,6 +133,23 @@ func (lc *Config) Name() string { return lc.Linter.Name() } +func (lc *Config) WithNoopFallback(cfg *config.Config) *Config { + if cfg != nil && config.IsGreaterThanOrEqualGo122(cfg.Run.Go) { + lc.Linter = &Noop{ + name: lc.Linter.Name(), + desc: lc.Linter.Desc(), + run: func(_ *analysis.Pass) (any, error) { + return nil, nil + }, + } + + lc.LoadMode = 0 + return lc.WithLoadFiles() + } + + return lc +} + func NewConfig(linter Linter) *Config { lc := &Config{ Linter: linter, diff --git a/vendor/github.com/golangci/golangci-lint/pkg/lint/linter/linter.go b/vendor/github.com/golangci/golangci-lint/pkg/lint/linter/linter.go index cfe9ec0209..a65d6b9278 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/lint/linter/linter.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/lint/linter/linter.go @@ -3,6 +3,8 @@ package linter import ( "context" + "golang.org/x/tools/go/analysis" + "github.com/golangci/golangci-lint/pkg/result" ) @@ -11,3 +13,23 @@ type Linter interface { Name() string Desc() string } + +type Noop struct { + name string + desc string + run func(pass *analysis.Pass) (any, error) +} + +func (n Noop) Run(_ context.Context, lintCtx *Context) ([]result.Issue, error) { + lintCtx.Log.Warnf("%s is disabled because of generics."+ + " You can track the evolution of the generics support by following the https://github.com/golangci/golangci-lint/issues/2649.", n.name) + return nil, nil +} + +func (n Noop) Name() string { + return n.name +} + +func (n Noop) Desc() string { + return n.desc +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/custom_linters.go b/vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/custom_linters.go new file mode 100644 index 0000000000..c4378d0a2c --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/custom_linters.go @@ -0,0 +1,130 @@ +package lintersdb + +import ( + "errors" + "fmt" + "os" + "path/filepath" + "plugin" + + "github.com/spf13/viper" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" + "github.com/golangci/golangci-lint/pkg/lint/linter" +) + +type AnalyzerPlugin interface { + GetAnalyzers() []*analysis.Analyzer +} + +// getCustomLinterConfigs loads private linters that are specified in the golangci config file. 
+func (m *Manager) getCustomLinterConfigs() []*linter.Config { + if m.cfg == nil || m.log == nil { + return nil + } + + var linters []*linter.Config + + for name, settings := range m.cfg.LintersSettings.Custom { + lc, err := m.loadCustomLinterConfig(name, settings) + if err != nil { + m.log.Errorf("Unable to load custom analyzer %s:%s, %v", name, settings.Path, err) + } else { + linters = append(linters, lc) + } + } + + return linters +} + +// loadCustomLinterConfig loads the configuration of private linters. +// Private linters are dynamically loaded from .so plugin files. +func (m *Manager) loadCustomLinterConfig(name string, settings config.CustomLinterSettings) (*linter.Config, error) { + analyzers, err := m.getAnalyzerPlugin(settings.Path, settings.Settings) + if err != nil { + return nil, err + } + + m.log.Infof("Loaded %s: %s", settings.Path, name) + + customLinter := goanalysis.NewLinter(name, settings.Description, analyzers, nil). + WithLoadMode(goanalysis.LoadModeTypesInfo) + + linterConfig := linter.NewConfig(customLinter). + WithEnabledByDefault(). + WithLoadForGoAnalysis(). + WithURL(settings.OriginalURL) + + return linterConfig, nil +} + +// getAnalyzerPlugin loads a private linter as specified in the config file, +// loads the plugin from a .so file, +// and returns the 'AnalyzerPlugin' interface implemented by the private plugin. +// An error is returned if the private linter cannot be loaded +// or the linter does not implement the AnalyzerPlugin interface. +func (m *Manager) getAnalyzerPlugin(path string, settings any) ([]*analysis.Analyzer, error) { + if !filepath.IsAbs(path) { + // resolve non-absolute paths relative to config file's directory + configFilePath := viper.ConfigFileUsed() + absConfigFilePath, err := filepath.Abs(configFilePath) + if err != nil { + return nil, fmt.Errorf("could not get absolute representation of config file path %q: %w", configFilePath, err) + } + path = filepath.Join(filepath.Dir(absConfigFilePath), path) + } + + plug, err := plugin.Open(path) + if err != nil { + return nil, err + } + + analyzers, err := m.lookupPlugin(plug, settings) + if err != nil { + return nil, fmt.Errorf("lookup plugin %s: %w", path, err) + } + + return analyzers, nil +} + +func (m *Manager) lookupPlugin(plug *plugin.Plugin, settings any) ([]*analysis.Analyzer, error) { + symbol, err := plug.Lookup("New") + if err != nil { + analyzers, errP := m.lookupAnalyzerPlugin(plug) + if errP != nil { + return nil, errors.Join(err, errP) + } + + return analyzers, nil + } + + // The type func cannot be used here, must be the explicit signature. 
+ constructor, ok := symbol.(func(any) ([]*analysis.Analyzer, error)) + if !ok { + return nil, fmt.Errorf("plugin does not abide by 'New' function: %T", symbol) + } + + return constructor(settings) +} + +func (m *Manager) lookupAnalyzerPlugin(plug *plugin.Plugin) ([]*analysis.Analyzer, error) { + symbol, err := plug.Lookup("AnalyzerPlugin") + if err != nil { + return nil, err + } + + // TODO(ldez): remove this env var (but keep the log) in the next minor version (v1.55.0) + if _, ok := os.LookupEnv("GOLANGCI_LINT_HIDE_WARNING_ABOUT_PLUGIN_API_DEPRECATION"); !ok { + m.log.Warnf("plugin: 'AnalyzerPlugin' plugins are deprecated, please use the new plugin signature: " + + "https://golangci-lint.run/contributing/new-linters/#create-a-plugin") + } + + analyzerPlugin, ok := symbol.(AnalyzerPlugin) + if !ok { + return nil, fmt.Errorf("plugin does not abide by 'AnalyzerPlugin' interface: %T", symbol) + } + + return analyzerPlugin.GetAnalyzers(), nil +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/enabled_set.go b/vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/enabled_set.go index 9814aa857e..6f7b91b4d1 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/enabled_set.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/enabled_set.go @@ -3,7 +3,8 @@ package lintersdb import ( "os" "sort" - "strings" + + "golang.org/x/exp/maps" "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" @@ -11,6 +12,9 @@ import ( "github.com/golangci/golangci-lint/pkg/logutils" ) +// EnvTestRun value: "1" +const EnvTestRun = "GL_TEST_RUN" + type EnabledSet struct { m *Manager v *Validator @@ -25,12 +29,14 @@ func NewEnabledSet(m *Manager, v *Validator, log logutils.Log, cfg *config.Confi v: v, log: log, cfg: cfg, - debugf: logutils.Debug("enabled_linters"), + debugf: logutils.Debug(logutils.DebugKeyEnabledLinters), } } +//nolint:gocyclo // the complexity cannot be reduced. func (es EnabledSet) build(lcfg *config.Linters, enabledByDefaultLinters []*linter.Config) map[string]*linter.Config { es.debugf("Linters config: %#v", lcfg) + resultLintersSet := map[string]*linter.Config{} switch { case len(lcfg.Presets) != 0: @@ -76,6 +82,14 @@ func (es EnabledSet) build(lcfg *config.Linters, enabledByDefaultLinters []*lint } } + // typecheck is not a real linter and cannot be disabled. + if _, ok := resultLintersSet["typecheck"]; !ok && (es.cfg == nil || !es.cfg.InternalCmdTest) { + for _, lc := range es.m.GetLinterConfigs("typecheck") { + // it's important to use lc.Name() nor name because name can be alias + resultLintersSet[lc.Name()] = lc + } + } + return resultLintersSet } @@ -85,7 +99,7 @@ func (es EnabledSet) GetEnabledLintersMap() (map[string]*linter.Config, error) { } enabledLinters := es.build(&es.cfg.Linters, es.m.GetAllEnabledByDefaultLinters()) - if os.Getenv("GL_TEST_RUN") == "1" { + if os.Getenv(EnvTestRun) == "1" { es.verbosePrintLintersStatus(enabledLinters) } return enabledLinters, nil @@ -103,10 +117,7 @@ func (es EnabledSet) GetOptimizedLinters() ([]*linter.Config, error) { es.verbosePrintLintersStatus(resultLintersSet) es.combineGoAnalysisLinters(resultLintersSet) - var resultLinters []*linter.Config - for _, lc := range resultLintersSet { - resultLinters = append(resultLinters, lc) - } + resultLinters := maps.Values(resultLintersSet) // Make order of execution of linters (go/analysis metalinter and unused) stable. 
sort.Slice(resultLinters, func(i, j int) bool { @@ -123,7 +134,7 @@ func (es EnabledSet) GetOptimizedLinters() ([]*linter.Config, error) { if a.DoesChangeTypes != b.DoesChangeTypes { return b.DoesChangeTypes // move type-changing linters to the end to optimize speed } - return strings.Compare(a.Name(), b.Name()) < 0 + return a.Name() < b.Name() }) return resultLinters, nil @@ -132,8 +143,8 @@ func (es EnabledSet) GetOptimizedLinters() ([]*linter.Config, error) { func (es EnabledSet) combineGoAnalysisLinters(linters map[string]*linter.Config) { var goanalysisLinters []*goanalysis.Linter goanalysisPresets := map[string]bool{} - for _, linter := range linters { - lnt, ok := linter.Linter.(*goanalysis.Linter) + for _, lc := range linters { + lnt, ok := lc.Linter.(*goanalysis.Linter) if !ok { continue } @@ -142,7 +153,7 @@ func (es EnabledSet) combineGoAnalysisLinters(linters map[string]*linter.Config) continue } goanalysisLinters = append(goanalysisLinters, lnt) - for _, p := range linter.InPresets { + for _, p := range lc.InPresets { goanalysisPresets[p] = true } } @@ -168,15 +179,12 @@ func (es EnabledSet) combineGoAnalysisLinters(linters map[string]*linter.Config) return false } - return strings.Compare(a.Name(), b.Name()) <= 0 + return a.Name() <= b.Name() }) ml := goanalysis.NewMetaLinter(goanalysisLinters) - var presets []string - for p := range goanalysisPresets { - presets = append(presets, p) - } + presets := maps.Keys(goanalysisPresets) mlConfig := &linter.Config{ Linter: ml, @@ -195,6 +203,10 @@ func (es EnabledSet) combineGoAnalysisLinters(linters map[string]*linter.Config) func (es EnabledSet) verbosePrintLintersStatus(lcs map[string]*linter.Config) { var linterNames []string for _, lc := range lcs { + if lc.Internal { + continue + } + linterNames = append(linterNames, lc.Name()) } sort.StringSlice(linterNames).Sort() diff --git a/vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/manager.go b/vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/manager.go index fc760dc34a..4a3c1b4d64 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/manager.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/manager.go @@ -1,29 +1,26 @@ package lintersdb import ( - "fmt" - "path/filepath" - "plugin" - - "github.com/spf13/viper" - "golang.org/x/tools/go/analysis" + "regexp" "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/golinters" - "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" "github.com/golangci/golangci-lint/pkg/lint/linter" "github.com/golangci/golangci-lint/pkg/logutils" - "github.com/golangci/golangci-lint/pkg/report" ) type Manager struct { - nameToLCs map[string][]*linter.Config - cfg *config.Config - log logutils.Log + cfg *config.Config + log logutils.Log + + nameToLCs map[string][]*linter.Config + customLinters []*linter.Config } func NewManager(cfg *config.Config, log logutils.Log) *Manager { m := &Manager{cfg: cfg, log: log} + m.customLinters = m.getCustomLinterConfigs() + nameToLCs := make(map[string][]*linter.Config) for _, lc := range m.GetAllSupportedLinterConfigs() { for _, name := range lc.AllNames() { @@ -32,27 +29,7 @@ func NewManager(cfg *config.Config, log logutils.Log) *Manager { } m.nameToLCs = nameToLCs - return m -} -func (m *Manager) WithCustomLinters() *Manager { - if m.log == nil { - m.log = report.NewLogWrapper(logutils.NewStderrLog(""), &report.Data{}) - } - if m.cfg != nil { - for name, settings := range m.cfg.LintersSettings.Custom { - lc, err := 
m.loadCustomLinterConfig(name, settings) - - if err != nil { - m.log.Errorf("Unable to load custom analyzer %s:%s, %v", - name, - settings.Path, - err) - } else { - m.nameToLCs[name] = append(m.nameToLCs[name], lc) - } - } - } return m } @@ -86,444 +63,884 @@ func (m Manager) GetLinterConfigs(name string) []*linter.Config { return m.nameToLCs[name] } -func enableLinterConfigs(lcs []*linter.Config, isEnabled func(lc *linter.Config) bool) []*linter.Config { - var ret []*linter.Config - for _, lc := range lcs { - lc := lc - lc.EnabledByDefault = isEnabled(lc) - ret = append(ret, lc) - } - - return ret -} - //nolint:funlen func (m Manager) GetAllSupportedLinterConfigs() []*linter.Config { - var govetCfg *config.GovetSettings - var testpackageCfg *config.TestpackageSettings - var exhaustiveCfg *config.ExhaustiveSettings - var exhaustiveStructCfg *config.ExhaustiveStructSettings - var errorlintCfg *config.ErrorLintSettings - var thelperCfg *config.ThelperSettings - var predeclaredCfg *config.PredeclaredSettings - var ifshortCfg *config.IfshortSettings - var reviveCfg *config.ReviveSettings - var cyclopCfg *config.Cyclop - var importAsCfg *config.ImportAsSettings - var goModDirectivesCfg *config.GoModDirectivesSettings - var tagliatelleCfg *config.TagliatelleSettings - var gosecCfg *config.GoSecSettings - var gosimpleCfg *config.StaticCheckSettings - var staticcheckCfg *config.StaticCheckSettings - var stylecheckCfg *config.StaticCheckSettings - var unusedCfg *config.StaticCheckSettings - var wrapcheckCfg *config.WrapcheckSettings + var ( + asasalintCfg *config.AsasalintSettings + bidichkCfg *config.BiDiChkSettings + cyclopCfg *config.Cyclop + decorderCfg *config.DecorderSettings + depGuardCfg *config.DepGuardSettings + dogsledCfg *config.DogsledSettings + duplCfg *config.DuplSettings + dupwordCfg *config.DupWordSettings + errcheckCfg *config.ErrcheckSettings + errchkjsonCfg *config.ErrChkJSONSettings + errorlintCfg *config.ErrorLintSettings + exhaustiveCfg *config.ExhaustiveSettings + exhaustiveStructCfg *config.ExhaustiveStructSettings + exhaustructCfg *config.ExhaustructSettings + forbidigoCfg *config.ForbidigoSettings + funlenCfg *config.FunlenSettings + gciCfg *config.GciSettings + ginkgolinterCfg *config.GinkgoLinterSettings + gocognitCfg *config.GocognitSettings + goconstCfg *config.GoConstSettings + gocriticCfg *config.GoCriticSettings + gocycloCfg *config.GoCycloSettings + godotCfg *config.GodotSettings + godoxCfg *config.GodoxSettings + gofmtCfg *config.GoFmtSettings + gofumptCfg *config.GofumptSettings + goheaderCfg *config.GoHeaderSettings + goimportsCfg *config.GoImportsSettings + golintCfg *config.GoLintSettings + goMndCfg *config.GoMndSettings + goModDirectivesCfg *config.GoModDirectivesSettings + gomodguardCfg *config.GoModGuardSettings + gosecCfg *config.GoSecSettings + gosimpleCfg *config.StaticCheckSettings + gosmopolitanCfg *config.GosmopolitanSettings + govetCfg *config.GovetSettings + grouperCfg *config.GrouperSettings + ifshortCfg *config.IfshortSettings + importAsCfg *config.ImportAsSettings + inamedparamCfg *config.INamedParamSettings + interfaceBloatCfg *config.InterfaceBloatSettings + ireturnCfg *config.IreturnSettings + lllCfg *config.LllSettings + loggerCheckCfg *config.LoggerCheckSettings + maintIdxCfg *config.MaintIdxSettings + makezeroCfg *config.MakezeroSettings + malignedCfg *config.MalignedSettings + misspellCfg *config.MisspellSettings + musttagCfg *config.MustTagSettings + nakedretCfg *config.NakedretSettings + nestifCfg *config.NestifSettings + nilNilCfg 
*config.NilNilSettings + nlreturnCfg *config.NlreturnSettings + noLintLintCfg *config.NoLintLintSettings + noNamedReturnsCfg *config.NoNamedReturnsSettings + parallelTestCfg *config.ParallelTestSettings + perfSprintCfg *config.PerfSprintSettings + preallocCfg *config.PreallocSettings + predeclaredCfg *config.PredeclaredSettings + promlinterCfg *config.PromlinterSettings + protogetterCfg *config.ProtoGetterSettings + reassignCfg *config.ReassignSettings + reviveCfg *config.ReviveSettings + rowserrcheckCfg *config.RowsErrCheckSettings + sloglintCfg *config.SlogLintSettings + spancheckCfg *config.SpancheckSettings + staticcheckCfg *config.StaticCheckSettings + structcheckCfg *config.StructCheckSettings + stylecheckCfg *config.StaticCheckSettings + tagalignCfg *config.TagAlignSettings + tagliatelleCfg *config.TagliatelleSettings + tenvCfg *config.TenvSettings + testifylintCfg *config.TestifylintSettings + testpackageCfg *config.TestpackageSettings + thelperCfg *config.ThelperSettings + unparamCfg *config.UnparamSettings + unusedCfg *config.UnusedSettings + usestdlibvars *config.UseStdlibVarsSettings + varcheckCfg *config.VarCheckSettings + varnamelenCfg *config.VarnamelenSettings + whitespaceCfg *config.WhitespaceSettings + wrapcheckCfg *config.WrapcheckSettings + wslCfg *config.WSLSettings + ) if m.cfg != nil { - govetCfg = &m.cfg.LintersSettings.Govet - testpackageCfg = &m.cfg.LintersSettings.Testpackage + asasalintCfg = &m.cfg.LintersSettings.Asasalint + bidichkCfg = &m.cfg.LintersSettings.BiDiChk + cyclopCfg = &m.cfg.LintersSettings.Cyclop + decorderCfg = &m.cfg.LintersSettings.Decorder + depGuardCfg = &m.cfg.LintersSettings.Depguard + dogsledCfg = &m.cfg.LintersSettings.Dogsled + duplCfg = &m.cfg.LintersSettings.Dupl + dupwordCfg = &m.cfg.LintersSettings.DupWord + errcheckCfg = &m.cfg.LintersSettings.Errcheck + errchkjsonCfg = &m.cfg.LintersSettings.ErrChkJSON + errorlintCfg = &m.cfg.LintersSettings.ErrorLint exhaustiveCfg = &m.cfg.LintersSettings.Exhaustive exhaustiveStructCfg = &m.cfg.LintersSettings.ExhaustiveStruct - errorlintCfg = &m.cfg.LintersSettings.ErrorLint - thelperCfg = &m.cfg.LintersSettings.Thelper - predeclaredCfg = &m.cfg.LintersSettings.Predeclared - ifshortCfg = &m.cfg.LintersSettings.Ifshort - reviveCfg = &m.cfg.LintersSettings.Revive - cyclopCfg = &m.cfg.LintersSettings.Cyclop - importAsCfg = &m.cfg.LintersSettings.ImportAs + exhaustructCfg = &m.cfg.LintersSettings.Exhaustruct + forbidigoCfg = &m.cfg.LintersSettings.Forbidigo + funlenCfg = &m.cfg.LintersSettings.Funlen + gciCfg = &m.cfg.LintersSettings.Gci + ginkgolinterCfg = &m.cfg.LintersSettings.GinkgoLinter + gocognitCfg = &m.cfg.LintersSettings.Gocognit + goconstCfg = &m.cfg.LintersSettings.Goconst + gocriticCfg = &m.cfg.LintersSettings.Gocritic + gocycloCfg = &m.cfg.LintersSettings.Gocyclo + godotCfg = &m.cfg.LintersSettings.Godot + godoxCfg = &m.cfg.LintersSettings.Godox + gofmtCfg = &m.cfg.LintersSettings.Gofmt + gofumptCfg = &m.cfg.LintersSettings.Gofumpt + goheaderCfg = &m.cfg.LintersSettings.Goheader + goimportsCfg = &m.cfg.LintersSettings.Goimports + golintCfg = &m.cfg.LintersSettings.Golint + goMndCfg = &m.cfg.LintersSettings.Gomnd goModDirectivesCfg = &m.cfg.LintersSettings.GoModDirectives - tagliatelleCfg = &m.cfg.LintersSettings.Tagliatelle + gomodguardCfg = &m.cfg.LintersSettings.Gomodguard gosecCfg = &m.cfg.LintersSettings.Gosec gosimpleCfg = &m.cfg.LintersSettings.Gosimple + gosmopolitanCfg = &m.cfg.LintersSettings.Gosmopolitan + govetCfg = &m.cfg.LintersSettings.Govet + grouperCfg = 
&m.cfg.LintersSettings.Grouper + ifshortCfg = &m.cfg.LintersSettings.Ifshort + importAsCfg = &m.cfg.LintersSettings.ImportAs + inamedparamCfg = &m.cfg.LintersSettings.Inamedparam + interfaceBloatCfg = &m.cfg.LintersSettings.InterfaceBloat + ireturnCfg = &m.cfg.LintersSettings.Ireturn + lllCfg = &m.cfg.LintersSettings.Lll + loggerCheckCfg = &m.cfg.LintersSettings.LoggerCheck + maintIdxCfg = &m.cfg.LintersSettings.MaintIdx + makezeroCfg = &m.cfg.LintersSettings.Makezero + malignedCfg = &m.cfg.LintersSettings.Maligned + misspellCfg = &m.cfg.LintersSettings.Misspell + musttagCfg = &m.cfg.LintersSettings.MustTag + nakedretCfg = &m.cfg.LintersSettings.Nakedret + nestifCfg = &m.cfg.LintersSettings.Nestif + nilNilCfg = &m.cfg.LintersSettings.NilNil + nlreturnCfg = &m.cfg.LintersSettings.Nlreturn + noLintLintCfg = &m.cfg.LintersSettings.NoLintLint + noNamedReturnsCfg = &m.cfg.LintersSettings.NoNamedReturns + parallelTestCfg = &m.cfg.LintersSettings.ParallelTest + perfSprintCfg = &m.cfg.LintersSettings.PerfSprint + preallocCfg = &m.cfg.LintersSettings.Prealloc + predeclaredCfg = &m.cfg.LintersSettings.Predeclared + promlinterCfg = &m.cfg.LintersSettings.Promlinter + protogetterCfg = &m.cfg.LintersSettings.ProtoGetter + reassignCfg = &m.cfg.LintersSettings.Reassign + reviveCfg = &m.cfg.LintersSettings.Revive + rowserrcheckCfg = &m.cfg.LintersSettings.RowsErrCheck + sloglintCfg = &m.cfg.LintersSettings.SlogLint + spancheckCfg = &m.cfg.LintersSettings.Spancheck staticcheckCfg = &m.cfg.LintersSettings.Staticcheck + structcheckCfg = &m.cfg.LintersSettings.Structcheck stylecheckCfg = &m.cfg.LintersSettings.Stylecheck + tagalignCfg = &m.cfg.LintersSettings.TagAlign + tagliatelleCfg = &m.cfg.LintersSettings.Tagliatelle + tenvCfg = &m.cfg.LintersSettings.Tenv + testifylintCfg = &m.cfg.LintersSettings.Testifylint + testpackageCfg = &m.cfg.LintersSettings.Testpackage + thelperCfg = &m.cfg.LintersSettings.Thelper + unparamCfg = &m.cfg.LintersSettings.Unparam unusedCfg = &m.cfg.LintersSettings.Unused + usestdlibvars = &m.cfg.LintersSettings.UseStdlibVars + varcheckCfg = &m.cfg.LintersSettings.Varcheck + varnamelenCfg = &m.cfg.LintersSettings.Varnamelen + whitespaceCfg = &m.cfg.LintersSettings.Whitespace wrapcheckCfg = &m.cfg.LintersSettings.Wrapcheck + wslCfg = &m.cfg.LintersSettings.WSL + + if govetCfg != nil { + govetCfg.Go = m.cfg.Run.Go + } + + if gocriticCfg != nil { + gocriticCfg.Go = trimGoVersion(m.cfg.Run.Go) + } + + if gofumptCfg != nil && gofumptCfg.LangVersion == "" { + gofumptCfg.LangVersion = m.cfg.Run.Go + } + + if staticcheckCfg != nil && staticcheckCfg.GoVersion == "" { + staticcheckCfg.GoVersion = trimGoVersion(m.cfg.Run.Go) + } + if gosimpleCfg != nil && gosimpleCfg.GoVersion == "" { + gosimpleCfg.GoVersion = trimGoVersion(m.cfg.Run.Go) + } + if stylecheckCfg != nil && stylecheckCfg.GoVersion != "" { + stylecheckCfg.GoVersion = trimGoVersion(m.cfg.Run.Go) + } } const megacheckName = "megacheck" - lcs := []*linter.Config{ - linter.NewConfig(golinters.NewGovet(govetCfg)). - WithSince("v1.0.0"). + var linters []*linter.Config + linters = append(linters, m.customLinters...) + + // The linters are sorted in the alphabetical order (case-insensitive). + // When a new linter is added the version in `WithSince(...)` must be the next minor version of golangci-lint. + linters = append(linters, + linter.NewConfig(golinters.NewAsasalint(asasalintCfg)). + WithSince("1.47.0"). + WithPresets(linter.PresetBugs). WithLoadForGoAnalysis(). - WithPresets(linter.PresetBugs, linter.PresetMetaLinter). 
- WithAlternativeNames("vet", "vetshadow"). - WithURL("https://golang.org/cmd/vet/"), + WithURL("https://github.com/alingse/asasalint"), + + linter.NewConfig(golinters.NewAsciicheck()). + WithSince("v1.26.0"). + WithPresets(linter.PresetBugs, linter.PresetStyle). + WithURL("https://github.com/tdakkota/asciicheck"), + + linter.NewConfig(golinters.NewBiDiChkFuncName(bidichkCfg)). + WithSince("1.43.0"). + WithPresets(linter.PresetBugs). + WithURL("https://github.com/breml/bidichk"), + linter.NewConfig(golinters.NewBodyclose()). WithSince("v1.18.0"). WithLoadForGoAnalysis(). WithPresets(linter.PresetPerformance, linter.PresetBugs). WithURL("https://github.com/timakin/bodyclose"), - linter.NewConfig(golinters.NewNoctx()). - WithSince("v1.28.0"). + + linter.NewConfig(golinters.NewContainedCtx()). + WithSince("1.44.0"). WithLoadForGoAnalysis(). - WithPresets(linter.PresetPerformance, linter.PresetBugs). - WithURL("https://github.com/sonatard/noctx"), - linter.NewConfig(golinters.NewErrcheck()). - WithSince("v1.0.0"). + WithPresets(linter.PresetStyle). + WithURL("https://github.com/sivchari/containedctx"), + + linter.NewConfig(golinters.NewContextCheck()). + WithSince("v1.43.0"). + WithPresets(linter.PresetBugs). WithLoadForGoAnalysis(). - WithPresets(linter.PresetBugs, linter.PresetError). - WithURL("https://github.com/kisielk/errcheck"), - linter.NewConfig(golinters.NewGolint()). + WithURL("https://github.com/kkHAIKE/contextcheck"), + + linter.NewConfig(golinters.NewCyclop(cyclopCfg)). + WithSince("v1.37.0"). + WithLoadForGoAnalysis(). + WithPresets(linter.PresetComplexity). + WithURL("https://github.com/bkielbasa/cyclop"), + + linter.NewConfig(golinters.NewDecorder(decorderCfg)). + WithSince("v1.44.0"). + WithPresets(linter.PresetFormatting, linter.PresetStyle). + WithURL("https://gitlab.com/bosi/decorder"), + + linter.NewConfig(golinters.NewDeadcode()). WithSince("v1.0.0"). WithLoadForGoAnalysis(). + WithPresets(linter.PresetUnused). + WithURL("https://github.com/remyoudompheng/go-misc/tree/master/deadcode"). + Deprecated("The owner seems to have abandoned the linter.", "v1.49.0", "unused"), + + linter.NewConfig(golinters.NewDepguard(depGuardCfg)). + WithSince("v1.4.0"). + WithPresets(linter.PresetStyle, linter.PresetImport, linter.PresetModule). + WithURL("https://github.com/OpenPeeDeeP/depguard"), + + linter.NewConfig(golinters.NewDogsled(dogsledCfg)). + WithSince("v1.19.0"). WithPresets(linter.PresetStyle). - WithURL("https://github.com/golang/lint"). - Deprecated("The repository of the linter has been archived by the owner.", "v1.41.0", "revive"), - linter.NewConfig(golinters.NewRowsErrCheck()). - WithSince("v1.23.0"). - WithLoadForGoAnalysis(). - WithPresets(linter.PresetBugs, linter.PresetSQL). - WithURL("https://github.com/jingyugao/rowserrcheck"), + WithURL("https://github.com/alexkohler/dogsled"), - linter.NewConfig(golinters.NewStaticcheck(staticcheckCfg)). + linter.NewConfig(golinters.NewDupl(duplCfg)). WithSince("v1.0.0"). + WithPresets(linter.PresetStyle). + WithURL("https://github.com/mibk/dupl"), + + linter.NewConfig(golinters.NewDupWord(dupwordCfg)). + WithSince("1.50.0"). + WithPresets(linter.PresetComment). + WithAutoFix(). + WithURL("https://github.com/Abirdcfly/dupword"), + + linter.NewConfig(golinters.NewDurationCheck()). + WithSince("v1.37.0"). + WithPresets(linter.PresetBugs). WithLoadForGoAnalysis(). - WithPresets(linter.PresetBugs, linter.PresetMetaLinter). - WithAlternativeNames(megacheckName). 
- WithURL("https://staticcheck.io/"), - linter.NewConfig(golinters.NewUnused(unusedCfg)). - WithSince("v1.20.0"). + WithURL("https://github.com/charithe/durationcheck"), + + linter.NewConfig(golinters.NewErrcheck(errcheckCfg)). + WithEnabledByDefault(). + WithSince("v1.0.0"). WithLoadForGoAnalysis(). - WithPresets(linter.PresetUnused). - WithAlternativeNames(megacheckName). - ConsiderSlow(). - WithChangeTypes(). - WithURL("https://github.com/dominikh/go-tools/tree/master/unused"), - linter.NewConfig(golinters.NewGosimple(gosimpleCfg)). - WithSince("v1.20.0"). + WithPresets(linter.PresetBugs, linter.PresetError). + WithURL("https://github.com/kisielk/errcheck"), + + linter.NewConfig(golinters.NewErrChkJSONFuncName(errchkjsonCfg)). + WithSince("1.44.0"). + WithPresets(linter.PresetBugs). WithLoadForGoAnalysis(). + WithURL("https://github.com/breml/errchkjson"), + + linter.NewConfig(golinters.NewErrName()). + WithSince("v1.42.0"). WithPresets(linter.PresetStyle). - WithAlternativeNames(megacheckName). - WithURL("https://github.com/dominikh/go-tools/tree/master/simple"), + WithLoadForGoAnalysis(). + WithURL("https://github.com/Antonboom/errname"), - linter.NewConfig(golinters.NewStylecheck(stylecheckCfg)). - WithSince("v1.20.0"). + linter.NewConfig(golinters.NewErrorLint(errorlintCfg)). + WithSince("v1.32.0"). + WithPresets(linter.PresetBugs, linter.PresetError). WithLoadForGoAnalysis(). - WithPresets(linter.PresetStyle). - WithURL("https://github.com/dominikh/go-tools/tree/master/stylecheck"), - linter.NewConfig(golinters.NewGosec(gosecCfg)). - WithSince("v1.0.0"). + WithURL("https://github.com/polyfloyd/go-errorlint"), + + linter.NewConfig(golinters.NewExecInQuery()). + WithSince("v1.46.0"). + WithPresets(linter.PresetSQL). WithLoadForGoAnalysis(). + WithURL("https://github.com/lufeee/execinquery"), + + linter.NewConfig(golinters.NewExhaustive(exhaustiveCfg)). + WithSince(" v1.28.0"). WithPresets(linter.PresetBugs). - WithURL("https://github.com/securego/gosec"). - WithAlternativeNames("gas"), - linter.NewConfig(golinters.NewStructcheck()). - WithSince("v1.0.0"). WithLoadForGoAnalysis(). - WithPresets(linter.PresetUnused). - WithURL("https://github.com/opennota/check"), - linter.NewConfig(golinters.NewVarcheck()). - WithSince("v1.0.0"). + WithURL("https://github.com/nishanths/exhaustive"), + + linter.NewConfig(golinters.NewExhaustiveStruct(exhaustiveStructCfg)). + WithSince("v1.32.0"). + WithPresets(linter.PresetStyle, linter.PresetTest). WithLoadForGoAnalysis(). - WithPresets(linter.PresetUnused). - WithURL("https://github.com/opennota/check"), - linter.NewConfig(golinters.NewInterfacer()). - WithSince("v1.0.0"). + WithURL("https://github.com/mbilski/exhaustivestruct"). + Deprecated("The owner seems to have abandoned the linter.", "v1.46.0", "exhaustruct"), + + linter.NewConfig(golinters.NewExhaustruct(exhaustructCfg)). + WithSince("v1.46.0"). + WithPresets(linter.PresetStyle, linter.PresetTest). + WithLoadForGoAnalysis(). + WithURL("https://github.com/GaijinEntertainment/go-exhaustruct"), + + linter.NewConfig(golinters.NewExportLoopRef()). + WithSince("v1.28.0"). + WithPresets(linter.PresetBugs). WithLoadForGoAnalysis(). + WithURL("https://github.com/kyoh86/exportloopref"), + + linter.NewConfig(golinters.NewForbidigo(forbidigoCfg)). + WithSince("v1.34.0"). WithPresets(linter.PresetStyle). - WithURL("https://github.com/mvdan/interfacer"). - Deprecated("The repository of the linter has been archived by the owner.", "v1.38.0", ""), - linter.NewConfig(golinters.NewUnconvert()). 
- WithSince("v1.0.0"). + // Strictly speaking, + // the additional information is only needed when forbidigoCfg.AnalyzeTypes is chosen by the user. + // But we don't know that here in all cases (sometimes config is not loaded), + // so we have to assume that it is needed to be on the safe side. WithLoadForGoAnalysis(). + WithURL("https://github.com/ashanbrown/forbidigo"), + + linter.NewConfig(golinters.NewForceTypeAssert()). + WithSince("v1.38.0"). WithPresets(linter.PresetStyle). - WithURL("https://github.com/mdempsky/unconvert"), - linter.NewConfig(golinters.NewIneffassign()). - WithSince("v1.0.0"). - WithPresets(linter.PresetUnused). - WithURL("https://github.com/gordonklaus/ineffassign"), - linter.NewConfig(golinters.NewDupl()). - WithSince("v1.0.0"). + WithURL("https://github.com/gostaticanalysis/forcetypeassert"), + + linter.NewConfig(golinters.NewFunlen(funlenCfg)). + WithSince("v1.18.0"). + WithPresets(linter.PresetComplexity). + WithURL("https://github.com/ultraware/funlen"), + + linter.NewConfig(golinters.NewGci(gciCfg)). + WithSince("v1.30.0"). + WithPresets(linter.PresetFormatting, linter.PresetImport). + WithURL("https://github.com/daixiang0/gci"), + + linter.NewConfig(golinters.NewGinkgoLinter(ginkgolinterCfg)). + WithSince("v1.51.0"). + WithLoadForGoAnalysis(). WithPresets(linter.PresetStyle). - WithURL("https://github.com/mibk/dupl"), - linter.NewConfig(golinters.NewGoconst()). - WithSince("v1.0.0"). + WithURL("https://github.com/nunnatsa/ginkgolinter"), + + linter.NewConfig(golinters.NewGoCheckCompilerDirectives()). + WithSince("v1.51.0"). + WithPresets(linter.PresetBugs). + WithURL("https://github.com/leighmcculloch/gocheckcompilerdirectives"), + + linter.NewConfig(golinters.NewGochecknoglobals()). + WithSince("v1.12.0"). WithPresets(linter.PresetStyle). - WithURL("https://github.com/jgautheron/goconst"), - linter.NewConfig(golinters.NewDeadcode()). - WithSince("v1.0.0"). WithLoadForGoAnalysis(). - WithPresets(linter.PresetUnused). - WithURL("https://github.com/remyoudompheng/go-misc/tree/master/deadcode"), - linter.NewConfig(golinters.NewGocyclo()). - WithSince("v1.0.0"). - WithPresets(linter.PresetComplexity). - WithURL("https://github.com/fzipp/gocyclo"), - linter.NewConfig(golinters.NewCyclop(cyclopCfg)). - WithSince("v1.37.0"). + WithURL("https://github.com/leighmcculloch/gochecknoglobals"), + + linter.NewConfig(golinters.NewGochecknoinits()). + WithSince("v1.12.0"). + WithPresets(linter.PresetStyle), + + linter.NewConfig(golinters.NewGoCheckSumType()). + WithSince("v1.55.0"). + WithPresets(linter.PresetBugs). WithLoadForGoAnalysis(). - WithPresets(linter.PresetComplexity). - WithURL("https://github.com/bkielbasa/cyclop"), - linter.NewConfig(golinters.NewGocognit()). + WithURL("https://github.com/alecthomas/go-check-sumtype"), + + linter.NewConfig(golinters.NewGocognit(gocognitCfg)). WithSince("v1.20.0"). WithPresets(linter.PresetComplexity). WithURL("https://github.com/uudashr/gocognit"), - linter.NewConfig(golinters.NewTypecheck()). - WithSince("v1.3.0"). + + linter.NewConfig(golinters.NewGoconst(goconstCfg)). + WithSince("v1.0.0"). + WithPresets(linter.PresetStyle). + WithURL("https://github.com/jgautheron/goconst"), + + linter.NewConfig(golinters.NewGoCritic(gocriticCfg, m.cfg)). + WithSince("v1.12.0"). + WithPresets(linter.PresetStyle, linter.PresetMetaLinter). WithLoadForGoAnalysis(). - WithPresets(linter.PresetBugs). - WithURL(""), - linter.NewConfig(golinters.NewAsciicheck()). 
+ WithURL("https://github.com/go-critic/go-critic"), + + linter.NewConfig(golinters.NewGocyclo(gocycloCfg)). + WithSince("v1.0.0"). + WithPresets(linter.PresetComplexity). + WithURL("https://github.com/fzipp/gocyclo"), + + linter.NewConfig(golinters.NewGodot(godotCfg)). + WithSince("v1.25.0"). + WithPresets(linter.PresetStyle, linter.PresetComment). + WithAutoFix(). + WithURL("https://github.com/tetafro/godot"), + + linter.NewConfig(golinters.NewGodox(godoxCfg)). + WithSince("v1.19.0"). + WithPresets(linter.PresetStyle, linter.PresetComment). + WithURL("https://github.com/matoous/godox"), + + linter.NewConfig(golinters.NewGoerr113()). WithSince("v1.26.0"). - WithPresets(linter.PresetBugs, linter.PresetStyle). - WithURL("https://github.com/tdakkota/asciicheck"), + WithPresets(linter.PresetStyle, linter.PresetError). + WithLoadForGoAnalysis(). + WithURL("https://github.com/Djarvur/go-err113"), - linter.NewConfig(golinters.NewGofmt()). + linter.NewConfig(golinters.NewGofmt(gofmtCfg)). WithSince("v1.0.0"). WithPresets(linter.PresetFormatting). WithAutoFix(). - WithURL("https://golang.org/cmd/gofmt/"), - linter.NewConfig(golinters.NewGofumpt()). + WithURL("https://pkg.go.dev/cmd/gofmt"), + + linter.NewConfig(golinters.NewGofumpt(gofumptCfg)). WithSince("v1.28.0"). WithPresets(linter.PresetFormatting). WithAutoFix(). WithURL("https://github.com/mvdan/gofumpt"), - linter.NewConfig(golinters.NewGoimports()). - WithSince("v1.20.0"). - WithPresets(linter.PresetFormatting, linter.PresetImport). - WithAutoFix(). - WithURL("https://godoc.org/golang.org/x/tools/cmd/goimports"), - linter.NewConfig(golinters.NewGoHeader()). + + linter.NewConfig(golinters.NewGoHeader(goheaderCfg)). WithSince("v1.28.0"). WithPresets(linter.PresetStyle). - WithURL("https://github.com/denis-tingajkin/go-header"), - linter.NewConfig(golinters.NewGci()). - WithSince("v1.30.0"). + WithURL("https://github.com/denis-tingaikin/go-header"), + + linter.NewConfig(golinters.NewGoimports(goimportsCfg)). + WithSince("v1.20.0"). WithPresets(linter.PresetFormatting, linter.PresetImport). WithAutoFix(). - WithURL("https://github.com/daixiang0/gci"), - linter.NewConfig(golinters.NewMaligned()). + WithURL("https://pkg.go.dev/golang.org/x/tools/cmd/goimports"), + + linter.NewConfig(golinters.NewGolint(golintCfg)). + WithSince("v1.0.0"). + WithLoadForGoAnalysis(). + WithPresets(linter.PresetStyle). + WithURL("https://github.com/golang/lint"). + Deprecated("The repository of the linter has been archived by the owner.", "v1.41.0", "revive"), + + linter.NewConfig(golinters.NewGoMND(goMndCfg)). + WithSince("v1.22.0"). + WithPresets(linter.PresetStyle). + WithURL("https://github.com/tommy-muehle/go-mnd"), + + linter.NewConfig(golinters.NewGoModDirectives(goModDirectivesCfg)). + WithSince("v1.39.0"). + WithPresets(linter.PresetStyle, linter.PresetModule). + WithURL("https://github.com/ldez/gomoddirectives"), + + linter.NewConfig(golinters.NewGomodguard(gomodguardCfg)). + WithSince("v1.25.0"). + WithPresets(linter.PresetStyle, linter.PresetImport, linter.PresetModule). + WithURL("https://github.com/ryancurrah/gomodguard"), + + linter.NewConfig(golinters.NewGoPrintfFuncName()). + WithSince("v1.23.0"). + WithPresets(linter.PresetStyle). + WithURL("https://github.com/jirfag/go-printf-func-name"), + + linter.NewConfig(golinters.NewGosec(gosecCfg)). + WithSince("v1.0.0"). + WithLoadForGoAnalysis(). + WithPresets(linter.PresetBugs). + WithURL("https://github.com/securego/gosec"). 
+ WithAlternativeNames("gas"), + + linter.NewConfig(golinters.NewGosimple(gosimpleCfg)). + WithEnabledByDefault(). + WithSince("v1.20.0"). + WithLoadForGoAnalysis(). + WithPresets(linter.PresetStyle). + WithAlternativeNames(megacheckName). + WithURL("https://github.com/dominikh/go-tools/tree/master/simple"), + + linter.NewConfig(golinters.NewGosmopolitan(gosmopolitanCfg)). + WithSince("v1.53.0"). + WithLoadForGoAnalysis(). + WithPresets(linter.PresetBugs). + WithURL("https://github.com/xen0n/gosmopolitan"), + + linter.NewConfig(golinters.NewGovet(govetCfg)). + WithEnabledByDefault(). + WithSince("v1.0.0"). + WithLoadForGoAnalysis(). + WithPresets(linter.PresetBugs, linter.PresetMetaLinter). + WithAlternativeNames("vet", "vetshadow"). + WithURL("https://pkg.go.dev/cmd/vet"), + + linter.NewConfig(golinters.NewGrouper(grouperCfg)). + WithSince("v1.44.0"). + WithPresets(linter.PresetStyle). + WithURL("https://github.com/leonklingele/grouper"), + + linter.NewConfig(golinters.NewIfshort(ifshortCfg)). + WithSince("v1.36.0"). + WithPresets(linter.PresetStyle). + WithURL("https://github.com/esimonov/ifshort"). + Deprecated("The repository of the linter has been deprecated by the owner.", "v1.48.0", ""), + + linter.NewConfig(golinters.NewImportAs(importAsCfg)). + WithSince("v1.38.0"). + WithPresets(linter.PresetStyle). + WithLoadForGoAnalysis(). + WithURL("https://github.com/julz/importas"), + + linter.NewConfig(golinters.NewINamedParam(inamedparamCfg)). + WithSince("v1.55.0"). + WithPresets(linter.PresetStyle). + WithURL("https://github.com/macabu/inamedparam"), + + linter.NewConfig(golinters.NewIneffassign()). + WithEnabledByDefault(). + WithSince("v1.0.0"). + WithPresets(linter.PresetUnused). + WithURL("https://github.com/gordonklaus/ineffassign"), + + linter.NewConfig(golinters.NewInterfaceBloat(interfaceBloatCfg)). + WithSince("v1.49.0"). + WithPresets(linter.PresetStyle). + WithURL("https://github.com/sashamelentyev/interfacebloat"), + + linter.NewConfig(golinters.NewInterfacer()). + WithSince("v1.0.0"). + WithLoadForGoAnalysis(). + WithPresets(linter.PresetStyle). + WithURL("https://github.com/mvdan/interfacer"). + Deprecated("The repository of the linter has been archived by the owner.", "v1.38.0", ""), + + linter.NewConfig(golinters.NewIreturn(ireturnCfg)). + WithSince("v1.43.0"). + WithPresets(linter.PresetStyle). + WithLoadForGoAnalysis(). + WithURL("https://github.com/butuzov/ireturn"), + + linter.NewConfig(golinters.NewLLL(lllCfg)). + WithSince("v1.8.0"). + WithPresets(linter.PresetStyle), + + linter.NewConfig(golinters.NewLoggerCheck(loggerCheckCfg)). + WithSince("v1.49.0"). + WithLoadForGoAnalysis(). + WithPresets(linter.PresetStyle, linter.PresetBugs). + WithAlternativeNames("logrlint"). + WithURL("https://github.com/timonwong/loggercheck"), + + linter.NewConfig(golinters.NewMaintIdx(maintIdxCfg)). + WithSince("v1.44.0"). + WithPresets(linter.PresetComplexity). + WithURL("https://github.com/yagipy/maintidx"), + + linter.NewConfig(golinters.NewMakezero(makezeroCfg)). + WithSince("v1.34.0"). + WithPresets(linter.PresetStyle, linter.PresetBugs). + WithLoadForGoAnalysis(). + WithURL("https://github.com/ashanbrown/makezero"), + + linter.NewConfig(golinters.NewMaligned(malignedCfg)). WithSince("v1.0.0"). WithLoadForGoAnalysis(). WithPresets(linter.PresetPerformance). WithURL("https://github.com/mdempsky/maligned"). Deprecated("The repository of the linter has been archived by the owner.", "v1.38.0", "govet 'fieldalignment'"), - linter.NewConfig(golinters.NewDepguard()). 
- WithSince("v1.4.0"). + + linter.NewConfig(golinters.NewMirror()). + WithSince("v1.53.0"). + WithPresets(linter.PresetStyle). WithLoadForGoAnalysis(). - WithPresets(linter.PresetStyle, linter.PresetImport, linter.PresetModule). - WithURL("https://github.com/OpenPeeDeeP/depguard"), - linter.NewConfig(golinters.NewMisspell()). + WithURL("https://github.com/butuzov/mirror"), + + linter.NewConfig(golinters.NewMisspell(misspellCfg)). WithSince("v1.8.0"). WithPresets(linter.PresetStyle, linter.PresetComment). WithAutoFix(). WithURL("https://github.com/client9/misspell"), - linter.NewConfig(golinters.NewLLL()). - WithSince("v1.8.0"). - WithPresets(linter.PresetStyle), - linter.NewConfig(golinters.NewUnparam()). - WithSince("v1.9.0"). - WithPresets(linter.PresetUnused). + + linter.NewConfig(golinters.NewMustTag(musttagCfg)). + WithSince("v1.51.0"). WithLoadForGoAnalysis(). - WithURL("https://github.com/mvdan/unparam"), - linter.NewConfig(golinters.NewDogsled()). - WithSince("v1.19.0"). - WithPresets(linter.PresetStyle). - WithURL("https://github.com/alexkohler/dogsled"), - linter.NewConfig(golinters.NewNakedret()). + WithPresets(linter.PresetStyle, linter.PresetBugs). + WithURL("https://github.com/go-simpler/musttag"), + + linter.NewConfig(golinters.NewNakedret(nakedretCfg)). WithSince("v1.19.0"). WithPresets(linter.PresetStyle). WithURL("https://github.com/alexkohler/nakedret"), - linter.NewConfig(golinters.NewPrealloc()). - WithSince("v1.19.0"). - WithPresets(linter.PresetPerformance). - WithURL("https://github.com/alexkohler/prealloc"), - linter.NewConfig(golinters.NewScopelint()). - WithSince("v1.12.0"). + + linter.NewConfig(golinters.NewNestif(nestifCfg)). + WithSince("v1.25.0"). + WithPresets(linter.PresetComplexity). + WithURL("https://github.com/nakabonne/nestif"), + + linter.NewConfig(golinters.NewNilErr()). + WithSince("v1.38.0"). + WithLoadForGoAnalysis(). WithPresets(linter.PresetBugs). - WithURL("https://github.com/kyoh86/scopelint"). - Deprecated("The repository of the linter has been deprecated by the owner.", "v1.39.0", "exportloopref"), - linter.NewConfig(golinters.NewGocritic()). - WithSince("v1.12.0"). - WithPresets(linter.PresetStyle, linter.PresetMetaLinter). + WithURL("https://github.com/gostaticanalysis/nilerr"), + + linter.NewConfig(golinters.NewNilNil(nilNilCfg)). + WithSince("v1.43.0"). + WithPresets(linter.PresetStyle). WithLoadForGoAnalysis(). - WithURL("https://github.com/go-critic/go-critic"), - linter.NewConfig(golinters.NewGochecknoinits()). - WithSince("v1.12.0"). + WithURL("https://github.com/Antonboom/nilnil"), + + linter.NewConfig(golinters.NewNLReturn(nlreturnCfg)). + WithSince("v1.30.0"). WithPresets(linter.PresetStyle). - WithURL("https://github.com/leighmcculloch/gochecknoinits"), - linter.NewConfig(golinters.NewGochecknoglobals()). - WithSince("v1.12.0"). + WithURL("https://github.com/ssgreg/nlreturn"), + + linter.NewConfig(golinters.NewNoctx()). + WithSince("v1.28.0"). + WithLoadForGoAnalysis(). + WithPresets(linter.PresetPerformance, linter.PresetBugs). + WithURL("https://github.com/sonatard/noctx"), + + linter.NewConfig(golinters.NewNoNamedReturns(noNamedReturnsCfg)). + WithSince("v1.46.0"). + WithLoadForGoAnalysis(). WithPresets(linter.PresetStyle). - WithURL("https://github.com/leighmcculloch/gochecknoglobals"), - linter.NewConfig(golinters.NewGodox()). - WithSince("v1.19.0"). - WithPresets(linter.PresetStyle, linter.PresetComment). - WithURL("https://github.com/matoous/godox"), - linter.NewConfig(golinters.NewFunlen()). - WithSince("v1.18.0"). 
- WithPresets(linter.PresetComplexity). - WithURL("https://github.com/ultraware/funlen"), - linter.NewConfig(golinters.NewWhitespace()). - WithSince("v1.19.0"). + WithURL("https://github.com/firefart/nonamedreturns"), + + linter.NewConfig(golinters.NewNoSnakeCase()). + WithSince("v1.47.0"). WithPresets(linter.PresetStyle). - WithAutoFix(). - WithURL("https://github.com/ultraware/whitespace"), - linter.NewConfig(golinters.NewWSL()). - WithSince("v1.20.0"). + WithURL("https://github.com/sivchari/nosnakecase"). + Deprecated("The repository of the linter has been deprecated by the owner.", "v1.48.1", "revive(var-naming)"), + + linter.NewConfig(golinters.NewNoSprintfHostPort()). + WithSince("v1.46.0"). WithPresets(linter.PresetStyle). - WithURL("https://github.com/bombsimon/wsl"), - linter.NewConfig(golinters.NewGoPrintfFuncName()). - WithSince("v1.23.0"). + WithURL("https://github.com/stbenjam/no-sprintf-host-port"), + + linter.NewConfig(golinters.NewParallelTest(parallelTestCfg)). + WithSince("v1.33.0"). + WithLoadForGoAnalysis(). + WithPresets(linter.PresetStyle, linter.PresetTest). + WithURL("https://github.com/kunwardeep/paralleltest"), + + linter.NewConfig(golinters.NewPerfSprint(perfSprintCfg)). + WithSince("v1.55.0"). + WithLoadForGoAnalysis(). + WithPresets(linter.PresetPerformance). + WithURL("https://github.com/catenacyber/perfsprint"), + + linter.NewConfig(golinters.NewPreAlloc(preallocCfg)). + WithSince("v1.19.0"). + WithPresets(linter.PresetPerformance). + WithURL("https://github.com/alexkohler/prealloc"), + + linter.NewConfig(golinters.NewPredeclared(predeclaredCfg)). + WithSince("v1.35.0"). WithPresets(linter.PresetStyle). - WithURL("https://github.com/jirfag/go-printf-func-name"), - linter.NewConfig(golinters.NewGoMND(m.cfg)). - WithSince("v1.22.0"). + WithURL("https://github.com/nishanths/predeclared"), + + linter.NewConfig(golinters.NewPromlinter(promlinterCfg)). + WithSince("v1.40.0"). WithPresets(linter.PresetStyle). - WithURL("https://github.com/tommy-muehle/go-mnd"), - linter.NewConfig(golinters.NewGoerr113()). - WithSince("v1.26.0"). - WithPresets(linter.PresetStyle, linter.PresetError). + WithURL("https://github.com/yeya24/promlinter"), + + linter.NewConfig(golinters.NewProtoGetter(protogetterCfg)). + WithSince("v1.55.0"). + WithPresets(linter.PresetBugs). WithLoadForGoAnalysis(). - WithURL("https://github.com/Djarvur/go-err113"), - linter.NewConfig(golinters.NewGomodguard()). - WithSince("v1.25.0"). - WithPresets(linter.PresetStyle, linter.PresetImport, linter.PresetModule). - WithURL("https://github.com/ryancurrah/gomodguard"), - linter.NewConfig(golinters.NewGodot()). - WithSince("v1.25.0"). - WithPresets(linter.PresetStyle, linter.PresetComment). WithAutoFix(). - WithURL("https://github.com/tetafro/godot"), - linter.NewConfig(golinters.NewTestpackage(testpackageCfg)). - WithSince("v1.25.0"). - WithPresets(linter.PresetStyle, linter.PresetTest). - WithURL("https://github.com/maratori/testpackage"), - linter.NewConfig(golinters.NewNestif()). - WithSince("v1.25.0"). - WithPresets(linter.PresetComplexity). - WithURL("https://github.com/nakabonne/nestif"), - linter.NewConfig(golinters.NewExportLoopRef()). - WithSince("v1.28.0"). + WithURL("https://github.com/ghostiam/protogetter"), + + linter.NewConfig(golinters.NewReassign(reassignCfg)). + WithSince("1.49.0"). WithPresets(linter.PresetBugs). WithLoadForGoAnalysis(). - WithURL("https://github.com/kyoh86/exportloopref"), - linter.NewConfig(golinters.NewExhaustive(exhaustiveCfg)). - WithSince(" v1.28.0"). 
- WithPresets(linter.PresetBugs). + WithURL("https://github.com/curioswitch/go-reassign"), + + linter.NewConfig(golinters.NewRevive(reviveCfg)). + WithSince("v1.37.0"). + WithPresets(linter.PresetStyle, linter.PresetMetaLinter). + ConsiderSlow(). + WithURL("https://github.com/mgechev/revive"), + + linter.NewConfig(golinters.NewRowsErrCheck(rowserrcheckCfg)). + WithSince("v1.23.0"). WithLoadForGoAnalysis(). - WithURL("https://github.com/nishanths/exhaustive"), + WithPresets(linter.PresetBugs, linter.PresetSQL). + WithURL("https://github.com/jingyugao/rowserrcheck"), + + linter.NewConfig(golinters.NewSlogLint(sloglintCfg)). + WithSince("v1.55.0"). + WithLoadForGoAnalysis(). + WithPresets(linter.PresetStyle, linter.PresetFormatting). + WithURL("https://github.com/go-simpler/sloglint"), + + linter.NewConfig(golinters.NewScopelint()). + WithSince("v1.12.0"). + WithPresets(linter.PresetBugs). + WithURL("https://github.com/kyoh86/scopelint"). + Deprecated("The repository of the linter has been deprecated by the owner.", "v1.39.0", "exportloopref"), + linter.NewConfig(golinters.NewSQLCloseCheck()). WithSince("v1.28.0"). WithPresets(linter.PresetBugs, linter.PresetSQL). WithLoadForGoAnalysis(). WithURL("https://github.com/ryanrolds/sqlclosecheck"), - linter.NewConfig(golinters.NewNLReturn()). - WithSince("v1.30.0"). + + linter.NewConfig(golinters.NewSpancheck(spancheckCfg)). + WithSince("v1.56.0"). + WithLoadForGoAnalysis(). + WithPresets(linter.PresetBugs). + WithURL("https://github.com/jjti/go-spancheck"), + + linter.NewConfig(golinters.NewStaticcheck(staticcheckCfg)). + WithEnabledByDefault(). + WithSince("v1.0.0"). + WithLoadForGoAnalysis(). + WithPresets(linter.PresetBugs, linter.PresetMetaLinter). + WithAlternativeNames(megacheckName). + WithURL("https://staticcheck.io/"), + + linter.NewConfig(golinters.NewStructcheck(structcheckCfg)). + WithSince("v1.0.0"). + WithLoadForGoAnalysis(). + WithPresets(linter.PresetUnused). + WithURL("https://github.com/opennota/check"). + Deprecated("The owner seems to have abandoned the linter.", "v1.49.0", "unused"), + + linter.NewConfig(golinters.NewStylecheck(stylecheckCfg)). + WithSince("v1.20.0"). + WithLoadForGoAnalysis(). + WithPresets(linter.PresetStyle). + WithURL("https://github.com/dominikh/go-tools/tree/master/stylecheck"), + + linter.NewConfig(golinters.NewTagAlign(tagalignCfg)). + WithSince("v1.53.0"). + WithPresets(linter.PresetStyle, linter.PresetFormatting). + WithAutoFix(). + WithURL("https://github.com/4meepo/tagalign"), + + linter.NewConfig(golinters.NewTagliatelle(tagliatelleCfg)). + WithSince("v1.40.0"). + WithPresets(linter.PresetStyle). + WithURL("https://github.com/ldez/tagliatelle"), + + linter.NewConfig(golinters.NewTenv(tenvCfg)). + WithSince("v1.43.0"). WithPresets(linter.PresetStyle). - WithURL("https://github.com/ssgreg/nlreturn"), - linter.NewConfig(golinters.NewWrapcheck(wrapcheckCfg)). - WithSince("v1.32.0"). - WithPresets(linter.PresetStyle, linter.PresetError). WithLoadForGoAnalysis(). - WithURL("https://github.com/tomarrell/wrapcheck"), + WithURL("https://github.com/sivchari/tenv"), + + linter.NewConfig(golinters.NewTestableexamples()). + WithSince("v1.50.0"). + WithPresets(linter.PresetTest). + WithURL("https://github.com/maratori/testableexamples"), + + linter.NewConfig(golinters.NewTestifylint(testifylintCfg)). + WithSince("v1.55.0"). + WithPresets(linter.PresetTest, linter.PresetBugs). + WithLoadForGoAnalysis(). 
+ WithURL("https://github.com/Antonboom/testifylint"), + + linter.NewConfig(golinters.NewTestpackage(testpackageCfg)). + WithSince("v1.25.0"). + WithPresets(linter.PresetStyle, linter.PresetTest). + WithURL("https://github.com/maratori/testpackage"), + linter.NewConfig(golinters.NewThelper(thelperCfg)). WithSince("v1.34.0"). WithPresets(linter.PresetStyle). WithLoadForGoAnalysis(). WithURL("https://github.com/kulti/thelper"), + linter.NewConfig(golinters.NewTparallel()). WithSince("v1.32.0"). WithPresets(linter.PresetStyle, linter.PresetTest). WithLoadForGoAnalysis(). WithURL("https://github.com/moricho/tparallel"), - linter.NewConfig(golinters.NewExhaustiveStruct(exhaustiveStructCfg)). - WithSince("v1.32.0"). - WithPresets(linter.PresetStyle, linter.PresetTest). - WithLoadForGoAnalysis(). - WithURL("https://github.com/mbilski/exhaustivestruct"), - linter.NewConfig(golinters.NewErrorLint(errorlintCfg)). - WithSince("v1.32.0"). - WithPresets(linter.PresetBugs, linter.PresetError). + + linter.NewConfig(golinters.NewTypecheck()). + WithInternal(). + WithEnabledByDefault(). + WithSince("v1.3.0"). WithLoadForGoAnalysis(). - WithURL("https://github.com/polyfloyd/go-errorlint"), - linter.NewConfig(golinters.NewParallelTest()). - WithSince("v1.33.0"). - WithPresets(linter.PresetStyle, linter.PresetTest). - WithURL("https://github.com/kunwardeep/paralleltest"), - linter.NewConfig(golinters.NewMakezero()). - WithSince("v1.34.0"). - WithPresets(linter.PresetStyle, linter.PresetBugs). + WithPresets(linter.PresetBugs). + WithURL(""), + + linter.NewConfig(golinters.NewUnconvert()). + WithSince("v1.0.0"). WithLoadForGoAnalysis(). - WithURL("https://github.com/ashanbrown/makezero"), - linter.NewConfig(golinters.NewForbidigo()). - WithSince("v1.34.0"). WithPresets(linter.PresetStyle). - WithURL("https://github.com/ashanbrown/forbidigo"), - linter.NewConfig(golinters.NewIfshort(ifshortCfg)). - WithSince("v1.36.0"). + WithURL("https://github.com/mdempsky/unconvert"), + + linter.NewConfig(golinters.NewUnparam(unparamCfg)). + WithSince("v1.9.0"). + WithPresets(linter.PresetUnused). + WithLoadForGoAnalysis(). + WithURL("https://github.com/mvdan/unparam"), + + linter.NewConfig(golinters.NewUnused(unusedCfg, staticcheckCfg)). + WithEnabledByDefault(). + WithSince("v1.20.0"). + WithLoadForGoAnalysis(). + WithPresets(linter.PresetUnused). + WithAlternativeNames(megacheckName). + ConsiderSlow(). + WithChangeTypes(). + WithURL("https://github.com/dominikh/go-tools/tree/master/unused"), + + linter.NewConfig(golinters.NewUseStdlibVars(usestdlibvars)). + WithSince("v1.48.0"). WithPresets(linter.PresetStyle). - WithURL("https://github.com/esimonov/ifshort"), - linter.NewConfig(golinters.NewPredeclared(predeclaredCfg)). - WithSince("v1.35.0"). + WithURL("https://github.com/sashamelentyev/usestdlibvars"), + + linter.NewConfig(golinters.NewVarcheck(varcheckCfg)). + WithSince("v1.0.0"). + WithLoadForGoAnalysis(). + WithPresets(linter.PresetUnused). + WithURL("https://github.com/opennota/check"). + Deprecated("The owner seems to have abandoned the linter.", "v1.49.0", "unused"), + + linter.NewConfig(golinters.NewVarnamelen(varnamelenCfg)). + WithSince("v1.43.0"). WithPresets(linter.PresetStyle). - WithURL("https://github.com/nishanths/predeclared"), - linter.NewConfig(golinters.NewRevive(reviveCfg)). - WithSince("v1.37.0"). - WithPresets(linter.PresetStyle, linter.PresetMetaLinter). - ConsiderSlow(). - WithURL("https://github.com/mgechev/revive"), - linter.NewConfig(golinters.NewDurationCheck()). - WithSince("v1.37.0"). 
- WithPresets(linter.PresetBugs). WithLoadForGoAnalysis(). - WithURL("https://github.com/charithe/durationcheck"), + WithURL("https://github.com/blizzy78/varnamelen"), + linter.NewConfig(golinters.NewWastedAssign()). WithSince("v1.38.0"). WithPresets(linter.PresetStyle). WithLoadForGoAnalysis(). WithURL("https://github.com/sanposhiho/wastedassign"), - linter.NewConfig(golinters.NewImportAs(importAsCfg)). - WithSince("v1.38.0"). + + linter.NewConfig(golinters.NewWhitespace(whitespaceCfg)). + WithSince("v1.19.0"). WithPresets(linter.PresetStyle). + WithAutoFix(). + WithURL("https://github.com/ultraware/whitespace"), + + linter.NewConfig(golinters.NewWrapcheck(wrapcheckCfg)). + WithSince("v1.32.0"). + WithPresets(linter.PresetStyle, linter.PresetError). WithLoadForGoAnalysis(). - WithURL("https://github.com/julz/importas"), - linter.NewConfig(golinters.NewNilErr()). - WithSince("v1.38.0"). - WithLoadForGoAnalysis(). - WithPresets(linter.PresetBugs). - WithURL("https://github.com/gostaticanalysis/nilerr"), - linter.NewConfig(golinters.NewForceTypeAssert()). - WithSince("v1.38.0"). - WithPresets(linter.PresetStyle). - WithURL("https://github.com/gostaticanalysis/forcetypeassert"), - linter.NewConfig(golinters.NewGoModDirectives(goModDirectivesCfg)). - WithSince("v1.39.0"). - WithPresets(linter.PresetStyle, linter.PresetModule). - WithURL("https://github.com/ldez/gomoddirectives"), - linter.NewConfig(golinters.NewPromlinter()). - WithSince("v1.40.0"). - WithPresets(linter.PresetStyle). - WithURL("https://github.com/yeya24/promlinter"), - linter.NewConfig(golinters.NewTagliatelle(tagliatelleCfg)). - WithSince("v1.40.0"). + WithURL("https://github.com/tomarrell/wrapcheck"), + + linter.NewConfig(golinters.NewWSL(wslCfg)). + WithSince("v1.20.0"). WithPresets(linter.PresetStyle). - WithURL("https://github.com/ldez/tagliatelle"), + WithURL("https://github.com/bombsimon/wsl"), + + linter.NewConfig(golinters.NewZerologLint()). + WithSince("v1.53.0"). + WithPresets(linter.PresetBugs). + WithLoadForGoAnalysis(). + WithURL("https://github.com/ykadowak/zerologlint"), // nolintlint must be last because it looks at the results of all the previous linters for unused nolint directives - linter.NewConfig(golinters.NewNoLintLint()). + linter.NewConfig(golinters.NewNoLintLint(noLintLintCfg)). WithSince("v1.26.0"). WithPresets(linter.PresetStyle). 
WithURL("https://github.com/golangci/golangci-lint/blob/master/pkg/golinters/nolintlint/README.md"), - } + ) - enabledByDefault := map[string]bool{ - golinters.NewGovet(nil).Name(): true, - golinters.NewErrcheck().Name(): true, - golinters.NewStaticcheck(staticcheckCfg).Name(): true, - golinters.NewUnused(unusedCfg).Name(): true, - golinters.NewGosimple(gosimpleCfg).Name(): true, - golinters.NewStructcheck().Name(): true, - golinters.NewVarcheck().Name(): true, - golinters.NewIneffassign().Name(): true, - golinters.NewDeadcode().Name(): true, - golinters.NewTypecheck().Name(): true, - } - return enableLinterConfigs(lcs, func(lc *linter.Config) bool { - return enabledByDefault[lc.Name()] - }) + return linters } func (m Manager) GetAllEnabledByDefaultLinters() []*linter.Config { @@ -550,6 +967,10 @@ func linterConfigsToMap(lcs []*linter.Config) map[string]*linter.Config { func (m Manager) GetAllLinterConfigsForPreset(p string) []*linter.Config { var ret []*linter.Config for _, lc := range m.GetAllSupportedLinterConfigs() { + if lc.IsDeprecated() { + continue + } + for _, ip := range lc.InPresets { if p == ip { ret = append(ret, lc) @@ -561,53 +982,20 @@ func (m Manager) GetAllLinterConfigsForPreset(p string) []*linter.Config { return ret } -func (m Manager) loadCustomLinterConfig(name string, settings config.CustomLinterSettings) (*linter.Config, error) { - analyzer, err := m.getAnalyzerPlugin(settings.Path) - if err != nil { - return nil, err +// Trims the Go version to keep only M.m. +// Since Go 1.21 the version inside the go.mod can be a patched version (ex: 1.21.0). +// https://go.dev/doc/toolchain#versions +// This a problem with staticcheck and gocritic. +func trimGoVersion(v string) string { + if v == "" { + return "" } - m.log.Infof("Loaded %s: %s", settings.Path, name) - customLinter := goanalysis.NewLinter( - name, - settings.Description, - analyzer.GetAnalyzers(), - nil).WithLoadMode(goanalysis.LoadModeTypesInfo) - linterConfig := linter.NewConfig(customLinter) - linterConfig.EnabledByDefault = true - linterConfig.IsSlow = false - linterConfig.WithURL(settings.OriginalURL) - return linterConfig, nil -} -type AnalyzerPlugin interface { - GetAnalyzers() []*analysis.Analyzer -} - -func (m Manager) getAnalyzerPlugin(path string) (AnalyzerPlugin, error) { - if !filepath.IsAbs(path) { - // resolve non-absolute paths relative to config file's directory - configFilePath := viper.ConfigFileUsed() - absConfigFilePath, err := filepath.Abs(configFilePath) - if err != nil { - return nil, fmt.Errorf("could not get absolute representation of config file path %q: %v", configFilePath, err) - } - path = filepath.Join(filepath.Dir(absConfigFilePath), path) - } - - plug, err := plugin.Open(path) - if err != nil { - return nil, err - } - - symbol, err := plug.Lookup("AnalyzerPlugin") - if err != nil { - return nil, err - } + exp := regexp.MustCompile(`(\d\.\d+)(?:\.\d+|[a-z]+\d)`) - analyzerPlugin, ok := symbol.(AnalyzerPlugin) - if !ok { - return nil, fmt.Errorf("plugin %s does not abide by 'AnalyzerPlugin' interface", path) + if exp.MatchString(v) { + return exp.FindStringSubmatch(v)[1] } - return analyzerPlugin, nil + return v } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/validator.go b/vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/validator.go index ed731a9686..52a70d8590 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/validator.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/validator.go @@ -1,6 +1,7 @@ 
package lintersdb import ( + "errors" "fmt" "strings" @@ -21,7 +22,7 @@ func (v Validator) validateLintersNames(cfg *config.Linters) error { allNames := append([]string{}, cfg.Enable...) allNames = append(allNames, cfg.Disable...) - unknownNames := []string{} + var unknownNames []string for _, name := range allNames { if v.m.GetLinterConfigs(name) == nil { @@ -30,7 +31,7 @@ func (v Validator) validateLintersNames(cfg *config.Linters) error { } if len(unknownNames) > 0 { - return fmt.Errorf("unknown linters: '%v', run 'golangci-lint linters' to see the list of supported linters", + return fmt.Errorf("unknown linters: '%v', run 'golangci-lint help linters' to see the list of supported linters", strings.Join(unknownNames, ",")) } @@ -47,7 +48,7 @@ func (v Validator) validatePresets(cfg *config.Linters) error { } if len(cfg.Presets) != 0 && cfg.EnableAll { - return fmt.Errorf("--presets is incompatible with --enable-all") + return errors.New("--presets is incompatible with --enable-all") } return nil @@ -55,12 +56,12 @@ func (v Validator) validatePresets(cfg *config.Linters) error { func (v Validator) validateAllDisableEnableOptions(cfg *config.Linters) error { if cfg.EnableAll && cfg.DisableAll { - return fmt.Errorf("--enable-all and --disable-all options must not be combined") + return errors.New("--enable-all and --disable-all options must not be combined") } if cfg.DisableAll { if len(cfg.Enable) == 0 && len(cfg.Presets) == 0 { - return fmt.Errorf("all linters were disabled, but no one linter was enabled: must enable at least one") + return errors.New("all linters were disabled, but no one linter was enabled: must enable at least one") } if len(cfg.Disable) != 0 { diff --git a/vendor/github.com/golangci/golangci-lint/pkg/lint/load.go b/vendor/github.com/golangci/golangci-lint/pkg/lint/load.go index 69852afb98..babad5ba60 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/lint/load.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/lint/load.go @@ -11,7 +11,6 @@ import ( "strings" "time" - "github.com/pkg/errors" "golang.org/x/tools/go/packages" "github.com/golangci/golangci-lint/internal/pkgcache" @@ -41,7 +40,7 @@ func NewContextLoader(cfg *config.Config, log logutils.Log, goenv *goutil.Env, return &ContextLoader{ cfg: cfg, log: log, - debugf: logutils.Debug("loader"), + debugf: logutils.Debug(logutils.DebugKeyLoader), goenv: goenv, pkgTestIDRe: regexp.MustCompile(`^(.*) \[(.*)\.test\]`), lineCache: lineCache, @@ -59,7 +58,7 @@ func (cl *ContextLoader) prepareBuildContext() { return } - os.Setenv("GOROOT", goroot) + os.Setenv(string(goutil.EnvGoRoot), goroot) build.Default.GOROOT = goroot build.Default.BuildTags = cl.cfg.Run.BuildTags } @@ -84,7 +83,7 @@ func (cl *ContextLoader) buildArgs() []string { if strings.HasPrefix(arg, ".") || filepath.IsAbs(arg) { retArgs = append(retArgs, arg) } else { - // go/packages doesn't work well if we don't have prefix ./ for local packages + // go/packages doesn't work well if we don't have the prefix ./ for local packages retArgs = append(retArgs, fmt.Sprintf(".%c%s", filepath.Separator, arg)) } } @@ -126,7 +125,7 @@ func stringifyLoadMode(mode packages.LoadMode) string { m := map[packages.LoadMode]string{ packages.NeedCompiledGoFiles: "compiled_files", packages.NeedDeps: "deps", - packages.NeedExportsFile: "exports_file", + packages.NeedExportFile: "exports_file", packages.NeedFiles: "files", packages.NeedImports: "imports", packages.NeedName: "name", @@ -160,15 +159,27 @@ func (cl *ContextLoader) debugPrintLoadedPackages(pkgs 
[]*packages.Package) { func (cl *ContextLoader) parseLoadedPackagesErrors(pkgs []*packages.Package) error { for _, pkg := range pkgs { + var errs []packages.Error for _, err := range pkg.Errors { + // quick fix: skip error related to `go list` invocation by packages.Load() + // The behavior has been changed between go1.19 and go1.20, the error is now inside the JSON content. + // https://github.com/golangci/golangci-lint/pull/3414#issuecomment-1364756303 + if strings.Contains(err.Msg, "# command-line-arguments") { + continue + } + + errs = append(errs, err) + if strings.Contains(err.Msg, "no Go files") { - return errors.Wrapf(exitcodes.ErrNoGoFiles, "package %s", pkg.PkgPath) + return fmt.Errorf("package %s: %w", pkg.PkgPath, exitcodes.ErrNoGoFiles) } if strings.Contains(err.Msg, "cannot find package") { // when analyzing not existing directory - return errors.Wrap(exitcodes.ErrFailure, err.Msg) + return fmt.Errorf("%v: %w", err.Msg, exitcodes.ErrFailure) } } + + pkg.Errors = errs } return nil @@ -183,7 +194,7 @@ func (cl *ContextLoader) loadPackages(ctx context.Context, loadMode packages.Loa buildFlags, err := cl.makeBuildFlags() if err != nil { - return nil, errors.Wrap(err, "failed to make build flags for go list") + return nil, fmt.Errorf("failed to make build flags for go list: %w", err) } conf := &packages.Config{ @@ -192,21 +203,21 @@ func (cl *ContextLoader) loadPackages(ctx context.Context, loadMode packages.Loa Context: ctx, BuildFlags: buildFlags, Logf: cl.debugf, - //TODO: use fset, parsefile, overlay + // TODO: use fset, parsefile, overlay } args := cl.buildArgs() cl.debugf("Built loader args are %s", args) pkgs, err := packages.Load(conf, args...) if err != nil { - return nil, errors.Wrap(err, "failed to load with go/packages") + return nil, fmt.Errorf("failed to load with go/packages: %w", err) } // Currently, go/packages doesn't guarantee that error will be returned // if context was canceled. 
See // https://github.com/golang/tools/commit/c5cec6710e927457c3c29d6c156415e8539a5111#r39261855 if ctx.Err() != nil { - return nil, errors.Wrap(ctx.Err(), "timed out to load packages") + return nil, fmt.Errorf("timed out to load packages: %w", ctx.Err()) } if loadMode&packages.NeedSyntax == 0 { @@ -287,7 +298,7 @@ func (cl *ContextLoader) Load(ctx context.Context, linters []*linter.Config) (*l loadMode := cl.findLoadMode(linters) pkgs, err := cl.loadPackages(ctx, loadMode) if err != nil { - return nil, errors.Wrap(err, "failed to load packages") + return nil, fmt.Errorf("failed to load packages: %w", err) } deduplicatedPkgs := cl.filterDuplicatePackages(pkgs) diff --git a/vendor/github.com/golangci/golangci-lint/pkg/lint/runner.go b/vendor/github.com/golangci/golangci-lint/pkg/lint/runner.go index 8882b93009..e7cb17555a 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/lint/runner.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/lint/runner.go @@ -2,11 +2,11 @@ package lint import ( "context" + "errors" "fmt" "runtime/debug" "strings" - "github.com/pkg/errors" gopackages "golang.org/x/tools/go/packages" "github.com/golangci/golangci-lint/internal/errorutil" @@ -27,9 +27,16 @@ type Runner struct { Log logutils.Log } -func NewRunner(cfg *config.Config, log logutils.Log, goenv *goutil.Env, es *lintersdb.EnabledSet, - lineCache *fsutils.LineCache, dbManager *lintersdb.Manager, pkgs []*gopackages.Package) (*Runner, error) { - skipFilesProcessor, err := processors.NewSkipFiles(cfg.Run.SkipFiles) +func NewRunner(cfg *config.Config, log logutils.Log, goenv *goutil.Env, + es *lintersdb.EnabledSet, + lineCache *fsutils.LineCache, fileCache *fsutils.FileCache, + dbManager *lintersdb.Manager, pkgs []*gopackages.Package) (*Runner, error) { + // Beware that some processors need to add the path prefix when working with paths + // because they get invoked before the path prefixer (exclude and severity rules) + // or process other paths (skip files). + files := fsutils.NewFiles(lineCache, cfg.Output.PathPrefix) + + skipFilesProcessor, err := processors.NewSkipFiles(cfg.Run.SkipFiles, cfg.Output.PathPrefix) if err != nil { return nil, err } @@ -38,14 +45,14 @@ func NewRunner(cfg *config.Config, log logutils.Log, goenv *goutil.Env, es *lint if cfg.Run.UseDefaultSkipDirs { skipDirs = append(skipDirs, packages.StdExcludeDirRegexps...) } - skipDirsProcessor, err := processors.NewSkipDirs(skipDirs, log.Child("skip dirs"), cfg.Run.Args) + skipDirsProcessor, err := processors.NewSkipDirs(skipDirs, log.Child(logutils.DebugKeySkipDirs), cfg.Run.Args, cfg.Output.PathPrefix) if err != nil { return nil, err } enabledLinters, err := es.GetEnabledLintersMap() if err != nil { - return nil, errors.Wrap(err, "failed to get enabled linters") + return nil, fmt.Errorf("failed to get enabled linters: %w", err) } // print deprecated messages @@ -57,7 +64,7 @@ func NewRunner(cfg *config.Config, log logutils.Log, goenv *goutil.Env, es *lint var extra string if lc.Deprecation.Replacement != "" { - extra = fmt.Sprintf(" Replaced by %s.", lc.Deprecation.Replacement) + extra = fmt.Sprintf("Replaced by %s.", lc.Deprecation.Replacement) } log.Warnf("The linter '%s' is deprecated (since %s) due to: %s %s", name, lc.Deprecation.Since, lc.Deprecation.Message, extra) @@ -69,7 +76,7 @@ func NewRunner(cfg *config.Config, log logutils.Log, goenv *goutil.Env, es *lint processors.NewCgo(goenv), // Must go after Cgo. 
- processors.NewFilenameUnadjuster(pkgs, log.Child("filename_unadjuster")), + processors.NewFilenameUnadjuster(pkgs, log.Child(logutils.DebugKeyFilenameUnadjuster)), // Must be before diff, nolint and exclude autogenerated processor at least. processors.NewPathPrettifier(), @@ -82,17 +89,22 @@ func NewRunner(cfg *config.Config, log logutils.Log, goenv *goutil.Env, es *lint processors.NewIdentifierMarker(), getExcludeProcessor(&cfg.Issues), - getExcludeRulesProcessor(&cfg.Issues, log, lineCache), - processors.NewNolint(log.Child("nolint"), dbManager, enabledLinters), + getExcludeRulesProcessor(&cfg.Issues, log, files), + processors.NewNolint(log.Child(logutils.DebugKeyNolint), dbManager, enabledLinters), processors.NewUniqByLine(cfg), - processors.NewDiff(cfg.Issues.Diff, cfg.Issues.DiffFromRevision, cfg.Issues.DiffPatchFilePath), + processors.NewDiff(cfg.Issues.Diff, cfg.Issues.DiffFromRevision, cfg.Issues.DiffPatchFilePath, cfg.Issues.WholeFiles), processors.NewMaxPerFileFromLinter(cfg), - processors.NewMaxSameIssues(cfg.Issues.MaxSameIssues, log.Child("max_same_issues"), cfg), - processors.NewMaxFromLinter(cfg.Issues.MaxIssuesPerLinter, log.Child("max_from_linter"), cfg), - processors.NewSourceCode(lineCache, log.Child("source_code")), + processors.NewMaxSameIssues(cfg.Issues.MaxSameIssues, log.Child(logutils.DebugKeyMaxSameIssues), cfg), + processors.NewMaxFromLinter(cfg.Issues.MaxIssuesPerLinter, log.Child(logutils.DebugKeyMaxFromLinter), cfg), + processors.NewSourceCode(lineCache, log.Child(logutils.DebugKeySourceCode)), processors.NewPathShortener(), - getSeverityRulesProcessor(&cfg.Severity, log, lineCache), + getSeverityRulesProcessor(&cfg.Severity, log, files), + + // The fixer still needs to see paths for the issues that are relative to the current directory. + processors.NewFixer(cfg, log, fileCache), + + // Now we can modify the issues for output. processors.NewPathPrefixer(cfg.Output.PathPrefix), processors.NewSortResults(cfg), }, @@ -123,7 +135,7 @@ func (r *Runner) runLinterSafe(ctx context.Context, lintCtx *linter.Context, // which affects to the next analysis. // To avoid this issue, we clear type information from the packages. // See https://github.com/golangci/golangci-lint/pull/944. - // Currently DoesChangeTypes is true only for `unused`. + // Currently, DoesChangeTypes is true only for `unused`. lintCtx.ClearTypesInPackages() } @@ -192,20 +204,27 @@ func (r Runner) Run(ctx context.Context, linters []*linter.Config, lintCtx *lint sw := timeutils.NewStopwatch("linters", r.Log) defer sw.Print() - var issues []result.Issue + var ( + lintErrors error + issues []result.Issue + ) + for _, lc := range linters { lc := lc sw.TrackStage(lc.Name(), func() { linterIssues, err := r.runLinterSafe(ctx, lintCtx, lc) if err != nil { + lintErrors = errors.Join(lintErrors, fmt.Errorf("can't run linter %s", lc.Linter.Name()), err) r.Log.Warnf("Can't run linter %s: %v", lc.Linter.Name(), err) + return } + issues = append(issues, linterIssues...) 
}) } - return r.processLintResults(issues), nil + return r.processLintResults(issues), lintErrors } func (r *Runner) processIssues(issues []result.Issue, sw *timeutils.Stopwatch, statPerProcessor map[string]processorStat) []result.Issue { @@ -252,15 +271,16 @@ func getExcludeProcessor(cfg *config.Issues) processors.Processor { return excludeProcessor } -func getExcludeRulesProcessor(cfg *config.Issues, log logutils.Log, lineCache *fsutils.LineCache) processors.Processor { +func getExcludeRulesProcessor(cfg *config.Issues, log logutils.Log, files *fsutils.Files) processors.Processor { var excludeRules []processors.ExcludeRule for _, r := range cfg.ExcludeRules { excludeRules = append(excludeRules, processors.ExcludeRule{ BaseRule: processors.BaseRule{ - Text: r.Text, - Source: r.Source, - Path: r.Path, - Linters: r.Linters, + Text: r.Text, + Source: r.Source, + Path: r.Path, + PathExcept: r.PathExcept, + Linters: r.Linters, }, }) } @@ -280,30 +300,31 @@ func getExcludeRulesProcessor(cfg *config.Issues, log logutils.Log, lineCache *f if cfg.ExcludeCaseSensitive { excludeRulesProcessor = processors.NewExcludeRulesCaseSensitive( excludeRules, - lineCache, - log.Child("exclude_rules"), + files, + log.Child(logutils.DebugKeyExcludeRules), ) } else { excludeRulesProcessor = processors.NewExcludeRules( excludeRules, - lineCache, - log.Child("exclude_rules"), + files, + log.Child(logutils.DebugKeyExcludeRules), ) } return excludeRulesProcessor } -func getSeverityRulesProcessor(cfg *config.Severity, log logutils.Log, lineCache *fsutils.LineCache) processors.Processor { +func getSeverityRulesProcessor(cfg *config.Severity, log logutils.Log, files *fsutils.Files) processors.Processor { var severityRules []processors.SeverityRule for _, r := range cfg.Rules { severityRules = append(severityRules, processors.SeverityRule{ Severity: r.Severity, BaseRule: processors.BaseRule{ - Text: r.Text, - Source: r.Source, - Path: r.Path, - Linters: r.Linters, + Text: r.Text, + Source: r.Source, + Path: r.Path, + PathExcept: r.PathExcept, + Linters: r.Linters, }, }) } @@ -313,15 +334,15 @@ func getSeverityRulesProcessor(cfg *config.Severity, log logutils.Log, lineCache severityRulesProcessor = processors.NewSeverityRulesCaseSensitive( cfg.Default, severityRules, - lineCache, - log.Child("severity_rules"), + files, + log.Child(logutils.DebugKeySeverityRules), ) } else { severityRulesProcessor = processors.NewSeverityRules( cfg.Default, severityRules, - lineCache, - log.Child("severity_rules"), + files, + log.Child(logutils.DebugKeySeverityRules), ) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/logutils/log.go b/vendor/github.com/golangci/golangci-lint/pkg/logutils/log.go index b955417a87..16067e490e 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/logutils/log.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/logutils/log.go @@ -1,11 +1,11 @@ package logutils type Log interface { - Fatalf(format string, args ...interface{}) - Panicf(format string, args ...interface{}) - Errorf(format string, args ...interface{}) - Warnf(format string, args ...interface{}) - Infof(format string, args ...interface{}) + Fatalf(format string, args ...any) + Panicf(format string, args ...any) + Errorf(format string, args ...any) + Warnf(format string, args ...any) + Infof(format string, args ...any) Child(name string) Log SetLevel(level LogLevel) @@ -14,18 +14,18 @@ type Log interface { type LogLevel int const ( - // Debug messages, write to debug logs only by logutils.Debug. 
+ // LogLevelDebug Debug messages, write to debug logs only by logutils.Debug. LogLevelDebug LogLevel = 0 - // Information messages, don't write too much messages, + // LogLevelInfo Information messages, don't write too many messages, // only useful ones: they are shown when running with -v. LogLevelInfo LogLevel = 1 - // Hidden errors: non critical errors: work can be continued, no need to fail whole program; + // LogLevelWarn Hidden errors: non-critical errors: work can be continued, no need to fail whole program; // tests will crash if any warning occurred. LogLevelWarn LogLevel = 2 - // Only not hidden from user errors: whole program failing, usually + // LogLevelError Only not hidden from user errors: whole program failing, usually // error logging happens in 1-2 places: in the "main" function. LogLevelError LogLevel = 3 ) diff --git a/vendor/github.com/golangci/golangci-lint/pkg/logutils/logutils.go b/vendor/github.com/golangci/golangci-lint/pkg/logutils/logutils.go index 93c9873d9f..94479bc7b9 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/logutils/logutils.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/logutils/logutils.go @@ -5,9 +5,66 @@ import ( "strings" ) +// envDebug value: one or several debug keys. +// examples: +// - Remove output to `/dev/null`: `GL_DEBUG=linters_output ./golangci-lint run` +// - Show linters configuration: `GL_DEBUG=enabled_linters golangci-lint run` +// - Some analysis details: `GL_DEBUG=goanalysis/analyze,goanalysis/facts golangci-lint run` +const envDebug = "GL_DEBUG" + +const ( + DebugKeyAutogenExclude = "autogen_exclude" // Debugs a filter excluding autogenerated source code. + DebugKeyBinSalt = "bin_salt" + DebugKeyConfigReader = "config_reader" + DebugKeyEmpty = "" + DebugKeyEnabledLinters = "enabled_linters" + DebugKeyEnv = "env" // Debugs `go env` command. + DebugKeyExcludeRules = "exclude_rules" + DebugKeyExec = "exec" + DebugKeyFilenameUnadjuster = "filename_unadjuster" + DebugKeyForbidigo = "forbidigo" + DebugKeyGoEnv = "goenv" + DebugKeyLinter = "linter" + DebugKeyLintersContext = "linters_context" + DebugKeyLintersDB = "lintersdb" + DebugKeyLintersOutput = "linters_output" + DebugKeyLoader = "loader" // Debugs packages loading (including `go/packages` internal debugging). + DebugKeyMaxFromLinter = "max_from_linter" + DebugKeyMaxSameIssues = "max_same_issues" + DebugKeyPkgCache = "pkgcache" + DebugKeyRunner = "runner" + DebugKeySeverityRules = "severity_rules" + DebugKeySkipDirs = "skip_dirs" + DebugKeySourceCode = "source_code" + DebugKeyStopwatch = "stopwatch" + DebugKeyTabPrinter = "tab_printer" + DebugKeyTest = "test" + DebugKeyTextPrinter = "text_printer" +) + +const ( + DebugKeyGoAnalysis = "goanalysis" + + DebugKeyGoAnalysisAnalyze = DebugKeyGoAnalysis + "/analyze" + DebugKeyGoAnalysisIssuesCache = DebugKeyGoAnalysis + "/issues/cache" + DebugKeyGoAnalysisMemory = DebugKeyGoAnalysis + "/memory" + + DebugKeyGoAnalysisFacts = DebugKeyGoAnalysis + "/facts" + DebugKeyGoAnalysisFactsCache = DebugKeyGoAnalysisFacts + "/cache" + DebugKeyGoAnalysisFactsExport = DebugKeyGoAnalysisFacts + "/export" + DebugKeyGoAnalysisFactsInherit = DebugKeyGoAnalysisFacts + "/inherit" +) + +const ( + DebugKeyGoCritic = "gocritic" // Debugs `go-critic` linter. + DebugKeyMegacheck = "megacheck" // Debugs `staticcheck` related linters. + DebugKeyNolint = "nolint" // Debugs a filter excluding issues by `//nolint` comments. + DebugKeyRevive = "revive" // Debugs `revive` linter. 
+) + func getEnabledDebugs() map[string]bool { ret := map[string]bool{} - debugVar := os.Getenv("GL_DEBUG") + debugVar := os.Getenv(envDebug) if debugVar == "" { return ret } @@ -21,9 +78,9 @@ func getEnabledDebugs() map[string]bool { var enabledDebugs = getEnabledDebugs() -type DebugFunc func(format string, args ...interface{}) +type DebugFunc func(format string, args ...any) -func nopDebugf(format string, args ...interface{}) {} +func nopDebugf(_ string, _ ...any) {} func Debug(tag string) DebugFunc { if !enabledDebugs[tag] { @@ -33,7 +90,7 @@ func Debug(tag string) DebugFunc { logger := NewStderrLog(tag) logger.SetLevel(LogLevelDebug) - return func(format string, args ...interface{}) { + return func(format string, args ...any) { logger.Debugf(format, args...) } } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/logutils/mock.go b/vendor/github.com/golangci/golangci-lint/pkg/logutils/mock.go index e897ce1ede..efda8cc20f 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/logutils/mock.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/logutils/mock.go @@ -12,28 +12,28 @@ func NewMockLog() *MockLog { return &MockLog{} } -func (m *MockLog) Fatalf(format string, args ...interface{}) { - mArgs := []interface{}{format} +func (m *MockLog) Fatalf(format string, args ...any) { + mArgs := []any{format} m.Called(append(mArgs, args...)...) } -func (m *MockLog) Panicf(format string, args ...interface{}) { - mArgs := []interface{}{format} +func (m *MockLog) Panicf(format string, args ...any) { + mArgs := []any{format} m.Called(append(mArgs, args...)...) } -func (m *MockLog) Errorf(format string, args ...interface{}) { - mArgs := []interface{}{format} +func (m *MockLog) Errorf(format string, args ...any) { + mArgs := []any{format} m.Called(append(mArgs, args...)...) } -func (m *MockLog) Warnf(format string, args ...interface{}) { - mArgs := []interface{}{format} +func (m *MockLog) Warnf(format string, args ...any) { + mArgs := []any{format} m.Called(append(mArgs, args...)...) } -func (m *MockLog) Infof(format string, args ...interface{}) { - mArgs := []interface{}{format} +func (m *MockLog) Infof(format string, args ...any) { + mArgs := []any{format} m.Called(append(mArgs, args...)...) 
} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/logutils/stderr_log.go b/vendor/github.com/golangci/golangci-lint/pkg/logutils/stderr_log.go index b4697ee4c7..367c94f385 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/logutils/stderr_log.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/logutils/stderr_log.go @@ -10,13 +10,20 @@ import ( "github.com/golangci/golangci-lint/pkg/exitcodes" ) +const ( + // envLogLevel values: "error", "err", "warning", "warn","info" + envLogLevel = "LOG_LEVEL" + // envLogTimestamp value: "1" + envLogTimestamp = "LOG_TIMESTAMP" +) + type StderrLog struct { name string logger *logrus.Logger level LogLevel } -var _ Log = NewStderrLog("") +var _ Log = NewStderrLog(DebugKeyEmpty) func NewStderrLog(name string) *StderrLog { sl := &StderrLog{ @@ -25,7 +32,7 @@ func NewStderrLog(name string) *StderrLog { level: LogLevelWarn, } - switch os.Getenv("LOG_LEVEL") { + switch os.Getenv(envLogLevel) { case "error", "err": sl.logger.SetLevel(logrus.ErrorLevel) case "warning", "warn": @@ -38,9 +45,10 @@ func NewStderrLog(name string) *StderrLog { sl.logger.Out = StdErr formatter := &logrus.TextFormatter{ - DisableTimestamp: true, // `INFO[0007] msg` -> `INFO msg` + DisableTimestamp: true, // `INFO[0007] msg` -> `INFO msg` + EnvironmentOverrideColors: true, } - if os.Getenv("LOG_TIMESTAMP") == "1" { + if os.Getenv(envLogTimestamp) == "1" { formatter.DisableTimestamp = false formatter.FullTimestamp = true formatter.TimestampFormat = time.StampMilli @@ -59,17 +67,17 @@ func (sl StderrLog) prefix() string { return prefix } -func (sl StderrLog) Fatalf(format string, args ...interface{}) { +func (sl StderrLog) Fatalf(format string, args ...any) { sl.logger.Errorf("%s%s", sl.prefix(), fmt.Sprintf(format, args...)) os.Exit(exitcodes.Failure) } -func (sl StderrLog) Panicf(format string, args ...interface{}) { +func (sl StderrLog) Panicf(format string, args ...any) { v := fmt.Sprintf("%s%s", sl.prefix(), fmt.Sprintf(format, args...)) panic(v) } -func (sl StderrLog) Errorf(format string, args ...interface{}) { +func (sl StderrLog) Errorf(format string, args ...any) { if sl.level > LogLevelError { return } @@ -80,7 +88,7 @@ func (sl StderrLog) Errorf(format string, args ...interface{}) { // called on hidden errors, see log levels comments. 
} -func (sl StderrLog) Warnf(format string, args ...interface{}) { +func (sl StderrLog) Warnf(format string, args ...any) { if sl.level > LogLevelWarn { return } @@ -88,7 +96,7 @@ func (sl StderrLog) Warnf(format string, args ...interface{}) { sl.logger.Warnf("%s%s", sl.prefix(), fmt.Sprintf(format, args...)) } -func (sl StderrLog) Infof(format string, args ...interface{}) { +func (sl StderrLog) Infof(format string, args ...any) { if sl.level > LogLevelInfo { return } @@ -96,7 +104,7 @@ func (sl StderrLog) Infof(format string, args ...interface{}) { sl.logger.Infof("%s%s", sl.prefix(), fmt.Sprintf(format, args...)) } -func (sl StderrLog) Debugf(format string, args ...interface{}) { +func (sl StderrLog) Debugf(format string, args ...any) { if sl.level > LogLevelDebug { return } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/packages/errors.go b/vendor/github.com/golangci/golangci-lint/pkg/packages/errors.go index c620573b93..ff37651aff 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/packages/errors.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/packages/errors.go @@ -1,15 +1,13 @@ package packages import ( + "errors" "fmt" "go/token" "strconv" "strings" - - "github.com/pkg/errors" ) -//nolint:gomnd func ParseErrorPosition(pos string) (*token.Position, error) { // file:line(:colon) parts := strings.Split(pos, ":") @@ -20,14 +18,14 @@ func ParseErrorPosition(pos string) (*token.Position, error) { file := parts[0] line, err := strconv.Atoi(parts[1]) if err != nil { - return nil, fmt.Errorf("can't parse line number %q: %s", parts[1], err) + return nil, fmt.Errorf("can't parse line number %q: %w", parts[1], err) } var column int if len(parts) == 3 { // no column column, err = strconv.Atoi(parts[2]) if err != nil { - return nil, errors.Wrapf(err, "failed to parse column from %q", parts[2]) + return nil, fmt.Errorf("failed to parse column from %q: %w", parts[2], err) } } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/packages/util.go b/vendor/github.com/golangci/golangci-lint/pkg/packages/util.go index e4268897f4..6a7789ebb7 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/packages/util.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/packages/util.go @@ -42,7 +42,7 @@ func ExtractErrors(pkg *packages.Package) []packages.Error { continue } - // change pos to local file to properly process it by processors (properly read line etc) + // change pos to local file to properly process it by processors (properly read line etc.) 
uniqErrors[i].Msg = fmt.Sprintf("%s: %s", uniqErrors[i].Pos, uniqErrors[i].Msg) uniqErrors[i].Pos = fmt.Sprintf("%s:1", pkg.GoFiles[0]) } @@ -65,7 +65,7 @@ func extractErrorsImpl(pkg *packages.Package, seenPackages map[*packages.Package } seenPackages[pkg] = true - if !pkg.IllTyped { // otherwise it may take hours to traverse all deps many times + if !pkg.IllTyped { // otherwise, it may take hours to traverse all deps many times return nil } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/printers/checkstyle.go b/vendor/github.com/golangci/golangci-lint/pkg/printers/checkstyle.go index c5b948a98d..e32eef7f51 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/printers/checkstyle.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/printers/checkstyle.go @@ -1,16 +1,19 @@ package printers import ( - "context" "encoding/xml" "fmt" + "io" + "sort" "github.com/go-xmlfmt/xmlfmt" + "golang.org/x/exp/maps" - "github.com/golangci/golangci-lint/pkg/logutils" "github.com/golangci/golangci-lint/pkg/result" ) +const defaultCheckstyleSeverity = "error" + type checkstyleOutput struct { XMLName xml.Name `xml:"checkstyle"` Version string `xml:"version,attr"` @@ -30,15 +33,15 @@ type checkstyleError struct { Source string `xml:"source,attr"` } -const defaultCheckstyleSeverity = "error" - -type Checkstyle struct{} +type Checkstyle struct { + w io.Writer +} -func NewCheckstyle() *Checkstyle { - return &Checkstyle{} +func NewCheckstyle(w io.Writer) *Checkstyle { + return &Checkstyle{w: w} } -func (Checkstyle) Print(ctx context.Context, issues []result.Issue) error { +func (p Checkstyle) Print(issues []result.Issue) error { out := checkstyleOutput{ Version: "5.0", } @@ -72,16 +75,21 @@ func (Checkstyle) Print(ctx context.Context, issues []result.Issue) error { file.Errors = append(file.Errors, newError) } - out.Files = make([]*checkstyleFile, 0, len(files)) - for _, file := range files { - out.Files = append(out.Files, file) - } + out.Files = maps.Values(files) + + sort.Slice(out.Files, func(i, j int) bool { + return out.Files[i].Name < out.Files[j].Name + }) data, err := xml.Marshal(&out) if err != nil { return err } - fmt.Fprintf(logutils.StdOut, "%s%s\n", xml.Header, xmlfmt.FormatXML(string(data), "", " ")) + _, err = fmt.Fprintf(p.w, "%s%s\n", xml.Header, xmlfmt.FormatXML(string(data), "", " ")) + if err != nil { + return err + } + return nil } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/printers/codeclimate.go b/vendor/github.com/golangci/golangci-lint/pkg/printers/codeclimate.go index 35a22ce99a..50d6dcff3b 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/printers/codeclimate.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/printers/codeclimate.go @@ -1,16 +1,18 @@ package printers import ( - "context" "encoding/json" - "fmt" + "io" - "github.com/golangci/golangci-lint/pkg/logutils" "github.com/golangci/golangci-lint/pkg/result" ) -// CodeClimateIssue is a subset of the Code Climate spec - https://github.com/codeclimate/spec/blob/master/SPEC.md#data-types -// It is just enough to support GitLab CI Code Quality - https://docs.gitlab.com/ee/user/project/merge_requests/code_quality.html +const defaultCodeClimateSeverity = "critical" + +// CodeClimateIssue is a subset of the Code Climate spec. +// https://github.com/codeclimate/platform/blob/master/spec/analyzers/SPEC.md#data-types +// It is just enough to support GitLab CI Code Quality. 
+// https://docs.gitlab.com/ee/user/project/merge_requests/code_quality.html type CodeClimateIssue struct { Description string `json:"description"` Severity string `json:"severity,omitempty"` @@ -24,14 +26,15 @@ type CodeClimateIssue struct { } type CodeClimate struct { + w io.Writer } -func NewCodeClimate() *CodeClimate { - return &CodeClimate{} +func NewCodeClimate(w io.Writer) *CodeClimate { + return &CodeClimate{w: w} } -func (p CodeClimate) Print(ctx context.Context, issues []result.Issue) error { - codeClimateIssues := []CodeClimateIssue{} +func (p CodeClimate) Print(issues []result.Issue) error { + codeClimateIssues := make([]CodeClimateIssue, 0, len(issues)) for i := range issues { issue := &issues[i] codeClimateIssue := CodeClimateIssue{} @@ -39,6 +42,7 @@ func (p CodeClimate) Print(ctx context.Context, issues []result.Issue) error { codeClimateIssue.Location.Path = issue.Pos.Filename codeClimateIssue.Location.Lines.Begin = issue.Pos.Line codeClimateIssue.Fingerprint = issue.Fingerprint() + codeClimateIssue.Severity = defaultCodeClimateSeverity if issue.Severity != "" { codeClimateIssue.Severity = issue.Severity @@ -47,11 +51,9 @@ func (p CodeClimate) Print(ctx context.Context, issues []result.Issue) error { codeClimateIssues = append(codeClimateIssues, codeClimateIssue) } - outputJSON, err := json.Marshal(codeClimateIssues) + err := json.NewEncoder(p.w).Encode(codeClimateIssues) if err != nil { return err } - - fmt.Fprint(logutils.StdOut, string(outputJSON)) return nil } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/printers/github.go b/vendor/github.com/golangci/golangci-lint/pkg/printers/github.go index 4ebc266857..f471d5a866 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/printers/github.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/printers/github.go @@ -1,22 +1,23 @@ package printers import ( - "context" "fmt" + "io" + "path/filepath" - "github.com/golangci/golangci-lint/pkg/logutils" "github.com/golangci/golangci-lint/pkg/result" ) type github struct { + w io.Writer } const defaultGithubSeverity = "error" -// NewGithub output format outputs issues according to Github actions format: -// https://help.github.com/en/actions/reference/workflow-commands-for-github-actions#setting-an-error-message -func NewGithub() Printer { - return &github{} +// NewGithub output format outputs issues according to GitHub actions format: +// https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#setting-an-error-message +func NewGithub(w io.Writer) Printer { + return &github{w: w} } // print each line as: ::error file=app.js,line=10,col=15::Something went wrong @@ -26,7 +27,12 @@ func formatIssueAsGithub(issue *result.Issue) string { severity = issue.Severity } - ret := fmt.Sprintf("::%s file=%s,line=%d", severity, issue.FilePath(), issue.Line()) + // Convert backslashes to forward slashes. + // This is needed when running on windows. + // Otherwise, GitHub won't be able to show the annotations pointing to the file path with backslashes. 
+ file := filepath.ToSlash(issue.FilePath()) + + ret := fmt.Sprintf("::%s file=%s,line=%d", severity, file, issue.Line()) if issue.Pos.Column != 0 { ret += fmt.Sprintf(",col=%d", issue.Pos.Column) } @@ -35,9 +41,9 @@ func formatIssueAsGithub(issue *result.Issue) string { return ret } -func (g *github) Print(_ context.Context, issues []result.Issue) error { +func (p *github) Print(issues []result.Issue) error { for ind := range issues { - _, err := fmt.Fprintln(logutils.StdOut, formatIssueAsGithub(&issues[ind])) + _, err := fmt.Fprintln(p.w, formatIssueAsGithub(&issues[ind])) if err != nil { return err } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/printers/html.go b/vendor/github.com/golangci/golangci-lint/pkg/printers/html.go index 65ab753bd5..7dd1e5c623 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/printers/html.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/printers/html.go @@ -1,12 +1,11 @@ package printers import ( - "context" "fmt" "html/template" + "io" "strings" - "github.com/golangci/golangci-lint/pkg/logutils" "github.com/golangci/golangci-lint/pkg/result" ) @@ -123,13 +122,15 @@ type htmlIssue struct { Code string } -type HTML struct{} +type HTML struct { + w io.Writer +} -func NewHTML() *HTML { - return &HTML{} +func NewHTML(w io.Writer) *HTML { + return &HTML{w: w} } -func (h HTML) Print(_ context.Context, issues []result.Issue) error { +func (p HTML) Print(issues []result.Issue) error { var htmlIssues []htmlIssue for i := range issues { @@ -151,5 +152,5 @@ func (h HTML) Print(_ context.Context, issues []result.Issue) error { return err } - return t.Execute(logutils.StdOut, struct{ Issues []htmlIssue }{Issues: htmlIssues}) + return t.Execute(p.w, struct{ Issues []htmlIssue }{Issues: htmlIssues}) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/printers/json.go b/vendor/github.com/golangci/golangci-lint/pkg/printers/json.go index 6ffa996fb0..4bae526b87 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/printers/json.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/printers/json.go @@ -1,22 +1,22 @@ package printers import ( - "context" "encoding/json" - "fmt" + "io" - "github.com/golangci/golangci-lint/pkg/logutils" "github.com/golangci/golangci-lint/pkg/report" "github.com/golangci/golangci-lint/pkg/result" ) type JSON struct { rd *report.Data + w io.Writer } -func NewJSON(rd *report.Data) *JSON { +func NewJSON(rd *report.Data, w io.Writer) *JSON { return &JSON{ rd: rd, + w: w, } } @@ -25,17 +25,14 @@ type JSONResult struct { Report *report.Data } -func (p JSON) Print(ctx context.Context, issues []result.Issue) error { +func (p JSON) Print(issues []result.Issue) error { res := JSONResult{ Issues: issues, Report: p.rd, } - - outputJSON, err := json.Marshal(res) - if err != nil { - return err + if res.Issues == nil { + res.Issues = []result.Issue{} } - fmt.Fprint(logutils.StdOut, string(outputJSON)) - return nil + return json.NewEncoder(p.w).Encode(res) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/printers/junitxml.go b/vendor/github.com/golangci/golangci-lint/pkg/printers/junitxml.go index 9277cd66f2..3e3f82f580 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/printers/junitxml.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/printers/junitxml.go @@ -1,11 +1,14 @@ package printers import ( - "context" "encoding/xml" + "fmt" + "io" + "sort" "strings" - "github.com/golangci/golangci-lint/pkg/logutils" + "golang.org/x/exp/maps" + "github.com/golangci/golangci-lint/pkg/result" ) @@ -31,17 +34,19 @@ 
type testCaseXML struct { type failureXML struct { Message string `xml:"message,attr"` + Type string `xml:"type,attr"` Content string `xml:",cdata"` } type JunitXML struct { + w io.Writer } -func NewJunitXML() *JunitXML { - return &JunitXML{} +func NewJunitXML(w io.Writer) *JunitXML { + return &JunitXML{w: w} } -func (JunitXML) Print(ctx context.Context, issues []result.Issue) error { +func (p JunitXML) Print(issues []result.Issue) error { suites := make(map[string]testSuiteXML) // use a map to group by file for ind := range issues { @@ -56,8 +61,10 @@ func (JunitXML) Print(ctx context.Context, issues []result.Issue) error { Name: i.FromLinter, ClassName: i.Pos.String(), Failure: failureXML{ - Message: i.Text, - Content: strings.Join(i.SourceLines, "\n"), + Type: i.Severity, + Message: i.Pos.String() + ": " + i.Text, + Content: fmt.Sprintf("%s: %s\nCategory: %s\nFile: %s\nLine: %d\nDetails: %s", + i.Severity, i.Text, i.FromLinter, i.Pos.Filename, i.Pos.Line, strings.Join(i.SourceLines, "\n")), }, } @@ -66,11 +73,13 @@ func (JunitXML) Print(ctx context.Context, issues []result.Issue) error { } var res testSuitesXML - for _, val := range suites { - res.TestSuites = append(res.TestSuites, val) - } + res.TestSuites = maps.Values(suites) + + sort.Slice(res.TestSuites, func(i, j int) bool { + return res.TestSuites[i].Suite < res.TestSuites[j].Suite + }) - enc := xml.NewEncoder(logutils.StdOut) + enc := xml.NewEncoder(p.w) enc.Indent("", " ") if err := enc.Encode(res); err != nil { return err diff --git a/vendor/github.com/golangci/golangci-lint/pkg/printers/printer.go b/vendor/github.com/golangci/golangci-lint/pkg/printers/printer.go index bfafb88e2a..ce3116fa4e 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/printers/printer.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/printers/printer.go @@ -1,11 +1,9 @@ package printers import ( - "context" - "github.com/golangci/golangci-lint/pkg/result" ) type Printer interface { - Print(ctx context.Context, issues []result.Issue) error + Print(issues []result.Issue) error } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/printers/tab.go b/vendor/github.com/golangci/golangci-lint/pkg/printers/tab.go index d3cdce673d..8ede897402 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/printers/tab.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/printers/tab.go @@ -1,7 +1,6 @@ package printers import ( - "context" "fmt" "io" "text/tabwriter" @@ -14,23 +13,33 @@ import ( type Tab struct { printLinterName bool - log logutils.Log + useColors bool + + log logutils.Log + w io.Writer } -func NewTab(printLinterName bool, log logutils.Log) *Tab { +func NewTab(printLinterName, useColors bool, log logutils.Log, w io.Writer) *Tab { return &Tab{ printLinterName: printLinterName, + useColors: useColors, log: log, + w: w, } } -func (p Tab) SprintfColored(ca color.Attribute, format string, args ...interface{}) string { +func (p *Tab) SprintfColored(ca color.Attribute, format string, args ...any) string { c := color.New(ca) + + if !p.useColors { + c.DisableColor() + } + return c.Sprintf(format, args...) 
} -func (p *Tab) Print(ctx context.Context, issues []result.Issue) error { - w := tabwriter.NewWriter(logutils.StdOut, 0, 0, 2, ' ', 0) +func (p *Tab) Print(issues []result.Issue) error { + w := tabwriter.NewWriter(p.w, 0, 0, 2, ' ', 0) for i := range issues { p.printIssue(&issues[i], w) @@ -43,7 +52,7 @@ func (p *Tab) Print(ctx context.Context, issues []result.Issue) error { return nil } -func (p Tab) printIssue(i *result.Issue, w io.Writer) { +func (p *Tab) printIssue(i *result.Issue, w io.Writer) { text := p.SprintfColored(color.FgRed, "%s", i.Text) if p.printLinterName { text = fmt.Sprintf("%s\t%s", i.FromLinter, text) diff --git a/vendor/github.com/golangci/golangci-lint/pkg/printers/teamcity.go b/vendor/github.com/golangci/golangci-lint/pkg/printers/teamcity.go new file mode 100644 index 0000000000..d3693e9971 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/printers/teamcity.go @@ -0,0 +1,122 @@ +package printers + +import ( + "fmt" + "io" + "strings" + "unicode/utf8" + + "github.com/golangci/golangci-lint/pkg/result" +) + +// Field limits. +const ( + smallLimit = 255 + largeLimit = 4000 +) + +// TeamCity printer for TeamCity format. +type TeamCity struct { + w io.Writer + escaper *strings.Replacer +} + +// NewTeamCity output format outputs issues according to TeamCity service message format. +func NewTeamCity(w io.Writer) *TeamCity { + return &TeamCity{ + w: w, + // https://www.jetbrains.com/help/teamcity/service-messages.html#Escaped+Values + escaper: strings.NewReplacer( + "'", "|'", + "\n", "|n", + "\r", "|r", + "|", "||", + "[", "|[", + "]", "|]", + ), + } +} + +func (p *TeamCity) Print(issues []result.Issue) error { + uniqLinters := map[string]struct{}{} + + for i := range issues { + issue := issues[i] + + _, ok := uniqLinters[issue.FromLinter] + if !ok { + inspectionType := InspectionType{ + id: issue.FromLinter, + name: issue.FromLinter, + description: issue.FromLinter, + category: "Golangci-lint reports", + } + + _, err := inspectionType.Print(p.w, p.escaper) + if err != nil { + return err + } + + uniqLinters[issue.FromLinter] = struct{}{} + } + + instance := InspectionInstance{ + typeID: issue.FromLinter, + message: issue.Text, + file: issue.FilePath(), + line: issue.Line(), + severity: issue.Severity, + } + + _, err := instance.Print(p.w, p.escaper) + if err != nil { + return err + } + } + + return nil +} + +// InspectionType is the unique description of the conducted inspection. Each specific warning or +// an error in code (inspection instance) has an inspection type. +// https://www.jetbrains.com/help/teamcity/service-messages.html#Inspection+Type +type InspectionType struct { + id string // (mandatory) limited by 255 characters. + name string // (mandatory) limited by 255 characters. + description string // (mandatory) limited by 255 characters. + category string // (mandatory) limited by 4000 characters. +} + +func (i InspectionType) Print(w io.Writer, escaper *strings.Replacer) (int, error) { + return fmt.Fprintf(w, "##teamcity[InspectionType id='%s' name='%s' description='%s' category='%s']\n", + limit(i.id, smallLimit), limit(i.name, smallLimit), limit(escaper.Replace(i.description), largeLimit), limit(i.category, smallLimit)) +} + +// InspectionInstance reports a specific defect, warning, error message. +// Includes location, description, and various optional and custom attributes. 
+// https://www.jetbrains.com/help/teamcity/service-messages.html#Inspection+Instance +type InspectionInstance struct { + typeID string // (mandatory) limited by 255 characters. + message string // (optional) limited by 4000 characters. + file string // (mandatory) file path limited by 4000 characters. + line int // (optional) line of the file. + severity string // (optional) any linter severity. +} + +func (i InspectionInstance) Print(w io.Writer, replacer *strings.Replacer) (int, error) { + return fmt.Fprintf(w, "##teamcity[inspection typeId='%s' message='%s' file='%s' line='%d' SEVERITY='%s']\n", + limit(i.typeID, smallLimit), + limit(replacer.Replace(i.message), largeLimit), + limit(i.file, largeLimit), + i.line, strings.ToUpper(i.severity)) +} + +func limit(s string, max int) string { + var size, count int + for i := 0; i < max && count < len(s); i++ { + _, size = utf8.DecodeRuneInString(s[count:]) + count += size + } + + return s[:count] +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/printers/text.go b/vendor/github.com/golangci/golangci-lint/pkg/printers/text.go index 1814528884..6e29c4b50f 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/printers/text.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/printers/text.go @@ -1,8 +1,8 @@ package printers import ( - "context" "fmt" + "io" "strings" "github.com/fatih/color" @@ -13,31 +13,34 @@ import ( type Text struct { printIssuedLine bool - useColors bool printLinterName bool + useColors bool log logutils.Log + w io.Writer } -func NewText(printIssuedLine, useColors, printLinterName bool, log logutils.Log) *Text { +func NewText(printIssuedLine, useColors, printLinterName bool, log logutils.Log, w io.Writer) *Text { return &Text{ printIssuedLine: printIssuedLine, - useColors: useColors, printLinterName: printLinterName, + useColors: useColors, log: log, + w: w, } } -func (p Text) SprintfColored(ca color.Attribute, format string, args ...interface{}) string { +func (p *Text) SprintfColored(ca color.Attribute, format string, args ...any) string { + c := color.New(ca) + if !p.useColors { - return fmt.Sprintf(format, args...) + c.DisableColor() } - c := color.New(ca) return c.Sprintf(format, args...) } -func (p *Text) Print(ctx context.Context, issues []result.Issue) error { +func (p *Text) Print(issues []result.Issue) error { for i := range issues { p.printIssue(&issues[i]) @@ -52,7 +55,7 @@ func (p *Text) Print(ctx context.Context, issues []result.Issue) error { return nil } -func (p Text) printIssue(i *result.Issue) { +func (p *Text) printIssue(i *result.Issue) { text := p.SprintfColored(color.FgRed, "%s", strings.TrimSpace(i.Text)) if p.printLinterName { text += fmt.Sprintf(" (%s)", i.FromLinter) @@ -61,16 +64,16 @@ func (p Text) printIssue(i *result.Issue) { if i.Pos.Column != 0 { pos += fmt.Sprintf(":%d", i.Pos.Column) } - fmt.Fprintf(logutils.StdOut, "%s: %s\n", pos, text) + fmt.Fprintf(p.w, "%s: %s\n", pos, text) } -func (p Text) printSourceCode(i *result.Issue) { +func (p *Text) printSourceCode(i *result.Issue) { for _, line := range i.SourceLines { - fmt.Fprintln(logutils.StdOut, line) + fmt.Fprintln(p.w, line) } } -func (p Text) printUnderLinePointer(i *result.Issue) { +func (p *Text) printUnderLinePointer(i *result.Issue) { // if column == 0 it means column is unknown (e.g. 
for gosec) if len(i.SourceLines) != 1 || i.Pos.Column == 0 { return @@ -87,5 +90,5 @@ func (p Text) printUnderLinePointer(i *result.Issue) { } } - fmt.Fprintf(logutils.StdOut, "%s%s\n", string(prefixRunes), p.SprintfColored(color.FgYellow, "^")) + fmt.Fprintf(p.w, "%s%s\n", string(prefixRunes), p.SprintfColored(color.FgYellow, "^")) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/report/log.go b/vendor/github.com/golangci/golangci-lint/pkg/report/log.go index 45ab6cae85..61665f28b7 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/report/log.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/report/log.go @@ -20,20 +20,20 @@ func NewLogWrapper(log logutils.Log, reportData *Data) *LogWrapper { } } -func (lw LogWrapper) Fatalf(format string, args ...interface{}) { +func (lw LogWrapper) Fatalf(format string, args ...any) { lw.origLog.Fatalf(format, args...) } -func (lw LogWrapper) Panicf(format string, args ...interface{}) { +func (lw LogWrapper) Panicf(format string, args ...any) { lw.origLog.Panicf(format, args...) } -func (lw LogWrapper) Errorf(format string, args ...interface{}) { +func (lw LogWrapper) Errorf(format string, args ...any) { lw.origLog.Errorf(format, args...) lw.rd.Error = fmt.Sprintf(format, args...) } -func (lw LogWrapper) Warnf(format string, args ...interface{}) { +func (lw LogWrapper) Warnf(format string, args ...any) { lw.origLog.Warnf(format, args...) w := Warning{ Tag: strings.Join(lw.tags, "/"), @@ -43,7 +43,7 @@ func (lw LogWrapper) Warnf(format string, args ...interface{}) { lw.rd.Warnings = append(lw.rd.Warnings, w) } -func (lw LogWrapper) Infof(format string, args ...interface{}) { +func (lw LogWrapper) Infof(format string, args ...any) { lw.origLog.Infof(format, args...) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/issue.go b/vendor/github.com/golangci/golangci-lint/pkg/result/issue.go index 707a2b17cd..1e8cd30524 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/result/issue.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/issue.go @@ -92,7 +92,7 @@ func (i *Issue) Fingerprint() string { } hash := md5.New() //nolint:gosec - _, _ = hash.Write([]byte(fmt.Sprintf("%s%s%s", i.Pos.Filename, i.Text, firstLine))) + _, _ = fmt.Fprintf(hash, "%s%s%s", i.Pos.Filename, i.Text, firstLine) return fmt.Sprintf("%X", hash.Sum(nil)) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/autogenerated_exclude.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/autogenerated_exclude.go index 57388f64fa..c7675fce8f 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/autogenerated_exclude.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/autogenerated_exclude.go @@ -1,19 +1,18 @@ package processors import ( + "errors" "fmt" "go/parser" "go/token" "path/filepath" "strings" - "github.com/pkg/errors" - "github.com/golangci/golangci-lint/pkg/logutils" "github.com/golangci/golangci-lint/pkg/result" ) -var autogenDebugf = logutils.Debug("autogen_exclude") +var autogenDebugf = logutils.Debug(logutils.DebugKeyAutogenExclude) type ageFileSummary struct { isGenerated bool @@ -33,7 +32,7 @@ func NewAutogeneratedExclude() *AutogeneratedExclude { var _ Processor = &AutogeneratedExclude{} -func (p AutogeneratedExclude) Name() string { +func (p *AutogeneratedExclude) Name() string { return "autogenerated_exclude" } @@ -71,7 +70,7 @@ func (p *AutogeneratedExclude) shouldPassIssue(i *result.Issue) (bool, error) { } // isGenerated reports whether the 
source file is generated code. -// Using a bit laxer rules than https://golang.org/s/generatedcode to +// Using a bit laxer rules than https://go.dev/s/generatedcode to // match more generated code. See #48 and #72. func isGeneratedFileByComment(doc string) bool { const ( @@ -103,12 +102,12 @@ func (p *AutogeneratedExclude) getOrCreateFileSummary(i *result.Issue) (*ageFile p.fileSummaryCache[i.FilePath()] = fs if i.FilePath() == "" { - return nil, fmt.Errorf("no file path for issue") + return nil, errors.New("no file path for issue") } doc, err := getDoc(i.FilePath()) if err != nil { - return nil, errors.Wrapf(err, "failed to get doc of file %s", i.FilePath()) + return nil, fmt.Errorf("failed to get doc of file %s: %w", i.FilePath(), err) } fs.isGenerated = isGeneratedFileByComment(doc) @@ -120,7 +119,7 @@ func getDoc(filePath string) (string, error) { fset := token.NewFileSet() syntax, err := parser.ParseFile(fset, filePath, nil, parser.PackageClauseOnly|parser.ParseComments) if err != nil { - return "", errors.Wrap(err, "failed to parse file") + return "", fmt.Errorf("failed to parse file: %w", err) } var docLines []string @@ -131,4 +130,4 @@ func getDoc(filePath string) (string, error) { return strings.Join(docLines, "\n"), nil } -func (p AutogeneratedExclude) Finish() {} +func (p *AutogeneratedExclude) Finish() {} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/base_rule.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/base_rule.go index b6ce4f2159..b5e138806b 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/base_rule.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/base_rule.go @@ -9,31 +9,36 @@ import ( ) type BaseRule struct { - Text string - Source string - Path string - Linters []string + Text string + Source string + Path string + PathExcept string + Linters []string } type baseRule struct { - text *regexp.Regexp - source *regexp.Regexp - path *regexp.Regexp - linters []string + text *regexp.Regexp + source *regexp.Regexp + path *regexp.Regexp + pathExcept *regexp.Regexp + linters []string } func (r *baseRule) isEmpty() bool { - return r.text == nil && r.source == nil && r.path == nil && len(r.linters) == 0 + return r.text == nil && r.source == nil && r.path == nil && r.pathExcept == nil && len(r.linters) == 0 } -func (r *baseRule) match(issue *result.Issue, lineCache *fsutils.LineCache, log logutils.Log) bool { +func (r *baseRule) match(issue *result.Issue, files *fsutils.Files, log logutils.Log) bool { if r.isEmpty() { return false } if r.text != nil && !r.text.MatchString(issue.Text) { return false } - if r.path != nil && !r.path.MatchString(issue.FilePath()) { + if r.path != nil && !r.path.MatchString(files.WithPathPrefix(issue.FilePath())) { + return false + } + if r.pathExcept != nil && r.pathExcept.MatchString(issue.FilePath()) { return false } if len(r.linters) != 0 && !r.matchLinter(issue) { @@ -41,7 +46,7 @@ func (r *baseRule) match(issue *result.Issue, lineCache *fsutils.LineCache, log } // the most heavyweight checking last - if r.source != nil && !r.matchSource(issue, lineCache, log) { + if r.source != nil && !r.matchSource(issue, files.LineCache, log) { return false } @@ -58,7 +63,7 @@ func (r *baseRule) matchLinter(issue *result.Issue) bool { return false } -func (r *baseRule) matchSource(issue *result.Issue, lineCache *fsutils.LineCache, log logutils.Log) bool { // nolint:interfacer +func (r *baseRule) matchSource(issue *result.Issue, lineCache *fsutils.LineCache, log 
logutils.Log) bool { //nolint:interfacer sourceLine, errSourceLine := lineCache.GetLine(issue.FilePath(), issue.Line()) if errSourceLine != nil { log.Warnf("Failed to get line %s:%d from line cache: %s", issue.FilePath(), issue.Line(), errSourceLine) diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/cgo.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/cgo.go index c8793871ac..8e77237518 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/cgo.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/cgo.go @@ -1,11 +1,10 @@ package processors import ( + "fmt" "path/filepath" "strings" - "github.com/pkg/errors" - "github.com/golangci/golangci-lint/pkg/goutil" "github.com/golangci/golangci-lint/pkg/result" ) @@ -37,7 +36,7 @@ func (p Cgo) Process(issues []result.Issue) ([]result.Issue, error) { if !filepath.IsAbs(i.FilePath()) { absPath, err := filepath.Abs(i.FilePath()) if err != nil { - return false, errors.Wrapf(err, "failed to build abs path for %q", i.FilePath()) + return false, fmt.Errorf("failed to build abs path for %q: %w", i.FilePath(), err) } issueFilePath = absPath } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/diff.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/diff.go index fc4aba4b93..496d9c865c 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/diff.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/diff.go @@ -4,7 +4,6 @@ import ( "bytes" "fmt" "io" - "io/ioutil" "os" "strings" @@ -13,21 +12,25 @@ import ( "github.com/golangci/golangci-lint/pkg/result" ) +const envGolangciDiffProcessorPatch = "GOLANGCI_DIFF_PROCESSOR_PATCH" + type Diff struct { onlyNew bool fromRev string patchFilePath string + wholeFiles bool patch string } var _ Processor = Diff{} -func NewDiff(onlyNew bool, fromRev, patchFilePath string) *Diff { +func NewDiff(onlyNew bool, fromRev, patchFilePath string, wholeFiles bool) *Diff { return &Diff{ onlyNew: onlyNew, fromRev: fromRev, patchFilePath: patchFilePath, - patch: os.Getenv("GOLANGCI_DIFF_PROCESSOR_PATCH"), + wholeFiles: wholeFiles, + patch: os.Getenv(envGolangciDiffProcessorPatch), } } @@ -42,9 +45,9 @@ func (p Diff) Process(issues []result.Issue) ([]result.Issue, error) { var patchReader io.Reader if p.patchFilePath != "" { - patch, err := ioutil.ReadFile(p.patchFilePath) + patch, err := os.ReadFile(p.patchFilePath) if err != nil { - return nil, fmt.Errorf("can't read from patch file %s: %s", p.patchFilePath, err) + return nil, fmt.Errorf("can't read from patch file %s: %w", p.patchFilePath, err) } patchReader = bytes.NewReader(patch) } else if p.patch != "" { @@ -54,9 +57,10 @@ func (p Diff) Process(issues []result.Issue) ([]result.Issue, error) { c := revgrep.Checker{ Patch: patchReader, RevisionFrom: p.fromRev, + WholeFiles: p.wholeFiles, } if err := c.Prepare(); err != nil { - return nil, fmt.Errorf("can't prepare diff by revgrep: %s", err) + return nil, fmt.Errorf("can't prepare diff by revgrep: %w", err) } return transformIssues(issues, func(i *result.Issue) *result.Issue { diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/exclude_rules.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/exclude_rules.go index d4d6569f4c..2f7e30b430 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/exclude_rules.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/exclude_rules.go @@ -17,15 +17,15 @@ 
type ExcludeRule struct { } type ExcludeRules struct { - rules []excludeRule - lineCache *fsutils.LineCache - log logutils.Log + rules []excludeRule + files *fsutils.Files + log logutils.Log } -func NewExcludeRules(rules []ExcludeRule, lineCache *fsutils.LineCache, log logutils.Log) *ExcludeRules { +func NewExcludeRules(rules []ExcludeRule, files *fsutils.Files, log logutils.Log) *ExcludeRules { r := &ExcludeRules{ - lineCache: lineCache, - log: log, + files: files, + log: log, } r.rules = createRules(rules, "(?i)") @@ -44,7 +44,12 @@ func createRules(rules []ExcludeRule, prefix string) []excludeRule { parsedRule.source = regexp.MustCompile(prefix + rule.Source) } if rule.Path != "" { - parsedRule.path = regexp.MustCompile(rule.Path) + path := fsutils.NormalizePathInRegex(rule.Path) + parsedRule.path = regexp.MustCompile(path) + } + if rule.PathExcept != "" { + pathExcept := fsutils.NormalizePathInRegex(rule.PathExcept) + parsedRule.pathExcept = regexp.MustCompile(pathExcept) } parsedRules = append(parsedRules, parsedRule) } @@ -58,7 +63,7 @@ func (p ExcludeRules) Process(issues []result.Issue) ([]result.Issue, error) { return filterIssues(issues, func(i *result.Issue) bool { for _, rule := range p.rules { rule := rule - if rule.match(i, p.lineCache, p.log) { + if rule.match(i, p.files, p.log) { return false } } @@ -75,10 +80,10 @@ type ExcludeRulesCaseSensitive struct { *ExcludeRules } -func NewExcludeRulesCaseSensitive(rules []ExcludeRule, lineCache *fsutils.LineCache, log logutils.Log) *ExcludeRulesCaseSensitive { +func NewExcludeRulesCaseSensitive(rules []ExcludeRule, files *fsutils.Files, log logutils.Log) *ExcludeRulesCaseSensitive { r := &ExcludeRules{ - lineCache: lineCache, - log: log, + files: files, + log: log, } r.rules = createRules(rules, "") diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/filename_unadjuster.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/filename_unadjuster.go index 96540245b3..2aaafbf58b 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/filename_unadjuster.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/filename_unadjuster.go @@ -97,7 +97,7 @@ func NewFilenameUnadjuster(pkgs []*packages.Package, log logutils.Log) *Filename } } -func (p FilenameUnadjuster) Name() string { +func (p *FilenameUnadjuster) Name() string { return "filename_unadjuster" } @@ -128,4 +128,4 @@ func (p *FilenameUnadjuster) Process(issues []result.Issue) ([]result.Issue, err }), nil } -func (FilenameUnadjuster) Finish() {} +func (p *FilenameUnadjuster) Finish() {} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/fixer.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/fixer.go index 17f519e32b..a79a846288 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/fixer.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/fixer.go @@ -8,8 +8,7 @@ import ( "sort" "strings" - "github.com/pkg/errors" - + "github.com/golangci/golangci-lint/internal/robustio" "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/fsutils" "github.com/golangci/golangci-lint/pkg/logutils" @@ -17,6 +16,8 @@ import ( "github.com/golangci/golangci-lint/pkg/timeutils" ) +var _ Processor = Fixer{} + type Fixer struct { cfg *config.Config log logutils.Log @@ -37,9 +38,9 @@ func (f Fixer) printStat() { f.sw.PrintStages() } -func (f Fixer) Process(issues []result.Issue) []result.Issue { +func (f Fixer) 
Process(issues []result.Issue) ([]result.Issue, error) { if !f.cfg.Issues.NeedFix { - return issues + return issues, nil } outIssues := make([]result.Issue, 0, len(issues)) @@ -68,22 +69,28 @@ func (f Fixer) Process(issues []result.Issue) []result.Issue { } f.printStat() - return outIssues + return outIssues, nil } +func (f Fixer) Name() string { + return "fixer" +} + +func (f Fixer) Finish() {} + func (f Fixer) fixIssuesInFile(filePath string, issues []result.Issue) error { // TODO: don't read the whole file into memory: read line by line; // can't just use bufio.scanner: it has a line length limit origFileData, err := f.fileCache.GetFileBytes(filePath) if err != nil { - return errors.Wrapf(err, "failed to get file bytes for %s", filePath) + return fmt.Errorf("failed to get file bytes for %s: %w", filePath, err) } origFileLines := bytes.Split(origFileData, []byte("\n")) tmpFileName := filepath.Join(filepath.Dir(filePath), fmt.Sprintf(".%s.golangci_fix", filepath.Base(filePath))) tmpOutFile, err := os.Create(tmpFileName) if err != nil { - return errors.Wrapf(err, "failed to make file %s", tmpFileName) + return fmt.Errorf("failed to make file %s: %w", tmpFileName, err) } // merge multiple issues per line into one issue @@ -104,14 +111,14 @@ func (f Fixer) fixIssuesInFile(filePath string, issues []result.Issue) error { if err = f.writeFixedFile(origFileLines, issues, tmpOutFile); err != nil { tmpOutFile.Close() - os.Remove(tmpOutFile.Name()) + _ = robustio.RemoveAll(tmpOutFile.Name()) return err } tmpOutFile.Close() - if err = os.Rename(tmpOutFile.Name(), filePath); err != nil { - os.Remove(tmpOutFile.Name()) - return errors.Wrapf(err, "failed to rename %s -> %s", tmpOutFile.Name(), filePath) + if err = robustio.Rename(tmpOutFile.Name(), filePath); err != nil { + _ = robustio.RemoveAll(tmpOutFile.Name()) + return fmt.Errorf("failed to rename %s -> %s: %w", tmpOutFile.Name(), filePath, err) } return nil @@ -240,7 +247,7 @@ func (f Fixer) writeFixedFile(origFileLines [][]byte, issues []result.Issue, tmp outLine += "\n" } if _, err := tmpOutFile.WriteString(outLine); err != nil { - return errors.Wrap(err, "failed to write output line") + return fmt.Errorf("failed to write output line: %w", err) } } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/utils.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/issues.go similarity index 65% rename from vendor/github.com/golangci/golangci-lint/pkg/result/processors/utils.go rename to vendor/github.com/golangci/golangci-lint/pkg/result/processors/issues.go index 7108fd3b3c..4691be38a4 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/utils.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/issues.go @@ -1,11 +1,7 @@ package processors import ( - "path/filepath" - "regexp" - "strings" - - "github.com/pkg/errors" + "fmt" "github.com/golangci/golangci-lint/pkg/result" ) @@ -26,7 +22,7 @@ func filterIssuesErr(issues []result.Issue, filter func(i *result.Issue) (bool, for i := range issues { ok, err := filter(&issues[i]) if err != nil { - return nil, errors.Wrapf(err, "can't filter issue %#v", issues[i]) + return nil, fmt.Errorf("can't filter issue %#v: %w", issues[i], err) } if ok { @@ -48,15 +44,3 @@ func transformIssues(issues []result.Issue, transform func(i *result.Issue) *res return retIssues } - -var separatorToReplace = regexp.QuoteMeta(string(filepath.Separator)) - -func normalizePathInRegex(path string) string { - if filepath.Separator == '/' { - return path - 
} - - // This replacing should be safe because "/" are disallowed in Windows - // https://docs.microsoft.com/ru-ru/windows/win32/fileio/naming-a-file - return strings.ReplaceAll(path, "/", separatorToReplace) -} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/max_from_linter.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/max_from_linter.go index c58666c566..649ed86ae9 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/max_from_linter.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/max_from_linter.go @@ -24,7 +24,7 @@ func NewMaxFromLinter(limit int, log logutils.Log, cfg *config.Config) *MaxFromL } } -func (p MaxFromLinter) Name() string { +func (p *MaxFromLinter) Name() string { return "max_from_linter" } @@ -44,7 +44,7 @@ func (p *MaxFromLinter) Process(issues []result.Issue) ([]result.Issue, error) { }), nil } -func (p MaxFromLinter) Finish() { +func (p *MaxFromLinter) Finish() { walkStringToIntMapSortedByValue(p.lc, func(linter string, count int) { if count > p.limit { p.log.Infof("%d/%d issues from linter %s were hidden, use --max-issues-per-linter", diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/max_per_file_from_linter.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/max_per_file_from_linter.go index e36446c9fc..64182e3e22 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/max_per_file_from_linter.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/max_per_file_from_linter.go @@ -31,7 +31,7 @@ func NewMaxPerFileFromLinter(cfg *config.Config) *MaxPerFileFromLinter { } } -func (p MaxPerFileFromLinter) Name() string { +func (p *MaxPerFileFromLinter) Name() string { return "max_per_file_from_linter" } @@ -56,4 +56,4 @@ func (p *MaxPerFileFromLinter) Process(issues []result.Issue) ([]result.Issue, e }), nil } -func (p MaxPerFileFromLinter) Finish() {} +func (p *MaxPerFileFromLinter) Finish() {} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/max_same_issues.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/max_same_issues.go index 84fdf0c053..391ae5fa7f 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/max_same_issues.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/max_same_issues.go @@ -28,7 +28,7 @@ func NewMaxSameIssues(limit int, log logutils.Log, cfg *config.Config) *MaxSameI } } -func (MaxSameIssues) Name() string { +func (p *MaxSameIssues) Name() string { return "max_same_issues" } @@ -48,7 +48,7 @@ func (p *MaxSameIssues) Process(issues []result.Issue) ([]result.Issue, error) { }), nil } -func (p MaxSameIssues) Finish() { +func (p *MaxSameIssues) Finish() { walkStringToIntMapSortedByValue(p.tc, func(text string, count int) { if count > p.limit { p.log.Infof("%d/%d issues with text %q were hidden, use --max-same-issues", diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/nolint.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/nolint.go index 0788a7160e..a72dd1ef29 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/nolint.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/nolint.go @@ -1,7 +1,7 @@ package processors import ( - "fmt" + "errors" "go/ast" "go/parser" "go/token" @@ -9,6 +9,8 @@ import ( "sort" "strings" + "golang.org/x/exp/maps" + "github.com/golangci/golangci-lint/pkg/golinters" 
"github.com/golangci/golangci-lint/pkg/lint/linter" "github.com/golangci/golangci-lint/pkg/lint/lintersdb" @@ -16,7 +18,8 @@ import ( "github.com/golangci/golangci-lint/pkg/result" ) -var nolintDebugf = logutils.Debug("nolint") +var nolintDebugf = logutils.Debug(logutils.DebugKeyNolint) +var nolintRe = regexp.MustCompile(`^nolint( |:|$)`) type ignoredRange struct { linters []string @@ -32,7 +35,7 @@ func (i *ignoredRange) doesMatch(issue *result.Issue) bool { } // only allow selective nolinting of nolintlint - nolintFoundForLinter := len(i.linters) == 0 && issue.FromLinter != golinters.NolintlintName + nolintFoundForLinter := len(i.linters) == 0 && issue.FromLinter != golinters.NoLintLintName for _, linterName := range i.linters { if linterName == issue.FromLinter { @@ -46,8 +49,8 @@ func (i *ignoredRange) doesMatch(issue *result.Issue) bool { } // handle possible unused nolint directives - // nolintlint generates potential issues for every nolint directive and they are filtered out here - if issue.FromLinter == golinters.NolintlintName && issue.ExpectNoLint { + // nolintlint generates potential issues for every nolint directive, and they are filtered out here + if issue.FromLinter == golinters.NoLintLintName && issue.ExpectNoLint { if issue.ExpectedNoLintLinter != "" { return i.matchedIssueFromLinter[issue.ExpectedNoLintLinter] } @@ -84,7 +87,7 @@ func NewNolint(log logutils.Log, dbManager *lintersdb.Manager, enabledLinters ma var _ Processor = &Nolint{} -func (p Nolint) Name() string { +func (p *Nolint) Name() string { return "nolint" } @@ -104,7 +107,7 @@ func (p *Nolint) getOrCreateFileData(i *result.Issue) (*fileData, error) { p.cache[i.FilePath()] = fd if i.FilePath() == "" { - return nil, fmt.Errorf("no file path for issue") + return nil, errors.New("no file path for issue") } // TODO: migrate this parsing to go/analysis facts @@ -147,7 +150,7 @@ func (p *Nolint) buildIgnoredRangesForFile(f *ast.File, fset *token.FileSet, fil func (p *Nolint) shouldPassIssue(i *result.Issue) (bool, error) { nolintDebugf("got issue: %v", *i) - if i.FromLinter == golinters.NolintlintName && i.ExpectNoLint && i.ExpectedNoLintLinter != "" { + if i.FromLinter == golinters.NoLintLintName && i.ExpectNoLint && i.ExpectedNoLintLinter != "" { // don't expect disabled linters to cover their nolint statements nolintDebugf("enabled linters: %v", p.enabledLinters) if p.enabledLinters[i.ExpectedNoLintLinter] == nil { @@ -234,7 +237,7 @@ func (p *Nolint) extractFileCommentsInlineRanges(fset *token.FileSet, comments . 
func (p *Nolint) extractInlineRangeFromComment(text string, g ast.Node, fset *token.FileSet) *ignoredRange { text = strings.TrimLeft(text, "/ ") - if ok, _ := regexp.MatchString(`^nolint( |:|$)`, text); !ok { + if !nolintRe.MatchString(text) { return nil } @@ -251,7 +254,7 @@ func (p *Nolint) extractInlineRangeFromComment(text string, g ast.Node, fset *to } } - if !strings.HasPrefix(text, "nolint:") { + if strings.HasPrefix(text, "nolint:all") || !strings.HasPrefix(text, "nolint:") { return buildRange(nil) // ignore all linters } @@ -259,8 +262,12 @@ func (p *Nolint) extractInlineRangeFromComment(text string, g ast.Node, fset *to var linters []string text = strings.Split(text, "//")[0] // allow another comment after this comment linterItems := strings.Split(strings.TrimPrefix(text, "nolint:"), ",") - for _, linter := range linterItems { - linterName := strings.ToLower(strings.TrimSpace(linter)) + for _, item := range linterItems { + linterName := strings.ToLower(strings.TrimSpace(item)) + if linterName == "all" { + p.unknownLintersSet = map[string]bool{} + return buildRange(nil) + } lcs := p.dbManager.GetLinterConfigs(linterName) if lcs == nil { @@ -279,15 +286,12 @@ func (p *Nolint) extractInlineRangeFromComment(text string, g ast.Node, fset *to return buildRange(linters) } -func (p Nolint) Finish() { +func (p *Nolint) Finish() { if len(p.unknownLintersSet) == 0 { return } - unknownLinters := []string{} - for name := range p.unknownLintersSet { - unknownLinters = append(unknownLinters, name) - } + unknownLinters := maps.Keys(p.unknownLintersSet) sort.Strings(unknownLinters) p.log.Warnf("Found unknown linters in //nolint directives: %s", strings.Join(unknownLinters, ", ")) @@ -301,7 +305,7 @@ func (issues sortWithNolintlintLast) Len() int { } func (issues sortWithNolintlintLast) Less(i, j int) bool { - return issues[i].FromLinter != golinters.NolintlintName && issues[j].FromLinter == golinters.NolintlintName + return issues[i].FromLinter != golinters.NoLintLintName && issues[j].FromLinter == golinters.NoLintLintName } func (issues sortWithNolintlintLast) Swap(i, j int) { diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/path_prefixer.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/path_prefixer.go index 5ce940b39b..f6b885011b 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/path_prefixer.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/path_prefixer.go @@ -1,8 +1,7 @@ package processors import ( - "path" - + "github.com/golangci/golangci-lint/pkg/fsutils" "github.com/golangci/golangci-lint/pkg/result" ) @@ -27,7 +26,7 @@ func (*PathPrefixer) Name() string { func (p *PathPrefixer) Process(issues []result.Issue) ([]result.Issue, error) { if p.prefix != "" { for i := range issues { - issues[i].Pos.Filename = path.Join(p.prefix, issues[i].Pos.Filename) + issues[i].Pos.Filename = fsutils.WithPathPrefix(p.prefix, issues[i].Pos.Filename) } } return issues, nil diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/path_shortener.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/path_shortener.go index 484f7f1f11..6b66bea8b0 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/path_shortener.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/path_shortener.go @@ -31,8 +31,8 @@ func (p PathShortener) Name() string { func (p PathShortener) Process(issues []result.Issue) ([]result.Issue, error) { return 
transformIssues(issues, func(i *result.Issue) *result.Issue { newI := i - newI.Text = strings.Replace(newI.Text, p.wd+"/", "", -1) - newI.Text = strings.Replace(newI.Text, p.wd, "", -1) + newI.Text = strings.ReplaceAll(newI.Text, p.wd+"/", "") + newI.Text = strings.ReplaceAll(newI.Text, p.wd, "") return newI }), nil } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/severity_rules.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/severity_rules.go index 7c9a4c1d63..0a4a643b71 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/severity_rules.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/severity_rules.go @@ -21,13 +21,13 @@ type SeverityRule struct { type SeverityRules struct { defaultSeverity string rules []severityRule - lineCache *fsutils.LineCache + files *fsutils.Files log logutils.Log } -func NewSeverityRules(defaultSeverity string, rules []SeverityRule, lineCache *fsutils.LineCache, log logutils.Log) *SeverityRules { +func NewSeverityRules(defaultSeverity string, rules []SeverityRule, files *fsutils.Files, log logutils.Log) *SeverityRules { r := &SeverityRules{ - lineCache: lineCache, + files: files, log: log, defaultSeverity: defaultSeverity, } @@ -49,7 +49,12 @@ func createSeverityRules(rules []SeverityRule, prefix string) []severityRule { parsedRule.source = regexp.MustCompile(prefix + rule.Source) } if rule.Path != "" { - parsedRule.path = regexp.MustCompile(rule.Path) + path := fsutils.NormalizePathInRegex(rule.Path) + parsedRule.path = regexp.MustCompile(path) + } + if rule.PathExcept != "" { + pathExcept := fsutils.NormalizePathInRegex(rule.PathExcept) + parsedRule.pathExcept = regexp.MustCompile(pathExcept) } parsedRules = append(parsedRules, parsedRule) } @@ -69,7 +74,7 @@ func (p SeverityRules) Process(issues []result.Issue) ([]result.Issue, error) { ruleSeverity = rule.severity } - if rule.match(i, p.lineCache, p.log) { + if rule.match(i, p.files, p.log) { i.Severity = ruleSeverity return i } @@ -89,9 +94,9 @@ type SeverityRulesCaseSensitive struct { } func NewSeverityRulesCaseSensitive(defaultSeverity string, rules []SeverityRule, - lineCache *fsutils.LineCache, log logutils.Log) *SeverityRulesCaseSensitive { + files *fsutils.Files, log logutils.Log) *SeverityRulesCaseSensitive { r := &SeverityRules{ - lineCache: lineCache, + files: files, log: log, defaultSeverity: defaultSeverity, } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/skip_dirs.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/skip_dirs.go index 6488c109ec..e71495fd0b 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/skip_dirs.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/skip_dirs.go @@ -1,12 +1,12 @@ package processors import ( + "fmt" "path/filepath" "regexp" "strings" - "github.com/pkg/errors" - + "github.com/golangci/golangci-lint/pkg/fsutils" "github.com/golangci/golangci-lint/pkg/logutils" "github.com/golangci/golangci-lint/pkg/result" ) @@ -22,19 +22,20 @@ type SkipDirs struct { skippedDirs map[string]*skipStat absArgsDirs []string skippedDirsCache map[string]bool + pathPrefix string } -var _ Processor = SkipFiles{} +var _ Processor = (*SkipDirs)(nil) const goFileSuffix = ".go" -func NewSkipDirs(patterns []string, log logutils.Log, runArgs []string) (*SkipDirs, error) { +func NewSkipDirs(patterns []string, log logutils.Log, runArgs []string, pathPrefix string) (*SkipDirs, error) { var patternsRe 
[]*regexp.Regexp for _, p := range patterns { - p = normalizePathInRegex(p) + p = fsutils.NormalizePathInRegex(p) patternRe, err := regexp.Compile(p) if err != nil { - return nil, errors.Wrapf(err, "can't compile regexp %q", p) + return nil, fmt.Errorf("can't compile regexp %q: %w", p, err) } patternsRe = append(patternsRe, patternRe) } @@ -51,7 +52,7 @@ func NewSkipDirs(patterns []string, log logutils.Log, runArgs []string) (*SkipDi absArg, err := filepath.Abs(arg) if err != nil { - return nil, errors.Wrapf(err, "failed to abs-ify arg %q", arg) + return nil, fmt.Errorf("failed to abs-ify arg %q: %w", arg, err) } absArgsDirs = append(absArgsDirs, absArg) } @@ -62,6 +63,7 @@ func NewSkipDirs(patterns []string, log logutils.Log, runArgs []string) (*SkipDi skippedDirs: map[string]*skipStat{}, absArgsDirs: absArgsDirs, skippedDirsCache: map[string]bool{}, + pathPrefix: pathPrefix, }, nil } @@ -120,8 +122,9 @@ func (p *SkipDirs) shouldPassIssueDirs(issueRelDir, issueAbsDir string) bool { // The alternative solution is to find relative to args path, but it has // disadvantages (https://github.com/golangci/golangci-lint/pull/313). + path := fsutils.WithPathPrefix(p.pathPrefix, issueRelDir) for _, pattern := range p.patterns { - if pattern.MatchString(issueRelDir) { + if pattern.MatchString(path) { ps := pattern.String() if p.skippedDirs[issueRelDir] == nil { p.skippedDirs[issueRelDir] = &skipStat{ diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/skip_files.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/skip_files.go index 522b07e4f2..6c1c586950 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/skip_files.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/skip_files.go @@ -4,28 +4,31 @@ import ( "fmt" "regexp" + "github.com/golangci/golangci-lint/pkg/fsutils" "github.com/golangci/golangci-lint/pkg/result" ) type SkipFiles struct { - patterns []*regexp.Regexp + patterns []*regexp.Regexp + pathPrefix string } -var _ Processor = SkipFiles{} +var _ Processor = (*SkipFiles)(nil) -func NewSkipFiles(patterns []string) (*SkipFiles, error) { +func NewSkipFiles(patterns []string, pathPrefix string) (*SkipFiles, error) { var patternsRe []*regexp.Regexp for _, p := range patterns { - p = normalizePathInRegex(p) + p = fsutils.NormalizePathInRegex(p) patternRe, err := regexp.Compile(p) if err != nil { - return nil, fmt.Errorf("can't compile regexp %q: %s", p, err) + return nil, fmt.Errorf("can't compile regexp %q: %w", p, err) } patternsRe = append(patternsRe, patternRe) } return &SkipFiles{ - patterns: patternsRe, + patterns: patternsRe, + pathPrefix: pathPrefix, }, nil } @@ -39,8 +42,9 @@ func (p SkipFiles) Process(issues []result.Issue) ([]result.Issue, error) { } return filterIssues(issues, func(i *result.Issue) bool { - for _, p := range p.patterns { - if p.MatchString(i.FilePath()) { + path := fsutils.WithPathPrefix(p.pathPrefix, i.FilePath()) + for _, pattern := range p.patterns { + if pattern.MatchString(path) { return false } } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/sort_results.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/sort_results.go index e726c3adfe..740c4fa8c3 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/sort_results.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/sort_results.go @@ -9,7 +9,7 @@ import ( ) // Base propose of this functionality to sort results (issues) -// produced by 
various linters by analyzing code. We achieving this +// produced by various linters by analyzing code. We're achieving this // by sorting results.Issues using processor step, and chain based // rules that can compare different properties of the Issues struct. @@ -63,7 +63,6 @@ func (c compareResult) isNeutral() bool { return c == None || c == Equal } -//nolint:exhaustive func (c compareResult) String() string { switch c { case Less: @@ -91,10 +90,8 @@ var ( type ByName struct{ next comparator } -//nolint:golint func (cmp ByName) Next() comparator { return cmp.next } -//nolint:golint func (cmp ByName) Compare(a, b *result.Issue) compareResult { var res compareResult @@ -111,10 +108,8 @@ func (cmp ByName) Compare(a, b *result.Issue) compareResult { type ByLine struct{ next comparator } -//nolint:golint func (cmp ByLine) Next() comparator { return cmp.next } -//nolint:golint func (cmp ByLine) Compare(a, b *result.Issue) compareResult { var res compareResult @@ -131,10 +126,8 @@ func (cmp ByLine) Compare(a, b *result.Issue) compareResult { type ByColumn struct{ next comparator } -//nolint:golint func (cmp ByColumn) Next() comparator { return cmp.next } -//nolint:golint func (cmp ByColumn) Compare(a, b *result.Issue) compareResult { var res compareResult diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/uniq_by_line.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/uniq_by_line.go index 17167dde5d..dc0e1e8cf6 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/uniq_by_line.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/uniq_by_line.go @@ -22,7 +22,7 @@ func NewUniqByLine(cfg *config.Config) *UniqByLine { var _ Processor = &UniqByLine{} -func (p UniqByLine) Name() string { +func (p *UniqByLine) Name() string { return "uniq_by_line" } @@ -55,4 +55,4 @@ func (p *UniqByLine) Process(issues []result.Issue) ([]result.Issue, error) { }), nil } -func (p UniqByLine) Finish() {} +func (p *UniqByLine) Finish() {} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/sliceutil/sliceutil.go b/vendor/github.com/golangci/golangci-lint/pkg/sliceutil/sliceutil.go deleted file mode 100644 index cb89e34e0c..0000000000 --- a/vendor/github.com/golangci/golangci-lint/pkg/sliceutil/sliceutil.go +++ /dev/null @@ -1,17 +0,0 @@ -package sliceutil - -// IndexOf get the index of the given value in the given string slice, -// or -1 if not found. -func IndexOf(slice []string, value string) int { - for i, v := range slice { - if v == value { - return i - } - } - return -1 -} - -// Contains check if a string slice contains a value. 
-func Contains(slice []string, value string) bool { - return IndexOf(slice, value) != -1 -} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/timeutils/stopwatch.go b/vendor/github.com/golangci/golangci-lint/pkg/timeutils/stopwatch.go index 9628bd80f2..d944dea2ea 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/timeutils/stopwatch.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/timeutils/stopwatch.go @@ -15,10 +15,10 @@ const noStagesText = "no stages" type Stopwatch struct { name string startedAt time.Time - stages map[string]time.Duration log logutils.Log - sync.Mutex + stages map[string]time.Duration + mu sync.Mutex } func NewStopwatch(name string, log logutils.Log) *Stopwatch { @@ -36,7 +36,7 @@ type stageDuration struct { } func (s *Stopwatch) stageDurationsSorted() []stageDuration { - stageDurations := []stageDuration{} + stageDurations := make([]stageDuration, 0, len(s.stages)) for n, d := range s.stages { stageDurations = append(stageDurations, stageDuration{ name: n, @@ -56,7 +56,7 @@ func (s *Stopwatch) sprintStages() string { stageDurations := s.stageDurationsSorted() - stagesStrings := []string{} + stagesStrings := make([]string, 0, len(stageDurations)) for _, s := range stageDurations { stagesStrings = append(stagesStrings, fmt.Sprintf("%s: %s", s.name, s.d)) } @@ -71,7 +71,7 @@ func (s *Stopwatch) sprintTopStages(n int) string { stageDurations := s.stageDurationsSorted() - stagesStrings := []string{} + var stagesStrings []string for i := 0; i < len(stageDurations) && i < n; i++ { s := stageDurations[i] stagesStrings = append(stagesStrings, fmt.Sprintf("%s: %s", s.name, s.d)) @@ -110,7 +110,7 @@ func (s *Stopwatch) TrackStage(name string, f func()) { startedAt := time.Now() f() - s.Lock() + s.mu.Lock() s.stages[name] += time.Since(startedAt) - s.Unlock() + s.mu.Unlock() } diff --git a/vendor/github.com/golangci/misspell/.gitignore b/vendor/github.com/golangci/misspell/.gitignore index b1b707e326..5e5c368f87 100644 --- a/vendor/github.com/golangci/misspell/.gitignore +++ b/vendor/github.com/golangci/misspell/.gitignore @@ -2,6 +2,9 @@ dist/ bin/ vendor/ +.idea/ +/misspell + # editor turds *~ *.gz diff --git a/vendor/github.com/golangci/misspell/.golangci.yml b/vendor/github.com/golangci/misspell/.golangci.yml new file mode 100644 index 0000000000..31c566eab3 --- /dev/null +++ b/vendor/github.com/golangci/misspell/.golangci.yml @@ -0,0 +1,109 @@ +run: + timeout: 2m + skip-files: [] + +linters-settings: + govet: + enable-all: true + disable: + - fieldalignment + - shadow # FIXME(ldez) must be fixed + gocyclo: + min-complexity: 16 + goconst: + min-len: 3 + min-occurrences: 3 + misspell: + locale: US + funlen: + lines: -1 + statements: 40 + gofumpt: + extra-rules: true + depguard: + rules: + main: + deny: + - pkg: "github.com/instana/testify" + desc: not allowed + - pkg: "github.com/pkg/errors" + desc: Should be replaced by standard lib errors package + godox: + keywords: + - FIXME + gocritic: + enabled-tags: + - diagnostic + - style + - performance + disabled-checks: + - sloppyReassign + - rangeValCopy + - octalLiteral + - paramTypeCombine # already handle by gofumpt.extra-rules + - exitAfterDefer # FIXME(ldez) must be fixed + - ifElseChain # FIXME(ldez) must be fixed + settings: + hugeParam: + sizeThreshold: 100 + forbidigo: + forbid: + - '^print(ln)?$' + - '^panic$' + - '^spew\.Print(f|ln)?$' + - '^spew\.Dump$' + +linters: + enable-all: true + disable: + - deadcode # deprecated + - exhaustivestruct # deprecated + - golint # deprecated + - ifshort # 
deprecated + - interfacer # deprecated + - maligned # deprecated + - nosnakecase # deprecated + - scopelint # deprecated + - scopelint # deprecated + - structcheck # deprecated + - varcheck # deprecated + - execinquery # not relevant (SQL) + - rowserrcheck # not relevant (SQL) + - sqlclosecheck # not relevant (SQL) + - cyclop # duplicate of gocyclo + - dupl + - exhaustive + - exhaustruct + - forbidigo + - gochecknoglobals + - gochecknoinits + - goerr113 + - gomnd + - lll + - nilnil + - nlreturn + - paralleltest + - prealloc + - testpackage + - tparallel + - varnamelen + - wrapcheck + - wsl + - misspell + - gosec # FIXME(ldez) must be fixed + - errcheck # FIXME(ldez) must be fixed + - nonamedreturns # FIXME(ldez) must be fixed + - nakedret # FIXME(ldez) must be fixed + +issues: + exclude-use-default: false + max-per-linter: 0 + max-same-issues: 0 + exclude: + - 'ST1000: at least one file in a package should have a package comment' + - 'package-comments: should have a package comment' + exclude-rules: + - path: .*_test.go + linters: + - funlen + - goconst diff --git a/vendor/github.com/golangci/misspell/.travis.yml b/vendor/github.com/golangci/misspell/.travis.yml deleted file mode 100644 index e63e6c2bdc..0000000000 --- a/vendor/github.com/golangci/misspell/.travis.yml +++ /dev/null @@ -1,20 +0,0 @@ -sudo: required -dist: trusty -group: edge -language: go -go: - - "1.10" -git: - depth: 1 - -script: - - ./scripts/travis.sh - -# calls goreleaser when a new tag is pushed -deploy: -- provider: script - skip_cleanup: true - script: curl -sL http://git.io/goreleaser | bash - on: - tags: true - condition: $TRAVIS_OS_NAME = linux diff --git a/vendor/github.com/golangci/misspell/Dockerfile b/vendor/github.com/golangci/misspell/Dockerfile index b8ea37b4c5..788ce3a775 100644 --- a/vendor/github.com/golangci/misspell/Dockerfile +++ b/vendor/github.com/golangci/misspell/Dockerfile @@ -1,16 +1,13 @@ -FROM golang:1.10.0-alpine +FROM golang:1.19-alpine # cache buster -RUN echo 4 +RUN echo 4 # git is needed for "go get" below RUN apk add --no-cache git make # these are my standard testing / linting tools RUN /bin/true \ - && go get -u github.com/golang/dep/cmd/dep \ - && go get -u github.com/alecthomas/gometalinter \ - && gometalinter --install \ && rm -rf /go/src /go/pkg # # * SCOWL word list @@ -35,3 +32,4 @@ RUN /bin/true \ && wget -O /scowl-wl/words-US-60.txt ${SOURCE_US} \ && wget -O /scowl-wl/words-GB-ise-60.txt ${SOURCE_GB_ISE} +RUN git config --global --add safe.directory "/go/src/github.com/golangci/misspell" diff --git a/vendor/github.com/golangci/misspell/Gopkg.lock b/vendor/github.com/golangci/misspell/Gopkg.lock deleted file mode 100644 index 90ed45115f..0000000000 --- a/vendor/github.com/golangci/misspell/Gopkg.lock +++ /dev/null @@ -1,24 +0,0 @@ -# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. 
- - -[[projects]] - name = "github.com/gobwas/glob" - packages = [ - ".", - "compiler", - "match", - "syntax", - "syntax/ast", - "syntax/lexer", - "util/runes", - "util/strings" - ] - revision = "5ccd90ef52e1e632236f7326478d4faa74f99438" - version = "v0.2.3" - -[solve-meta] - analyzer-name = "dep" - analyzer-version = 1 - inputs-digest = "087ea4c49358ea8258ad9edfe514cd5ce9975c889c258e5ec7b5d2b720aae113" - solver-name = "gps-cdcl" - solver-version = 1 diff --git a/vendor/github.com/golangci/misspell/Gopkg.toml b/vendor/github.com/golangci/misspell/Gopkg.toml deleted file mode 100644 index e9b8e6a45a..0000000000 --- a/vendor/github.com/golangci/misspell/Gopkg.toml +++ /dev/null @@ -1,34 +0,0 @@ -# Gopkg.toml example -# -# Refer to https://golang.github.io/dep/docs/Gopkg.toml.html -# for detailed Gopkg.toml documentation. -# -# required = ["github.com/user/thing/cmd/thing"] -# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] -# -# [[constraint]] -# name = "github.com/user/project" -# version = "1.0.0" -# -# [[constraint]] -# name = "github.com/user/project2" -# branch = "dev" -# source = "github.com/myfork/project2" -# -# [[override]] -# name = "github.com/x/y" -# version = "2.4.0" -# -# [prune] -# non-go = false -# go-tests = true -# unused-packages = true - - -[[constraint]] - name = "github.com/gobwas/glob" - version = "0.2.3" - -[prune] - go-tests = true - unused-packages = true diff --git a/vendor/github.com/golangci/misspell/LICENSE b/vendor/github.com/golangci/misspell/LICENSE index 423e1f9e0f..bfcfcd3013 100644 --- a/vendor/github.com/golangci/misspell/LICENSE +++ b/vendor/github.com/golangci/misspell/LICENSE @@ -19,4 +19,3 @@ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - diff --git a/vendor/github.com/golangci/misspell/Makefile b/vendor/github.com/golangci/misspell/Makefile index 862ab77b0d..783f977cb4 100644 --- a/vendor/github.com/golangci/misspell/Makefile +++ b/vendor/github.com/golangci/misspell/Makefile @@ -1,17 +1,18 @@ -CONTAINER=nickg/misspell +CONTAINER=golangci/misspell + +default: lint test build install: ## install misspell into GOPATH/bin go install ./cmd/misspell -build: hooks ## build and lint misspell - ./scripts/build.sh +build: ## build misspell + go build ./cmd/misspell test: ## run all tests - go test . + go test -v . -# real publishing is done only by travis -publish: ## test goreleaser - ./scripts/goreleaser-dryrun.sh +lint: ## run linter + golangci-lint run # the grep in line 2 is to remove misspellings in the spelling dictionary # that trigger false positives!! @@ -37,31 +38,22 @@ clean: ## clean up time go clean ./... git gc --aggressive -ci: ## run test like travis-ci does, requires docker +ci: docker-build ## run test like travis-ci does, requires docker docker run --rm \ - -v $(PWD):/go/src/github.com/client9/misspell \ - -w /go/src/github.com/client9/misspell \ + -v $(PWD):/go/src/github.com/golangci/misspell \ + -w /go/src/github.com/golangci/misspell \ ${CONTAINER} \ - make build falsepositives + make install falsepositives docker-build: ## build a docker test image docker build -t ${CONTAINER} . 
-docker-pull: ## pull latest test image - docker pull ${CONTAINER} - docker-console: ## log into the test image docker run --rm -it \ - -v $(PWD):/go/src/github.com/client9/misspell \ - -w /go/src/github.com/client9/misspell \ + -v $(PWD):/go/src/github.com/golangci/misspell \ + -w /go/src/github.com/golangci/misspell \ ${CONTAINER} sh -.git/hooks/pre-commit: scripts/pre-commit.sh - cp -f scripts/pre-commit.sh .git/hooks/pre-commit -.git/hooks/commit-msg: scripts/commit-msg.sh - cp -f scripts/commit-msg.sh .git/hooks/commit-msg -hooks: .git/hooks/pre-commit .git/hooks/commit-msg ## install git precommit hooks - .PHONY: help ci console docker-build bench # https://www.client9.com/self-documenting-makefiles/ @@ -69,6 +61,6 @@ help: @awk -F ':|##' '/^[^\t].+?:.*?##/ {\ printf "\033[36m%-30s\033[0m %s\n", $$1, $$NF \ }' $(MAKEFILE_LIST) -.DEFAULT_GOAL=help +.DEFAULT_GOAL=default .PHONY=help diff --git a/vendor/github.com/golangci/misspell/README.md b/vendor/github.com/golangci/misspell/README.md index 5b68af04da..cccd04996f 100644 --- a/vendor/github.com/golangci/misspell/README.md +++ b/vendor/github.com/golangci/misspell/README.md @@ -19,7 +19,7 @@ Both will install as `./bin/misspell`. You can adjust the download location usi If you use [Go](https://golang.org/), the best way to run `misspell` is by using [gometalinter](#gometalinter). Otherwise, install `misspell` the old-fashioned way: ``` -go get -u github.com/client9/misspell/cmd/misspell +go install github.com/client9/misspell/cmd/misspell@latest ``` and misspell will be in your `GOPATH` diff --git a/vendor/github.com/golangci/misspell/ascii.go b/vendor/github.com/golangci/misspell/ascii.go index 1430718d6a..d60af5a8da 100644 --- a/vendor/github.com/golangci/misspell/ascii.go +++ b/vendor/github.com/golangci/misspell/ascii.go @@ -1,7 +1,7 @@ package misspell -// ByteToUpper converts an ascii byte to upper cases -// Uses a branchless algorithm +// ByteToUpper converts an ascii byte to upper cases. +// Uses a branch-less algorithm. func ByteToUpper(x byte) byte { b := byte(0x80) | x c := b - byte(0x61) @@ -10,8 +10,8 @@ func ByteToUpper(x byte) byte { return x - (e >> 2) } -// ByteToLower converts an ascii byte to lower case -// uses a branchless algorithm +// ByteToLower converts an ascii byte to lower case. +// Uses a branch-less algorithm. func ByteToLower(eax byte) byte { ebx := eax&byte(0x7f) + byte(0x25) ebx = ebx&byte(0x7f) + byte(0x1a) @@ -19,7 +19,7 @@ func ByteToLower(eax byte) byte { return eax + ebx } -// ByteEqualFold does ascii compare, case insensitive +// ByteEqualFold does ascii compare, case insensitive. func ByteEqualFold(a, b byte) bool { return a == b || ByteToLower(a) == ByteToLower(b) } @@ -27,7 +27,7 @@ func ByteEqualFold(a, b byte) bool { // StringEqualFold ASCII case-insensitive comparison // golang toUpper/toLower for both bytes and strings // appears to be Unicode based which is super slow -// based from https://codereview.appspot.com/5180044/patch/14007/21002 +// based from https://codereview.appspot.com/5180044/patch/14007/21002. func StringEqualFold(s1, s2 string) bool { if len(s1) != len(s2) { return false @@ -47,9 +47,7 @@ func StringEqualFold(s1, s2 string) bool { return true } -// StringHasPrefixFold is similar to strings.HasPrefix but comparison -// is done ignoring ASCII case. -// / +// StringHasPrefixFold is similar to strings.HasPrefix but comparison is done ignoring ASCII case. 
func StringHasPrefixFold(s1, s2 string) bool { // prefix is bigger than input --> false if len(s1) < len(s2) { diff --git a/vendor/github.com/golangci/misspell/case.go b/vendor/github.com/golangci/misspell/case.go index 2ea3850dfa..0b580bedbc 100644 --- a/vendor/github.com/golangci/misspell/case.go +++ b/vendor/github.com/golangci/misspell/case.go @@ -4,10 +4,10 @@ import ( "strings" ) -// WordCase is an enum of various word casing styles +// WordCase is an enum of various word casing styles. type WordCase int -// Various WordCase types.. likely to be not correct +// Various WordCase types... likely to be not correct. const ( CaseUnknown WordCase = iota CaseLower @@ -15,7 +15,7 @@ const ( CaseTitle ) -// CaseStyle returns what case style a word is in +// CaseStyle returns what case style a word is in. func CaseStyle(word string) WordCase { upperCount := 0 lowerCount := 0 @@ -42,11 +42,10 @@ func CaseStyle(word string) WordCase { return CaseUnknown } -// CaseVariations returns -// If AllUpper or First-Letter-Only is upcased: add the all upper case version -// If AllLower, add the original, the title and upcase forms -// If Mixed, return the original, and the all upcase form -// +// CaseVariations returns: +// If AllUpper or First-Letter-Only is upper-cased: add the all upper case version. +// If AllLower, add the original, the title and upper-case forms. +// If Mixed, return the original, and the all upper-case form. func CaseVariations(word string, style WordCase) []string { switch style { case CaseLower: diff --git a/vendor/github.com/golangci/misspell/goreleaser.yml b/vendor/github.com/golangci/misspell/goreleaser.yml index 560cb3810c..97aa83e5ac 100644 --- a/vendor/github.com/golangci/misspell/goreleaser.yml +++ b/vendor/github.com/golangci/misspell/goreleaser.yml @@ -1,11 +1,7 @@ -# goreleaser.yml -# https://github.com/goreleaser/goreleaser - project_name: misspell builds: - - - main: cmd/misspell/main.go + - main: cmd/misspell/main.go binary: misspell ldflags: -s -w -X main.version={{.Version}} goos: @@ -14,22 +10,18 @@ builds: - windows goarch: - amd64 + - arm64 env: - CGO_ENABLED=0 - ignore: - - goos: darwin - goarch: 386 - - goos: windows - goarch: 386 -archive: - name_template: "{{ .Binary }}_{{ .Version }}_{{ .Os }}_{{ .Arch }}" - replacements: - amd64: 64bit - 386: 32bit - darwin: mac - files: - - none* +archives: + - name_template: "{{ .Binary }}_{{ .Version }}_{{ .Os }}_{{ .Arch }}" + replacements: + amd64: 64bit + 386: 32bit + darwin: mac + files: + - LICENSE checksum: name_template: "{{ .ProjectName }}_{{ .Version }}_checksums.txt" diff --git a/vendor/github.com/golangci/misspell/install-misspell.sh b/vendor/github.com/golangci/misspell/install-misspell.sh index e24a84a20b..51e9b3372a 100644 --- a/vendor/github.com/golangci/misspell/install-misspell.sh +++ b/vendor/github.com/golangci/misspell/install-misspell.sh @@ -6,12 +6,12 @@ set -e usage() { this=$1 cat < 50000 { fin, err := os.Open(filename) if err != nil { - return "", fmt.Errorf("Unable to open large file %q: %s", filename, err) + return "", fmt.Errorf("unable to open large file %q: %w", filename, err) } defer fin.Close() buf := make([]byte, 512) _, err = io.ReadFull(fin, buf) if err != nil { - return "", fmt.Errorf("Unable to read 512 bytes from %q: %s", filename, err) + return "", fmt.Errorf("unable to read 512 bytes from %q: %w", filename, err) } if !isTextFile(buf) { return "", nil } - // set so we don't double check this file + // set so we don't double-check this file isText = true } // read in whole file - 
raw, err := ioutil.ReadFile(filename) + raw, err := os.ReadFile(filename) if err != nil { - return "", fmt.Errorf("Unable to read all %q: %s", filename, err) + return "", fmt.Errorf("unable to read all %q: %w", filename, err) } if !isText && !isTextFile(raw) { diff --git a/vendor/github.com/golangci/misspell/notwords.go b/vendor/github.com/golangci/misspell/notwords.go index 06d0d5a5ad..a250cf7f6e 100644 --- a/vendor/github.com/golangci/misspell/notwords.go +++ b/vendor/github.com/golangci/misspell/notwords.go @@ -13,10 +13,10 @@ var ( ) // RemovePath attempts to strip away embedded file system paths, e.g. -// /foo/bar or /static/myimg.png // -// TODO: windows style +// /foo/bar or /static/myimg.png // +// TODO: windows style. func RemovePath(s string) string { out := bytes.Buffer{} var idx int @@ -57,28 +57,28 @@ func RemovePath(s string) string { return out.String() } -// replaceWithBlanks returns a string with the same number of spaces as the input +// replaceWithBlanks returns a string with the same number of spaces as the input. func replaceWithBlanks(s string) string { return strings.Repeat(" ", len(s)) } -// RemoveEmail remove email-like strings, e.g. "nickg+junk@xfoobar.com", "nickg@xyz.abc123.biz" +// RemoveEmail remove email-like strings, e.g. "nickg+junk@xfoobar.com", "nickg@xyz.abc123.biz". func RemoveEmail(s string) string { return reEmail.ReplaceAllStringFunc(s, replaceWithBlanks) } -// RemoveHost removes host-like strings "foobar.com" "abc123.fo1231.biz" +// RemoveHost removes host-like strings "foobar.com" "abc123.fo1231.biz". func RemoveHost(s string) string { return reHost.ReplaceAllStringFunc(s, replaceWithBlanks) } -// RemoveBackslashEscapes removes characters that are preceeded by a backslash -// commonly found in printf format stringd "\nto" +// RemoveBackslashEscapes removes characters that are preceded by a backslash. +// commonly found in printf format string "\nto". func removeBackslashEscapes(s string) string { return reBackslash.ReplaceAllStringFunc(s, replaceWithBlanks) } -// RemoveNotWords blanks out all the not words +// RemoveNotWords blanks out all the not words. func RemoveNotWords(s string) string { // do most selective/specific first return removeBackslashEscapes(RemoveHost(RemoveEmail(RemovePath(StripURL(s))))) diff --git a/vendor/github.com/golangci/misspell/replace.go b/vendor/github.com/golangci/misspell/replace.go index a99bbcc582..bcfcf8deb5 100644 --- a/vendor/github.com/golangci/misspell/replace.go +++ b/vendor/github.com/golangci/misspell/replace.go @@ -18,7 +18,7 @@ func max(x, y int) int { func inArray(haystack []string, needle string) bool { for _, word := range haystack { - if needle == word { + if strings.EqualFold(needle, word) { return true } } @@ -27,7 +27,7 @@ func inArray(haystack []string, needle string) bool { var wordRegexp = regexp.MustCompile(`[a-zA-Z0-9']+`) -// Diff is datastructure showing what changed in a single line +// Diff is datastructures showing what changed in a single line. type Diff struct { Filename string FullLine string @@ -37,7 +37,7 @@ type Diff struct { Corrected string } -// Replacer is the main struct for spelling correction +// Replacer is the main struct for spelling correction. type Replacer struct { Replacements []string Debug bool @@ -45,7 +45,7 @@ type Replacer struct { corrected map[string]string } -// New creates a new default Replacer using the main rule list +// New creates a new default Replacer using the main rule list. 
func New() *Replacer { r := Replacer{ Replacements: DictMain, @@ -54,31 +54,32 @@ func New() *Replacer { return &r } -// RemoveRule deletes existings rules. -// TODO: make inplace to save memory +// RemoveRule deletes existing rules. +// The content of `ignore` is case-insensitive. +// TODO: make in place to save memory. func (r *Replacer) RemoveRule(ignore []string) { - newwords := make([]string, 0, len(r.Replacements)) + newWords := make([]string, 0, len(r.Replacements)) for i := 0; i < len(r.Replacements); i += 2 { if inArray(ignore, r.Replacements[i]) { continue } - newwords = append(newwords, r.Replacements[i:i+2]...) + newWords = append(newWords, r.Replacements[i:i+2]...) } r.engine = nil - r.Replacements = newwords + r.Replacements = newWords } // AddRuleList appends new rules. // Input is in the same form as Strings.Replacer: [ old1, new1, old2, new2, ....] -// Note: does not check for duplictes +// Note: does not check for duplicates. func (r *Replacer) AddRuleList(additions []string) { r.engine = nil r.Replacements = append(r.Replacements, additions...) } -// Compile compiles the rules. Required before using the Replace functions +// Compile compiles the rules. +// Required before using the Replace functions. func (r *Replacer) Compile() { - r.corrected = make(map[string]string, len(r.Replacements)/2) for i := 0; i < len(r.Replacements); i += 2 { r.corrected[r.Replacements[i]] = r.Replacements[i+1] @@ -92,11 +93,14 @@ extract words from each line1 replace word -> newword if word == new-word - continue + + continue + if new-word in list of replacements - continue -new word not original, and not in list of replacements - some substring got mixed up. UNdo + + continue + +new word not original, and not in list of replacements some substring got mixed up. UNdo. */ func (r *Replacer) recheckLine(s string, lineNum int, buf io.Writer, next func(Diff)) { first := 0 @@ -136,9 +140,8 @@ func (r *Replacer) recheckLine(s string, lineNum int, buf io.Writer, next func(D io.WriteString(buf, s[first:]) } -// ReplaceGo is a specialized routine for correcting Golang source -// files. Currently only checks comments, not identifiers for -// spelling. +// ReplaceGo is a specialized routine for correcting Golang source files. +// Currently only checks comments, not identifiers for spelling. func (r *Replacer) ReplaceGo(input string) (string, []Diff) { var s scanner.Scanner s.Init(strings.NewReader(input)) @@ -169,7 +172,7 @@ Loop: return input, nil } if lastPos < len(input) { - output = output + input[lastPos:] + output += input[lastPos:] } diffs := make([]Diff, 0, 8) buf := bytes.NewBuffer(make([]byte, 0, max(len(input), len(output))+100)) @@ -187,11 +190,9 @@ Loop: } return buf.String(), diffs - } -// Replace is corrects misspellings in input, returning corrected version -// along with a list of diffs. +// Replace is corrects misspellings in input, returning corrected version along with a list of diffs. func (r *Replacer) Replace(input string) (string, []Diff) { output := r.engine.Replace(input) if input == output { @@ -215,8 +216,8 @@ func (r *Replacer) Replace(input string) (string, []Diff) { return buf.String(), diffs } -// ReplaceReader applies spelling corrections to a reader stream. Diffs are -// emitted through a callback. +// ReplaceReader applies spelling corrections to a reader stream. +// Diffs are emitted through a callback. 
func (r *Replacer) ReplaceReader(raw io.Reader, w io.Writer, next func(Diff)) error { var ( err error @@ -239,7 +240,7 @@ func (r *Replacer) ReplaceReader(raw io.Reader, w io.Writer, next func(Diff)) er io.WriteString(w, line) continue } - // but it can be inaccurate, so we need to double check + // but it can be inaccurate, so we need to double-check r.recheckLine(line, lineNum, w, next) } return nil diff --git a/vendor/github.com/golangci/misspell/stringreplacer.go b/vendor/github.com/golangci/misspell/stringreplacer.go index 3151eceb70..73ca9a56ac 100644 --- a/vendor/github.com/golangci/misspell/stringreplacer.go +++ b/vendor/github.com/golangci/misspell/stringreplacer.go @@ -6,7 +6,6 @@ package misspell import ( "io" - // "log" "strings" ) @@ -38,7 +37,7 @@ func (r *StringReplacer) Replace(s string) string { } // WriteString writes s to w with all replacements performed. -func (r *StringReplacer) WriteString(w io.Writer, s string) (n int, err error) { +func (r *StringReplacer) WriteString(w io.Writer, s string) (int, error) { return r.r.WriteString(w, s) } @@ -46,14 +45,14 @@ func (r *StringReplacer) WriteString(w io.Writer, s string) (n int, err error) { // and values may be empty. For example, the trie containing keys "ax", "ay", // "bcbc", "x" and "xy" could have eight nodes: // -// n0 - -// n1 a- -// n2 .x+ -// n3 .y+ -// n4 b- -// n5 .cbc+ -// n6 x+ -// n7 .y+ +// n0 - +// n1 a- +// n2 .x+ +// n3 .y+ +// n4 b- +// n5 .cbc+ +// n6 x+ +// n7 .y+ // // n0 is the root node, and its children are n1, n4 and n6; n1's children are // n2 and n3; n4's child is n5; n6's child is n7. Nodes n0, n1 and n4 (marked @@ -103,6 +102,7 @@ func (t *trieNode) add(key, val string, priority int, r *genericReplacer) { return } + //nolint:nestif // TODO(ldez) must be fixed. if t.prefix != "" { // Need to split the prefix among multiple nodes. var n int // length of the longest common prefix @@ -157,42 +157,6 @@ func (t *trieNode) add(key, val string, priority int, r *genericReplacer) { } } -func (r *genericReplacer) lookup(s string, ignoreRoot bool) (val string, keylen int, found bool) { - // Iterate down the trie to the end, and grab the value and keylen with - // the highest priority. - bestPriority := 0 - node := &r.root - n := 0 - for node != nil { - if node.priority > bestPriority && !(ignoreRoot && node == &r.root) { - bestPriority = node.priority - val = node.value - keylen = n - found = true - } - - if s == "" { - break - } - if node.table != nil { - index := r.mapping[ByteToLower(s[0])] - if int(index) == r.tableSize { - break - } - node = node.table[index] - s = s[1:] - n++ - } else if node.prefix != "" && StringHasPrefixFold(s, node.prefix) { - n += len(node.prefix) - s = s[len(node.prefix):] - node = node.next - } else { - break - } - } - return -} - // genericReplacer is the fully generic algorithm. // It's used as a fallback when nothing faster can be used. type genericReplacer struct { @@ -236,38 +200,40 @@ func makeGenericReplacer(oldnew []string) *genericReplacer { return r } -type appendSliceWriter []byte - -// Write writes to the buffer to satisfy io.Writer. -func (w *appendSliceWriter) Write(p []byte) (int, error) { - *w = append(*w, p...) - return len(p), nil -} - -// WriteString writes to the buffer without string->[]byte->string allocations. -func (w *appendSliceWriter) WriteString(s string) (int, error) { - *w = append(*w, s...) 
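The replace.go hunks above touch the public misspell Replacer API (New, RemoveRule, AddRuleList, Compile, Replace). A minimal usage sketch, assuming the vendored import path and that the Diff fields beyond Corrected match upstream misspell; the sample input is invented:

```go
package main

import (
	"fmt"

	"github.com/golangci/misspell"
)

func main() {
	// New returns a Replacer preloaded with DictMain; Compile must be
	// called before any Replace* method, as the updated doc comment notes.
	r := misspell.New()
	r.RemoveRule([]string{"langauge"}) // ignore rules are now matched case-insensitively
	r.Compile()

	corrected, diffs := r.Replace("A missepelled sentance.")
	fmt.Println(corrected)
	for _, d := range diffs {
		fmt.Printf("%s -> %s\n", d.Original, d.Corrected)
	}
}
```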
- return len(s), nil -} - -type stringWriterIface interface { - WriteString(string) (int, error) -} - -type stringWriter struct { - w io.Writer -} - -func (w stringWriter) WriteString(s string) (int, error) { - return w.w.Write([]byte(s)) -} +func (r *genericReplacer) lookup(s string, ignoreRoot bool) (val string, keylen int, found bool) { + // Iterate down the trie to the end, and grab the value and keylen with + // the highest priority. + bestPriority := 0 + node := &r.root + n := 0 + for node != nil { + if node.priority > bestPriority && !(ignoreRoot && node == &r.root) { + bestPriority = node.priority + val = node.value + keylen = n + found = true + } -func getStringWriter(w io.Writer) stringWriterIface { - sw, ok := w.(stringWriterIface) - if !ok { - sw = stringWriter{w} + if s == "" { + break + } + if node.table != nil { + index := r.mapping[ByteToLower(s[0])] + if int(index) == r.tableSize { + break + } + node = node.table[index] + s = s[1:] + n++ + } else if node.prefix != "" && StringHasPrefixFold(s, node.prefix) { + n += len(node.prefix) + s = s[len(node.prefix):] + node = node.next + } else { + break + } } - return sw + return } func (r *genericReplacer) Replace(s string) string { @@ -276,6 +242,7 @@ func (r *genericReplacer) Replace(s string) string { return string(buf) } +//nolint:gocognit // TODO(ldez) must be fixed. func (r *genericReplacer) WriteString(w io.Writer, s string) (n int, err error) { sw := getStringWriter(w) var last, wn int @@ -316,7 +283,7 @@ func (r *genericReplacer) WriteString(w io.Writer, s string) (n int, err error) if err != nil { return } - //log.Printf("%d: Going to correct %q with %q", i, s[i:i+keylen], val) + // debug helper: log.Printf("%d: Going to correct %q with %q", i, s[i:i+keylen], val) wn, err = sw.WriteString(val) n += wn if err != nil { @@ -334,3 +301,33 @@ func (r *genericReplacer) WriteString(w io.Writer, s string) (n int, err error) } return } + +type appendSliceWriter []byte + +// Write writes to the buffer to satisfy io.Writer. +func (w *appendSliceWriter) Write(p []byte) (int, error) { + *w = append(*w, p...) + return len(p), nil +} + +// WriteString writes to the buffer without string->[]byte->string allocations. +func (w *appendSliceWriter) WriteString(s string) (int, error) { + *w = append(*w, s...) + return len(s), nil +} + +type stringWriter struct { + w io.Writer +} + +func (w stringWriter) WriteString(s string) (int, error) { + return w.w.Write([]byte(s)) +} + +func getStringWriter(w io.Writer) io.StringWriter { + sw, ok := w.(io.StringWriter) + if !ok { + sw = stringWriter{w} + } + return sw +} diff --git a/vendor/github.com/golangci/misspell/url.go b/vendor/github.com/golangci/misspell/url.go index 1a259f5f99..203b91a79e 100644 --- a/vendor/github.com/golangci/misspell/url.go +++ b/vendor/github.com/golangci/misspell/url.go @@ -7,11 +7,12 @@ import ( // Regexp for URL https://mathiasbynens.be/demo/url-regex // // original @imme_emosol (54 chars) has trouble with dashes in hostname -// @(https?|ftp)://(-\.)?([^\s/?\.#-]+\.?)+(/[^\s]*)?$@iS -var reURL = regexp.MustCompile(`(?i)(https?|ftp)://(-\.)?([^\s/?\.#]+\.?)+(/[^\s]*)?`) +// @(https?|ftp)://(-\.)?([^\s/?\.#-]+\.?)+(/[^\s]*)?$@iS. +var reURL = regexp.MustCompile(`(?i)(https?|ftp)://(-\.)?([^\s/?.#]+\.?)+(/\S*)?`) // StripURL attemps to replace URLs with blank spaces, e.g. -// "xxx http://foo.com/ yyy -> "xxx yyyy" +// +// "xxx http://foo.com/ yyy -> "xxx yyyy". 
func StripURL(s string) string { return reURL.ReplaceAllStringFunc(s, replaceWithBlanks) } diff --git a/vendor/github.com/golangci/revgrep/.golangci.yml b/vendor/github.com/golangci/revgrep/.golangci.yml index b8ed6204fe..02ed5ec849 100644 --- a/vendor/github.com/golangci/revgrep/.golangci.yml +++ b/vendor/github.com/golangci/revgrep/.golangci.yml @@ -28,12 +28,20 @@ linters-settings: linters: enable-all: true disable: - - maligned # Deprecated - - scopelint # Deprecated - - golint # Deprecated - - interfacer # Deprecated - - exhaustivestruct # Deprecated + - deadcode # deprecated + - exhaustivestruct # deprecated + - golint # deprecated + - ifshort # deprecated + - interfacer # deprecated + - maligned # deprecated + - nosnakecase # deprecated + - scopelint # deprecated + - structcheck # deprecated + - varcheck # deprecated - cyclop # duplicate of gocyclo + - sqlclosecheck # not relevant (SQL) + - rowserrcheck # not relevant (SQL) + - execinquery # not relevant (SQL) - dupl - lll - nestif @@ -54,6 +62,7 @@ linters: - nosnakecase - nonamedreturns - nilerr + - depguard issues: exclude-use-default: false diff --git a/vendor/github.com/golangci/revgrep/revgrep.go b/vendor/github.com/golangci/revgrep/revgrep.go index 4b990fa048..7796b1c01c 100644 --- a/vendor/github.com/golangci/revgrep/revgrep.go +++ b/vendor/github.com/golangci/revgrep/revgrep.go @@ -1,3 +1,4 @@ +// Package revgrep filter static analysis tools to only lines changed based on a commit reference. package revgrep import ( @@ -17,31 +18,26 @@ import ( // Checker provides APIs to filter static analysis tools to specific commits, // such as showing only issues since last commit. type Checker struct { - // Patch file (unified) to read to detect lines being changed, if nil revgrep - // will attempt to detect the VCS and generate an appropriate patch. Auto - // detection will search for uncommitted changes first, if none found, will - // generate a patch from last committed change. File paths within patches - // must be relative to current working directory. + // Patch file (unified) to read to detect lines being changed, + // if nil revgrep will attempt to detect the VCS and generate an appropriate patch. + // Auto-detection will search for uncommitted changes first, + // if none found, will generate a patch from last committed change. + // File paths within patches must be relative to current working directory. Patch io.Reader - // NewFiles is a list of file names (with absolute paths) where the entire - // contents of the file is new. + // NewFiles is a list of file names (with absolute paths) where the entire contents of the file is new. NewFiles []string // Debug sets the debug writer for additional output. Debug io.Writer - // RevisionFrom check revision starting at, leave blank for auto detection - // ignored if patch is set. + // RevisionFrom check revision starting at, leave blank for auto-detection ignored if patch is set. RevisionFrom string - // WholeFiles indicates that the user wishes to see all issues that comes up - // anywhere in any file that has been changed in this revision or patch. + // WholeFiles indicates that the user wishes to see all issues that comes up anywhere in any file that has been changed in this revision or patch. WholeFiles bool - // RevisionTo checks revision finishing at, leave blank for auto detection - // ignored if patch is set. + // RevisionTo checks revision finishing at, leave blank for auto-detection ignored if patch is set. 
RevisionTo string // Regexp to match path, line number, optional column number, and message. Regexp string - // AbsPath is used to make an absolute path of an issue's filename to be - // relative in order to match patch file. If not set, current working - // directory is used. + // AbsPath is used to make an absolute path of an issue's filename to be relative in order to match patch file. + // If not set, current working directory is used. AbsPath string // Calculated changes for next calls to IsNewIssue @@ -56,9 +52,7 @@ type Issue struct { LineNo int // ColNo is the column number or 0 if none could be parsed. ColNo int - // HunkPos is position from file's first @@, for new files this will be the - // line number. - // + // HunkPos is position from file's first @@, for new files this will be the line number. // See also: https://developer.github.com/v3/pulls/comments/#create-a-comment HunkPos int // Issue text as it appeared from the tool. @@ -135,16 +129,14 @@ func (c *Checker) IsNewIssue(i InputIssue) (hunkPos int, isNew bool) { return 0, false } -// Check scans reader and writes any lines to writer that have been added in -// Checker.Patch. +// Check scans reader and writes any lines to writer that have been added in Checker.Patch. // // Returns the issues written to writer when no error occurs. // -// If no VCS could be found or other VCS errors occur, all issues are written -// to writer and an error is returned. +// If no VCS could be found or other VCS errors occur, +// all issues are written to writer and an error is returned. // -// File paths in reader must be relative to current working directory or -// absolute. +// File paths in reader must be relative to current working directory or absolute. func (c *Checker) Check(reader io.Reader, writer io.Writer) (issues []Issue, err error) { returnErr := c.Prepare() writeAll := returnErr != nil @@ -265,8 +257,7 @@ func (c *Checker) preparePatch() error { } // linesChanges returns a map of file names to line numbers being changed. -// If key is nil, the file has been recently added, else it contains a slice -// of positions that have been added. +// If key is nil, the file has been recently added, else it contains a slice of positions that have been added. func (c *Checker) linesChanged() map[string][]pos { type state struct { file string @@ -343,17 +334,12 @@ func (c *Checker) linesChanged() map[string][]pos { return changes } -// GitPatch returns a patch from a git repository, if no git repository was -// was found and no errors occurred, nil is returned, else an error is returned -// revisionFrom and revisionTo defines the git diff parameters, if left blank -// and there are unstaged changes or untracked files, only those will be returned -// else only check changes since HEAD~. If revisionFrom is set but revisionTo -// is not, untracked files will be included, to exclude untracked files set -// revisionTo to HEAD~. It's incorrect to specify revisionTo without a -// revisionFrom. +// GitPatch returns a patch from a git repository, +// if no git repository was found and no errors occurred, nil is returned, else an error is returned revisionFrom and revisionTo defines the git diff parameters, +// if left blank and there are unstaged changes or untracked files, only those will be returned else only check changes since HEAD~. +// If revisionFrom is set but revisionTo is not, untracked files will be included, to exclude untracked files set revisionTo to HEAD~. +// It's incorrect to specify revisionTo without a revisionFrom. 
func GitPatch(revisionFrom, revisionTo string) (io.Reader, []string, error) { - var patch bytes.Buffer - // check if git repo exists if err := exec.Command("git", "status", "--porcelain").Run(); err != nil { // don't return an error, we assume the error is not repo exists @@ -377,8 +363,9 @@ func GitPatch(revisionFrom, revisionTo string) (io.Reader, []string, error) { newFiles = append(newFiles, string(file)) } + var patch bytes.Buffer if revisionFrom != "" { - cmd := exec.Command("git", "diff", "--color=never", "--relative", revisionFrom) + cmd := gitDiff(revisionFrom) if revisionTo != "" { cmd.Args = append(cmd.Args, revisionTo) } @@ -392,12 +379,12 @@ func GitPatch(revisionFrom, revisionTo string) (io.Reader, []string, error) { if revisionTo == "" { return &patch, newFiles, nil } + return &patch, nil, nil } // make a patch for unstaged changes - // use --no-prefix to remove b/ given: +++ b/main.go - cmd := exec.Command("git", "diff", "--color=never", "--relative", "--") + cmd := gitDiff("--") cmd.Stdout = &patch if err := cmd.Run(); err != nil { return nil, nil, fmt.Errorf("error executing git diff: %w", err) @@ -412,7 +399,7 @@ func GitPatch(revisionFrom, revisionTo string) (io.Reader, []string, error) { // check for changes in recent commit - cmd = exec.Command("git", "diff", "--color=never", "--relative", "HEAD~", "--") + cmd = gitDiff("HEAD~", "--") cmd.Stdout = &patch if err := cmd.Run(); err != nil { return nil, nil, fmt.Errorf("error executing git diff HEAD~: %w", err) @@ -420,3 +407,55 @@ func GitPatch(revisionFrom, revisionTo string) (io.Reader, []string, error) { return &patch, nil, nil } + +func gitDiff(extraArgs ...string) *exec.Cmd { + cmd := exec.Command("git", "diff", "--color=never", "--no-ext-diff") + + if isSupportedByGit(2, 41, 0) { + cmd.Args = append(cmd.Args, "--default-prefix") + } + + cmd.Args = append(cmd.Args, "--relative") + cmd.Args = append(cmd.Args, extraArgs...) + + return cmd +} + +func isSupportedByGit(major, minor, patch int) bool { + output, err := exec.Command("git", "version").CombinedOutput() + if err != nil { + return false + } + + parts := bytes.Split(bytes.TrimSpace(output), []byte(" ")) + if len(parts) < 3 { + return false + } + + v := string(parts[2]) + if v == "" { + return false + } + + vp := regexp.MustCompile(`^(\d+)\.(\d+)(?:\.(\d+))?.*$`).FindStringSubmatch(v) + if len(vp) < 4 { + return false + } + + currentMajor, err := strconv.Atoi(vp[1]) + if err != nil { + return false + } + + currentMinor, err := strconv.Atoi(vp[2]) + if err != nil { + return false + } + + currentPatch, err := strconv.Atoi(vp[3]) + if err != nil { + return false + } + + return currentMajor*1_000_000_000+currentMinor*1_000_000+currentPatch*1_000 >= major*1_000_000_000+minor*1_000_000+patch*1_000 +} diff --git a/vendor/github.com/google/s2a-go/README.md b/vendor/github.com/google/s2a-go/README.md index d566950f38..fe0f5c1da8 100644 --- a/vendor/github.com/google/s2a-go/README.md +++ b/vendor/github.com/google/s2a-go/README.md @@ -10,8 +10,5 @@ Session Agent during the TLS handshake, and to encrypt traffic to the peer after the TLS handshake is complete. This repository contains the source code for the Secure Session Agent's Go -client libraries, which allow gRPC-Go applications to use the Secure Session -Agent. This repository supports the Bazel and Golang build systems. - -All code in this repository is experimental and subject to change. We do not -guarantee API stability at this time. 
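Tying the revgrep changes above together, a sketch of the intended flow: generate a patch with GitPatch, then filter linter output through Checker.Check so only issues on changed lines survive. The use of stdin/stdout here is illustrative, not part of the library:

```go
package main

import (
	"log"
	"os"

	"github.com/golangci/revgrep"
)

func main() {
	// Empty revisions ask GitPatch to auto-detect: unstaged changes and
	// untracked files first, otherwise the last commit (HEAD~).
	patch, newFiles, err := revgrep.GitPatch("", "")
	if err != nil {
		log.Fatal(err)
	}

	c := revgrep.Checker{
		Patch:    patch,
		NewFiles: newFiles,
	}

	// Pipe linter output (path:line:col: message) in on stdin; Check echoes
	// only the issues that fall on added lines and returns them as well.
	issues, err := c.Check(os.Stdin, os.Stdout)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("%d issue(s) on changed lines", len(issues))
}
```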
+client libraries, which allow gRPC and HTTP Go applications to use the Secure Session +Agent. diff --git a/vendor/github.com/google/s2a-go/internal/handshaker/service/service.go b/vendor/github.com/google/s2a-go/internal/handshaker/service/service.go index 49573af887..ed44965370 100644 --- a/vendor/github.com/google/s2a-go/internal/handshaker/service/service.go +++ b/vendor/github.com/google/s2a-go/internal/handshaker/service/service.go @@ -21,50 +21,27 @@ package service import ( "context" - "net" - "os" - "strings" "sync" - "time" - "google.golang.org/appengine" - "google.golang.org/appengine/socket" grpc "google.golang.org/grpc" - "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" ) -// An environment variable, if true, opportunistically use AppEngine-specific dialer to call S2A. -const enableAppEngineDialerEnv = "S2A_ENABLE_APP_ENGINE_DIALER" - var ( - // appEngineDialerHook is an AppEngine-specific dial option that is set - // during init time. If nil, then the application is not running on Google - // AppEngine. - appEngineDialerHook func(context.Context) grpc.DialOption // mu guards hsConnMap and hsDialer. mu sync.Mutex // hsConnMap represents a mapping from an S2A handshaker service address // to a corresponding connection to an S2A handshaker service instance. hsConnMap = make(map[string]*grpc.ClientConn) // hsDialer will be reassigned in tests. - hsDialer = grpc.Dial + hsDialer = grpc.DialContext ) -func init() { - if !appengine.IsAppEngine() && !appengine.IsDevAppServer() { - return - } - appEngineDialerHook = func(ctx context.Context) grpc.DialOption { - return grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) { - return socket.DialTimeout(ctx, "tcp", addr, timeout) - }) - } -} - // Dial dials the S2A handshaker service. If a connection has already been // established, this function returns it. Otherwise, a new connection is // created. -func Dial(handshakerServiceAddress string) (*grpc.ClientConn, error) { +func Dial(ctx context.Context, handshakerServiceAddress string, transportCreds credentials.TransportCredentials) (*grpc.ClientConn, error) { mu.Lock() defer mu.Unlock() @@ -72,17 +49,14 @@ func Dial(handshakerServiceAddress string) (*grpc.ClientConn, error) { if !ok { // Create a new connection to the S2A handshaker service. Note that // this connection stays open until the application is closed. - grpcOpts := []grpc.DialOption{ - grpc.WithInsecure(), - } - if enableAppEngineDialer() && appEngineDialerHook != nil { - if grpclog.V(1) { - grpclog.Info("Using AppEngine-specific dialer to talk to S2A.") - } - grpcOpts = append(grpcOpts, appEngineDialerHook(context.Background())) + var grpcOpts []grpc.DialOption + if transportCreds != nil { + grpcOpts = append(grpcOpts, grpc.WithTransportCredentials(transportCreds)) + } else { + grpcOpts = append(grpcOpts, grpc.WithTransportCredentials(insecure.NewCredentials())) } var err error - hsConn, err = hsDialer(handshakerServiceAddress, grpcOpts...) + hsConn, err = hsDialer(ctx, handshakerServiceAddress, grpcOpts...) 
if err != nil { return nil, err } @@ -90,10 +64,3 @@ func Dial(handshakerServiceAddress string) (*grpc.ClientConn, error) { } return hsConn, nil } - -func enableAppEngineDialer() bool { - if strings.ToLower(os.Getenv(enableAppEngineDialerEnv)) == "true" { - return true - } - return false -} diff --git a/vendor/github.com/google/s2a-go/internal/record/ticketsender.go b/vendor/github.com/google/s2a-go/internal/record/ticketsender.go index 33fa3c55d4..e51199ab3a 100644 --- a/vendor/github.com/google/s2a-go/internal/record/ticketsender.go +++ b/vendor/github.com/google/s2a-go/internal/record/ticketsender.go @@ -83,13 +83,15 @@ func (t *ticketSender) sendTicketsToS2A(sessionTickets [][]byte, callComplete ch t.ensureProcessSessionTickets.Done() } }() - hsConn, err := service.Dial(t.hsAddr) + ctx, cancel := context.WithTimeout(context.Background(), sessionTimeout) + defer cancel() + // The transportCreds only needs to be set when talking to S2AV2 and also + // if mTLS is required. + hsConn, err := service.Dial(ctx, t.hsAddr, nil) if err != nil { return err } client := s2apb.NewS2AServiceClient(hsConn) - ctx, cancel := context.WithTimeout(context.Background(), sessionTimeout) - defer cancel() session, err := client.SetUpSession(ctx) if err != nil { return err diff --git a/vendor/github.com/google/s2a-go/internal/v2/s2av2.go b/vendor/github.com/google/s2a-go/internal/v2/s2av2.go index ff172883f2..85a8379d83 100644 --- a/vendor/github.com/google/s2a-go/internal/v2/s2av2.go +++ b/vendor/github.com/google/s2a-go/internal/v2/s2av2.go @@ -33,6 +33,7 @@ import ( "github.com/google/s2a-go/internal/handshaker/service" "github.com/google/s2a-go/internal/tokenmanager" "github.com/google/s2a-go/internal/v2/tlsconfigstore" + "github.com/google/s2a-go/retry" "github.com/google/s2a-go/stream" "google.golang.org/grpc" "google.golang.org/grpc/credentials" @@ -44,18 +45,19 @@ import ( const ( s2aSecurityProtocol = "tls" - defaultS2ATimeout = 3 * time.Second + defaultS2ATimeout = 6 * time.Second ) // An environment variable, which sets the timeout enforced on the connection to the S2A service for handshake. const s2aTimeoutEnv = "S2A_TIMEOUT" type s2av2TransportCreds struct { - info *credentials.ProtocolInfo - isClient bool - serverName string - s2av2Address string - tokenManager *tokenmanager.AccessTokenManager + info *credentials.ProtocolInfo + isClient bool + serverName string + s2av2Address string + transportCreds credentials.TransportCredentials + tokenManager *tokenmanager.AccessTokenManager // localIdentity should only be used by the client. localIdentity *commonpbv1.Identity // localIdentities should only be used by the server. @@ -68,7 +70,7 @@ type s2av2TransportCreds struct { // NewClientCreds returns a client-side transport credentials object that uses // the S2Av2 to establish a secure connection with a server. 
-func NewClientCreds(s2av2Address string, localIdentity *commonpbv1.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, fallbackClientHandshakeFunc fallback.ClientHandshake, getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error), serverAuthorizationPolicy []byte) (credentials.TransportCredentials, error) { +func NewClientCreds(s2av2Address string, transportCreds credentials.TransportCredentials, localIdentity *commonpbv1.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, fallbackClientHandshakeFunc fallback.ClientHandshake, getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error), serverAuthorizationPolicy []byte) (credentials.TransportCredentials, error) { // Create an AccessTokenManager instance to use to authenticate to S2Av2. accessTokenManager, err := tokenmanager.NewSingleTokenAccessTokenManager() @@ -79,6 +81,7 @@ func NewClientCreds(s2av2Address string, localIdentity *commonpbv1.Identity, ver isClient: true, serverName: "", s2av2Address: s2av2Address, + transportCreds: transportCreds, localIdentity: localIdentity, verificationMode: verificationMode, fallbackClientHandshake: fallbackClientHandshakeFunc, @@ -98,7 +101,7 @@ func NewClientCreds(s2av2Address string, localIdentity *commonpbv1.Identity, ver // NewServerCreds returns a server-side transport credentials object that uses // the S2Av2 to establish a secure connection with a client. -func NewServerCreds(s2av2Address string, localIdentities []*commonpbv1.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error)) (credentials.TransportCredentials, error) { +func NewServerCreds(s2av2Address string, transportCreds credentials.TransportCredentials, localIdentities []*commonpbv1.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error)) (credentials.TransportCredentials, error) { // Create an AccessTokenManager instance to use to authenticate to S2Av2. 
accessTokenManager, err := tokenmanager.NewSingleTokenAccessTokenManager() creds := &s2av2TransportCreds{ @@ -107,6 +110,7 @@ func NewServerCreds(s2av2Address string, localIdentities []*commonpbv1.Identity, }, isClient: false, s2av2Address: s2av2Address, + transportCreds: transportCreds, localIdentities: localIdentities, verificationMode: verificationMode, getS2AStream: getS2AStream, @@ -131,7 +135,13 @@ func (c *s2av2TransportCreds) ClientHandshake(ctx context.Context, serverAuthori serverName := removeServerNamePort(serverAuthority) timeoutCtx, cancel := context.WithTimeout(ctx, GetS2ATimeout()) defer cancel() - s2AStream, err := createStream(timeoutCtx, c.s2av2Address, c.getS2AStream) + var s2AStream stream.S2AStream + var err error + retry.Run(timeoutCtx, + func() error { + s2AStream, err = createStream(timeoutCtx, c.s2av2Address, c.transportCreds, c.getS2AStream) + return err + }) if err != nil { grpclog.Infof("Failed to connect to S2Av2: %v", err) if c.fallbackClientHandshake != nil { @@ -152,31 +162,34 @@ func (c *s2av2TransportCreds) ClientHandshake(ctx context.Context, serverAuthori tokenManager = *c.tokenManager } - if c.serverName == "" { - config, err = tlsconfigstore.GetTLSConfigurationForClient(serverName, s2AStream, tokenManager, c.localIdentity, c.verificationMode, c.serverAuthorizationPolicy) - if err != nil { - grpclog.Info("Failed to get client TLS config from S2Av2: %v", err) - if c.fallbackClientHandshake != nil { - return c.fallbackClientHandshake(ctx, serverAuthority, rawConn, err) - } - return nil, nil, err - } - } else { - config, err = tlsconfigstore.GetTLSConfigurationForClient(c.serverName, s2AStream, tokenManager, c.localIdentity, c.verificationMode, c.serverAuthorizationPolicy) - if err != nil { - grpclog.Info("Failed to get client TLS config from S2Av2: %v", err) - if c.fallbackClientHandshake != nil { - return c.fallbackClientHandshake(ctx, serverAuthority, rawConn, err) - } - return nil, nil, err + sn := serverName + if c.serverName != "" { + sn = c.serverName + } + retry.Run(timeoutCtx, + func() error { + config, err = tlsconfigstore.GetTLSConfigurationForClient(sn, s2AStream, tokenManager, c.localIdentity, c.verificationMode, c.serverAuthorizationPolicy) + return err + }) + if err != nil { + grpclog.Info("Failed to get client TLS config from S2Av2: %v", err) + if c.fallbackClientHandshake != nil { + return c.fallbackClientHandshake(ctx, serverAuthority, rawConn, err) } + return nil, nil, err } if grpclog.V(1) { grpclog.Infof("Got client TLS config from S2Av2.") } - creds := credentials.NewTLS(config) - conn, authInfo, err := creds.ClientHandshake(ctx, serverName, rawConn) + creds := credentials.NewTLS(config) + var conn net.Conn + var authInfo credentials.AuthInfo + retry.Run(timeoutCtx, + func() error { + conn, authInfo, err = creds.ClientHandshake(timeoutCtx, serverName, rawConn) + return err + }) if err != nil { grpclog.Infof("Failed to do client handshake using S2Av2: %v", err) if c.fallbackClientHandshake != nil { @@ -196,7 +209,13 @@ func (c *s2av2TransportCreds) ServerHandshake(rawConn net.Conn) (net.Conn, crede } ctx, cancel := context.WithTimeout(context.Background(), GetS2ATimeout()) defer cancel() - s2AStream, err := createStream(ctx, c.s2av2Address, c.getS2AStream) + var s2AStream stream.S2AStream + var err error + retry.Run(ctx, + func() error { + s2AStream, err = createStream(ctx, c.s2av2Address, c.transportCreds, c.getS2AStream) + return err + }) if err != nil { grpclog.Infof("Failed to connect to S2Av2: %v", err) return nil, nil, err @@ 
-213,7 +232,12 @@ func (c *s2av2TransportCreds) ServerHandshake(rawConn net.Conn) (net.Conn, crede tokenManager = *c.tokenManager } - config, err := tlsconfigstore.GetTLSConfigurationForServer(s2AStream, tokenManager, c.localIdentities, c.verificationMode) + var config *tls.Config + retry.Run(ctx, + func() error { + config, err = tlsconfigstore.GetTLSConfigurationForServer(s2AStream, tokenManager, c.localIdentities, c.verificationMode) + return err + }) if err != nil { grpclog.Infof("Failed to get server TLS config from S2Av2: %v", err) return nil, nil, err @@ -221,8 +245,20 @@ func (c *s2av2TransportCreds) ServerHandshake(rawConn net.Conn) (net.Conn, crede if grpclog.V(1) { grpclog.Infof("Got server TLS config from S2Av2.") } + creds := credentials.NewTLS(config) - return creds.ServerHandshake(rawConn) + var conn net.Conn + var authInfo credentials.AuthInfo + retry.Run(ctx, + func() error { + conn, authInfo, err = creds.ServerHandshake(rawConn) + return err + }) + if err != nil { + grpclog.Infof("Failed to do server handshake using S2Av2: %v", err) + return nil, nil, err + } + return conn, authInfo, err } // Info returns protocol info of s2av2TransportCreds. @@ -278,11 +314,12 @@ func (c *s2av2TransportCreds) Clone() credentials.TransportCredentials { func NewClientTLSConfig( ctx context.Context, s2av2Address string, + transportCreds credentials.TransportCredentials, tokenManager tokenmanager.AccessTokenManager, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, serverName string, serverAuthorizationPolicy []byte) (*tls.Config, error) { - s2AStream, err := createStream(ctx, s2av2Address, nil) + s2AStream, err := createStream(ctx, s2av2Address, transportCreds, nil) if err != nil { grpclog.Infof("Failed to connect to S2Av2: %v", err) return nil, err @@ -325,12 +362,12 @@ func (x s2AGrpcStream) CloseSend() error { return x.stream.CloseSend() } -func createStream(ctx context.Context, s2av2Address string, getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error)) (stream.S2AStream, error) { +func createStream(ctx context.Context, s2av2Address string, transportCreds credentials.TransportCredentials, getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error)) (stream.S2AStream, error) { if getS2AStream != nil { return getS2AStream(ctx, s2av2Address) } // TODO(rmehta19): Consider whether to close the connection to S2Av2. - conn, err := service.Dial(s2av2Address) + conn, err := service.Dial(ctx, s2av2Address, transportCreds) if err != nil { return nil, err } diff --git a/vendor/github.com/google/s2a-go/retry/retry.go b/vendor/github.com/google/s2a-go/retry/retry.go new file mode 100644 index 0000000000..f7e0a23779 --- /dev/null +++ b/vendor/github.com/google/s2a-go/retry/retry.go @@ -0,0 +1,144 @@ +/* + * + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package retry provides a retry helper for talking to S2A gRPC server. 
+// The implementation is modeled after +// https://github.com/googleapis/google-cloud-go/blob/main/compute/metadata/retry.go +package retry + +import ( + "context" + "math/rand" + "time" + + "google.golang.org/grpc/grpclog" +) + +const ( + maxRetryAttempts = 5 + maxRetryForLoops = 10 +) + +type defaultBackoff struct { + max time.Duration + mul float64 + cur time.Duration +} + +// Pause returns a duration, which is used as the backoff wait time +// before the next retry. +func (b *defaultBackoff) Pause() time.Duration { + d := time.Duration(1 + rand.Int63n(int64(b.cur))) + b.cur = time.Duration(float64(b.cur) * b.mul) + if b.cur > b.max { + b.cur = b.max + } + return d +} + +// Sleep will wait for the specified duration or return on context +// expiration. +func Sleep(ctx context.Context, d time.Duration) error { + t := time.NewTimer(d) + select { + case <-ctx.Done(): + t.Stop() + return ctx.Err() + case <-t.C: + return nil + } +} + +// NewRetryer creates an instance of S2ARetryer using the defaultBackoff +// implementation. +var NewRetryer = func() *S2ARetryer { + return &S2ARetryer{bo: &defaultBackoff{ + cur: 100 * time.Millisecond, + max: 30 * time.Second, + mul: 2, + }} +} + +type backoff interface { + Pause() time.Duration +} + +// S2ARetryer implements a retry helper for talking to S2A gRPC server. +type S2ARetryer struct { + bo backoff + attempts int +} + +// Attempts return the number of retries attempted. +func (r *S2ARetryer) Attempts() int { + return r.attempts +} + +// Retry returns a boolean indicating whether retry should be performed +// and the backoff duration. +func (r *S2ARetryer) Retry(err error) (time.Duration, bool) { + if err == nil { + return 0, false + } + if r.attempts >= maxRetryAttempts { + return 0, false + } + r.attempts++ + return r.bo.Pause(), true +} + +// Run uses S2ARetryer to execute the function passed in, until success or reaching +// max number of retry attempts. +func Run(ctx context.Context, f func() error) { + retryer := NewRetryer() + forLoopCnt := 0 + var err error + for { + err = f() + if bo, shouldRetry := retryer.Retry(err); shouldRetry { + if grpclog.V(1) { + grpclog.Infof("will attempt retry: %v", err) + } + if ctx.Err() != nil { + if grpclog.V(1) { + grpclog.Infof("exit retry loop due to context error: %v", ctx.Err()) + } + break + } + if errSleep := Sleep(ctx, bo); errSleep != nil { + if grpclog.V(1) { + grpclog.Infof("exit retry loop due to sleep error: %v", errSleep) + } + break + } + // This shouldn't happen, just make sure we are not stuck in the for loops. 
+ forLoopCnt++ + if forLoopCnt > maxRetryForLoops { + if grpclog.V(1) { + grpclog.Infof("exit the for loop after too many retries") + } + break + } + continue + } + if grpclog.V(1) { + grpclog.Infof("retry conditions not met, exit the loop") + } + break + } +} diff --git a/vendor/github.com/google/s2a-go/s2a.go b/vendor/github.com/google/s2a-go/s2a.go index 1c1349de4a..5ecb06f930 100644 --- a/vendor/github.com/google/s2a-go/s2a.go +++ b/vendor/github.com/google/s2a-go/s2a.go @@ -35,6 +35,7 @@ import ( "github.com/google/s2a-go/internal/handshaker/service" "github.com/google/s2a-go/internal/tokenmanager" "github.com/google/s2a-go/internal/v2" + "github.com/google/s2a-go/retry" "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" @@ -111,7 +112,7 @@ func NewClientCreds(opts *ClientOptions) (credentials.TransportCredentials, erro if opts.FallbackOpts != nil && opts.FallbackOpts.FallbackClientHandshakeFunc != nil { fallbackFunc = opts.FallbackOpts.FallbackClientHandshakeFunc } - return v2.NewClientCreds(opts.S2AAddress, localIdentity, verificationMode, fallbackFunc, opts.getS2AStream, opts.serverAuthorizationPolicy) + return v2.NewClientCreds(opts.S2AAddress, opts.TransportCreds, localIdentity, verificationMode, fallbackFunc, opts.getS2AStream, opts.serverAuthorizationPolicy) } // NewServerCreds returns a server-side transport credentials object that uses @@ -146,7 +147,7 @@ func NewServerCreds(opts *ServerOptions) (credentials.TransportCredentials, erro }, nil } verificationMode := getVerificationMode(opts.VerificationMode) - return v2.NewServerCreds(opts.S2AAddress, localIdentities, verificationMode, opts.getS2AStream) + return v2.NewServerCreds(opts.S2AAddress, opts.TransportCreds, localIdentities, verificationMode, opts.getS2AStream) } // ClientHandshake initiates a client-side TLS handshake using the S2A. @@ -155,17 +156,17 @@ func (c *s2aTransportCreds) ClientHandshake(ctx context.Context, serverAuthority return nil, nil, errors.New("client handshake called using server transport credentials") } + var cancel context.CancelFunc + ctx, cancel = context.WithCancel(ctx) + defer cancel() + // Connect to the S2A. - hsConn, err := service.Dial(c.s2aAddr) + hsConn, err := service.Dial(ctx, c.s2aAddr, nil) if err != nil { grpclog.Infof("Failed to connect to S2A: %v", err) return nil, nil, err } - var cancel context.CancelFunc - ctx, cancel = context.WithCancel(ctx) - defer cancel() - opts := &handshaker.ClientHandshakerOptions{ MinTLSVersion: c.minTLSVersion, MaxTLSVersion: c.maxTLSVersion, @@ -203,16 +204,16 @@ func (c *s2aTransportCreds) ServerHandshake(rawConn net.Conn) (net.Conn, credent return nil, nil, errors.New("server handshake called using client transport credentials") } + ctx, cancel := context.WithTimeout(context.Background(), defaultTimeout) + defer cancel() + // Connect to the S2A. 
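Callers in this diff wrap their S2A calls (createStream, the TLS-config fetches, the handshakes) in the new retry.Run helper. A self-contained sketch of that pattern, with a stand-in flaky function since the real calls need a running S2A server:

```go
package main

import (
	"context"
	"errors"
	"log"
	"time"

	"github.com/google/s2a-go/retry"
)

// flakyDial is a placeholder for the S2A calls wrapped by retry.Run in this
// diff (createStream, GetTLSConfigurationForClient, ClientHandshake, ...).
func flakyDial(attempt *int) error {
	*attempt++
	if *attempt < 3 {
		return errors.New("transient failure")
	}
	return nil
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 6*time.Second)
	defer cancel()

	var attempt int
	var err error
	// Run re-invokes the closure with exponential backoff until it returns
	// nil, maxRetryAttempts is reached, or ctx expires; the last error is
	// left in err for the caller to inspect.
	retry.Run(ctx, func() error {
		err = flakyDial(&attempt)
		return err
	})
	if err != nil {
		log.Fatalf("giving up after %d attempts: %v", attempt, err)
	}
	log.Printf("succeeded on attempt %d", attempt)
}
```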
- hsConn, err := service.Dial(c.s2aAddr) + hsConn, err := service.Dial(ctx, c.s2aAddr, nil) if err != nil { grpclog.Infof("Failed to connect to S2A: %v", err) return nil, nil, err } - ctx, cancel := context.WithTimeout(context.Background(), defaultTimeout) - defer cancel() - opts := &handshaker.ServerHandshakerOptions{ MinTLSVersion: c.minTLSVersion, MaxTLSVersion: c.maxTLSVersion, @@ -312,6 +313,7 @@ func NewTLSClientConfigFactory(opts *ClientOptions) (TLSClientConfigFactory, err grpclog.Infof("Access token manager not initialized: %v", err) return &s2aTLSClientConfigFactory{ s2av2Address: opts.S2AAddress, + transportCreds: opts.TransportCreds, tokenManager: nil, verificationMode: getVerificationMode(opts.VerificationMode), serverAuthorizationPolicy: opts.serverAuthorizationPolicy, @@ -319,6 +321,7 @@ func NewTLSClientConfigFactory(opts *ClientOptions) (TLSClientConfigFactory, err } return &s2aTLSClientConfigFactory{ s2av2Address: opts.S2AAddress, + transportCreds: opts.TransportCreds, tokenManager: tokenManager, verificationMode: getVerificationMode(opts.VerificationMode), serverAuthorizationPolicy: opts.serverAuthorizationPolicy, @@ -327,6 +330,7 @@ func NewTLSClientConfigFactory(opts *ClientOptions) (TLSClientConfigFactory, err type s2aTLSClientConfigFactory struct { s2av2Address string + transportCreds credentials.TransportCredentials tokenManager tokenmanager.AccessTokenManager verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode serverAuthorizationPolicy []byte @@ -338,7 +342,7 @@ func (f *s2aTLSClientConfigFactory) Build( if opts != nil && opts.ServerName != "" { serverName = opts.ServerName } - return v2.NewClientTLSConfig(ctx, f.s2av2Address, f.tokenManager, f.verificationMode, serverName, f.serverAuthorizationPolicy) + return v2.NewClientTLSConfig(ctx, f.s2av2Address, f.transportCreds, f.tokenManager, f.verificationMode, serverName, f.serverAuthorizationPolicy) } func getVerificationMode(verificationMode VerificationModeType) s2av2pb.ValidatePeerCertificateChainReq_VerificationMode { @@ -390,9 +394,15 @@ func NewS2ADialTLSContextFunc(opts *ClientOptions) func(ctx context.Context, net } timeoutCtx, cancel := context.WithTimeout(ctx, v2.GetS2ATimeout()) defer cancel() - s2aTLSConfig, err := factory.Build(timeoutCtx, &TLSClientConfigOptions{ - ServerName: serverName, - }) + + var s2aTLSConfig *tls.Config + retry.Run(timeoutCtx, + func() error { + s2aTLSConfig, err = factory.Build(timeoutCtx, &TLSClientConfigOptions{ + ServerName: serverName, + }) + return err + }) if err != nil { grpclog.Infof("error building S2A TLS config: %v", err) return fallback(err) @@ -401,7 +411,12 @@ func NewS2ADialTLSContextFunc(opts *ClientOptions) func(ctx context.Context, net s2aDialer := &tls.Dialer{ Config: s2aTLSConfig, } - c, err := s2aDialer.DialContext(ctx, network, addr) + var c net.Conn + retry.Run(timeoutCtx, + func() error { + c, err = s2aDialer.DialContext(timeoutCtx, network, addr) + return err + }) if err != nil { grpclog.Infof("error dialing with S2A to %s: %v", addr, err) return fallback(err) diff --git a/vendor/github.com/google/s2a-go/s2a_options.go b/vendor/github.com/google/s2a-go/s2a_options.go index 94feafb9cf..fcdbc1621b 100644 --- a/vendor/github.com/google/s2a-go/s2a_options.go +++ b/vendor/github.com/google/s2a-go/s2a_options.go @@ -26,6 +26,7 @@ import ( "github.com/google/s2a-go/fallback" "github.com/google/s2a-go/stream" + "google.golang.org/grpc/credentials" s2apb "github.com/google/s2a-go/internal/proto/common_go_proto" ) @@ -92,6 +93,9 @@ type 
ClientOptions struct { LocalIdentity Identity // S2AAddress is the address of the S2A. S2AAddress string + // Optional transport credentials. + // If set, this will be used for the gRPC connection to the S2A server. + TransportCreds credentials.TransportCredentials // EnsureProcessSessionTickets waits for all session tickets to be sent to // S2A before a process completes. // @@ -173,6 +177,9 @@ type ServerOptions struct { LocalIdentities []Identity // S2AAddress is the address of the S2A. S2AAddress string + // Optional transport credentials. + // If set, this will be used for the gRPC connection to the S2A server. + TransportCreds credentials.TransportCredentials // If true, enables the use of legacy S2Av1. EnableLegacyMode bool // VerificationMode specifies the mode that S2A must use to verify the diff --git a/vendor/github.com/googleapis/enterprise-certificate-proxy/client/client.go b/vendor/github.com/googleapis/enterprise-certificate-proxy/client/client.go index b3283b8158..ea5beb5aa7 100644 --- a/vendor/github.com/googleapis/enterprise-certificate-proxy/client/client.go +++ b/vendor/github.com/googleapis/enterprise-certificate-proxy/client/client.go @@ -35,6 +35,8 @@ import ( const signAPI = "EnterpriseCertSigner.Sign" const certificateChainAPI = "EnterpriseCertSigner.CertificateChain" const publicKeyAPI = "EnterpriseCertSigner.Public" +const encryptAPI = "EnterpriseCertSigner.Encrypt" +const decryptAPI = "EnterpriseCertSigner.Decrypt" // A Connection wraps a pair of unidirectional streams as an io.ReadWriteCloser. type Connection struct { @@ -54,13 +56,28 @@ func (c *Connection) Close() error { func init() { gob.Register(crypto.SHA256) + gob.Register(crypto.SHA384) + gob.Register(crypto.SHA512) gob.Register(&rsa.PSSOptions{}) + gob.Register(&rsa.OAEPOptions{}) } -// SignArgs contains arguments to a crypto Signer.Sign method. +// SignArgs contains arguments for a Sign API call. type SignArgs struct { Digest []byte // The content to sign. - Opts crypto.SignerOpts // Options for signing, such as Hash identifier. + Opts crypto.SignerOpts // Options for signing. Must implement HashFunc(). +} + +// EncryptArgs contains arguments for an Encrypt API call. +type EncryptArgs struct { + Plaintext []byte // The plaintext to encrypt. + Opts any // Options for encryption. Ex: an instance of crypto.Hash. +} + +// DecryptArgs contains arguments to for a Decrypt API call. +type DecryptArgs struct { + Ciphertext []byte // The ciphertext to decrypt. + Opts crypto.DecrypterOpts // Options for decryption. Ex: an instance of *rsa.OAEPOptions. } // Key implements credential.Credential by holding the executed signer subprocess. @@ -98,7 +115,7 @@ func (k *Key) Public() crypto.PublicKey { return k.publicKey } -// Sign signs a message digest, using the specified signer options. +// Sign signs a message digest, using the specified signer opts. Implements crypto.Signer interface. func (k *Key) Sign(_ io.Reader, digest []byte, opts crypto.SignerOpts) (signed []byte, err error) { if opts != nil && opts.HashFunc() != 0 && len(digest) != opts.HashFunc().Size() { return nil, fmt.Errorf("Digest length of %v bytes does not match Hash function size of %v bytes", len(digest), opts.HashFunc().Size()) @@ -107,6 +124,18 @@ func (k *Key) Sign(_ io.Reader, digest []byte, opts crypto.SignerOpts) (signed [ return } +// Encrypt encrypts a plaintext msg into ciphertext, using the specified encrypt opts. 
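The new optional TransportCreds field on ClientOptions and ServerOptions supplies the credentials for the gRPC connection to the S2A server itself. A hedged client-side sketch; the addresses are placeholders, and passing insecure credentials simply mirrors the previous default behaviour:

```go
package main

import (
	"context"
	"log"

	"github.com/google/s2a-go"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// TransportCreds is the field added in this diff; everything else is
	// the existing ClientOptions surface.
	creds, err := s2a.NewClientCreds(&s2a.ClientOptions{
		S2AAddress:     "s2a.example.internal:80", // placeholder S2A address
		TransportCreds: insecure.NewCredentials(),
	})
	if err != nil {
		log.Fatalf("NewClientCreds: %v", err)
	}

	// Use the resulting credentials for the application's own connection.
	conn, err := grpc.DialContext(context.Background(), "service.example.com:443",
		grpc.WithTransportCredentials(creds))
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}
```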
+func (k *Key) Encrypt(_ io.Reader, msg []byte, opts any) (ciphertext []byte, err error) { + err = k.client.Call(encryptAPI, EncryptArgs{Plaintext: msg, Opts: opts}, &ciphertext) + return +} + +// Decrypt decrypts a ciphertext msg into plaintext, using the specified decrypter opts. Implements crypto.Decrypter interface. +func (k *Key) Decrypt(_ io.Reader, msg []byte, opts crypto.DecrypterOpts) (plaintext []byte, err error) { + err = k.client.Call(decryptAPI, DecryptArgs{Ciphertext: msg, Opts: opts}, &plaintext) + return +} + // ErrCredUnavailable is a sentinel error that indicates ECP Cred is unavailable, // possibly due to missing config or missing binary path. var ErrCredUnavailable = errors.New("Cred is unavailable") @@ -120,7 +149,12 @@ var ErrCredUnavailable = errors.New("Cred is unavailable") // The config file also specifies which certificate the signer should use. func Cred(configFilePath string) (*Key, error) { if configFilePath == "" { - configFilePath = util.GetDefaultConfigFilePath() + envFilePath := util.GetConfigFilePathFromEnv() + if envFilePath != "" { + configFilePath = envFilePath + } else { + configFilePath = util.GetDefaultConfigFilePath() + } } enterpriseCertSignerPath, err := util.LoadSignerBinaryPath(configFilePath) if err != nil { diff --git a/vendor/github.com/googleapis/enterprise-certificate-proxy/client/util/util.go b/vendor/github.com/googleapis/enterprise-certificate-proxy/client/util/util.go index 1640ec1c9e..f374a7f55f 100644 --- a/vendor/github.com/googleapis/enterprise-certificate-proxy/client/util/util.go +++ b/vendor/github.com/googleapis/enterprise-certificate-proxy/client/util/util.go @@ -22,6 +22,7 @@ import ( "os/user" "path/filepath" "runtime" + "strings" ) const configFileName = "certificate_config.json" @@ -63,6 +64,9 @@ func LoadSignerBinaryPath(configFilePath string) (path string, err error) { if signerBinaryPath == "" { return "", ErrConfigUnavailable } + + signerBinaryPath = strings.ReplaceAll(signerBinaryPath, "~", guessHomeDir()) + signerBinaryPath = strings.ReplaceAll(signerBinaryPath, "$HOME", guessHomeDir()) return signerBinaryPath, nil } @@ -89,3 +93,8 @@ func getDefaultConfigFileDirectory() (directory string) { func GetDefaultConfigFilePath() (path string) { return filepath.Join(getDefaultConfigFileDirectory(), configFileName) } + +// GetConfigFilePathFromEnv returns the path associated with environment variable GOOGLE_API_CERTIFICATE_CONFIG +func GetConfigFilePathFromEnv() (path string) { + return os.Getenv("GOOGLE_API_CERTIFICATE_CONFIG") +} diff --git a/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json b/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json index 91d60a809f..ef508417b3 100644 --- a/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json +++ b/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json @@ -1,3 +1,3 @@ { - "v2": "2.11.0" + "v2": "2.12.0" } diff --git a/vendor/github.com/googleapis/gax-go/v2/CHANGES.md b/vendor/github.com/googleapis/gax-go/v2/CHANGES.md index e17b196f6c..ae71149470 100644 --- a/vendor/github.com/googleapis/gax-go/v2/CHANGES.md +++ b/vendor/github.com/googleapis/gax-go/v2/CHANGES.md @@ -1,5 +1,13 @@ # Changelog +## [2.12.0](https://github.com/googleapis/gax-go/compare/v2.11.0...v2.12.0) (2023-06-26) + + +### Features + +* **v2/callctx:** add new callctx package ([#291](https://github.com/googleapis/gax-go/issues/291)) ([11503ed](https://github.com/googleapis/gax-go/commit/11503ed98df4ae1bbdedf91ff64d47e63f187d68)) +* **v2:** add 
BuildHeaders and InsertMetadataIntoOutgoingContext to header ([#290](https://github.com/googleapis/gax-go/issues/290)) ([6a4b89f](https://github.com/googleapis/gax-go/commit/6a4b89f5551a40262e7c3caf2e1bdc7321b76ea1)) + ## [2.11.0](https://github.com/googleapis/gax-go/compare/v2.10.0...v2.11.0) (2023-06-13) diff --git a/vendor/github.com/googleapis/gax-go/v2/callctx/callctx.go b/vendor/github.com/googleapis/gax-go/v2/callctx/callctx.go new file mode 100644 index 0000000000..af15fb5827 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/callctx/callctx.go @@ -0,0 +1,74 @@ +// Copyright 2023, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Package callctx provides helpers for storing and retrieving values out of +// [context.Context]. These values are used by our client libraries in various +// ways across the stack. +package callctx + +import ( + "context" + "fmt" +) + +const ( + headerKey = contextKey("header") +) + +// contextKey is a private type used to store/retrieve context values. +type contextKey string + +// HeadersFromContext retrieves headers set from [SetHeaders]. These headers +// can then be cast to http.Header or metadata.MD to send along on requests. +func HeadersFromContext(ctx context.Context) map[string][]string { + m, ok := ctx.Value(headerKey).(map[string][]string) + if !ok { + return nil + } + return m +} + +// SetHeaders stores key value pairs in the returned context that can later +// be retrieved by [HeadersFromContext]. Values stored in this manner will +// automatically be retrieved by client libraries and sent as outgoing headers +// on all requests. keyvals should have a corresponding value for every key +// provided. If there is an odd number of keyvals this method will panic. 
+func SetHeaders(ctx context.Context, keyvals ...string) context.Context { + if len(keyvals)%2 != 0 { + panic(fmt.Sprintf("callctx: an even number of key value pairs must be provided, got %d", len(keyvals))) + } + h, ok := ctx.Value(headerKey).(map[string][]string) + if !ok { + h = make(map[string][]string) + } + for i := 0; i < len(keyvals); i = i + 2 { + h[keyvals[i]] = append(h[keyvals[i]], keyvals[i+1]) + } + return context.WithValue(ctx, headerKey, h) +} diff --git a/vendor/github.com/googleapis/gax-go/v2/header.go b/vendor/github.com/googleapis/gax-go/v2/header.go index 6488461f4d..453fab7ecc 100644 --- a/vendor/github.com/googleapis/gax-go/v2/header.go +++ b/vendor/github.com/googleapis/gax-go/v2/header.go @@ -31,9 +31,15 @@ package gax import ( "bytes" + "context" + "fmt" + "net/http" "runtime" "strings" "unicode" + + "github.com/googleapis/gax-go/v2/callctx" + "google.golang.org/grpc/metadata" ) var ( @@ -117,3 +123,46 @@ func XGoogHeader(keyval ...string) string { } return buf.String()[1:] } + +// InsertMetadataIntoOutgoingContext is for use by the Google Cloud Libraries +// only. +// +// InsertMetadataIntoOutgoingContext returns a new context that merges the +// provided keyvals metadata pairs with any existing metadata/headers in the +// provided context. keyvals should have a corresponding value for every key +// provided. If there is an odd number of keyvals this method will panic. +// Existing values for keys will not be overwritten, instead provided values +// will be appended to the list of existing values. +func InsertMetadataIntoOutgoingContext(ctx context.Context, keyvals ...string) context.Context { + return metadata.NewOutgoingContext(ctx, insertMetadata(ctx, keyvals...)) +} + +// BuildHeaders is for use by the Google Cloud Libraries only. +// +// BuildHeaders returns a new http.Header that merges the provided +// keyvals header pairs with any existing metadata/headers in the provided +// context. keyvals should have a corresponding value for every key provided. +// If there is an odd number of keyvals this method will panic. +// Existing values for keys will not be overwritten, instead provided values +// will be appended to the list of existing values. +func BuildHeaders(ctx context.Context, keyvals ...string) http.Header { + return http.Header(insertMetadata(ctx, keyvals...)) +} + +func insertMetadata(ctx context.Context, keyvals ...string) metadata.MD { + if len(keyvals)%2 != 0 { + panic(fmt.Sprintf("gax: an even number of key value pairs must be provided, got %d", len(keyvals))) + } + out, ok := metadata.FromOutgoingContext(ctx) + if !ok { + out = metadata.MD(make(map[string][]string)) + } + headers := callctx.HeadersFromContext(ctx) + for k, v := range headers { + out[k] = append(out[k], v...) + } + for i := 0; i < len(keyvals); i = i + 2 { + out[keyvals[i]] = append(out[keyvals[i]], keyvals[i+1]) + } + return out +} diff --git a/vendor/github.com/googleapis/gax-go/v2/internal/version.go b/vendor/github.com/googleapis/gax-go/v2/internal/version.go index 374dcdb115..7425b5ffbb 100644 --- a/vendor/github.com/googleapis/gax-go/v2/internal/version.go +++ b/vendor/github.com/googleapis/gax-go/v2/internal/version.go @@ -30,4 +30,4 @@ package internal // Version is the current tagged release of the library. 
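The new callctx package and the gax header helpers above are designed to work together: headers stashed on the context with SetHeaders are later merged into outgoing gRPC metadata or HTTP headers. A small sketch of that round trip; the header names and values are invented for illustration:

```go
package main

import (
	"context"
	"fmt"

	gax "github.com/googleapis/gax-go/v2"
	"github.com/googleapis/gax-go/v2/callctx"
)

func main() {
	// Stash a header on the context; client libraries pick these up
	// automatically when building outgoing requests.
	ctx := callctx.SetHeaders(context.Background(), "x-goog-request-params", "parent=projects/demo")

	// For HTTP transports: merge context headers with extra key/value pairs.
	hdrs := gax.BuildHeaders(ctx, "x-goog-api-client", "example/1.0")
	fmt.Println(hdrs)

	// For gRPC transports: the same merge, placed into outgoing metadata.
	grpcCtx := gax.InsertMetadataIntoOutgoingContext(ctx, "x-goog-api-client", "example/1.0")
	_ = grpcCtx // pass this context to the RPC call
}
```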
-const Version = "2.11.0" +const Version = "2.12.0" diff --git a/vendor/github.com/gordonklaus/ineffassign/pkg/ineffassign/ineffassign.go b/vendor/github.com/gordonklaus/ineffassign/pkg/ineffassign/ineffassign.go index c7b4fa9784..f9dece8f2b 100644 --- a/vendor/github.com/gordonklaus/ineffassign/pkg/ineffassign/ineffassign.go +++ b/vendor/github.com/gordonklaus/ineffassign/pkg/ineffassign/ineffassign.go @@ -60,6 +60,7 @@ type builder struct { block *block vars map[*ast.Object]*variable results []*ast.FieldList + defers []bool breaks branchStack continues branchStack gotos branchStack @@ -181,6 +182,12 @@ func (bld *builder) Visit(n ast.Node) ast.Visitor { } brek.setDestination(bld.newBlock(exits...)) bld.breaks.pop() + case *ast.DeferStmt: + bld.walk(n.Call.Fun) + for _, a := range n.Call.Args { + bld.walk(a) + } + bld.defers[len(bld.defers)-1] = true case *ast.LabeledStmt: bld.gotos.get(n.Label).setDestination(bld.newBlock(bld.block)) bld.labelStmt = n @@ -360,6 +367,7 @@ func (bld *builder) fun(typ *ast.FuncType, body *ast.BlockStmt) { v.fundept++ } bld.results = append(bld.results, typ.Results) + bld.defers = append(bld.defers, false) b := bld.block bld.newBlock() @@ -369,6 +377,7 @@ func (bld *builder) fun(typ *ast.FuncType, body *ast.BlockStmt) { bld.block = b bld.results = bld.results[:len(bld.results)-1] + bld.defers = bld.defers[:len(bld.defers)-1] for _, v := range bld.vars { v.fundept-- } @@ -422,8 +431,11 @@ func (bld *builder) swtch(stmt ast.Stmt, cases []ast.Stmt) { bld.breaks.pop() } -// An operation that might panic marks named function results as used. +// If an operation might panic and be recovered, mark named function results as used. func (bld *builder) maybePanic() { + if len(bld.defers) == 0 || !bld.defers[len(bld.defers)-1] { + return + } if len(bld.results) == 0 { return } diff --git a/vendor/github.com/hexops/gotextdiff/LICENSE b/vendor/github.com/hexops/gotextdiff/LICENSE new file mode 100644 index 0000000000..6a66aea5ea --- /dev/null +++ b/vendor/github.com/hexops/gotextdiff/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/hexops/gotextdiff/README.md b/vendor/github.com/hexops/gotextdiff/README.md new file mode 100644 index 0000000000..bfd49a0c97 --- /dev/null +++ b/vendor/github.com/hexops/gotextdiff/README.md @@ -0,0 +1,54 @@ +# gotextdiff - unified text diffing in Go Hexops logo + +This is a copy of the Go text diffing packages that [the official Go language server gopls uses internally](https://github.com/golang/tools/tree/master/internal/lsp/diff) to generate unified diffs. + +If you've previously tried to generate unified text diffs in Go (like the ones you see in Git and on GitHub), you may have found [github.com/sergi/go-diff](https://github.com/sergi/go-diff) which is a Go port of Neil Fraser's google-diff-match-patch code - however it [does not support unified diffs](https://github.com/sergi/go-diff/issues/57). + +This is arguably one of the best (and most maintained) unified text diffing packages in Go as of at least 2020. + +(All credit goes to [the Go authors](http://tip.golang.org/AUTHORS), I am merely re-publishing their work so others can use it.) + +## Example usage + +Import the packages: + +```Go +import ( + "github.com/hexops/gotextdiff" + "github.com/hexops/gotextdiff/myers" +) +``` + +Assuming you want to diff `a.txt` and `b.txt`, whose contents are stored in `aString` and `bString` then: + +```Go +edits := myers.ComputeEdits(span.URIFromPath("a.txt"), aString, bString) +diff := fmt.Sprint(gotextdiff.ToUnified("a.txt", "b.txt", aString, edits)) +``` + +`diff` will be a string like: + +```diff +--- a.txt ++++ b.txt +@@ -1,13 +1,28 @@ +-foo ++bar +``` + +## API compatability + +We will publish a new major version anytime the API changes in a backwards-incompatible way. Because the upstream is not being developed with this being a public package in mind, API breakages may occur more often than in other Go packages (but you can always continue using the old version thanks to Go modules.) + +## Alternatives + +- [github.com/andreyvit/diff](https://github.com/andreyvit/diff): Quick'n'easy string diffing functions for Golang based on github.com/sergi/go-diff. +- [github.com/kylelemons/godebug/diff](https://github.com/kylelemons/godebug/tree/master/diff): implements a linewise diff algorithm ([inactive](https://github.com/kylelemons/godebug/issues/22#issuecomment-524573477)). + +## Contributing + +We will only accept changes made [upstream](https://github.com/golang/tools/tree/master/internal/lsp/diff), please send any contributions to the upstream instead! Compared to the upstream, only import paths will be modified (to be non-`internal` so they are importable.) The only thing we add here is this README. + +## License + +See https://github.com/golang/tools/blob/master/LICENSE diff --git a/vendor/github.com/hexops/gotextdiff/diff.go b/vendor/github.com/hexops/gotextdiff/diff.go new file mode 100644 index 0000000000..53e499bc0c --- /dev/null +++ b/vendor/github.com/hexops/gotextdiff/diff.go @@ -0,0 +1,159 @@ +// Copyright 2019 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// package gotextdiff supports a pluggable diff algorithm. +package gotextdiff + +import ( + "sort" + "strings" + + "github.com/hexops/gotextdiff/span" +) + +// TextEdit represents a change to a section of a document. +// The text within the specified span should be replaced by the supplied new text. +type TextEdit struct { + Span span.Span + NewText string +} + +// ComputeEdits is the type for a function that produces a set of edits that +// convert from the before content to the after content. +type ComputeEdits func(uri span.URI, before, after string) []TextEdit + +// SortTextEdits attempts to order all edits by their starting points. +// The sort is stable so that edits with the same starting point will not +// be reordered. +func SortTextEdits(d []TextEdit) { + // Use a stable sort to maintain the order of edits inserted at the same position. + sort.SliceStable(d, func(i int, j int) bool { + return span.Compare(d[i].Span, d[j].Span) < 0 + }) +} + +// ApplyEdits applies the set of edits to the before and returns the resulting +// content. +// It may panic or produce garbage if the edits are not valid for the provided +// before content. +func ApplyEdits(before string, edits []TextEdit) string { + // Preconditions: + // - all of the edits apply to before + // - and all the spans for each TextEdit have the same URI + if len(edits) == 0 { + return before + } + _, edits, _ = prepareEdits(before, edits) + after := strings.Builder{} + last := 0 + for _, edit := range edits { + start := edit.Span.Start().Offset() + if start > last { + after.WriteString(before[last:start]) + last = start + } + after.WriteString(edit.NewText) + last = edit.Span.End().Offset() + } + if last < len(before) { + after.WriteString(before[last:]) + } + return after.String() +} + +// LineEdits takes a set of edits and expands and merges them as necessary +// to ensure that there are only full line edits left when it is done. 
+func LineEdits(before string, edits []TextEdit) []TextEdit { + if len(edits) == 0 { + return nil + } + c, edits, partial := prepareEdits(before, edits) + if partial { + edits = lineEdits(before, c, edits) + } + return edits +} + +// prepareEdits returns a sorted copy of the edits +func prepareEdits(before string, edits []TextEdit) (*span.TokenConverter, []TextEdit, bool) { + partial := false + c := span.NewContentConverter("", []byte(before)) + copied := make([]TextEdit, len(edits)) + for i, edit := range edits { + edit.Span, _ = edit.Span.WithAll(c) + copied[i] = edit + partial = partial || + edit.Span.Start().Offset() >= len(before) || + edit.Span.Start().Column() > 1 || edit.Span.End().Column() > 1 + } + SortTextEdits(copied) + return c, copied, partial +} + +// lineEdits rewrites the edits to always be full line edits +func lineEdits(before string, c *span.TokenConverter, edits []TextEdit) []TextEdit { + adjusted := make([]TextEdit, 0, len(edits)) + current := TextEdit{Span: span.Invalid} + for _, edit := range edits { + if current.Span.IsValid() && edit.Span.Start().Line() <= current.Span.End().Line() { + // overlaps with the current edit, need to combine + // first get the gap from the previous edit + gap := before[current.Span.End().Offset():edit.Span.Start().Offset()] + // now add the text of this edit + current.NewText += gap + edit.NewText + // and then adjust the end position + current.Span = span.New(current.Span.URI(), current.Span.Start(), edit.Span.End()) + } else { + // does not overlap, add previous run (if there is one) + adjusted = addEdit(before, adjusted, current) + // and then remember this edit as the start of the next run + current = edit + } + } + // add the current pending run if there is one + return addEdit(before, adjusted, current) +} + +func addEdit(before string, edits []TextEdit, edit TextEdit) []TextEdit { + if !edit.Span.IsValid() { + return edits + } + // if edit is partial, expand it to full line now + start := edit.Span.Start() + end := edit.Span.End() + if start.Column() > 1 { + // prepend the text and adjust to start of line + delta := start.Column() - 1 + start = span.NewPoint(start.Line(), 1, start.Offset()-delta) + edit.Span = span.New(edit.Span.URI(), start, end) + edit.NewText = before[start.Offset():start.Offset()+delta] + edit.NewText + } + if start.Offset() >= len(before) && start.Line() > 1 && before[len(before)-1] != '\n' { + // after end of file that does not end in eol, so join to last line of file + // to do this we need to know where the start of the last line was + eol := strings.LastIndex(before, "\n") + if eol < 0 { + // file is one non terminated line + eol = 0 + } + delta := len(before) - eol + start = span.NewPoint(start.Line()-1, 1, start.Offset()-delta) + edit.Span = span.New(edit.Span.URI(), start, end) + edit.NewText = before[start.Offset():start.Offset()+delta] + edit.NewText + } + if end.Column() > 1 { + remains := before[end.Offset():] + eol := strings.IndexRune(remains, '\n') + if eol < 0 { + eol = len(remains) + } else { + eol++ + } + end = span.NewPoint(end.Line()+1, 1, end.Offset()+eol) + edit.Span = span.New(edit.Span.URI(), start, end) + edit.NewText = edit.NewText + remains[:eol] + } + edits = append(edits, edit) + return edits +} diff --git a/vendor/github.com/hexops/gotextdiff/myers/diff.go b/vendor/github.com/hexops/gotextdiff/myers/diff.go new file mode 100644 index 0000000000..5e3e923648 --- /dev/null +++ b/vendor/github.com/hexops/gotextdiff/myers/diff.go @@ -0,0 +1,205 @@ +// Copyright 2019 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package myers implements the Myers diff algorithm. +package myers + +import ( + "strings" + + diff "github.com/hexops/gotextdiff" + "github.com/hexops/gotextdiff/span" +) + +// Sources: +// https://blog.jcoglan.com/2017/02/17/the-myers-diff-algorithm-part-3/ +// https://www.codeproject.com/Articles/42279/%2FArticles%2F42279%2FInvestigating-Myers-diff-algorithm-Part-1-of-2 + +func ComputeEdits(uri span.URI, before, after string) []diff.TextEdit { + ops := operations(splitLines(before), splitLines(after)) + edits := make([]diff.TextEdit, 0, len(ops)) + for _, op := range ops { + s := span.New(uri, span.NewPoint(op.I1+1, 1, 0), span.NewPoint(op.I2+1, 1, 0)) + switch op.Kind { + case diff.Delete: + // Delete: unformatted[i1:i2] is deleted. + edits = append(edits, diff.TextEdit{Span: s}) + case diff.Insert: + // Insert: formatted[j1:j2] is inserted at unformatted[i1:i1]. + if content := strings.Join(op.Content, ""); content != "" { + edits = append(edits, diff.TextEdit{Span: s, NewText: content}) + } + } + } + return edits +} + +type operation struct { + Kind diff.OpKind + Content []string // content from b + I1, I2 int // indices of the line in a + J1 int // indices of the line in b, J2 implied by len(Content) +} + +// operations returns the list of operations to convert a into b, consolidating +// operations for multiple lines and not including equal lines. +func operations(a, b []string) []*operation { + if len(a) == 0 && len(b) == 0 { + return nil + } + + trace, offset := shortestEditSequence(a, b) + snakes := backtrack(trace, len(a), len(b), offset) + + M, N := len(a), len(b) + + var i int + solution := make([]*operation, len(a)+len(b)) + + add := func(op *operation, i2, j2 int) { + if op == nil { + return + } + op.I2 = i2 + if op.Kind == diff.Insert { + op.Content = b[op.J1:j2] + } + solution[i] = op + i++ + } + x, y := 0, 0 + for _, snake := range snakes { + if len(snake) < 2 { + continue + } + var op *operation + // delete (horizontal) + for snake[0]-snake[1] > x-y { + if op == nil { + op = &operation{ + Kind: diff.Delete, + I1: x, + J1: y, + } + } + x++ + if x == M { + break + } + } + add(op, x, y) + op = nil + // insert (vertical) + for snake[0]-snake[1] < x-y { + if op == nil { + op = &operation{ + Kind: diff.Insert, + I1: x, + J1: y, + } + } + y++ + } + add(op, x, y) + op = nil + // equal (diagonal) + for x < snake[0] { + x++ + y++ + } + if x >= M && y >= N { + break + } + } + return solution[:i] +} + +// backtrack uses the trace for the edit sequence computation and returns the +// "snakes" that make up the solution. A "snake" is a single deletion or +// insertion followed by zero or diagonals. +func backtrack(trace [][]int, x, y, offset int) [][]int { + snakes := make([][]int, len(trace)) + d := len(trace) - 1 + for ; x > 0 && y > 0 && d > 0; d-- { + V := trace[d] + if len(V) == 0 { + continue + } + snakes[d] = []int{x, y} + + k := x - y + + var kPrev int + if k == -d || (k != d && V[k-1+offset] < V[k+1+offset]) { + kPrev = k + 1 + } else { + kPrev = k - 1 + } + + x = V[kPrev+offset] + y = x - kPrev + } + if x < 0 || y < 0 { + return snakes + } + snakes[d] = []int{x, y} + return snakes +} + +// shortestEditSequence returns the shortest edit sequence that converts a into b. 
+func shortestEditSequence(a, b []string) ([][]int, int) { + M, N := len(a), len(b) + V := make([]int, 2*(N+M)+1) + offset := N + M + trace := make([][]int, N+M+1) + + // Iterate through the maximum possible length of the SES (N+M). + for d := 0; d <= N+M; d++ { + copyV := make([]int, len(V)) + // k lines are represented by the equation y = x - k. We move in + // increments of 2 because end points for even d are on even k lines. + for k := -d; k <= d; k += 2 { + // At each point, we either go down or to the right. We go down if + // k == -d, and we go to the right if k == d. We also prioritize + // the maximum x value, because we prefer deletions to insertions. + var x int + if k == -d || (k != d && V[k-1+offset] < V[k+1+offset]) { + x = V[k+1+offset] // down + } else { + x = V[k-1+offset] + 1 // right + } + + y := x - k + + // Diagonal moves while we have equal contents. + for x < M && y < N && a[x] == b[y] { + x++ + y++ + } + + V[k+offset] = x + + // Return if we've exceeded the maximum values. + if x == M && y == N { + // Makes sure to save the state of the array before returning. + copy(copyV, V) + trace[d] = copyV + return trace, offset + } + } + + // Save the state of the array. + copy(copyV, V) + trace[d] = copyV + } + return nil, 0 +} + +func splitLines(text string) []string { + lines := strings.SplitAfter(text, "\n") + if lines[len(lines)-1] == "" { + lines = lines[:len(lines)-1] + } + return lines +} diff --git a/vendor/github.com/hexops/gotextdiff/span/parse.go b/vendor/github.com/hexops/gotextdiff/span/parse.go new file mode 100644 index 0000000000..aa17c84ec1 --- /dev/null +++ b/vendor/github.com/hexops/gotextdiff/span/parse.go @@ -0,0 +1,100 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package span + +import ( + "strconv" + "strings" + "unicode/utf8" +) + +// Parse returns the location represented by the input. +// Only file paths are accepted, not URIs. +// The returned span will be normalized, and thus if printed may produce a +// different string. 
+func Parse(input string) Span { + // :0:0#0-0:0#0 + valid := input + var hold, offset int + hadCol := false + suf := rstripSuffix(input) + if suf.sep == "#" { + offset = suf.num + suf = rstripSuffix(suf.remains) + } + if suf.sep == ":" { + valid = suf.remains + hold = suf.num + hadCol = true + suf = rstripSuffix(suf.remains) + } + switch { + case suf.sep == ":": + return New(URIFromPath(suf.remains), NewPoint(suf.num, hold, offset), Point{}) + case suf.sep == "-": + // we have a span, fall out of the case to continue + default: + // separator not valid, rewind to either the : or the start + return New(URIFromPath(valid), NewPoint(hold, 0, offset), Point{}) + } + // only the span form can get here + // at this point we still don't know what the numbers we have mean + // if have not yet seen a : then we might have either a line or a column depending + // on whether start has a column or not + // we build an end point and will fix it later if needed + end := NewPoint(suf.num, hold, offset) + hold, offset = 0, 0 + suf = rstripSuffix(suf.remains) + if suf.sep == "#" { + offset = suf.num + suf = rstripSuffix(suf.remains) + } + if suf.sep != ":" { + // turns out we don't have a span after all, rewind + return New(URIFromPath(valid), end, Point{}) + } + valid = suf.remains + hold = suf.num + suf = rstripSuffix(suf.remains) + if suf.sep != ":" { + // line#offset only + return New(URIFromPath(valid), NewPoint(hold, 0, offset), end) + } + // we have a column, so if end only had one number, it is also the column + if !hadCol { + end = NewPoint(suf.num, end.v.Line, end.v.Offset) + } + return New(URIFromPath(suf.remains), NewPoint(suf.num, hold, offset), end) +} + +type suffix struct { + remains string + sep string + num int +} + +func rstripSuffix(input string) suffix { + if len(input) == 0 { + return suffix{"", "", -1} + } + remains := input + num := -1 + // first see if we have a number at the end + last := strings.LastIndexFunc(remains, func(r rune) bool { return r < '0' || r > '9' }) + if last >= 0 && last < len(remains)-1 { + number, err := strconv.ParseInt(remains[last+1:], 10, 64) + if err == nil { + num = int(number) + remains = remains[:last+1] + } + } + // now see if we have a trailing separator + r, w := utf8.DecodeLastRuneInString(remains) + if r != ':' && r != '#' && r != '#' { + return suffix{input, "", -1} + } + remains = remains[:len(remains)-w] + return suffix{remains, string(r), num} +} diff --git a/vendor/github.com/hexops/gotextdiff/span/span.go b/vendor/github.com/hexops/gotextdiff/span/span.go new file mode 100644 index 0000000000..4d2ad09866 --- /dev/null +++ b/vendor/github.com/hexops/gotextdiff/span/span.go @@ -0,0 +1,285 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package span contains support for representing with positions and ranges in +// text files. +package span + +import ( + "encoding/json" + "fmt" + "path" +) + +// Span represents a source code range in standardized form. +type Span struct { + v span +} + +// Point represents a single point within a file. +// In general this should only be used as part of a Span, as on its own it +// does not carry enough information. 
+type Point struct { + v point +} + +type span struct { + URI URI `json:"uri"` + Start point `json:"start"` + End point `json:"end"` +} + +type point struct { + Line int `json:"line"` + Column int `json:"column"` + Offset int `json:"offset"` +} + +// Invalid is a span that reports false from IsValid +var Invalid = Span{v: span{Start: invalidPoint.v, End: invalidPoint.v}} + +var invalidPoint = Point{v: point{Line: 0, Column: 0, Offset: -1}} + +// Converter is the interface to an object that can convert between line:column +// and offset forms for a single file. +type Converter interface { + //ToPosition converts from an offset to a line:column pair. + ToPosition(offset int) (int, int, error) + //ToOffset converts from a line:column pair to an offset. + ToOffset(line, col int) (int, error) +} + +func New(uri URI, start Point, end Point) Span { + s := Span{v: span{URI: uri, Start: start.v, End: end.v}} + s.v.clean() + return s +} + +func NewPoint(line, col, offset int) Point { + p := Point{v: point{Line: line, Column: col, Offset: offset}} + p.v.clean() + return p +} + +func Compare(a, b Span) int { + if r := CompareURI(a.URI(), b.URI()); r != 0 { + return r + } + if r := comparePoint(a.v.Start, b.v.Start); r != 0 { + return r + } + return comparePoint(a.v.End, b.v.End) +} + +func ComparePoint(a, b Point) int { + return comparePoint(a.v, b.v) +} + +func comparePoint(a, b point) int { + if !a.hasPosition() { + if a.Offset < b.Offset { + return -1 + } + if a.Offset > b.Offset { + return 1 + } + return 0 + } + if a.Line < b.Line { + return -1 + } + if a.Line > b.Line { + return 1 + } + if a.Column < b.Column { + return -1 + } + if a.Column > b.Column { + return 1 + } + return 0 +} + +func (s Span) HasPosition() bool { return s.v.Start.hasPosition() } +func (s Span) HasOffset() bool { return s.v.Start.hasOffset() } +func (s Span) IsValid() bool { return s.v.Start.isValid() } +func (s Span) IsPoint() bool { return s.v.Start == s.v.End } +func (s Span) URI() URI { return s.v.URI } +func (s Span) Start() Point { return Point{s.v.Start} } +func (s Span) End() Point { return Point{s.v.End} } +func (s *Span) MarshalJSON() ([]byte, error) { return json.Marshal(&s.v) } +func (s *Span) UnmarshalJSON(b []byte) error { return json.Unmarshal(b, &s.v) } + +func (p Point) HasPosition() bool { return p.v.hasPosition() } +func (p Point) HasOffset() bool { return p.v.hasOffset() } +func (p Point) IsValid() bool { return p.v.isValid() } +func (p *Point) MarshalJSON() ([]byte, error) { return json.Marshal(&p.v) } +func (p *Point) UnmarshalJSON(b []byte) error { return json.Unmarshal(b, &p.v) } +func (p Point) Line() int { + if !p.v.hasPosition() { + panic(fmt.Errorf("position not set in %v", p.v)) + } + return p.v.Line +} +func (p Point) Column() int { + if !p.v.hasPosition() { + panic(fmt.Errorf("position not set in %v", p.v)) + } + return p.v.Column +} +func (p Point) Offset() int { + if !p.v.hasOffset() { + panic(fmt.Errorf("offset not set in %v", p.v)) + } + return p.v.Offset +} + +func (p point) hasPosition() bool { return p.Line > 0 } +func (p point) hasOffset() bool { return p.Offset >= 0 } +func (p point) isValid() bool { return p.hasPosition() || p.hasOffset() } +func (p point) isZero() bool { + return (p.Line == 1 && p.Column == 1) || (!p.hasPosition() && p.Offset == 0) +} + +func (s *span) clean() { + //this presumes the points are already clean + if !s.End.isValid() || (s.End == point{}) { + s.End = s.Start + } +} + +func (p *point) clean() { + if p.Line < 0 { + p.Line = 0 + } + if p.Column <= 0 { + if 
p.Line > 0 { + p.Column = 1 + } else { + p.Column = 0 + } + } + if p.Offset == 0 && (p.Line > 1 || p.Column > 1) { + p.Offset = -1 + } +} + +// Format implements fmt.Formatter to print the Location in a standard form. +// The format produced is one that can be read back in using Parse. +func (s Span) Format(f fmt.State, c rune) { + fullForm := f.Flag('+') + preferOffset := f.Flag('#') + // we should always have a uri, simplify if it is file format + //TODO: make sure the end of the uri is unambiguous + uri := string(s.v.URI) + if c == 'f' { + uri = path.Base(uri) + } else if !fullForm { + uri = s.v.URI.Filename() + } + fmt.Fprint(f, uri) + if !s.IsValid() || (!fullForm && s.v.Start.isZero() && s.v.End.isZero()) { + return + } + // see which bits of start to write + printOffset := s.HasOffset() && (fullForm || preferOffset || !s.HasPosition()) + printLine := s.HasPosition() && (fullForm || !printOffset) + printColumn := printLine && (fullForm || (s.v.Start.Column > 1 || s.v.End.Column > 1)) + fmt.Fprint(f, ":") + if printLine { + fmt.Fprintf(f, "%d", s.v.Start.Line) + } + if printColumn { + fmt.Fprintf(f, ":%d", s.v.Start.Column) + } + if printOffset { + fmt.Fprintf(f, "#%d", s.v.Start.Offset) + } + // start is written, do we need end? + if s.IsPoint() { + return + } + // we don't print the line if it did not change + printLine = fullForm || (printLine && s.v.End.Line > s.v.Start.Line) + fmt.Fprint(f, "-") + if printLine { + fmt.Fprintf(f, "%d", s.v.End.Line) + } + if printColumn { + if printLine { + fmt.Fprint(f, ":") + } + fmt.Fprintf(f, "%d", s.v.End.Column) + } + if printOffset { + fmt.Fprintf(f, "#%d", s.v.End.Offset) + } +} + +func (s Span) WithPosition(c Converter) (Span, error) { + if err := s.update(c, true, false); err != nil { + return Span{}, err + } + return s, nil +} + +func (s Span) WithOffset(c Converter) (Span, error) { + if err := s.update(c, false, true); err != nil { + return Span{}, err + } + return s, nil +} + +func (s Span) WithAll(c Converter) (Span, error) { + if err := s.update(c, true, true); err != nil { + return Span{}, err + } + return s, nil +} + +func (s *Span) update(c Converter, withPos, withOffset bool) error { + if !s.IsValid() { + return fmt.Errorf("cannot add information to an invalid span") + } + if withPos && !s.HasPosition() { + if err := s.v.Start.updatePosition(c); err != nil { + return err + } + if s.v.End.Offset == s.v.Start.Offset { + s.v.End = s.v.Start + } else if err := s.v.End.updatePosition(c); err != nil { + return err + } + } + if withOffset && (!s.HasOffset() || (s.v.End.hasPosition() && !s.v.End.hasOffset())) { + if err := s.v.Start.updateOffset(c); err != nil { + return err + } + if s.v.End.Line == s.v.Start.Line && s.v.End.Column == s.v.Start.Column { + s.v.End.Offset = s.v.Start.Offset + } else if err := s.v.End.updateOffset(c); err != nil { + return err + } + } + return nil +} + +func (p *point) updatePosition(c Converter) error { + line, col, err := c.ToPosition(p.Offset) + if err != nil { + return err + } + p.Line = line + p.Column = col + return nil +} + +func (p *point) updateOffset(c Converter) error { + offset, err := c.ToOffset(p.Line, p.Column) + if err != nil { + return err + } + p.Offset = offset + return nil +} diff --git a/vendor/github.com/hexops/gotextdiff/span/token.go b/vendor/github.com/hexops/gotextdiff/span/token.go new file mode 100644 index 0000000000..6f8b9b570c --- /dev/null +++ b/vendor/github.com/hexops/gotextdiff/span/token.go @@ -0,0 +1,194 @@ +// Copyright 2019 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package span + +import ( + "fmt" + "go/token" +) + +// Range represents a source code range in token.Pos form. +// It also carries the FileSet that produced the positions, so that it is +// self contained. +type Range struct { + FileSet *token.FileSet + Start token.Pos + End token.Pos + Converter Converter +} + +type FileConverter struct { + file *token.File +} + +// TokenConverter is a Converter backed by a token file set and file. +// It uses the file set methods to work out the conversions, which +// makes it fast and does not require the file contents. +type TokenConverter struct { + FileConverter + fset *token.FileSet +} + +// NewRange creates a new Range from a FileSet and two positions. +// To represent a point pass a 0 as the end pos. +func NewRange(fset *token.FileSet, start, end token.Pos) Range { + return Range{ + FileSet: fset, + Start: start, + End: end, + } +} + +// NewTokenConverter returns an implementation of Converter backed by a +// token.File. +func NewTokenConverter(fset *token.FileSet, f *token.File) *TokenConverter { + return &TokenConverter{fset: fset, FileConverter: FileConverter{file: f}} +} + +// NewContentConverter returns an implementation of Converter for the +// given file content. +func NewContentConverter(filename string, content []byte) *TokenConverter { + fset := token.NewFileSet() + f := fset.AddFile(filename, -1, len(content)) + f.SetLinesForContent(content) + return NewTokenConverter(fset, f) +} + +// IsPoint returns true if the range represents a single point. +func (r Range) IsPoint() bool { + return r.Start == r.End +} + +// Span converts a Range to a Span that represents the Range. +// It will fill in all the members of the Span, calculating the line and column +// information. +func (r Range) Span() (Span, error) { + if !r.Start.IsValid() { + return Span{}, fmt.Errorf("start pos is not valid") + } + f := r.FileSet.File(r.Start) + if f == nil { + return Span{}, fmt.Errorf("file not found in FileSet") + } + return FileSpan(f, r.Converter, r.Start, r.End) +} + +// FileSpan returns a span within tok, using converter to translate between +// offsets and positions. +func FileSpan(tok *token.File, converter Converter, start, end token.Pos) (Span, error) { + var s Span + var err error + var startFilename string + startFilename, s.v.Start.Line, s.v.Start.Column, err = position(tok, start) + if err != nil { + return Span{}, err + } + s.v.URI = URIFromPath(startFilename) + if end.IsValid() { + var endFilename string + endFilename, s.v.End.Line, s.v.End.Column, err = position(tok, end) + if err != nil { + return Span{}, err + } + // In the presence of line directives, a single File can have sections from + // multiple file names. 
+ if endFilename != startFilename { + return Span{}, fmt.Errorf("span begins in file %q but ends in %q", startFilename, endFilename) + } + } + s.v.Start.clean() + s.v.End.clean() + s.v.clean() + if converter != nil { + return s.WithOffset(converter) + } + if startFilename != tok.Name() { + return Span{}, fmt.Errorf("must supply Converter for file %q containing lines from %q", tok.Name(), startFilename) + } + return s.WithOffset(&FileConverter{tok}) +} + +func position(f *token.File, pos token.Pos) (string, int, int, error) { + off, err := offset(f, pos) + if err != nil { + return "", 0, 0, err + } + return positionFromOffset(f, off) +} + +func positionFromOffset(f *token.File, offset int) (string, int, int, error) { + if offset > f.Size() { + return "", 0, 0, fmt.Errorf("offset %v is past the end of the file %v", offset, f.Size()) + } + pos := f.Pos(offset) + p := f.Position(pos) + // TODO(golang/go#41029): Consider returning line, column instead of line+1, 1 if + // the file's last character is not a newline. + if offset == f.Size() { + return p.Filename, p.Line + 1, 1, nil + } + return p.Filename, p.Line, p.Column, nil +} + +// offset is a copy of the Offset function in go/token, but with the adjustment +// that it does not panic on invalid positions. +func offset(f *token.File, pos token.Pos) (int, error) { + if int(pos) < f.Base() || int(pos) > f.Base()+f.Size() { + return 0, fmt.Errorf("invalid pos") + } + return int(pos) - f.Base(), nil +} + +// Range converts a Span to a Range that represents the Span for the supplied +// File. +func (s Span) Range(converter *TokenConverter) (Range, error) { + s, err := s.WithOffset(converter) + if err != nil { + return Range{}, err + } + // go/token will panic if the offset is larger than the file's size, + // so check here to avoid panicking. + if s.Start().Offset() > converter.file.Size() { + return Range{}, fmt.Errorf("start offset %v is past the end of the file %v", s.Start(), converter.file.Size()) + } + if s.End().Offset() > converter.file.Size() { + return Range{}, fmt.Errorf("end offset %v is past the end of the file %v", s.End(), converter.file.Size()) + } + return Range{ + FileSet: converter.fset, + Start: converter.file.Pos(s.Start().Offset()), + End: converter.file.Pos(s.End().Offset()), + Converter: converter, + }, nil +} + +func (l *FileConverter) ToPosition(offset int) (int, int, error) { + _, line, col, err := positionFromOffset(l.file, offset) + return line, col, err +} + +func (l *FileConverter) ToOffset(line, col int) (int, error) { + if line < 0 { + return -1, fmt.Errorf("line is not valid") + } + lineMax := l.file.LineCount() + 1 + if line > lineMax { + return -1, fmt.Errorf("line is beyond end of file %v", lineMax) + } else if line == lineMax { + if col > 1 { + return -1, fmt.Errorf("column is beyond end of file") + } + // at the end of the file, allowing for a trailing eol + return l.file.Size(), nil + } + pos := lineStart(l.file, line) + if !pos.IsValid() { + return -1, fmt.Errorf("line is not in file") + } + // we assume that column is in bytes here, and that the first byte of a + // line is at column 1 + pos += token.Pos(col - 1) + return offset(l.file, pos) +} diff --git a/vendor/github.com/hexops/gotextdiff/span/token111.go b/vendor/github.com/hexops/gotextdiff/span/token111.go new file mode 100644 index 0000000000..bf7a5406b6 --- /dev/null +++ b/vendor/github.com/hexops/gotextdiff/span/token111.go @@ -0,0 +1,39 @@ +// Copyright 2019 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.12 + +package span + +import ( + "go/token" +) + +// lineStart is the pre-Go 1.12 version of (*token.File).LineStart. For Go +// versions <= 1.11, we borrow logic from the analysisutil package. +// TODO(rstambler): Delete this file when we no longer support Go 1.11. +func lineStart(f *token.File, line int) token.Pos { + // Use binary search to find the start offset of this line. + + min := 0 // inclusive + max := f.Size() // exclusive + for { + offset := (min + max) / 2 + pos := f.Pos(offset) + posn := f.Position(pos) + if posn.Line == line { + return pos - (token.Pos(posn.Column) - 1) + } + + if min+1 >= max { + return token.NoPos + } + + if posn.Line < line { + min = offset + } else { + max = offset + } + } +} diff --git a/vendor/github.com/hexops/gotextdiff/span/token112.go b/vendor/github.com/hexops/gotextdiff/span/token112.go new file mode 100644 index 0000000000..017aec9c13 --- /dev/null +++ b/vendor/github.com/hexops/gotextdiff/span/token112.go @@ -0,0 +1,16 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.12 + +package span + +import ( + "go/token" +) + +// TODO(rstambler): Delete this file when we no longer support Go 1.11. +func lineStart(f *token.File, line int) token.Pos { + return f.LineStart(line) +} diff --git a/vendor/github.com/hexops/gotextdiff/span/uri.go b/vendor/github.com/hexops/gotextdiff/span/uri.go new file mode 100644 index 0000000000..2504921356 --- /dev/null +++ b/vendor/github.com/hexops/gotextdiff/span/uri.go @@ -0,0 +1,169 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package span + +import ( + "fmt" + "net/url" + "os" + "path" + "path/filepath" + "runtime" + "strings" + "unicode" +) + +const fileScheme = "file" + +// URI represents the full URI for a file. +type URI string + +func (uri URI) IsFile() bool { + return strings.HasPrefix(string(uri), "file://") +} + +// Filename returns the file path for the given URI. +// It is an error to call this on a URI that is not a valid filename. +func (uri URI) Filename() string { + filename, err := filename(uri) + if err != nil { + panic(err) + } + return filepath.FromSlash(filename) +} + +func filename(uri URI) (string, error) { + if uri == "" { + return "", nil + } + u, err := url.ParseRequestURI(string(uri)) + if err != nil { + return "", err + } + if u.Scheme != fileScheme { + return "", fmt.Errorf("only file URIs are supported, got %q from %q", u.Scheme, uri) + } + // If the URI is a Windows URI, we trim the leading "/" and lowercase + // the drive letter, which will never be case sensitive. + if isWindowsDriveURIPath(u.Path) { + u.Path = strings.ToUpper(string(u.Path[1])) + u.Path[2:] + } + return u.Path, nil +} + +func URIFromURI(s string) URI { + if !strings.HasPrefix(s, "file://") { + return URI(s) + } + + if !strings.HasPrefix(s, "file:///") { + // VS Code sends URLs with only two slashes, which are invalid. golang/go#39789. + s = "file:///" + s[len("file://"):] + } + // Even though the input is a URI, it may not be in canonical form. VS Code + // in particular over-escapes :, @, etc. Unescape and re-encode to canonicalize. 
+ path, err := url.PathUnescape(s[len("file://"):]) + if err != nil { + panic(err) + } + + // File URIs from Windows may have lowercase drive letters. + // Since drive letters are guaranteed to be case insensitive, + // we change them to uppercase to remain consistent. + // For example, file:///c:/x/y/z becomes file:///C:/x/y/z. + if isWindowsDriveURIPath(path) { + path = path[:1] + strings.ToUpper(string(path[1])) + path[2:] + } + u := url.URL{Scheme: fileScheme, Path: path} + return URI(u.String()) +} + +func CompareURI(a, b URI) int { + if equalURI(a, b) { + return 0 + } + if a < b { + return -1 + } + return 1 +} + +func equalURI(a, b URI) bool { + if a == b { + return true + } + // If we have the same URI basename, we may still have the same file URIs. + if !strings.EqualFold(path.Base(string(a)), path.Base(string(b))) { + return false + } + fa, err := filename(a) + if err != nil { + return false + } + fb, err := filename(b) + if err != nil { + return false + } + // Stat the files to check if they are equal. + infoa, err := os.Stat(filepath.FromSlash(fa)) + if err != nil { + return false + } + infob, err := os.Stat(filepath.FromSlash(fb)) + if err != nil { + return false + } + return os.SameFile(infoa, infob) +} + +// URIFromPath returns a span URI for the supplied file path. +// It will always have the file scheme. +func URIFromPath(path string) URI { + if path == "" { + return "" + } + // Handle standard library paths that contain the literal "$GOROOT". + // TODO(rstambler): The go/packages API should allow one to determine a user's $GOROOT. + const prefix = "$GOROOT" + if len(path) >= len(prefix) && strings.EqualFold(prefix, path[:len(prefix)]) { + suffix := path[len(prefix):] + path = runtime.GOROOT() + suffix + } + if !isWindowsDrivePath(path) { + if abs, err := filepath.Abs(path); err == nil { + path = abs + } + } + // Check the file path again, in case it became absolute. + if isWindowsDrivePath(path) { + path = "/" + strings.ToUpper(string(path[0])) + path[1:] + } + path = filepath.ToSlash(path) + u := url.URL{ + Scheme: fileScheme, + Path: path, + } + return URI(u.String()) +} + +// isWindowsDrivePath returns true if the file path is of the form used by +// Windows. We check if the path begins with a drive letter, followed by a ":". +// For example: C:/x/y/z. +func isWindowsDrivePath(path string) bool { + if len(path) < 3 { + return false + } + return unicode.IsLetter(rune(path[0])) && path[1] == ':' +} + +// isWindowsDriveURI returns true if the file URI is of the format used by +// Windows URIs. The url.Parse package does not specially handle Windows paths +// (see golang/go#6027), so we check if the URI path has a drive prefix (e.g. "/C:"). +func isWindowsDriveURIPath(uri string) bool { + if len(uri) < 4 { + return false + } + return uri[0] == '/' && unicode.IsLetter(rune(uri[1])) && uri[2] == ':' +} diff --git a/vendor/github.com/hexops/gotextdiff/span/utf16.go b/vendor/github.com/hexops/gotextdiff/span/utf16.go new file mode 100644 index 0000000000..f06a2468b6 --- /dev/null +++ b/vendor/github.com/hexops/gotextdiff/span/utf16.go @@ -0,0 +1,91 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package span + +import ( + "fmt" + "unicode/utf16" + "unicode/utf8" +) + +// ToUTF16Column calculates the utf16 column expressed by the point given the +// supplied file contents. 
+// This is used to convert from the native (always in bytes) column +// representation and the utf16 counts used by some editors. +func ToUTF16Column(p Point, content []byte) (int, error) { + if !p.HasPosition() { + return -1, fmt.Errorf("ToUTF16Column: point is missing position") + } + if !p.HasOffset() { + return -1, fmt.Errorf("ToUTF16Column: point is missing offset") + } + offset := p.Offset() // 0-based + colZero := p.Column() - 1 // 0-based + if colZero == 0 { + // 0-based column 0, so it must be chr 1 + return 1, nil + } else if colZero < 0 { + return -1, fmt.Errorf("ToUTF16Column: column is invalid (%v)", colZero) + } + // work out the offset at the start of the line using the column + lineOffset := offset - colZero + if lineOffset < 0 || offset > len(content) { + return -1, fmt.Errorf("ToUTF16Column: offsets %v-%v outside file contents (%v)", lineOffset, offset, len(content)) + } + // Use the offset to pick out the line start. + // This cannot panic: offset > len(content) and lineOffset < offset. + start := content[lineOffset:] + + // Now, truncate down to the supplied column. + start = start[:colZero] + + // and count the number of utf16 characters + // in theory we could do this by hand more efficiently... + return len(utf16.Encode([]rune(string(start)))) + 1, nil +} + +// FromUTF16Column advances the point by the utf16 character offset given the +// supplied line contents. +// This is used to convert from the utf16 counts used by some editors to the +// native (always in bytes) column representation. +func FromUTF16Column(p Point, chr int, content []byte) (Point, error) { + if !p.HasOffset() { + return Point{}, fmt.Errorf("FromUTF16Column: point is missing offset") + } + // if chr is 1 then no adjustment needed + if chr <= 1 { + return p, nil + } + if p.Offset() >= len(content) { + return p, fmt.Errorf("FromUTF16Column: offset (%v) greater than length of content (%v)", p.Offset(), len(content)) + } + remains := content[p.Offset():] + // scan forward the specified number of characters + for count := 1; count < chr; count++ { + if len(remains) <= 0 { + return Point{}, fmt.Errorf("FromUTF16Column: chr goes beyond the content") + } + r, w := utf8.DecodeRune(remains) + if r == '\n' { + // Per the LSP spec: + // + // > If the character value is greater than the line length it + // > defaults back to the line length. + break + } + remains = remains[w:] + if r >= 0x10000 { + // a two point rune + count++ + // if we finished in a two point rune, do not advance past the first + if count >= chr { + break + } + } + p.v.Column += w + p.v.Offset += w + } + return p, nil +} diff --git a/vendor/github.com/hexops/gotextdiff/unified.go b/vendor/github.com/hexops/gotextdiff/unified.go new file mode 100644 index 0000000000..b7d85cfccf --- /dev/null +++ b/vendor/github.com/hexops/gotextdiff/unified.go @@ -0,0 +1,210 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gotextdiff + +import ( + "fmt" + "strings" +) + +// Unified represents a set of edits as a unified diff. +type Unified struct { + // From is the name of the original file. + From string + // To is the name of the modified file. + To string + // Hunks is the set of edit hunks needed to transform the file content. + Hunks []*Hunk +} + +// Hunk represents a contiguous set of line edits to apply. +type Hunk struct { + // The line in the original source where the hunk starts. 
+ FromLine int + // The line in the original source where the hunk finishes. + ToLine int + // The set of line based edits to apply. + Lines []Line +} + +// Line represents a single line operation to apply as part of a Hunk. +type Line struct { + // Kind is the type of line this represents, deletion, insertion or copy. + Kind OpKind + // Content is the content of this line. + // For deletion it is the line being removed, for all others it is the line + // to put in the output. + Content string +} + +// OpKind is used to denote the type of operation a line represents. +type OpKind int + +const ( + // Delete is the operation kind for a line that is present in the input + // but not in the output. + Delete OpKind = iota + // Insert is the operation kind for a line that is new in the output. + Insert + // Equal is the operation kind for a line that is the same in the input and + // output, often used to provide context around edited lines. + Equal +) + +// String returns a human readable representation of an OpKind. It is not +// intended for machine processing. +func (k OpKind) String() string { + switch k { + case Delete: + return "delete" + case Insert: + return "insert" + case Equal: + return "equal" + default: + panic("unknown operation kind") + } +} + +const ( + edge = 3 + gap = edge * 2 +) + +// ToUnified takes a file contents and a sequence of edits, and calculates +// a unified diff that represents those edits. +func ToUnified(from, to string, content string, edits []TextEdit) Unified { + u := Unified{ + From: from, + To: to, + } + if len(edits) == 0 { + return u + } + c, edits, partial := prepareEdits(content, edits) + if partial { + edits = lineEdits(content, c, edits) + } + lines := splitLines(content) + var h *Hunk + last := 0 + toLine := 0 + for _, edit := range edits { + start := edit.Span.Start().Line() - 1 + end := edit.Span.End().Line() - 1 + switch { + case h != nil && start == last: + //direct extension + case h != nil && start <= last+gap: + //within range of previous lines, add the joiners + addEqualLines(h, lines, last, start) + default: + //need to start a new hunk + if h != nil { + // add the edge to the previous hunk + addEqualLines(h, lines, last, last+edge) + u.Hunks = append(u.Hunks, h) + } + toLine += start - last + h = &Hunk{ + FromLine: start + 1, + ToLine: toLine + 1, + } + // add the edge to the new hunk + delta := addEqualLines(h, lines, start-edge, start) + h.FromLine -= delta + h.ToLine -= delta + } + last = start + for i := start; i < end; i++ { + h.Lines = append(h.Lines, Line{Kind: Delete, Content: lines[i]}) + last++ + } + if edit.NewText != "" { + for _, line := range splitLines(edit.NewText) { + h.Lines = append(h.Lines, Line{Kind: Insert, Content: line}) + toLine++ + } + } + } + if h != nil { + // add the edge to the final hunk + addEqualLines(h, lines, last, last+edge) + u.Hunks = append(u.Hunks, h) + } + return u +} + +func splitLines(text string) []string { + lines := strings.SplitAfter(text, "\n") + if lines[len(lines)-1] == "" { + lines = lines[:len(lines)-1] + } + return lines +} + +func addEqualLines(h *Hunk, lines []string, start, end int) int { + delta := 0 + for i := start; i < end; i++ { + if i < 0 { + continue + } + if i >= len(lines) { + return delta + } + h.Lines = append(h.Lines, Line{Kind: Equal, Content: lines[i]}) + delta++ + } + return delta +} + +// Format converts a unified diff to the standard textual form for that diff. +// The output of this function can be passed to tools like patch. 
+func (u Unified) Format(f fmt.State, r rune) { + if len(u.Hunks) == 0 { + return + } + fmt.Fprintf(f, "--- %s\n", u.From) + fmt.Fprintf(f, "+++ %s\n", u.To) + for _, hunk := range u.Hunks { + fromCount, toCount := 0, 0 + for _, l := range hunk.Lines { + switch l.Kind { + case Delete: + fromCount++ + case Insert: + toCount++ + default: + fromCount++ + toCount++ + } + } + fmt.Fprint(f, "@@") + if fromCount > 1 { + fmt.Fprintf(f, " -%d,%d", hunk.FromLine, fromCount) + } else { + fmt.Fprintf(f, " -%d", hunk.FromLine) + } + if toCount > 1 { + fmt.Fprintf(f, " +%d,%d", hunk.ToLine, toCount) + } else { + fmt.Fprintf(f, " +%d", hunk.ToLine) + } + fmt.Fprint(f, " @@\n") + for _, l := range hunk.Lines { + switch l.Kind { + case Delete: + fmt.Fprintf(f, "-%s", l.Content) + case Insert: + fmt.Fprintf(f, "+%s", l.Content) + default: + fmt.Fprintf(f, " %s", l.Content) + } + if !strings.HasSuffix(l.Content, "\n") { + fmt.Fprintf(f, "\n\\ No newline at end of file\n") + } + } + } +} diff --git a/vendor/github.com/inconshreveable/mousetrap/trap_others.go b/vendor/github.com/inconshreveable/mousetrap/trap_others.go index 9d2d8a4bab..06a91f0868 100644 --- a/vendor/github.com/inconshreveable/mousetrap/trap_others.go +++ b/vendor/github.com/inconshreveable/mousetrap/trap_others.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package mousetrap diff --git a/vendor/github.com/inconshreveable/mousetrap/trap_windows.go b/vendor/github.com/inconshreveable/mousetrap/trap_windows.go index 336142a5e3..0c56880216 100644 --- a/vendor/github.com/inconshreveable/mousetrap/trap_windows.go +++ b/vendor/github.com/inconshreveable/mousetrap/trap_windows.go @@ -1,81 +1,32 @@ -// +build windows -// +build !go1.4 - package mousetrap import ( - "fmt" - "os" "syscall" "unsafe" ) -const ( - // defined by the Win32 API - th32cs_snapprocess uintptr = 0x2 -) - -var ( - kernel = syscall.MustLoadDLL("kernel32.dll") - CreateToolhelp32Snapshot = kernel.MustFindProc("CreateToolhelp32Snapshot") - Process32First = kernel.MustFindProc("Process32FirstW") - Process32Next = kernel.MustFindProc("Process32NextW") -) - -// ProcessEntry32 structure defined by the Win32 API -type processEntry32 struct { - dwSize uint32 - cntUsage uint32 - th32ProcessID uint32 - th32DefaultHeapID int - th32ModuleID uint32 - cntThreads uint32 - th32ParentProcessID uint32 - pcPriClassBase int32 - dwFlags uint32 - szExeFile [syscall.MAX_PATH]uint16 -} - -func getProcessEntry(pid int) (pe *processEntry32, err error) { - snapshot, _, e1 := CreateToolhelp32Snapshot.Call(th32cs_snapprocess, uintptr(0)) - if snapshot == uintptr(syscall.InvalidHandle) { - err = fmt.Errorf("CreateToolhelp32Snapshot: %v", e1) - return +func getProcessEntry(pid int) (*syscall.ProcessEntry32, error) { + snapshot, err := syscall.CreateToolhelp32Snapshot(syscall.TH32CS_SNAPPROCESS, 0) + if err != nil { + return nil, err } - defer syscall.CloseHandle(syscall.Handle(snapshot)) - - var processEntry processEntry32 - processEntry.dwSize = uint32(unsafe.Sizeof(processEntry)) - ok, _, e1 := Process32First.Call(snapshot, uintptr(unsafe.Pointer(&processEntry))) - if ok == 0 { - err = fmt.Errorf("Process32First: %v", e1) - return + defer syscall.CloseHandle(snapshot) + var procEntry syscall.ProcessEntry32 + procEntry.Size = uint32(unsafe.Sizeof(procEntry)) + if err = syscall.Process32First(snapshot, &procEntry); err != nil { + return nil, err } - for { - if processEntry.th32ProcessID == uint32(pid) { - pe = &processEntry - return + if procEntry.ProcessID == uint32(pid) { + return &procEntry, nil } 
- - ok, _, e1 = Process32Next.Call(snapshot, uintptr(unsafe.Pointer(&processEntry))) - if ok == 0 { - err = fmt.Errorf("Process32Next: %v", e1) - return + err = syscall.Process32Next(snapshot, &procEntry) + if err != nil { + return nil, err } } } -func getppid() (pid int, err error) { - pe, err := getProcessEntry(os.Getpid()) - if err != nil { - return - } - - pid = int(pe.th32ParentProcessID) - return -} - // StartedByExplorer returns true if the program was invoked by the user double-clicking // on the executable from explorer.exe // @@ -83,16 +34,9 @@ func getppid() (pid int, err error) { // It does not guarantee that the program was run from a terminal. It only can tell you // whether it was launched from explorer.exe func StartedByExplorer() bool { - ppid, err := getppid() + pe, err := getProcessEntry(syscall.Getppid()) if err != nil { return false } - - pe, err := getProcessEntry(ppid) - if err != nil { - return false - } - - name := syscall.UTF16ToString(pe.szExeFile[:]) - return name == "explorer.exe" + return "explorer.exe" == syscall.UTF16ToString(pe.ExeFile[:]) } diff --git a/vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go b/vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go deleted file mode 100644 index 9a28e57c3c..0000000000 --- a/vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go +++ /dev/null @@ -1,46 +0,0 @@ -// +build windows -// +build go1.4 - -package mousetrap - -import ( - "os" - "syscall" - "unsafe" -) - -func getProcessEntry(pid int) (*syscall.ProcessEntry32, error) { - snapshot, err := syscall.CreateToolhelp32Snapshot(syscall.TH32CS_SNAPPROCESS, 0) - if err != nil { - return nil, err - } - defer syscall.CloseHandle(snapshot) - var procEntry syscall.ProcessEntry32 - procEntry.Size = uint32(unsafe.Sizeof(procEntry)) - if err = syscall.Process32First(snapshot, &procEntry); err != nil { - return nil, err - } - for { - if procEntry.ProcessID == uint32(pid) { - return &procEntry, nil - } - err = syscall.Process32Next(snapshot, &procEntry) - if err != nil { - return nil, err - } - } -} - -// StartedByExplorer returns true if the program was invoked by the user double-clicking -// on the executable from explorer.exe -// -// It is conservative and returns false if any of the internal calls fail. -// It does not guarantee that the program was run from a terminal. 
It only can tell you -// whether it was launched from explorer.exe -func StartedByExplorer() bool { - pe, err := getProcessEntry(os.Getppid()) - if err != nil { - return false - } - return "explorer.exe" == syscall.UTF16ToString(pe.ExeFile[:]) -} diff --git a/vendor/github.com/jgautheron/goconst/.gitignore b/vendor/github.com/jgautheron/goconst/.gitignore new file mode 100644 index 0000000000..a9d34d9c41 --- /dev/null +++ b/vendor/github.com/jgautheron/goconst/.gitignore @@ -0,0 +1,2 @@ +.idea +goconst \ No newline at end of file diff --git a/vendor/github.com/jgautheron/goconst/README.md b/vendor/github.com/jgautheron/goconst/README.md index 8dd093baf0..c671eb5412 100644 --- a/vendor/github.com/jgautheron/goconst/README.md +++ b/vendor/github.com/jgautheron/goconst/README.md @@ -23,6 +23,7 @@ Usage: Flags: -ignore exclude files matching the given regular expression + -ignore-strings exclude strings matching the given regular expression -ignore-tests exclude tests from the search (default: true) -min-occurrences report from how many occurrences (default: 2) -min-length only report strings with the minimum given length (default: 3) diff --git a/vendor/github.com/jgautheron/goconst/api.go b/vendor/github.com/jgautheron/goconst/api.go index d56fcd6c25..b838e035f6 100644 --- a/vendor/github.com/jgautheron/goconst/api.go +++ b/vendor/github.com/jgautheron/goconst/api.go @@ -14,6 +14,7 @@ type Issue struct { } type Config struct { + IgnoreStrings string IgnoreTests bool MatchWithConstants bool MinStringLength int @@ -28,6 +29,7 @@ func Run(files []*ast.File, fset *token.FileSet, cfg *Config) ([]Issue, error) { p := New( "", "", + cfg.IgnoreStrings, cfg.IgnoreTests, cfg.MatchWithConstants, cfg.ParseNumbers, diff --git a/vendor/github.com/jgautheron/goconst/parser.go b/vendor/github.com/jgautheron/goconst/parser.go index 2ed7a9a909..2f32740b96 100644 --- a/vendor/github.com/jgautheron/goconst/parser.go +++ b/vendor/github.com/jgautheron/goconst/parser.go @@ -24,11 +24,11 @@ const ( type Parser struct { // Meant to be passed via New() - path, ignore string - ignoreTests, matchConstant bool - minLength, minOccurrences int - numberMin, numberMax int - excludeTypes map[Type]bool + path, ignore, ignoreStrings string + ignoreTests, matchConstant bool + minLength, minOccurrences int + numberMin, numberMax int + excludeTypes map[Type]bool supportedTokens []token.Token @@ -39,7 +39,7 @@ type Parser struct { // New creates a new instance of the parser. // This is your entry point if you'd like to use goconst as an API. 
-func New(path, ignore string, ignoreTests, matchConstant, numbers bool, numberMin, numberMax, minLength, minOccurrences int, excludeTypes map[Type]bool) *Parser { +func New(path, ignore, ignoreStrings string, ignoreTests, matchConstant, numbers bool, numberMin, numberMax, minLength, minOccurrences int, excludeTypes map[Type]bool) *Parser { supportedTokens := []token.Token{token.STRING} if numbers { supportedTokens = append(supportedTokens, token.INT, token.FLOAT) @@ -48,6 +48,7 @@ func New(path, ignore string, ignoreTests, matchConstant, numbers bool, numberMi return &Parser{ path: path, ignore: ignore, + ignoreStrings: ignoreStrings, ignoreTests: ignoreTests, matchConstant: matchConstant, minLength: minLength, @@ -98,12 +99,22 @@ func (p *Parser) ProcessResults() { delete(p.strs, str) } + if p.ignoreStrings != "" { + match, err := regexp.MatchString(p.ignoreStrings, str) + if err != nil { + log.Println(err) + } + if match { + delete(p.strs, str) + } + } + // If the value is a number - if i, err := strconv.Atoi(str); err == nil { - if p.numberMin != 0 && i < p.numberMin { + if i, err := strconv.ParseInt(str, 0, 0); err == nil { + if p.numberMin != 0 && i < int64(p.numberMin) { delete(p.strs, str) } - if p.numberMax != 0 && i > p.numberMax { + if p.numberMax != 0 && i > int64(p.numberMax) { delete(p.strs, str) } } diff --git a/vendor/github.com/jgautheron/goconst/visitor.go b/vendor/github.com/jgautheron/goconst/visitor.go index c0974da8fd..a553814f5c 100644 --- a/vendor/github.com/jgautheron/goconst/visitor.go +++ b/vendor/github.com/jgautheron/goconst/visitor.go @@ -62,10 +62,6 @@ func (v *treeVisitor) Visit(node ast.Node) ast.Visitor { // if foo == "moo" case *ast.BinaryExpr: - if t.Op != token.EQL && t.Op != token.NEQ { - return v - } - var lit *ast.BasicLit var ok bool diff --git a/vendor/github.com/jjti/go-spancheck/.gitignore b/vendor/github.com/jjti/go-spancheck/.gitignore new file mode 100644 index 0000000000..1f83be414c --- /dev/null +++ b/vendor/github.com/jjti/go-spancheck/.gitignore @@ -0,0 +1,19 @@ +# If you prefer the allow list template instead of the deny list, see community template: +# https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore +# +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +# vendor/ +src/ diff --git a/vendor/github.com/jjti/go-spancheck/.golangci.yml b/vendor/github.com/jjti/go-spancheck/.golangci.yml new file mode 100644 index 0000000000..15d8513d68 --- /dev/null +++ b/vendor/github.com/jjti/go-spancheck/.golangci.yml @@ -0,0 +1,103 @@ +## A good ref for this: https://gist.github.com/maratori/47a4d00457a92aa426dbd48a18776322 + +run: + timeout: 5m + tests: true +linters: + enable: + - asasalint # checks for pass []any as any in variadic func(...any) + - asciicheck # checks that your code does not contain non-ASCII identifiers + - bidichk # checks for dangerous unicode character sequences + - bodyclose + - containedctx + - decorder # checks declaration order and count of types, constants, variables and functions + - dogsled + - dupword # checks for duplicate words in the source code + - durationcheck # checks for two durations multiplied together + - errcheck + - errname + - errorlint + - exhaustive # checks exhaustiveness of enum switch statements + - exportloopref # checks for pointers to enclosing 
loop variables + - gci + - gochecknoinits # checks that no init functions are present in Go code + - gocritic + - gomnd + - gosimple + - govet + - importas # enforces consistent import aliases + - ineffassign + - loggercheck + - makezero # finds slice declarations with non-zero initial length + - mirror + - misspell + - musttag # enforces field tags in (un)marshaled structs + - nakedret + - nestif # reports deeply nested if statements + - nilerr # finds the code that returns nil even if it checks that the error is not nil + - noctx # finds sending http request without context.Context + - nolintlint # reports ill-formed or insufficient nolint directives + - predeclared # finds code that shadows one of Go's predeclared identifiers + - promlinter + - reassign # checks that package variables are not reassigned + - revive # fast, configurable, extensible, flexible, and beautiful linter for Go, drop-in replacement of golint + - staticcheck + - stylecheck + - tenv + - thelper # detects golang test helpers without t.Helper() call and checks the consistency of test helpers + - unconvert # removes unnecessary type conversions + - unparam # reports unused function parameters + - unused + - usestdlibvars # detects the possibility to use variables/constants from the Go standard library + - wastedassign # finds wasted assignment statements + - whitespace # detects leading and trailing whitespace +linters-settings: + gci: + skip-generated: true + custom-order: true + sections: + - standard # Standard section: captures all standard packages. + - default # Default section: contains all imports that could not be matched to another section type. + - prefix(github.com/jjti) + exhaustive: + # Program elements to check for exhaustiveness. + # Default: [ switch ] + check: + - switch + - map + gocritic: + settings: + captLocal: + # Whether to restrict checker to params only. + # Default: true + paramsOnly: false + underef: + # Whether to skip (*x).method() calls where x is a pointer receiver. + # Default: true + skipRecvDeref: false + govet: + enable-all: true + disable: + - fieldalignment # too strict + - shadow # bunch of false positive, doesn't realize when we return from a func + misspell: + locale: US + nakedret: + max-func-lines: 0 + nestif: + # Minimal complexity of if statements to report. + # Default: 5 + min-complexity: 4 + nolintlint: + # Enable to require an explanation of nonzero length after each nolint directive. + # Default: false + require-explanation: true + stylecheck: + checks: ["all"] +issues: + include: + - EXC0001 # Error return value of x is not checked + - EXC0013 # package comment should be of the form "(.+)... + - EXC0014 # comment on exported (.+) should be of the form "(.+)..." + exclude: + - ifElseChain diff --git a/vendor/github.com/jjti/go-spancheck/CONTRIBUTING.md b/vendor/github.com/jjti/go-spancheck/CONTRIBUTING.md new file mode 100644 index 0000000000..32932fae10 --- /dev/null +++ b/vendor/github.com/jjti/go-spancheck/CONTRIBUTING.md @@ -0,0 +1,51 @@ +# Contributing guideline + +Contributions are welcome + appreciated. + +## Open Requests + +These are a couple contributions I would especially appreciate: + +1. Add check for SetAttributes: https://github.com/jjti/go-spancheck/issues/1 +1. Add SuggestedFix(es): https://github.com/jjti/go-spancheck/issues/2 + +## Steps + +### 1. Create an Issue + +If one does not exist already, open a bug report or feature request in [https://github.com/jjti/go-spancheck/issues](https://github.com/jjti/go-spancheck/issues). + +### 2. 
Add a test case + +Test cases are in `/testdata`. + +If fixing a bug, you can add it to `testdata/enableall/enable_all.go` (for example): + +```go +func _() { + ctx, span := otel.Tracer("foo").Start(context.Background(), "bar") // want "span.End is not called on all paths, possible memory leak" + print(ctx.Done(), span.IsRecording()) +} // want "return can be reached without calling span.End" +``` + +If adding a new feature with a new combination of flags, create a new module within `testdata`: + +1. Create a new module, eg `testdata/setattributes` +1. Copy/paste go.mod/sum into the new module directory and update the module definition, eg `module github.com/jjti/go-spancheck/testdata/setattributes` +1. Add the module to the workspace in [go.work](./go.work) +1. Add the module's directory to the `testvendor` Make target in [Makefile](./Makefile) + +### 3. Run tests + +```bash +make test +``` + +### 4. Open a PR + +Eg of a GitHub snippet for PRs: + +```bash +alias gpr='gh pr view --web 2>/dev/null || gh pr create --web --fill' +gpr +``` diff --git a/vendor/github.com/jjti/go-spancheck/LICENSE b/vendor/github.com/jjti/go-spancheck/LICENSE new file mode 100644 index 0000000000..552ddf2dc5 --- /dev/null +++ b/vendor/github.com/jjti/go-spancheck/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2023 Joshua Timmons + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/jjti/go-spancheck/Makefile b/vendor/github.com/jjti/go-spancheck/Makefile new file mode 100644 index 0000000000..39d80f7c61 --- /dev/null +++ b/vendor/github.com/jjti/go-spancheck/Makefile @@ -0,0 +1,27 @@ +.PHONY: fmt +fmt: + golangci-lint run --fix --config ./.golangci.yml + +.PHONY: test +test: testvendor + go test -v ./... + +# note: I'm copying https://github.com/ghostiam/protogetter/blob/main/testdata/Makefile +# +# x/tools/go/analysis/analysistest does not support go modules. To work around this issue +# we need to vendor any external modules to `./src`. +# +# Follow https://github.com/golang/go/issues/37054 for more details. 
+.PHONY: testvendor +testvendor: + @rm -rf base/src + @cd testdata/base && go mod vendor + @cp -r testdata/base/vendor testdata/base/src + @cp -r testdata/base/vendor testdata/disableerrorchecks/src + @cp -r testdata/base/vendor testdata/enableall/src + @rm -rf testdata/base/vendor + +.PHONY: install +install: + go install ./cmd/spancheck + @echo "Installed in $(shell which spancheck)" \ No newline at end of file diff --git a/vendor/github.com/jjti/go-spancheck/README.md b/vendor/github.com/jjti/go-spancheck/README.md new file mode 100644 index 0000000000..98aedec287 --- /dev/null +++ b/vendor/github.com/jjti/go-spancheck/README.md @@ -0,0 +1,210 @@ +# go-spancheck + +![Latest release](https://img.shields.io/github/v/release/jjti/go-spancheck) +[![ci](https://github.com/jjti/go-spancheck/actions/workflows/ci.yaml/badge.svg)](https://github.com/jjti/go-spancheck/actions/workflows/ci.yaml) +[![Go Report Card](https://goreportcard.com/badge/github.com/jjti/go-spancheck)](https://goreportcard.com/report/github.com/jjti/go-spancheck) +[![MIT License](http://img.shields.io/badge/license-MIT-blue.svg?style=flat)](LICENSE) + +Checks usage of: + +- [OpenTelemetry spans](https://opentelemetry.io/docs/instrumentation/go/manual/) from [go.opentelemetry.io/otel/trace](go.opentelemetry.io/otel/trace) +- [OpenCensus spans](https://opencensus.io/quickstart/go/tracing/) from [go.opencensus.io/trace](https://pkg.go.dev/go.opencensus.io/trace#Span) + +## Installation & Usage + +```bash +go install github.com/jjti/go-spancheck/cmd/spancheck@latest +spancheck ./... +``` + +## Example + +```bash +spancheck -checks 'end,set-status,record-error' ./... +``` + +```go +func _() error { + // span.End is not called on all paths, possible memory leak + // span.SetStatus is not called on all paths + // span.RecordError is not called on all paths + _, span := otel.Tracer("foo").Start(context.Background(), "bar") + + if true { + // return can be reached without calling span.End + // return can be reached without calling span.SetStatus + // return can be reached without calling span.RecordError + return errors.New("err") + } + + return nil // return can be reached without calling span.End +} +``` + +## Configuration + +Only the `span.End()` check is enabled by default. The others can be enabled with `-checks 'end,set-status,record-error'`. + +```txt +$ spancheck -h +... +Flags: + -checks string + comma-separated list of checks to enable (options: end, set-status, record-error) (default "end") + -ignore-check-signatures string + comma-separated list of regex for function signatures that disable checks on errors +``` + +### Ignore check signatures + +The `span.SetStatus()` and `span.RecordError()` checks warn when there is: + +1. a path to return statement +1. that returns an error +1. without a call (to `SetStatus` or `RecordError`, respectively) + +But it's convenient to call `SetStatus` and `RecordError` from utility methods [[1](https://andydote.co.uk/2023/09/19/tracing-is-better/#step-2-wrap-the-errors)]. To support that, the `ignore-*-check-signatures` settings will suppress warnings if the configured function is present in the path. 
+ +For example, by default, the code below would have warnings as shown: + +```go +func task(ctx context.Context) error { + ctx, span := otel.Tracer("foo").Start(ctx, "bar") // span.SetStatus is not called on all paths + defer span.End() + + if err := subTask(ctx); err != nil { + return recordErr(span, err) // return can be reached without calling span.SetStatus + } + + return nil +} + +func recordErr(span trace.Span, err error) error { + span.SetStatus(codes.Error, err.Error()) + span.RecordError(err) + return err +} +``` + +The warnings are can be ignored by setting `-ignore-check-signatures` flag to `recordErr`: + +```bash +spancheck -checks 'end,set-status,record-error' -ignore-check-signatures 'recordErr' ./... +``` + +## Problem Statement + +Tracing is a celebrated [[1](https://andydote.co.uk/2023/09/19/tracing-is-better/),[2](https://charity.wtf/2022/08/15/live-your-best-life-with-structured-events/)] and well marketed [[3](https://docs.datadoghq.com/tracing/),[4](https://www.honeycomb.io/distributed-tracing)] pillar of observability. But self-instrumented tracing requires a lot of easy-to-forget boilerplate: + +```go +import ( + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/codes" +) + +func task(ctx context.Context) error { + ctx, span := otel.Tracer("foo").Start(ctx, "bar") + defer span.End() // call `.End()` + + if err := subTask(ctx); err != nil { + span.SetStatus(codes.Error, err.Error()) // call SetStatus(codes.Error, msg) to set status:error + span.RecordError(err) // call RecordError(err) to record an error event + return err + } + + return nil +} +``` + +For spans to be _really_ useful, developers need to: + +1. call `span.End()` always +1. call `span.SetStatus(codes.Error, msg)` on error +1. call `span.RecordError(err)` on error +1. call `span.SetAttributes()` liberally + +- OpenTelemetry: [Creating spans](https://opentelemetry.io/docs/instrumentation/go/manual/#creating-spans) +- Uptrace: [OpenTelemetry Go Tracing API](https://uptrace.dev/opentelemetry/go-tracing.html#quickstart) + +This linter helps developers with steps 1-3. + +## Checks + +This linter supports three checks, each documented below. Only the check for `span.End()` is enabled by default. See [Configuration](#configuration) for instructions on enabling the others. + +### `span.End()` + +Enabled by default. + +Not calling `End` can cause memory leaks and prevents spans from being closed. + +> Any Span that is created MUST also be ended. This is the responsibility of the user. Implementations of this API may leak memory or other resources if Spans are not ended. + +[source: trace.go](https://github.com/open-telemetry/opentelemetry-go/blob/98b32a6c3a87fbee5d34c063b9096f416b250897/trace/trace.go#L523) + +```go +func task(ctx context.Context) error { + otel.Tracer("app").Start(ctx, "foo") // span is unassigned, probable memory leak + _, span := otel.Tracer().Start(ctx, "foo") // span.End is not called on all paths, possible memory leak + return nil // return can be reached without calling span.End +} +``` + +### `span.SetStatus(codes.Error, "msg")` + +Disabled by default. Enable with `-checks 'set-status'`. + +Developers should call `SetStatus` on spans. The status attribute is an important, first-class attribute: + +1. observability platforms and APMs differentiate "success" vs "failure" using [span's status codes](https://docs.datadoghq.com/tracing/metrics/). +1. 
telemetry collector agents, like the [Open Telemetry Collector's Tail Sampling Processor](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/tailsamplingprocessor/README.md#:~:text=Sampling%20Processor.-,status_code,-%3A%20Sample%20based%20upon), are configurable to sample `Error` spans at a higher rate than `OK` spans. +1. observability platforms, like [DataDog, have trace retention filters that use spans' status](https://docs.datadoghq.com/tracing/trace_pipeline/trace_retention/). In other words, `status:error` spans often receive special treatment with the assumption they are more useful for debugging. And forgetting to set the status can lead to spans, with useful debugging information, being dropped. + +```go +func _() error { + _, span := otel.Tracer("foo").Start(context.Background(), "bar") // span.SetStatus is not called on all paths + defer span.End() + + if err := subTask(); err != nil { + span.RecordError(err) + return errors.New(err) // return can be reached without calling span.SetStatus + } + + return nil +} +``` + +OpenTelemetry docs: [Set span status](https://opentelemetry.io/docs/instrumentation/go/manual/#set-span-status). + +### `span.RecordError(err)` + +Disabled by default. Enable with `-checks 'record-error'`. + +Calling `RecordError` creates a new exception-type [event (structured log message)](https://opentelemetry.io/docs/concepts/signals/traces/#span-events) on the span. This is recommended to capture the error's stack trace. + +```go +func _() error { + _, span := otel.Tracer("foo").Start(context.Background(), "bar") // span.RecordError is not called on all paths + defer span.End() + + if err := subTask(); err != nil { + span.SetStatus(codes.Error, err.Error()) + return errors.New(err) // return can be reached without calling span.RecordError + } + + return nil +} +``` + +OpenTelemetry docs: [Record errors](https://opentelemetry.io/docs/instrumentation/go/manual/#record-errors). + +Note: this check is not applied to [OpenCensus spans](https://pkg.go.dev/go.opencensus.io/trace#SpanInterface) because they have no `RecordError` method. + +## Attribution + +This linter is the product of liberal copying of: + +- [github.com/golang/tools/go/analysis/passes/lostcancel](https://github.com/golang/tools/tree/master/go/analysis/passes/lostcancel) (half the linter) +- [github.com/tomarrell/wrapcheck](https://github.com/tomarrell/wrapcheck) (error type checking and config) +- [github.com/Antonboom/testifylint](https://github.com/Antonboom/testifylint) (README) +- [github.com/ghostiam/protogetter](https://github.com/ghostiam/protogetter/blob/main/testdata/Makefile) (test setup) diff --git a/vendor/github.com/jjti/go-spancheck/config.go b/vendor/github.com/jjti/go-spancheck/config.go new file mode 100644 index 0000000000..4005f49e01 --- /dev/null +++ b/vendor/github.com/jjti/go-spancheck/config.go @@ -0,0 +1,141 @@ +package spancheck + +import ( + "flag" + "fmt" + "log" + "regexp" + "strings" +) + +// Check is a type of check that can be enabled or disabled. +type Check int + +const ( + // EndCheck if enabled, checks that span.End() is called after span creation and before the function returns. + EndCheck Check = iota + + // SetStatusCheck if enabled, checks that `span.SetStatus(codes.Error, msg)` is called when returning an error. + SetStatusCheck + + // RecordErrorCheck if enabled, checks that span.RecordError(err) is called when returning an error. 
+ RecordErrorCheck +) + +func (c Check) String() string { + switch c { + case EndCheck: + return "end" + case SetStatusCheck: + return "set-status" + case RecordErrorCheck: + return "record-error" + default: + return "" + } +} + +var ( + // Checks is a list of all checks by name. + Checks = map[string]Check{ + EndCheck.String(): EndCheck, + SetStatusCheck.String(): SetStatusCheck, + RecordErrorCheck.String(): RecordErrorCheck, + } +) + +// Config is a configuration for the spancheck analyzer. +type Config struct { + fs flag.FlagSet + + // EnabledChecks is a list of checks to enable by name. + EnabledChecks []string + + // IgnoreChecksSignaturesSlice is a slice of strings that are turned into + // the IgnoreSetStatusCheckSignatures regex. + IgnoreChecksSignaturesSlice []string + + endCheckEnabled bool + setStatusEnabled bool + recordErrorEnabled bool + + // ignoreChecksSignatures is a regex that, if matched, disables the + // SetStatus and RecordError checks on error. + ignoreChecksSignatures *regexp.Regexp +} + +// NewDefaultConfig returns a new Config with default values. +func NewDefaultConfig() *Config { + return &Config{ + EnabledChecks: []string{EndCheck.String()}, + } +} + +// finalize parses checks and signatures from the public string slices of Config. +func (c *Config) finalize() { + c.parseSignatures() + + checks := parseChecks(c.EnabledChecks) + c.endCheckEnabled = contains(checks, EndCheck) + c.setStatusEnabled = contains(checks, SetStatusCheck) + c.recordErrorEnabled = contains(checks, RecordErrorCheck) +} + +// parseSignatures sets the Ignore*CheckSignatures regex from the string slices. +func (c *Config) parseSignatures() { + if c.ignoreChecksSignatures == nil && len(c.IgnoreChecksSignaturesSlice) > 0 { + if len(c.IgnoreChecksSignaturesSlice) == 1 && c.IgnoreChecksSignaturesSlice[0] == "" { + return + } + + c.ignoreChecksSignatures = createRegex(c.IgnoreChecksSignaturesSlice) + } +} + +func parseChecks(checksSlice []string) []Check { + if len(checksSlice) == 0 { + return nil + } + + checks := []Check{} + for _, check := range checksSlice { + checkName := strings.TrimSpace(check) + if checkName == "" { + continue + } + + check, ok := Checks[checkName] + if !ok { + continue + } + + checks = append(checks, check) + } + + return checks +} + +func createRegex(sigs []string) *regexp.Regexp { + if len(sigs) == 0 { + return nil + } + + regex := fmt.Sprintf("(%s)", strings.Join(sigs, "|")) + regexCompiled, err := regexp.Compile(regex) + if err != nil { + log.Default().Print("[WARN] failed to compile regex from signature flag", "regex", regex, "err", err) + return nil + } + + return regexCompiled +} + +func contains(s []Check, e Check) bool { + for _, a := range s { + if a == e { + return true + } + } + + return false +} diff --git a/vendor/github.com/jjti/go-spancheck/doc.go b/vendor/github.com/jjti/go-spancheck/doc.go new file mode 100644 index 0000000000..f9dec043f6 --- /dev/null +++ b/vendor/github.com/jjti/go-spancheck/doc.go @@ -0,0 +1,37 @@ +// Package spancheck defines a linter that checks for mistakes with OTEL trace spans. +// +// # Analyzer spancheck +// +// spancheck: check for mistakes with OpenTelemetry trace spans. 
+// +// Common mistakes with OTEL trace spans include forgetting to call End: +// +// func(ctx context.Context) { +// ctx, span := otel.Tracer("app").Start(ctx, "span") +// // defer span.End() should be here +// +// // do stuff +// } +// +// Forgetting to set an Error status: +// +// ctx, span := otel.Tracer("app").Start(ctx, "span") +// defer span.End() +// +// if err := task(); err != nil { +// // span.SetStatus(codes.Error, err.Error()) should be here +// span.RecordError(err) +// return fmt.Errorf("failed to run task: %w", err) +// } +// +// Forgetting to record the Error: +// +// ctx, span := otel.Tracer("app").Start(ctx, "span") +// defer span.End() +// +// if err := task(); err != nil { +// span.SetStatus(codes.Error, err.Error()) +// // span.RecordError(err) should be here +// return fmt.Errorf("failed to run task: %w", err) +// } +package spancheck diff --git a/vendor/github.com/jjti/go-spancheck/go.work b/vendor/github.com/jjti/go-spancheck/go.work new file mode 100644 index 0000000000..7d0a87b9e1 --- /dev/null +++ b/vendor/github.com/jjti/go-spancheck/go.work @@ -0,0 +1,8 @@ +go 1.20 + +use ( + . + ./testdata/base + ./testdata/disableerrorchecks + ./testdata/enableall +) diff --git a/vendor/github.com/jjti/go-spancheck/go.work.sum b/vendor/github.com/jjti/go-spancheck/go.work.sum new file mode 100644 index 0000000000..f3cdef790d --- /dev/null +++ b/vendor/github.com/jjti/go-spancheck/go.work.sum @@ -0,0 +1,3 @@ +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= +golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= diff --git a/vendor/github.com/jjti/go-spancheck/spancheck.go b/vendor/github.com/jjti/go-spancheck/spancheck.go new file mode 100644 index 0000000000..ebfc1ac68c --- /dev/null +++ b/vendor/github.com/jjti/go-spancheck/spancheck.go @@ -0,0 +1,431 @@ +package spancheck + +import ( + "go/ast" + "go/types" + "log" + "regexp" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/ctrlflow" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" + "golang.org/x/tools/go/cfg" +) + +const stackLen = 32 + +// spanType differentiates span types. +type spanType int + +const ( + spanUnset spanType = iota // not a span + spanOpenTelemetry // from go.opentelemetry.io/otel + spanOpenCensus // from go.opencensus.io/trace +) + +var ( + // this approach stolen from errcheck + // https://github.com/kisielk/errcheck/blob/7f94c385d0116ccc421fbb4709e4a484d98325ee/errcheck/errcheck.go#L22 + errorType = types.Universe.Lookup("error").Type().Underlying().(*types.Interface) +) + +// NewAnalyzerWithConfig returns a new analyzer configured with the Config passed in. +// Its config can be set for testing. 
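`NewAnalyzerWithConfig` below is the exported constructor for building the analyzer with a custom `Config`. A minimal standalone driver, assuming only the `Config` fields and `NewDefaultConfig` shown in the vendored config.go above; the `singlechecker` wiring and the `recordErr` signature are illustrative, not part of this module:

```go
package main

import (
	"golang.org/x/tools/go/analysis/singlechecker"

	spancheck "github.com/jjti/go-spancheck"
)

func main() {
	// NewDefaultConfig enables only the span.End check; the other two
	// checks are opted into by name, mirroring the -checks flag.
	cfg := spancheck.NewDefaultConfig()
	cfg.EnabledChecks = []string{"end", "set-status", "record-error"}

	// Hypothetical helper name; paths that call a function matching this
	// regex are exempt from the SetStatus/RecordError checks.
	cfg.IgnoreChecksSignaturesSlice = []string{"recordErr"}

	singlechecker.Main(spancheck.NewAnalyzerWithConfig(cfg))
}
```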
+func NewAnalyzerWithConfig(config *Config) *analysis.Analyzer { + return newAnalyzer(config) +} + +func newAnalyzer(config *Config) *analysis.Analyzer { + config.finalize() + + return &analysis.Analyzer{ + Name: "spancheck", + Doc: "Checks for mistakes with OpenTelemetry/Census spans.", + Flags: config.fs, + Run: run(config), + Requires: []*analysis.Analyzer{ + ctrlflow.Analyzer, + inspect.Analyzer, + }, + } +} + +func run(config *Config) func(*analysis.Pass) (interface{}, error) { + return func(pass *analysis.Pass) (interface{}, error) { + inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + + nodeFilter := []ast.Node{ + (*ast.FuncLit)(nil), // f := func() {} + (*ast.FuncDecl)(nil), // func foo() {} + } + inspect.Preorder(nodeFilter, func(n ast.Node) { + runFunc(pass, n, config) + }) + + return nil, nil + } +} + +type spanVar struct { + stmt ast.Node + id *ast.Ident + vr *types.Var + spanType spanType +} + +// runFunc checks if the node is a function, has a span, and the span never has SetStatus set. +func runFunc(pass *analysis.Pass, node ast.Node, config *Config) { + // copying https://cs.opensource.google/go/x/tools/+/master:go/analysis/passes/lostcancel/lostcancel.go + + // Find scope of function node + var funcScope *types.Scope + switch v := node.(type) { + case *ast.FuncLit: + funcScope = pass.TypesInfo.Scopes[v.Type] + case *ast.FuncDecl: + funcScope = pass.TypesInfo.Scopes[v.Type] + } + + // Maps each span variable to its defining ValueSpec/AssignStmt. + spanVars := make(map[*ast.Ident]spanVar) + + // Find the set of span vars to analyze. + stack := make([]ast.Node, 0, stackLen) + ast.Inspect(node, func(n ast.Node) bool { + switch n.(type) { + case *ast.FuncLit: + if len(stack) > 0 { + return false // don't stray into nested functions + } + case nil: + stack = stack[:len(stack)-1] // pop + return true + } + stack = append(stack, n) // push + + // Look for [{AssignStmt,ValueSpec} CallExpr SelectorExpr]: + // + // ctx, span := otel.Tracer("app").Start(...) + // ctx, span = otel.Tracer("app").Start(...) + // var ctx, span = otel.Tracer("app").Start(...) + sType, sStart := isSpanStart(pass.TypesInfo, n) + if !sStart || !isCall(stack[len(stack)-2]) { + return true + } + + stmt := stack[len(stack)-3] + id := getID(stmt) + if id == nil { + pass.ReportRangef(n, "span is unassigned, probable memory leak") + return true + } + + if id.Name == "_" { + pass.ReportRangef(id, "span is unassigned, probable memory leak") + } else if v, ok := pass.TypesInfo.Uses[id].(*types.Var); ok { + // If the span variable is defined outside function scope, + // do not analyze it. + if funcScope.Contains(v.Pos()) { + spanVars[id] = spanVar{ + vr: v, + stmt: stmt, + id: id, + spanType: sType, + } + } + } else if v, ok := pass.TypesInfo.Defs[id].(*types.Var); ok { + spanVars[id] = spanVar{ + vr: v, + stmt: stmt, + id: id, + spanType: sType, + } + } + + return true + }) + + if len(spanVars) == 0 { + return // no need to inspect CFG + } + + // Obtain the CFG. + cfgs := pass.ResultOf[ctrlflow.Analyzer].(*ctrlflow.CFGs) + var g *cfg.CFG + var sig *types.Signature + switch node := node.(type) { + case *ast.FuncDecl: + sig, _ = pass.TypesInfo.Defs[node.Name].Type().(*types.Signature) + g = cfgs.FuncDecl(node) + case *ast.FuncLit: + sig, _ = pass.TypesInfo.Types[node.Type].Type.(*types.Signature) + g = cfgs.FuncLit(node) + } + if sig == nil { + return // missing type information + } + + // Check for missing calls. 
+ for _, sv := range spanVars { + if config.endCheckEnabled { + // Check if there's no End to the span. + if ret := missingSpanCalls(pass, g, sv, "End", func(pass *analysis.Pass, ret *ast.ReturnStmt) *ast.ReturnStmt { return ret }, nil); ret != nil { + pass.ReportRangef(sv.stmt, "%s.End is not called on all paths, possible memory leak", sv.vr.Name()) + pass.ReportRangef(ret, "return can be reached without calling %s.End", sv.vr.Name()) + } + } + + if config.setStatusEnabled { + // Check if there's no SetStatus to the span setting an error. + if ret := missingSpanCalls(pass, g, sv, "SetStatus", returnsErr, config.ignoreChecksSignatures); ret != nil { + pass.ReportRangef(sv.stmt, "%s.SetStatus is not called on all paths", sv.vr.Name()) + pass.ReportRangef(ret, "return can be reached without calling %s.SetStatus", sv.vr.Name()) + } + } + + if config.recordErrorEnabled && sv.spanType == spanOpenTelemetry { // RecordError only exists in OpenTelemetry + // Check if there's no RecordError to the span setting an error. + if ret := missingSpanCalls(pass, g, sv, "RecordError", returnsErr, config.ignoreChecksSignatures); ret != nil { + pass.ReportRangef(sv.stmt, "%s.RecordError is not called on all paths", sv.vr.Name()) + pass.ReportRangef(ret, "return can be reached without calling %s.RecordError", sv.vr.Name()) + } + } + } +} + +// isSpanStart reports whether n is tracer.Start() +func isSpanStart(info *types.Info, n ast.Node) (spanType, bool) { + sel, ok := n.(*ast.SelectorExpr) + if !ok { + return spanUnset, false + } + + switch sel.Sel.Name { + case "Start": // https://github.com/open-telemetry/opentelemetry-go/blob/98b32a6c3a87fbee5d34c063b9096f416b250897/trace/trace.go#L523 + obj, ok := info.Uses[sel.Sel] + return spanOpenTelemetry, ok && obj.Pkg().Path() == "go.opentelemetry.io/otel/trace" + case "StartSpan": // https://pkg.go.dev/go.opencensus.io/trace#StartSpan + obj, ok := info.Uses[sel.Sel] + return spanOpenCensus, ok && obj.Pkg().Path() == "go.opencensus.io/trace" + case "StartSpanWithRemoteParent": // https://github.com/census-instrumentation/opencensus-go/blob/v0.24.0/trace/trace_api.go#L66 + obj, ok := info.Uses[sel.Sel] + return spanOpenCensus, ok && obj.Pkg().Path() == "go.opencensus.io/trace" + default: + return spanUnset, false + } +} + +func isCall(n ast.Node) bool { + _, ok := n.(*ast.CallExpr) + return ok +} + +func getID(node ast.Node) *ast.Ident { + switch stmt := node.(type) { + case *ast.ValueSpec: + if len(stmt.Names) > 1 { + return stmt.Names[1] + } + case *ast.AssignStmt: + if len(stmt.Lhs) > 1 { + id, _ := stmt.Lhs[1].(*ast.Ident) + return id + } + } + return nil +} + +// missingSpanCalls finds a path through the CFG, from stmt (which defines +// the 'span' variable v) to a return statement, that doesn't call the passed selector on the span. +func missingSpanCalls( + pass *analysis.Pass, + g *cfg.CFG, + sv spanVar, + selName string, + checkErr func(pass *analysis.Pass, ret *ast.ReturnStmt) *ast.ReturnStmt, + ignoreCheckSig *regexp.Regexp, +) *ast.ReturnStmt { + // usesCall reports whether stmts contain a use of the selName call on variable v. 
+ usesCall := func(pass *analysis.Pass, stmts []ast.Node) bool { + found, reAssigned := false, false + for _, subStmt := range stmts { + stack := []ast.Node{} + ast.Inspect(subStmt, func(n ast.Node) bool { + switch n := n.(type) { + case *ast.FuncLit: + if len(stack) > 0 { + return false // don't stray into nested functions + } + case *ast.CallExpr: + if ident, ok := n.Fun.(*ast.Ident); ok { + fnSig := pass.TypesInfo.ObjectOf(ident).String() + if ignoreCheckSig != nil && ignoreCheckSig.MatchString(fnSig) { + found = true + return false + } + } + case nil: + stack = stack[:len(stack)-1] // pop + return true + } + stack = append(stack, n) // push + + // Check whether the span was assigned over top of its old value. + _, spanStart := isSpanStart(pass.TypesInfo, n) + if spanStart { + if id := getID(stack[len(stack)-3]); id != nil && id.Obj.Decl == sv.id.Obj.Decl { + reAssigned = true + return false + } + } + + if n, ok := n.(*ast.SelectorExpr); ok { + // Selector (End, SetStatus, RecordError) hit. + if n.Sel.Name == selName { + id, ok := n.X.(*ast.Ident) + found = ok && id.Obj.Decl == sv.id.Obj.Decl + } + + // Check if an ignore signature matches. + fnSig := pass.TypesInfo.ObjectOf(n.Sel).String() + if ignoreCheckSig != nil && ignoreCheckSig.MatchString(fnSig) { + found = true + } + } + + return !found + }) + } + return found && !reAssigned + } + + // blockUses computes "uses" for each block, caching the result. + memo := make(map[*cfg.Block]bool) + blockUses := func(pass *analysis.Pass, b *cfg.Block) bool { + res, ok := memo[b] + if !ok { + res = usesCall(pass, b.Nodes) + memo[b] = res + } + return res + } + + // Find the var's defining block in the CFG, + // plus the rest of the statements of that block. + var defBlock *cfg.Block + var rest []ast.Node +outer: + for _, b := range g.Blocks { + for i, n := range b.Nodes { + if n == sv.stmt { + defBlock = b + rest = b.Nodes[i+1:] + break outer + } + } + } + if defBlock == nil { + log.Default().Print("[ERROR] internal error: can't find defining block for span var") + } + + // Is the call "used" in the remainder of its defining block? + if usesCall(pass, rest) { + return nil + } + + // Does the defining block return without making the call? + if ret := defBlock.Return(); ret != nil { + return checkErr(pass, ret) + } + + // Search the CFG depth-first for a path, from defblock to a + // return block, in which v is never "used". + seen := make(map[*cfg.Block]bool) + var search func(blocks []*cfg.Block) *ast.ReturnStmt + search = func(blocks []*cfg.Block) *ast.ReturnStmt { + for _, b := range blocks { + if seen[b] { + continue + } + seen[b] = true + + // Prune the search if the block uses v. + if blockUses(pass, b) { + continue + } + + // Found path to return statement? 
+ if ret := returnsErr(pass, b.Return()); ret != nil { + return ret // found + } + + // Recur + if ret := returnsErr(pass, search(b.Succs)); ret != nil { + return ret + } + } + return nil + } + + return search(defBlock.Succs) +} + +func returnsErr(pass *analysis.Pass, ret *ast.ReturnStmt) *ast.ReturnStmt { + if ret == nil { + return nil + } + + for _, r := range ret.Results { + if isErrorType(pass.TypesInfo.TypeOf(r)) { + return ret + } + + if r, ok := r.(*ast.CallExpr); ok { + for _, err := range errorsByArg(pass, r) { + if err { + return ret + } + } + } + } + + return nil +} + +// errorsByArg returns a slice s such that +// len(s) == number of return types of call +// s[i] == true iff return type at position i from left is an error type +// +// copied from https://github.com/kisielk/errcheck/blob/master/errcheck/errcheck.go +func errorsByArg(pass *analysis.Pass, call *ast.CallExpr) []bool { + switch t := pass.TypesInfo.Types[call].Type.(type) { + case *types.Named: + // Single return + return []bool{isErrorType(t)} + case *types.Pointer: + // Single return via pointer + return []bool{isErrorType(t)} + case *types.Tuple: + // Multiple returns + s := make([]bool, t.Len()) + for i := 0; i < t.Len(); i++ { + switch et := t.At(i).Type().(type) { + case *types.Named: + // Single return + s[i] = isErrorType(et) + case *types.Pointer: + // Single return via pointer + s[i] = isErrorType(et) + default: + s[i] = false + } + } + return s + } + return []bool{false} +} + +func isErrorType(t types.Type) bool { + return types.Implements(t, errorType) +} diff --git a/vendor/github.com/kisielk/errcheck/errcheck/analyzer.go b/vendor/github.com/kisielk/errcheck/errcheck/analyzer.go index 68593cc9ad..82ab6298a9 100644 --- a/vendor/github.com/kisielk/errcheck/errcheck/analyzer.go +++ b/vendor/github.com/kisielk/errcheck/errcheck/analyzer.go @@ -31,7 +31,6 @@ func init() { } func runAnalyzer(pass *analysis.Pass) (interface{}, error) { - exclude := map[string]bool{} if !argExcludeOnly { for _, name := range DefaultExcludedSymbols { @@ -65,8 +64,9 @@ func runAnalyzer(pass *analysis.Pass) (interface{}, error) { for _, err := range v.errors { pass.Report(analysis.Diagnostic{ - Pos: pass.Fset.File(f.Pos()).Pos(err.Pos.Offset), - Message: "unchecked error", + Pos: pass.Fset.File(f.Pos()).Pos(err.Pos.Offset), + Message: "unchecked error", + Category: "errcheck", }) } diff --git a/vendor/github.com/kisielk/errcheck/errcheck/errcheck.go b/vendor/github.com/kisielk/errcheck/errcheck/errcheck.go index 0a4067f928..d61d348f77 100644 --- a/vendor/github.com/kisielk/errcheck/errcheck/errcheck.go +++ b/vendor/github.com/kisielk/errcheck/errcheck/errcheck.go @@ -167,7 +167,7 @@ func (c *Checker) LoadPackages(paths ...string) ([]*packages.Package, error) { buildFlags = append(buildFlags, fmt.Sprintf("-mod=%s", c.Mod)) } cfg := &packages.Config{ - Mode: packages.LoadAllSyntax, + Mode: packages.NeedSyntax | packages.NeedTypes | packages.NeedTypesInfo, Tests: !c.Exclusions.TestFiles, BuildFlags: buildFlags, } @@ -205,7 +205,7 @@ func (c *Checker) CheckPackage(pkg *packages.Package) Result { ignore := map[string]*regexp.Regexp{} // Apply SymbolRegexpsByPackage first so that if the same path appears in - // Packages, a more narrow regexp will be superceded by dotStar below. + // Packages, a more narrow regexp will be superseded by dotStar below. if regexps := c.Exclusions.SymbolRegexpsByPackage; regexps != nil { for pkg, re := range regexps { // TODO warn if previous entry overwritten? 
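The errcheck hunk below extends `visitor.Visit` with an `*ast.GenDecl` case and factors the assignment logic into a new `checkAssignment` helper, so `var` declarations with initializers are now inspected as well. A small sketch of the pattern that becomes reportable, assuming the blank-identifier check is enabled; the file name and call are hypothetical:

```go
package demo

import "os"

// With the new *ast.GenDecl case in Visit, var declarations that carry
// initializers are routed through checkAssignment just like assignment
// statements, so an error discarded into the blank identifier here can
// now be reported (when the blank-identifier check is enabled).
var _ = os.Remove("stale.lock")
```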
@@ -337,7 +337,7 @@ func (v *visitor) selectorName(call *ast.CallExpr) string { // names are returned. If the function is package-qualified (like "fmt.Printf()") // then just that function's fullName is returned. // -// Otherwise, we walk through all the potentially embeddded interfaces of the receiver +// Otherwise, we walk through all the potentially embedded interfaces of the receiver // the collect a list of type-qualified function names that we will check. func (v *visitor) namesForExcludeCheck(call *ast.CallExpr) []string { sel, fn, ok := v.selectorAndFunc(call) @@ -569,73 +569,98 @@ func (v *visitor) Visit(node ast.Node) ast.Visitor { if !v.ignoreCall(stmt.Call) && v.callReturnsError(stmt.Call) { v.addErrorAtPosition(stmt.Call.Lparen, stmt.Call) } + case *ast.GenDecl: + if stmt.Tok != token.VAR { + break + } + + for _, spec := range stmt.Specs { + vspec := spec.(*ast.ValueSpec) + + if len(vspec.Values) == 0 { + // ignore declarations w/o assignments + continue + } + + var lhs []ast.Expr + for _, name := range vspec.Names { + lhs = append(lhs, ast.Expr(name)) + } + v.checkAssignment(lhs, vspec.Values) + } + case *ast.AssignStmt: - if len(stmt.Rhs) == 1 { - // single value on rhs; check against lhs identifiers - if call, ok := stmt.Rhs[0].(*ast.CallExpr); ok { - if !v.blank { - break - } - if v.ignoreCall(call) { - break - } - isError := v.errorsByArg(call) - for i := 0; i < len(stmt.Lhs); i++ { - if id, ok := stmt.Lhs[i].(*ast.Ident); ok { - // We shortcut calls to recover() because errorsByArg can't - // check its return types for errors since it returns interface{}. - if id.Name == "_" && (v.isRecover(call) || isError[i]) { - v.addErrorAtPosition(id.NamePos, call) - } + v.checkAssignment(stmt.Lhs, stmt.Rhs) + + default: + } + return v +} + +func (v *visitor) checkAssignment(lhs, rhs []ast.Expr) { + if len(rhs) == 1 { + // single value on rhs; check against lhs identifiers + if call, ok := rhs[0].(*ast.CallExpr); ok { + if !v.blank { + return + } + if v.ignoreCall(call) { + return + } + isError := v.errorsByArg(call) + for i := 0; i < len(lhs); i++ { + if id, ok := lhs[i].(*ast.Ident); ok { + // We shortcut calls to recover() because errorsByArg can't + // check its return types for errors since it returns interface{}. + if id.Name == "_" && (v.isRecover(call) || isError[i]) { + v.addErrorAtPosition(id.NamePos, call) } } - } else if assert, ok := stmt.Rhs[0].(*ast.TypeAssertExpr); ok { - if !v.asserts { - break - } - if assert.Type == nil { - // type switch - break - } - if len(stmt.Lhs) < 2 { - // assertion result not read - v.addErrorAtPosition(stmt.Rhs[0].Pos(), nil) - } else if id, ok := stmt.Lhs[1].(*ast.Ident); ok && v.blank && id.Name == "_" { - // assertion result ignored - v.addErrorAtPosition(id.NamePos, nil) - } } - } else { - // multiple value on rhs; in this case a call can't return - // multiple values. 
Assume len(stmt.Lhs) == len(stmt.Rhs) - for i := 0; i < len(stmt.Lhs); i++ { - if id, ok := stmt.Lhs[i].(*ast.Ident); ok { - if call, ok := stmt.Rhs[i].(*ast.CallExpr); ok { - if !v.blank { - continue - } - if v.ignoreCall(call) { - continue - } - if id.Name == "_" && v.callReturnsError(call) { - v.addErrorAtPosition(id.NamePos, call) - } - } else if assert, ok := stmt.Rhs[i].(*ast.TypeAssertExpr); ok { - if !v.asserts { - continue - } - if assert.Type == nil { - // Shouldn't happen anyway, no multi assignment in type switches - continue - } - v.addErrorAtPosition(id.NamePos, nil) + } else if assert, ok := rhs[0].(*ast.TypeAssertExpr); ok { + if !v.asserts { + return + } + if assert.Type == nil { + // type switch + return + } + if len(lhs) < 2 { + // assertion result not read + v.addErrorAtPosition(rhs[0].Pos(), nil) + } else if id, ok := lhs[1].(*ast.Ident); ok && v.blank && id.Name == "_" { + // assertion result ignored + v.addErrorAtPosition(id.NamePos, nil) + } + } + } else { + // multiple value on rhs; in this case a call can't return + // multiple values. Assume len(lhs) == len(rhs) + for i := 0; i < len(lhs); i++ { + if id, ok := lhs[i].(*ast.Ident); ok { + if call, ok := rhs[i].(*ast.CallExpr); ok { + if !v.blank { + continue + } + if v.ignoreCall(call) { + continue + } + if id.Name == "_" && v.callReturnsError(call) { + v.addErrorAtPosition(id.NamePos, call) + } + } else if assert, ok := rhs[i].(*ast.TypeAssertExpr); ok { + if !v.asserts { + continue } + if assert.Type == nil { + // Shouldn't happen anyway, no multi assignment in type switches + continue + } + v.addErrorAtPosition(id.NamePos, nil) } } } - default: } - return v } func isErrorType(t types.Type) bool { diff --git a/vendor/github.com/kisielk/errcheck/errcheck/excludes.go b/vendor/github.com/kisielk/errcheck/errcheck/excludes.go index 22db9fe11d..a783b5a763 100644 --- a/vendor/github.com/kisielk/errcheck/errcheck/excludes.go +++ b/vendor/github.com/kisielk/errcheck/errcheck/excludes.go @@ -3,64 +3,60 @@ package errcheck import ( "bufio" "bytes" - "io/ioutil" + "os" "strings" ) -var ( - // DefaultExcludedSymbols is a list of symbol names that are usually excluded from checks by default. - // - // Note, that they still need to be explicitly copied to Checker.Exclusions.Symbols - DefaultExcludedSymbols = []string{ - // bytes - "(*bytes.Buffer).Write", - "(*bytes.Buffer).WriteByte", - "(*bytes.Buffer).WriteRune", - "(*bytes.Buffer).WriteString", +// DefaultExcludedSymbols is a list of symbol names that are usually excluded from checks by default. 
+// +// Note, that they still need to be explicitly copied to Checker.Exclusions.Symbols +var DefaultExcludedSymbols = []string{ + // bytes + "(*bytes.Buffer).Write", + "(*bytes.Buffer).WriteByte", + "(*bytes.Buffer).WriteRune", + "(*bytes.Buffer).WriteString", - // fmt - "fmt.Errorf", - "fmt.Print", - "fmt.Printf", - "fmt.Println", - "fmt.Fprint(*bytes.Buffer)", - "fmt.Fprintf(*bytes.Buffer)", - "fmt.Fprintln(*bytes.Buffer)", - "fmt.Fprint(*strings.Builder)", - "fmt.Fprintf(*strings.Builder)", - "fmt.Fprintln(*strings.Builder)", - "fmt.Fprint(os.Stderr)", - "fmt.Fprintf(os.Stderr)", - "fmt.Fprintln(os.Stderr)", + // fmt + "fmt.Print", + "fmt.Printf", + "fmt.Println", + "fmt.Fprint(*bytes.Buffer)", + "fmt.Fprintf(*bytes.Buffer)", + "fmt.Fprintln(*bytes.Buffer)", + "fmt.Fprint(*strings.Builder)", + "fmt.Fprintf(*strings.Builder)", + "fmt.Fprintln(*strings.Builder)", + "fmt.Fprint(os.Stderr)", + "fmt.Fprintf(os.Stderr)", + "fmt.Fprintln(os.Stderr)", - // io - "(*io.PipeReader).CloseWithError", - "(*io.PipeWriter).CloseWithError", + // io + "(*io.PipeReader).CloseWithError", + "(*io.PipeWriter).CloseWithError", - // math/rand - "math/rand.Read", - "(*math/rand.Rand).Read", + // math/rand + "math/rand.Read", + "(*math/rand.Rand).Read", - // strings - "(*strings.Builder).Write", - "(*strings.Builder).WriteByte", - "(*strings.Builder).WriteRune", - "(*strings.Builder).WriteString", + // strings + "(*strings.Builder).Write", + "(*strings.Builder).WriteByte", + "(*strings.Builder).WriteRune", + "(*strings.Builder).WriteString", - // hash - "(hash.Hash).Write", - } -) + // hash + "(hash.Hash).Write", +} // ReadExcludes reads an excludes file, a newline delimited file that lists // patterns for which to allow unchecked errors. // // Lines that start with two forward slashes are considered comments and are ignored. -// func ReadExcludes(path string) ([]string, error) { var excludes []string - buf, err := ioutil.ReadFile(path) + buf, err := os.ReadFile(path) if err != nil { return nil, err } diff --git a/vendor/github.com/kkHAIKE/contextcheck/.gitignore b/vendor/github.com/kkHAIKE/contextcheck/.gitignore new file mode 100644 index 0000000000..fc1b400c8a --- /dev/null +++ b/vendor/github.com/kkHAIKE/contextcheck/.gitignore @@ -0,0 +1,18 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +# vendor/ + +.idea +.DS_Store diff --git a/vendor/github.com/kkHAIKE/contextcheck/LICENSE b/vendor/github.com/kkHAIKE/contextcheck/LICENSE new file mode 100644 index 0000000000..99e1c482a1 --- /dev/null +++ b/vendor/github.com/kkHAIKE/contextcheck/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2021 sylvia.wang + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
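The vendored contextcheck sources that follow export `NewAnalyzer(Configuration)`. A minimal way to drive it outside golangci-lint, assuming only that constructor and the `DisableFact` field shown in contextcheck.go further below; the `singlechecker` harness is illustrative:

```go
package main

import (
	"golang.org/x/tools/go/analysis/singlechecker"

	"github.com/kkHAIKE/contextcheck"
)

func main() {
	// DisableFact switches the linter from exporting go/analysis package
	// facts to the in-process fact map kept in contextcheck.go below.
	cfg := contextcheck.Configuration{DisableFact: true}
	singlechecker.Main(contextcheck.NewAnalyzer(cfg))
}
```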
diff --git a/vendor/github.com/kkHAIKE/contextcheck/Makefile b/vendor/github.com/kkHAIKE/contextcheck/Makefile new file mode 100644 index 0000000000..9321e9de39 --- /dev/null +++ b/vendor/github.com/kkHAIKE/contextcheck/Makefile @@ -0,0 +1,5 @@ +build: + @GO111MODULE=on go build -ldflags '-s -w' -o contextcheck ./cmd/contextcheck/main.go + +install: + @GO111MODULE=on go install -ldflags '-s -w' ./cmd/contextcheck diff --git a/vendor/github.com/kkHAIKE/contextcheck/README.md b/vendor/github.com/kkHAIKE/contextcheck/README.md new file mode 100644 index 0000000000..2cc7b2e489 --- /dev/null +++ b/vendor/github.com/kkHAIKE/contextcheck/README.md @@ -0,0 +1,157 @@ +[![CircleCI](https://circleci.com/gh/sylvia7788/contextcheck.svg?style=svg)](https://circleci.com/gh/sylvia7788/contextcheck) + + +# contextcheck + +`contextcheck` is a static analysis tool, it is used to check whether the function uses a non-inherited context, which will result in a broken call link. + +For example: + +```go +func call1(ctx context.Context) { + ... + + ctx = getNewCtx(ctx) + call2(ctx) // OK + + call2(context.Background()) // Non-inherited new context, use function like `context.WithXXX` instead + + call3() // Function `call3` should pass the context parameter + call4() // Function `call4->call3` should pass the context parameter + ... +} + +func call2(ctx context.Context) { + ... +} + +func call3() { + ctx := context.TODO() + call2(ctx) +} + +func call4() { + call3() +} + + +// if you want none-inherit ctx, use this function +func getNewCtx(ctx context.Context) (newCtx context.Context) { + ... + return +} + +/* ---------- check net/http.HandleFunc ---------- */ + +func call5(ctx context.Context, w http.ResponseWriter, r *http.Request) { +} + +func call6(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + call5(ctx, w, r) + call5(context.Background(), w, r) // Non-inherited new context, use function like `context.WithXXX` or `r.Context` instead +} + +func call7(in bool, w http.ResponseWriter, r *http.Request) { + call5(r.Context(), w, r) + call5(context.Background(), w, r) +} + +func call8() { + http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + call5(r.Context(), w, r) + call5(context.Background(), w, r) // Non-inherited new context, use function like `context.WithXXX` or `r.Context` instead + + call6(w, r) + + // call7 should be like `func call7(ctx context.Context, in bool, w http.ResponseWriter, r *http.Request)` + call7(true, w, r) // Function `call7` should pass the context parameter + }) +} +``` + +## Tips +### need break ctx inheritance +eg: [issue](https://github.com/kkHAIKE/contextcheck/issues/2). + +```go +func call1(ctx context.Context) { + ... + + newCtx, cancel := NoInheritCancel(ctx) + defer cancel() + + call2(newCtx) + ... +} + +func call2(ctx context.Context) { + ... +} + +func NoInheritCancel(_ context.Context) (context.Context,context.CancelFunc) { + return context.WithCancel(context.Background()) +} +``` + +### skip check specify function +You can add `// nolint: contextcheck` in function decl doc comment, to skip this linter in some false-positive case. + +```go +// nolint: contextcheck +func call1() { + doSomeThing(context.Background()) // add nolint will no issuss for that +} + +func call2(ctx context.Context) { + call1() +} + +func call3() { + call2(context.Background()) +} +``` + +### force mark specify function have server-side http.Request parameter +default behavior is mark http.HandlerFunc or a function use r.Context(). 
+ +```go +// @contextcheck(req_has_ctx) +func writeErr(w http.ResponseWriter, r *http.Request, err error) { + doSomeThing(r.Context()) +} + +func handler(w http.ResponseWriter, r *http.Request) { + ... + if err != nil { + writeErr(w, r, err) + return + } + ... +} +``` + +## Installation + +You can get `contextcheck` by `go get` command. + +```bash +$ go get -u github.com/kkHAIKE/contextcheck +``` + +or build yourself. + +```bash +$ make build +$ make install +``` + +## Usage + +Invoke `contextcheck` with your package name + +```bash +$ contextcheck ./... +$ # or +$ go vet -vettool=`which contextcheck` ./... +``` diff --git a/vendor/github.com/kkHAIKE/contextcheck/contextcheck.go b/vendor/github.com/kkHAIKE/contextcheck/contextcheck.go new file mode 100644 index 0000000000..c9ad0101fe --- /dev/null +++ b/vendor/github.com/kkHAIKE/contextcheck/contextcheck.go @@ -0,0 +1,835 @@ +package contextcheck + +import ( + "go/ast" + "go/types" + "regexp" + "strconv" + "strings" + "sync" + + "github.com/gostaticanalysis/analysisutil" + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/buildssa" + "golang.org/x/tools/go/packages" + "golang.org/x/tools/go/ssa" +) + +type Configuration struct { + DisableFact bool +} + +var pkgprefix string + +func NewAnalyzer(cfg Configuration) *analysis.Analyzer { + analyzer := &analysis.Analyzer{ + Name: "contextcheck", + Doc: "check whether the function uses a non-inherited context", + Run: NewRun(nil, cfg.DisableFact), + Requires: []*analysis.Analyzer{ + buildssa.Analyzer, + }, + } + analyzer.Flags.StringVar(&pkgprefix, "pkgprefix", "", "filter init pkgs (only for cmd)") + + if !cfg.DisableFact { + analyzer.FactTypes = append(analyzer.FactTypes, (*ctxFact)(nil)) + } + + return analyzer +} + +const ( + ctxPkg = "context" + ctxName = "Context" + + httpPkg = "net/http" + httpRes = "ResponseWriter" + httpReq = "Request" +) + +const ( + CtxIn int = 1 << iota // ctx in function's param + CtxOut // ctx in function's results + CtxInField // ctx in function's field param +) + +type entryType int + +const ( + EntryNone entryType = iota + EntryNormal // without ctx in + EntryWithCtx // has ctx in + EntryWithHttpHandler // is http handler +) + +var ( + pkgFactMap = make(map[*types.Package]ctxFact) + pkgFactMu sync.RWMutex +) + +type resInfo struct { + Valid bool + Funcs []string + + // reuse for doc + ReqCtx bool + Skip bool + + EntryType entryType +} + +type ctxFact map[string]resInfo + +func (*ctxFact) String() string { return "ctxCheck" } +func (*ctxFact) AFact() {} + +type runner struct { + pass *analysis.Pass + ctxTyp *types.Named + ctxPTyp *types.Pointer + skipFile map[*ast.File]bool + + httpResTyps []types.Type + httpReqTyps []types.Type + + currentFact ctxFact + disableFact bool +} + +func getPkgRoot(pkg string) string { + arr := strings.Split(pkg, "/") + if len(arr) < 3 { + return arr[0] + } + if strings.IndexByte(arr[0], '.') == -1 { + return arr[0] + } + return strings.Join(arr[:3], "/") +} + +func NewRun(pkgs []*packages.Package, disableFact bool) func(pass *analysis.Pass) (interface{}, error) { + m := make(map[string]bool) + for _, pkg := range pkgs { + m[getPkgRoot(pkg.PkgPath)] = true + } + return func(pass *analysis.Pass) (interface{}, error) { + // skip different repo + if len(m) > 0 && !m[getPkgRoot(pass.Pkg.Path())] { + return nil, nil + } + if len(m) == 0 && pkgprefix != "" && !strings.HasPrefix(pass.Pkg.Path(), pkgprefix) { + return nil, nil + } + + r := &runner{disableFact: disableFact} + r.run(pass) + return nil, nil + } +} + +func 
(r *runner) run(pass *analysis.Pass) { + r.pass = pass + pssa := pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA) + funcs := pssa.SrcFuncs + + // collect ctx obj + var ok bool + r.ctxTyp, r.ctxPTyp, ok = r.getRequiedType(pssa, ctxPkg, ctxName) + if !ok { + return + } + + // collect http obj + r.collectHttpTyps(pssa) + + r.skipFile = make(map[*ast.File]bool) + r.currentFact = make(ctxFact) + + type entryInfo struct { + f *ssa.Function // entryfunc + tp entryType // entrytype + } + var tmpFuncs []entryInfo + for _, f := range funcs { + // skip checked function + key := f.RelString(nil) + if _, ok := r.currentFact[key]; ok { + continue + } + + if entryType := r.checkIsEntry(f); entryType == EntryNormal { + if _, ok := r.getValue(key, f); ok { + continue + } + // record the result of nomal function + checkingMap := make(map[string]bool) + checkingMap[key] = true + r.setFact(key, r.checkFuncWithoutCtx(f, checkingMap), f.Name()) + continue + } else if entryType == EntryWithCtx || entryType == EntryWithHttpHandler { + tmpFuncs = append(tmpFuncs, entryInfo{f: f, tp: entryType}) + } + } + + for _, v := range tmpFuncs { + r.checkFuncWithCtx(v.f, v.tp) + } + + if len(r.currentFact) > 0 { + if r.disableFact { + setPkgFact(pass.Pkg, r.currentFact) + } else { + pass.ExportPackageFact(&r.currentFact) + } + } +} + +func (r *runner) getRequiedType(pssa *buildssa.SSA, path, name string) (obj *types.Named, pobj *types.Pointer, ok bool) { + pkg := pssa.Pkg.Prog.ImportedPackage(path) + if pkg == nil { + return + } + + objTyp := pkg.Type(name) + if objTyp == nil { + return + } + obj, ok = objTyp.Object().Type().(*types.Named) + if !ok { + return + } + pobj = types.NewPointer(obj) + + return +} + +func (r *runner) collectHttpTyps(pssa *buildssa.SSA) { + objRes, _, ok := r.getRequiedType(pssa, httpPkg, httpRes) + if ok { + r.httpResTyps = append(r.httpResTyps, objRes) + } + + _, pobjReq, ok := r.getRequiedType(pssa, httpPkg, httpReq) + if ok { + r.httpReqTyps = append(r.httpReqTyps, pobjReq) + } +} + +func (r *runner) noImportedContextAndHttp(f *ssa.Function) (ret bool) { + if !f.Pos().IsValid() { + return false + } + + file := analysisutil.File(r.pass, f.Pos()) + if file == nil { + return false + } + + if skip, has := r.skipFile[file]; has { + return skip + } + defer func() { + r.skipFile[file] = ret + }() + + for _, impt := range file.Imports { + path, err := strconv.Unquote(impt.Path.Value) + if err != nil { + continue + } + path = analysisutil.RemoveVendor(path) + if path == ctxPkg || path == httpPkg { + return false + } + } + + return true +} + +func (r *runner) checkIsEntry(f *ssa.Function) (ret entryType) { + // if r.noImportedContextAndHttp(f) { + // return EntryNormal + // } + key := "entry:" + f.RelString(nil) + res, ok := r.getValue(key, f) + if ok { + return res.EntryType + } + defer func() { + r.currentFact[key] = resInfo{EntryType: ret} + }() + + ctxIn, ctxOut := r.checkIsCtx(f) + if ctxOut { + // skip the function which generate ctx + return EntryNone + } else if ctxIn { + // has ctx in, ignore *http.Request.Context() + return EntryWithCtx + } + + reqctx, skip := r.docFlag(f) + + // check is `func handler(w http.ResponseWriter, r *http.Request) {}` + // or use '// @contextcheck(req_has_ctx)' + if r.checkIsHttpHandler(f, reqctx) { + return EntryWithHttpHandler + } + + if skip { + return EntryNone + } + + return EntryNormal +} + +func (r *runner) docFlag(f *ssa.Function) (reqctx, skip bool) { + for _, v := range r.getDocFromFunc(f) { + if len(nolintRe.FindString(v.Text)) > 0 && 
strings.Contains(v.Text, "contextcheck") { + skip = true + } else if strings.HasPrefix(v.Text, "// @contextcheck(req_has_ctx)") { + reqctx = true + } + } + return +} + +var nolintRe = regexp.MustCompile(`^//\s?nolint:`) + +func (r *runner) getDocFromFunc(f *ssa.Function) []*ast.Comment { + file := analysisutil.File(r.pass, f.Pos()) + if file == nil { + return nil + } + + // only support FuncDecl comment + var fd *ast.FuncDecl + for _, v := range file.Decls { + if tmp, ok := v.(*ast.FuncDecl); ok && tmp.Name.Pos() == f.Pos() { + fd = tmp + break + } + } + if fd == nil || fd.Doc == nil || len(fd.Doc.List) == 0 { + return nil + } + return fd.Doc.List +} + +func (r *runner) checkIsCtx(f *ssa.Function) (in, out bool) { + // check params + tuple := f.Signature.Params() + for i := 0; i < tuple.Len(); i++ { + if r.isCtxType(tuple.At(i).Type()) { + in = true + break + } + } + + // check freevars + for _, param := range f.FreeVars { + if r.isCtxType(param.Type()) { + in = true + break + } + } + + // check results + tuple = f.Signature.Results() + for i := 0; i < tuple.Len(); i++ { + if r.isCtxType(tuple.At(i).Type()) { + out = true + break + } + } + return +} + +func (r *runner) checkIsHttpHandler(f *ssa.Function, reqctx bool) bool { + var hasReq bool + tuple := f.Signature.Params() + for i := 0; i < tuple.Len(); i++ { + if r.isHttpReqType(tuple.At(i).Type()) { + hasReq = true + break + } + } + if !hasReq { + return false + } + if reqctx { + return true + } + + // must be `func f(w http.ResponseWriter, r *http.Request) {}` + if f.Signature.Results().Len() == 0 && tuple.Len() == 2 && + r.isHttpResType(tuple.At(0).Type()) && r.isHttpReqType(tuple.At(1).Type()) { + return true + } + + // check if use r.Context() + return f.Blocks != nil && len(r.getHttpReqCtx(f, true)) > 0 +} + +func (r *runner) collectCtxRef(f *ssa.Function, isHttpHandler bool) (refMap map[ssa.Instruction]bool, ok bool) { + ok = true + refMap = make(map[ssa.Instruction]bool) + checkedRefMap := make(map[ssa.Value]bool) + storeInstrs := make(map[*ssa.Store]bool) + phiInstrs := make(map[*ssa.Phi]bool) + + var checkRefs func(val ssa.Value, fromAddr bool) + var checkInstr func(instr ssa.Instruction, fromAddr bool) + + checkRefs = func(val ssa.Value, fromAddr bool) { + if val == nil || val.Referrers() == nil { + return + } + + if checkedRefMap[val] { + return + } + checkedRefMap[val] = true + + for _, instr := range *val.Referrers() { + checkInstr(instr, fromAddr) + } + } + + checkInstr = func(instr ssa.Instruction, fromAddr bool) { + switch i := instr.(type) { + case ssa.CallInstruction: + refMap[i] = true + tp := r.getCallInstrCtxType(i) + if tp&CtxOut != 0 { + // collect referrers of the results + checkRefs(i.Value(), false) + return + } + case *ssa.Store: + if fromAddr { + // collect all store to judge whether it's right value is valid + storeInstrs[i] = true + } else { + checkRefs(i.Addr, true) + } + case *ssa.UnOp: + checkRefs(i, false) + case *ssa.MakeClosure: + for _, param := range i.Bindings { + if r.isCtxType(param.Type()) { + refMap[i] = true + break + } + } + case *ssa.Extract: + // only care about ctx + if r.isCtxType(i.Type()) { + checkRefs(i, false) + } + case *ssa.Phi: + phiInstrs[i] = true + checkRefs(i, false) + case *ssa.TypeAssert: + // ctx.(*bm.Context) + } + } + + if isHttpHandler { + for _, v := range r.getHttpReqCtx(f, false) { + checkRefs(v, false) + } + } else { + for _, param := range f.Params { + if r.isCtxType(param.Type()) { + checkRefs(param, false) + } + } + + for _, param := range f.FreeVars { + if 
r.isCtxType(param.Type()) { + checkRefs(param, false) + } + } + } + + for instr := range storeInstrs { + if !checkedRefMap[instr.Val] { + r.pass.Reportf(instr.Pos(), "Non-inherited new context, use function like `context.WithXXX` instead") + ok = false + } + } + + for instr := range phiInstrs { + for _, v := range instr.Edges { + if !checkedRefMap[v] { + r.pass.Reportf(instr.Pos(), "Non-inherited new context, use function like `context.WithXXX` instead") + ok = false + } + } + } + + return +} + +func (r *runner) getHttpReqCtx(f *ssa.Function, least1 bool) (rets []ssa.Value) { + checkedRefMap := make(map[ssa.Value]bool) + + var checkRefs func(val ssa.Value, fromAddr bool) + var checkInstr func(instr ssa.Instruction, fromAddr bool) + + checkRefs = func(val ssa.Value, fromAddr bool) { + if val == nil || val.Referrers() == nil { + return + } + + if checkedRefMap[val] { + return + } + checkedRefMap[val] = true + + for _, instr := range *val.Referrers() { + checkInstr(instr, fromAddr) + } + } + + checkInstr = func(instr ssa.Instruction, fromAddr bool) { + switch i := instr.(type) { + case ssa.CallInstruction: + // r.Context() only has one recv + if len(i.Common().Args) != 1 { + break + } + + // find r.Context() + if r.getCallInstrCtxType(i)&CtxOut != CtxOut { + break + } + + // check is r.Context + f := r.getFunction(instr) + if f == nil || f.Name() != ctxName { + break + } + if f.Signature.Recv() != nil { + // collect the return of r.Context + rets = append(rets, i.Value()) + if least1 { + return + } + } + case *ssa.Store: + if !fromAddr { + checkRefs(i.Addr, true) + } + case *ssa.UnOp: + checkRefs(i, false) + case *ssa.Phi: + checkRefs(i, false) + case *ssa.MakeClosure: + case *ssa.Extract: + // http.Request can only be input + } + } + + for _, param := range f.Params { + if r.isHttpReqType(param.Type()) { + checkRefs(param, false) + } + } + + return +} + +func (r *runner) checkFuncWithCtx(f *ssa.Function, tp entryType) { + isHttpHandler := tp == EntryWithHttpHandler + refMap, ok := r.collectCtxRef(f, isHttpHandler) + if !ok { + return + } + + for _, b := range f.Blocks { + for _, instr := range b.Instrs { + tp, ok := r.getCtxType(instr) + if !ok { + continue + } + + // checked in collectCtxRef, skipped + if tp&CtxOut != 0 { + continue + } + + if tp&CtxIn != 0 { + if !refMap[instr] { + if isHttpHandler { + r.pass.Reportf(instr.Pos(), "Non-inherited new context, use function like `context.WithXXX` or `r.Context` instead") + } else { + r.pass.Reportf(instr.Pos(), "Non-inherited new context, use function like `context.WithXXX` instead") + } + } + } + + ff := r.getFunction(instr) + if ff == nil { + continue + } + + key := ff.RelString(nil) + res, ok := r.getValue(key, ff) + if ok { + if !res.Valid { + r.pass.Reportf(instr.Pos(), "Function `%s` should pass the context parameter", strings.Join(reverse(res.Funcs), "->")) + } + } + } + } +} + +func (r *runner) checkFuncWithoutCtx(f *ssa.Function, checkingMap map[string]bool) (ret bool) { + ret = true + orgKey := f.RelString(nil) + var seted bool + for _, b := range f.Blocks { + for _, instr := range b.Instrs { + tp, ok := r.getCtxType(instr) + if !ok { + continue + } + + if tp&CtxOut != 0 { + continue + } + + // it is considered illegal as long as ctx is in the input and not in *struct X + if tp&CtxIn != 0 { + if tp&CtxInField == 0 { + ret = false + } + } + + ff := r.getFunction(instr) + if ff == nil { + continue + } + + key := ff.RelString(nil) + res, ok := r.getValue(key, ff) + if ok { + if !res.Valid { + ret = false + + // save the call link + if 
!seted { + seted = true + r.setFact(orgKey, res.Valid, res.Funcs...) + } + } + continue + } + + // check is thunk or bound + if strings.HasSuffix(key, "$thunk") || strings.HasSuffix(key, "$bound") { + continue + } + + if entryType := r.checkIsEntry(ff); entryType == EntryNormal { + // cannot get info from fact, skip + if ff.Blocks == nil { + continue + } + + // handler cycle call + if checkingMap[key] { + continue + } + checkingMap[key] = true + + valid := r.checkFuncWithoutCtx(ff, checkingMap) + r.setFact(key, valid, ff.Name()) + if res, ok := r.getValue(key, ff); ok && !valid && !seted { + seted = true + r.setFact(orgKey, valid, res.Funcs...) + } + if !valid { + ret = false + } + } + } + } + return ret +} + +func (r *runner) getCtxType(instr ssa.Instruction) (tp int, ok bool) { + switch i := instr.(type) { + case ssa.CallInstruction: + tp = r.getCallInstrCtxType(i) + ok = true + case *ssa.MakeClosure: + tp = r.getMakeClosureCtxType(i) + ok = true + } + return +} + +func (r *runner) getCallInstrCtxType(c ssa.CallInstruction) (tp int) { + // check params + for _, v := range c.Common().Args { + if r.isCtxType(v.Type()) { + if vv, ok := v.(*ssa.UnOp); ok { + if _, ok := vv.X.(*ssa.FieldAddr); ok { + tp |= CtxInField + } + } + + tp |= CtxIn + break + } + } + + // check results + if v := c.Value(); v != nil { + if r.isCtxType(v.Type()) { + tp |= CtxOut + } else { + tuple, ok := v.Type().(*types.Tuple) + if !ok { + return + } + for i := 0; i < tuple.Len(); i++ { + if r.isCtxType(tuple.At(i).Type()) { + tp |= CtxOut + break + } + } + } + } + + return +} + +func (r *runner) getMakeClosureCtxType(c *ssa.MakeClosure) (tp int) { + for _, v := range c.Bindings { + if r.isCtxType(v.Type()) { + if vv, ok := v.(*ssa.UnOp); ok { + if _, ok := vv.X.(*ssa.FieldAddr); ok { + tp |= CtxInField + } + } + + tp |= CtxIn + break + } + } + return +} + +func (r *runner) getFunction(instr ssa.Instruction) (f *ssa.Function) { + switch i := instr.(type) { + case ssa.CallInstruction: + if i.Common().IsInvoke() { + return + } + + switch c := i.Common().Value.(type) { + case *ssa.Function: + f = c + case *ssa.MakeClosure: + // captured in the outer layer + case *ssa.Builtin, *ssa.UnOp, *ssa.Lookup, *ssa.Phi: + // skipped + case *ssa.Extract, *ssa.Call: + // function is a result of a call, skipped + case *ssa.Parameter: + // function is a param, skipped + } + case *ssa.MakeClosure: + f = i.Fn.(*ssa.Function) + } + return +} + +func (r *runner) isCtxType(tp types.Type) bool { + return types.Identical(tp, r.ctxTyp) || types.Identical(tp, r.ctxPTyp) +} + +func (r *runner) isHttpResType(tp types.Type) bool { + for _, v := range r.httpResTyps { + if ok := types.Identical(v, v); ok { + return true + } + } + return false +} + +func (r *runner) isHttpReqType(tp types.Type) bool { + for _, v := range r.httpReqTyps { + if ok := types.Identical(tp, v); ok { + return true + } + } + return false +} + +func (r *runner) getValue(key string, f *ssa.Function) (res resInfo, ok bool) { + res, ok = r.currentFact[key] + if ok { + return + } + + if f.Pkg == nil { + return + } + + var fact ctxFact + var got bool + if r.disableFact { + fact, got = getPkgFact(f.Pkg.Pkg) + } else { + got = r.pass.ImportPackageFact(f.Pkg.Pkg, &fact) + } + if got { + res, ok = fact[key] + } + return +} + +func (r *runner) setFact(key string, valid bool, funcs ...string) { + var names []string + if !valid { + names = append(r.currentFact[key].Funcs, funcs...) 
+ } + r.currentFact[key] = resInfo{ + Valid: valid, + Funcs: names, + } +} + +// setPkgFact save fact to mem +func setPkgFact(pkg *types.Package, fact ctxFact) { + pkgFactMu.Lock() + pkgFactMap[pkg] = fact + pkgFactMu.Unlock() +} + +// getPkgFact get fact from mem +func getPkgFact(pkg *types.Package) (fact ctxFact, ok bool) { + pkgFactMu.RLock() + fact, ok = pkgFactMap[pkg] + pkgFactMu.RUnlock() + return +} + +func reverse(arr1 []string) (arr2 []string) { + l := len(arr1) + if l == 0 { + return + } + arr2 = make([]string, l) + for i := 0; i <= l/2; i++ { + arr2[i] = arr1[l-1-i] + arr2[l-1-i] = arr1[i] + } + return +} diff --git a/vendor/github.com/kunwardeep/paralleltest/pkg/paralleltest/paralleltest.go b/vendor/github.com/kunwardeep/paralleltest/pkg/paralleltest/paralleltest.go index c7da52a200..1aecc1a515 100644 --- a/vendor/github.com/kunwardeep/paralleltest/pkg/paralleltest/paralleltest.go +++ b/vendor/github.com/kunwardeep/paralleltest/pkg/paralleltest/paralleltest.go @@ -7,7 +7,6 @@ import ( "strings" "golang.org/x/tools/go/analysis" - "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/ast/inspector" ) @@ -16,28 +15,36 @@ It also checks that the t.Parallel is used if multiple tests cases are run as pa As part of ensuring parallel tests works as expected it checks for reinitialising of the range value over the test cases.(https://tinyurl.com/y6555cy6)` -var Analyzer = &analysis.Analyzer{ - Name: "paralleltest", - Doc: Doc, - Run: run, - Flags: flags(), - Requires: []*analysis.Analyzer{inspect.Analyzer}, +func NewAnalyzer() *analysis.Analyzer { + return newParallelAnalyzer().analyzer } -const ignoreMissingFlag = "i" - -func flags() flag.FlagSet { - options := flag.NewFlagSet("", flag.ExitOnError) - options.Bool(ignoreMissingFlag, false, "ignore missing calls to t.Parallel") - return *options +// parallelAnalyzer is an internal analyzer that makes options available to a +// run pass. It wraps an `analysis.Analyzer` that should be returned for +// linters. 
+type parallelAnalyzer struct { + analyzer *analysis.Analyzer + ignoreMissing bool + ignoreMissingSubtests bool } -type boolValue bool +func newParallelAnalyzer() *parallelAnalyzer { + a := ¶llelAnalyzer{} -func run(pass *analysis.Pass) (interface{}, error) { + var flags flag.FlagSet + flags.BoolVar(&a.ignoreMissing, "i", false, "ignore missing calls to t.Parallel") + flags.BoolVar(&a.ignoreMissingSubtests, "ignoremissingsubtests", false, "ignore missing calls to t.Parallel in subtests") - ignoreMissing := pass.Analyzer.Flags.Lookup(ignoreMissingFlag).Value.(flag.Getter).Get().(bool) + a.analyzer = &analysis.Analyzer{ + Name: "paralleltest", + Doc: Doc, + Run: a.run, + Flags: flags, + } + return a +} +func (a *parallelAnalyzer) run(pass *analysis.Pass) (interface{}, error) { inspector := inspector.New(pass.Files) nodeFilter := []ast.Node{ @@ -47,8 +54,10 @@ func run(pass *analysis.Pass) (interface{}, error) { inspector.Preorder(nodeFilter, func(node ast.Node) { funcDecl := node.(*ast.FuncDecl) var funcHasParallelMethod, + funcCantParallelMethod, rangeStatementOverTestCasesExists, - rangeStatementHasParallelMethod bool + rangeStatementHasParallelMethod, + rangeStatementCantParallelMethod bool var loopVariableUsedInRun *string var numberOfTestRun int var positionOfTestRunNode []ast.Node @@ -70,20 +79,29 @@ func run(pass *analysis.Pass) (interface{}, error) { funcHasParallelMethod = methodParallelIsCalledInTestFunction(n, testVar) } + // Check if the test calls t.Setenv, cannot be used in parallel tests or tests with parallel ancestors + if !funcCantParallelMethod { + funcCantParallelMethod = methodSetenvIsCalledInTestFunction(n, testVar) + } + // Check if the t.Run within the test function is calling t.Parallel if methodRunIsCalledInTestFunction(n, testVar) { // n is a call to t.Run; find out the name of the subtest's *testing.T parameter. 
innerTestVar := getRunCallbackParameterName(n) hasParallel := false + cantParallel := false numberOfTestRun++ ast.Inspect(v, func(p ast.Node) bool { if !hasParallel { hasParallel = methodParallelIsCalledInTestFunction(p, innerTestVar) } + if !cantParallel { + cantParallel = methodSetenvIsCalledInTestFunction(p, innerTestVar) + } return true }) - if !hasParallel { + if !hasParallel && !cantParallel { positionOfTestRunNode = append(positionOfTestRunNode, n) } } @@ -115,6 +133,10 @@ func run(pass *analysis.Pass) (interface{}, error) { rangeStatementHasParallelMethod = methodParallelIsCalledInMethodRun(r.X, innerTestVar) } + if !rangeStatementCantParallelMethod { + rangeStatementCantParallelMethod = methodSetenvIsCalledInMethodRun(r.X, innerTestVar) + } + if loopVariableUsedInRun == nil { if run, ok := r.X.(*ast.CallExpr); ok { loopVariableUsedInRun = loopVarReferencedInRun(run, loopVars, pass.TypesInfo) @@ -127,13 +149,18 @@ func run(pass *analysis.Pass) (interface{}, error) { } } - if !ignoreMissing && !funcHasParallelMethod { + // Descendents which call Setenv, also prevent tests from calling Parallel + if rangeStatementCantParallelMethod { + funcCantParallelMethod = true + } + + if !a.ignoreMissing && !funcHasParallelMethod && !funcCantParallelMethod { pass.Reportf(node.Pos(), "Function %s missing the call to method parallel\n", funcDecl.Name.Name) } if rangeStatementOverTestCasesExists && rangeNode != nil { - if !rangeStatementHasParallelMethod { - if !ignoreMissing { + if !rangeStatementHasParallelMethod && !rangeStatementCantParallelMethod { + if !a.ignoreMissing && !a.ignoreMissingSubtests { pass.Reportf(rangeNode.Pos(), "Range statement for test %s missing the call to method parallel in test Run\n", funcDecl.Name.Name) } } else if loopVariableUsedInRun != nil { @@ -142,10 +169,10 @@ func run(pass *analysis.Pass) (interface{}, error) { } // Check if the t.Run is more than one as there is no point making one test parallel - if !ignoreMissing { + if !a.ignoreMissing && !a.ignoreMissingSubtests { if numberOfTestRun > 1 && len(positionOfTestRunNode) > 0 { for _, n := range positionOfTestRunNode { - pass.Reportf(n.Pos(), "Function %s has missing the call to method parallel in the test run\n", funcDecl.Name.Name) + pass.Reportf(n.Pos(), "Function %s missing the call to method parallel in the test run\n", funcDecl.Name.Name) } } } @@ -155,15 +182,23 @@ func run(pass *analysis.Pass) (interface{}, error) { } func methodParallelIsCalledInMethodRun(node ast.Node, testVar string) bool { - var methodParallelCalled bool + return targetMethodIsCalledInMethodRun(node, testVar, "Parallel") +} + +func methodSetenvIsCalledInMethodRun(node ast.Node, testVar string) bool { + return targetMethodIsCalledInMethodRun(node, testVar, "Setenv") +} + +func targetMethodIsCalledInMethodRun(node ast.Node, testVar, targetMethod string) bool { + var called bool // nolint: gocritic switch callExp := node.(type) { case *ast.CallExpr: for _, arg := range callExp.Args { - if !methodParallelCalled { + if !called { ast.Inspect(arg, func(n ast.Node) bool { - if !methodParallelCalled { - methodParallelCalled = methodParallelIsCalledInRunMethod(n, testVar) + if !called { + called = exprCallHasMethod(n, testVar, targetMethod) return true } return false @@ -171,11 +206,7 @@ func methodParallelIsCalledInMethodRun(node ast.Node, testVar string) bool { } } } - return methodParallelCalled -} - -func methodParallelIsCalledInRunMethod(node ast.Node, testVar string) bool { - return exprCallHasMethod(node, testVar, "Parallel") + return 
called } func methodParallelIsCalledInTestFunction(node ast.Node, testVar string) bool { @@ -189,6 +220,11 @@ func methodRunIsCalledInRangeStatement(node ast.Node, testVar string) bool { func methodRunIsCalledInTestFunction(node ast.Node, testVar string) bool { return exprCallHasMethod(node, testVar, "Run") } + +func methodSetenvIsCalledInTestFunction(node ast.Node, testVar string) bool { + return exprCallHasMethod(node, testVar, "Setenv") +} + func exprCallHasMethod(node ast.Node, receiverName, methodName string) bool { // nolint: gocritic switch n := node.(type) { @@ -249,7 +285,9 @@ func isTestFunction(funcDecl *ast.FuncDecl) (bool, string) { if selectExpr, ok := starExp.X.(*ast.SelectorExpr); ok { if selectExpr.Sel.Name == testMethodStruct { if s, ok := selectExpr.X.(*ast.Ident); ok { - return s.Name == testMethodPackageType, param.Names[0].Name + if len(param.Names) > 0 { + return s.Name == testMethodPackageType, param.Names[0].Name + } } } } diff --git a/vendor/github.com/kyoh86/exportloopref/.goreleaser.yml b/vendor/github.com/kyoh86/exportloopref/.goreleaser.yml index 22ff44040c..95d44aaac3 100644 --- a/vendor/github.com/kyoh86/exportloopref/.goreleaser.yml +++ b/vendor/github.com/kyoh86/exportloopref/.goreleaser.yml @@ -1,43 +1,51 @@ +# yaml-language-server: $schema=https://goreleaser.com/static/schema.json + project_name: exportloopref -release: - github: - owner: kyoh86 - name: exportloopref -brews: -- install: | - bin.install "exportloopref" - github: - owner: kyoh86 - name: homebrew-tap - folder: Formula - homepage: https://github.com/kyoh86/exportloopref - description: An analyzer that finds exporting pointers for loop variables. builds: -- goos: - - linux - - darwin - - windows - goarch: - - amd64 - - "386" - main: ./cmd/exportloopref - ldflags: -s -w -X main.version={{.Version}} -X main.commit={{.Commit}} -X main.date={{.Date}} - binary: exportloopref + - id: default + goos: + - linux + - darwin + - windows + goarch: + - amd64 + - arm64 + - "386" + main: ./cmd/exportloopref + binary: exportloopref +brews: + - install: | + bin.install "exportloopref" + tap: + owner: kyoh86 + name: homebrew-tap + folder: Formula + homepage: https://github.com/kyoh86/exportloopref + description: An analyzer that finds exporting pointers for loop variables. + license: MIT +nfpms: + - builds: + - default + maintainer: kyoh86 + homepage: https://github.com/kyoh86/exportloopref + description: An analyzer that finds exporting pointers for loop variables. + license: MIT + formats: + - apk + - deb + - rpm archives: -- id: gzip - format: tar.gz - format_overrides: - - goos: windows - format: zip - name_template: "{{ .Binary }}_{{ .Version }}_{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}" - files: - - licence* - - LICENCE* - - license* - - LICENSE* - - readme* - - README* - - changelog* - - CHANGELOG* -snapshot: - name_template: SNAPSHOT-{{ .Commit }} + - id: gzip + format: tar.gz + format_overrides: + - goos: windows + format: zip + files: + - licence* + - LICENCE* + - license* + - LICENSE* + - readme* + - README* + - changelog* + - CHANGELOG* diff --git a/vendor/github.com/kyoh86/exportloopref/README.md b/vendor/github.com/kyoh86/exportloopref/README.md index 5c019c7380..0f581ffcee 100644 --- a/vendor/github.com/kyoh86/exportloopref/README.md +++ b/vendor/github.com/kyoh86/exportloopref/README.md @@ -1,6 +1,8 @@ # exportloopref An analyzer that finds exporting pointers for loop variables. 
+![](https://repository-images.githubusercontent.com/256768552/a1c5bb80-dd73-11eb-9453-e520f517e730) +Pin them all! [![PkgGoDev](https://pkg.go.dev/badge/kyoh86/exportloopref)](https://pkg.go.dev/kyoh86/exportloopref) [![Go Report Card](https://goreportcard.com/badge/github.com/kyoh86/exportloopref)](https://goreportcard.com/report/github.com/kyoh86/exportloopref) @@ -9,7 +11,7 @@ An analyzer that finds exporting pointers for loop variables. ## What's this? -Sample problem code from: https://github.com/kyoh86/exportloopref/blob/master/testdata/src/simple/simple.go +Sample problem code from: https://github.com/kyoh86/exportloopref/blob/main/testdata/src/simple/simple.go ```go package main @@ -109,7 +111,7 @@ func printp(p *int) { } ``` -ref: https://github.com/kyoh86/exportloopref/blob/master/testdata/src/fixed/fixed.go +ref: https://github.com/kyoh86/exportloopref/blob/main/testdata/src/fixed/fixed.go ## Sensing policy @@ -120,7 +122,7 @@ e.g. ```go var s Foo -for _, p := []int{10, 11, 12, 13} { +for _, p := range []int{10, 11, 12, 13} { s.Bar(&p) // If s stores the pointer, it will be bug. } ``` diff --git a/vendor/github.com/kyoh86/exportloopref/exportloopref.go b/vendor/github.com/kyoh86/exportloopref/exportloopref.go index 4d1671a060..d071d5c35f 100644 --- a/vendor/github.com/kyoh86/exportloopref/exportloopref.go +++ b/vendor/github.com/kyoh86/exportloopref/exportloopref.go @@ -17,21 +17,15 @@ var Analyzer = &analysis.Analyzer{ Run: run, RunDespiteErrors: true, Requires: []*analysis.Analyzer{inspect.Analyzer}, - // ResultType reflect.Type - // FactTypes []Fact -} - -func init() { - // Analyzer.Flags.StringVar(&v, "name", "default", "description") } func run(pass *analysis.Pass) (interface{}, error) { inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) search := &Searcher{ - Stats: map[token.Pos]struct{}{}, - Vars: map[token.Pos]map[token.Pos]struct{}{}, - Types: pass.TypesInfo.Types, + LoopVars: map[token.Pos]struct{}{}, + LocalVars: map[token.Pos]map[token.Pos]struct{}{}, + Pass: pass, } nodeFilter := []ast.Node{ @@ -42,50 +36,60 @@ func run(pass *analysis.Pass) (interface{}, error) { (*ast.UnaryExpr)(nil), } - inspect.WithStack(nodeFilter, func(n ast.Node, push bool, stack []ast.Node) bool { - id, insert, digg := search.Check(n, stack) - if id != nil { - dMsg := fmt.Sprintf("exporting a pointer for the loop variable %s", id.Name) - fMsg := fmt.Sprintf("loop variable %s should be pinned", id.Name) - var suggest []analysis.SuggestedFix - if insert != token.NoPos { - suggest = []analysis.SuggestedFix{{ - Message: fMsg, - TextEdits: []analysis.TextEdit{{ - Pos: insert, - End: insert, - NewText: []byte(fmt.Sprintf("%[1]s := %[1]s\n", id.Name)), - }}, - }} - } - d := analysis.Diagnostic{Pos: id.Pos(), - End: id.End(), - Message: dMsg, - Category: "exportloopref", - SuggestedFixes: suggest, - } - pass.Report(d) - } - return digg - }) + inspect.WithStack(nodeFilter, search.CheckAndReport) return nil, nil } type Searcher struct { - // Statement variables : map to collect positions that - // variables are declared like below. + // LoopVars is positions that loop-variables are declared like below. // - for , := range ... - // - var int - // - D := ... - Stats map[token.Pos]struct{} - // Local variables maps loop-position, decl-location to ignore - // safe pointers for variable which declared in the loop. 
- Vars map[token.Pos]map[token.Pos]struct{} - Types map[ast.Expr]types.TypeAndValue + // - for := ; ; + LoopVars map[token.Pos]struct{} + // LocalVars is positions of loops and the variables declared in them. + // Use this to determine if a point assignment is an export outside the loop. + LocalVars map[token.Pos]map[token.Pos]struct{} + + Pass *analysis.Pass +} + +// CheckAndReport inspects each node with stack. +// It is implemented as the I/F of the "golang.org/x/tools/go/analysis/passes/inspect".Analysis.WithStack. +func (s *Searcher) CheckAndReport(n ast.Node, push bool, stack []ast.Node) bool { + id, insert, digg := s.Check(n, stack) + if id == nil { + // no prob. + return digg + } + + // suggests fix + var suggest []analysis.SuggestedFix + if insert != token.NoPos { + suggest = []analysis.SuggestedFix{{ + Message: fmt.Sprintf("loop variable %s should be pinned", id.Name), + TextEdits: []analysis.TextEdit{{ + Pos: insert, + End: insert, + NewText: []byte(fmt.Sprintf("%[1]s := %[1]s\n", id.Name)), + }}, + }} + } + + // report a diagnostic + d := analysis.Diagnostic{Pos: id.Pos(), + End: id.End(), + Message: fmt.Sprintf("exporting a pointer for the loop variable %s", id.Name), + Category: "exportloopref", + SuggestedFixes: suggest, + } + s.Pass.Report(d) + return digg } -func (s *Searcher) Check(n ast.Node, stack []ast.Node) (*ast.Ident, token.Pos, bool) { +// Check each node and stack, whether it exports loop variables or not. +// Finding export, report the *ast.Ident of exported loop variable, +// and token.Pos to insert assignment to fix the diagnostic. +func (s *Searcher) Check(n ast.Node, stack []ast.Node) (loopVar *ast.Ident, insertPos token.Pos, digg bool) { switch typed := n.(type) { case *ast.RangeStmt: s.parseRangeStmt(typed) @@ -102,72 +106,92 @@ func (s *Searcher) Check(n ast.Node, stack []ast.Node) (*ast.Ident, token.Pos, b return nil, token.NoPos, true } +// parseRangeStmt will check range statement (i.e. `for , := range ...`), +// and collect positions of and . func (s *Searcher) parseRangeStmt(n *ast.RangeStmt) { - s.addStat(n.Key) - s.addStat(n.Value) + s.storeLoopVars(n.Key) + s.storeLoopVars(n.Value) } +// parseForStmt will check for statement (i.e. `for := ; ; `), +// and collect positions of . func (s *Searcher) parseForStmt(n *ast.ForStmt) { switch post := n.Post.(type) { case *ast.AssignStmt: // e.g. for p = head; p != nil; p = p.next for _, lhs := range post.Lhs { - s.addStat(lhs) + s.storeLoopVars(lhs) } case *ast.IncDecStmt: // e.g. for i := 0; i < n; i++ - s.addStat(post.X) + s.storeLoopVars(post.X) } } -func (s *Searcher) addStat(expr ast.Expr) { +func (s *Searcher) storeLoopVars(expr ast.Expr) { if id, ok := expr.(*ast.Ident); ok { - s.Stats[id.Pos()] = struct{}{} + s.LoopVars[id.Pos()] = struct{}{} } } +// parseDeclStmt will parse declaring statement (i.e. `var`, `type`, `const`), +// and store the position if it is "var" declaration and is in any loop. func (s *Searcher) parseDeclStmt(n *ast.DeclStmt, stack []ast.Node) { + genDecl, ok := n.Decl.(*ast.GenDecl) + if !ok { + // (dead branch) + // if the Decl is not GenDecl (i.e. 
`var`, `type` or `const` statement), it is ignored + return + } + if genDecl.Tok != token.VAR { + // if the Decl is not `var` (may be `type` or `const`), it is ignored + return + } + loop, _ := s.innermostLoop(stack) if loop == nil { return } - // Register declaring variables - if genDecl, ok := n.Decl.(*ast.GenDecl); ok && genDecl.Tok == token.VAR { - for _, spec := range genDecl.Specs { - for _, name := range spec.(*ast.ValueSpec).Names { - s.addVar(loop, name) - } + // Register declared variables + for _, spec := range genDecl.Specs { + for _, name := range spec.(*ast.ValueSpec).Names { + s.storeLocalVar(loop, name) } } } +// parseDeclStmt will parse assignment statement (i.e. ` = `), +// and store the position if it is . func (s *Searcher) parseAssignStmt(n *ast.AssignStmt, stack []ast.Node) { + if n.Tok != token.DEFINE { + // if the statement is simple assignment (without definement), it is ignored + return + } + loop, _ := s.innermostLoop(stack) if loop == nil { return } // Find statements declaring local variable - if n.Tok == token.DEFINE { - for _, h := range n.Lhs { - s.addVar(loop, h) - } + for _, h := range n.Lhs { + s.storeLocalVar(loop, h) } } -func (s *Searcher) addVar(loop ast.Node, expr ast.Expr) { +func (s *Searcher) storeLocalVar(loop ast.Node, expr ast.Expr) { loopPos := loop.Pos() id, ok := expr.(*ast.Ident) if !ok { return } - vars, ok := s.Vars[loopPos] + vars, ok := s.LocalVars[loopPos] if !ok { vars = map[token.Pos]struct{}{} } vars[id.Obj.Pos()] = struct{}{} - s.Vars[loopPos] = vars + s.LocalVars[loopPos] = vars } func insertionPosition(block *ast.BlockStmt) token.Pos { @@ -189,13 +213,15 @@ func (s *Searcher) innermostLoop(stack []ast.Node) (ast.Node, token.Pos) { return nil, token.NoPos } +// checkUnaryExpr check unary expression (i.e. like `-x`, `*p` or `&v`) and stack. +// THIS IS THE ESSENTIAL PART OF THIS PARSER. func (s *Searcher) checkUnaryExpr(n *ast.UnaryExpr, stack []ast.Node) (*ast.Ident, token.Pos, bool) { - loop, insert := s.innermostLoop(stack) - if loop == nil { + if n.Op != token.AND { return nil, token.NoPos, true } - if n.Op != token.AND { + loop, insert := s.innermostLoop(stack) + if loop == nil { return nil, token.NoPos, true } @@ -207,7 +233,7 @@ func (s *Searcher) checkUnaryExpr(n *ast.UnaryExpr, stack []ast.Node) (*ast.Iden // If the identity is not the loop statement variable, // it will not be reported. 
- if _, isStat := s.Stats[id.Obj.Pos()]; !isStat { + if _, isDecl := s.LoopVars[id.Obj.Pos()]; !isDecl { return nil, token.NoPos, true } @@ -266,12 +292,15 @@ func (s *Searcher) checkUnaryExpr(n *ast.UnaryExpr, stack []ast.Node) (*ast.Iden } func (s *Searcher) isVar(loop ast.Node, expr ast.Expr) bool { - vars := s.Vars[loop.Pos()] // map[token.Pos]struct{} + vars := s.LocalVars[loop.Pos()] // map[token.Pos]struct{} if vars == nil { return false } switch typed := expr.(type) { case (*ast.Ident): + if typed.Obj == nil { + return false // global var in another file (ref: #13) + } _, isVar := vars[typed.Obj.Pos()] return isVar case (*ast.IndexExpr): // like X[Y], check X @@ -287,7 +316,7 @@ func (s *Searcher) getIdentity(expr ast.Expr) *ast.Ident { switch typed := expr.(type) { case *ast.SelectorExpr: // Ignore if the parent is pointer ref (fix for #2) - if _, ok := s.Types[typed.X].Type.(*types.Pointer); ok { + if _, ok := s.Pass.TypesInfo.Types[typed.X].Type.(*types.Pointer); ok { return nil } diff --git a/vendor/github.com/ldez/tagliatelle/.golangci.yml b/vendor/github.com/ldez/tagliatelle/.golangci.yml index 53313e308e..ec5c5c7661 100644 --- a/vendor/github.com/ldez/tagliatelle/.golangci.yml +++ b/vendor/github.com/ldez/tagliatelle/.golangci.yml @@ -24,7 +24,7 @@ linters-settings: gofumpt: extra-rules: true depguard: - list-type: blacklist + list-type: denylist include-go-root: false packages: - github.com/sirupsen/logrus @@ -46,12 +46,19 @@ linters-settings: linters: enable-all: true disable: + - deadcode # deprecated + - exhaustivestruct # deprecated - golint # deprecated - - maligned # deprecated + - ifshort # deprecated - interfacer # deprecated + - maligned # deprecated + - nosnakecase # deprecated - scopelint # deprecated + - structcheck # deprecated + - varcheck # deprecated - sqlclosecheck # not relevant (SQL) - rowserrcheck # not relevant (SQL) + - execinquery # not relevant (SQL) - cyclop # duplicate of gocyclo - lll - dupl @@ -61,7 +68,7 @@ linters: - goerr113 - wrapcheck - exhaustive - - exhaustivestruct + - exhaustruct - testpackage - tparallel - paralleltest @@ -71,9 +78,11 @@ linters: - varnamelen - nilnil - errchkjson + - nonamedreturns issues: exclude-use-default: false max-per-linter: 0 max-same-issues: 0 - exclude: [] + exclude: + - 'package-comments: should have a package comment' diff --git a/vendor/github.com/ldez/tagliatelle/readme.md b/vendor/github.com/ldez/tagliatelle/readme.md index 85849eab43..55a544db81 100644 --- a/vendor/github.com/ldez/tagliatelle/readme.md +++ b/vendor/github.com/ldez/tagliatelle/readme.md @@ -11,10 +11,12 @@ Supported string casing: - `pascal` - `kebab` - `snake` +- `upperSnake` - `goCamel` Respects [Go's common initialisms](https://github.com/golang/lint/blob/83fdc39ff7b56453e3793356bcff3070b9b96445/lint.go#L770-L809) (e.g. HttpResponse -> HTTPResponse). - `goPascal` Respects [Go's common initialisms](https://github.com/golang/lint/blob/83fdc39ff7b56453e3793356bcff3070b9b96445/lint.go#L770-L809) (e.g. HttpResponse -> HTTPResponse). - `goKebab` Respects [Go's common initialisms](https://github.com/golang/lint/blob/83fdc39ff7b56453e3793356bcff3070b9b96445/lint.go#L770-L809) (e.g. HttpResponse -> HTTPResponse). - `goSnake` Respects [Go's common initialisms](https://github.com/golang/lint/blob/83fdc39ff7b56453e3793356bcff3070b9b96445/lint.go#L770-L809) (e.g. HttpResponse -> HTTPResponse). 
+- `header` - `upper` - `lower` @@ -44,18 +46,18 @@ Supported string casing: | NameJSON | NameJson | NameJSON | | UneTête | UneTête | UneTête | -| Source | Snake Case | Go Snake Case | -|----------------|------------------|------------------| -| GooID | goo_id | goo_ID | -| HTTPStatusCode | http_status_code | HTTP_status_code | -| FooBAR | foo_bar | foo_bar | -| URL | url | URL | -| ID | id | ID | -| hostIP | host_ip | host_IP | -| JSON | json | JSON | -| JSONName | json_name | JSON_name | -| NameJSON | name_json | name_JSON | -| UneTête | une_tête | une_tête | +| Source | Snake Case | Upper Snake Case | Go Snake Case | +|----------------|------------------|------------------|------------------| +| GooID | goo_id | GOO_ID | goo_ID | +| HTTPStatusCode | http_status_code | HTTP_STATUS_CODE | HTTP_status_code | +| FooBAR | foo_bar | FOO_BAR | foo_bar | +| URL | url | URL | URL | +| ID | id | ID | ID | +| hostIP | host_ip | HOST_IP | host_IP | +| JSON | json | JSON | JSON | +| JSONName | json_name | JSON_NAME | JSON_name | +| NameJSON | name_json | NAME_JSON | name_JSON | +| UneTête | une_tête | UNE_TÊTE | une_tête | | Source | Kebab Case | Go KebabCase | |----------------|------------------|------------------| @@ -70,6 +72,18 @@ Supported string casing: | NameJSON | name-json | name-JSON | | UneTête | une-tête | une-tête | +| Source | Header Case | +|----------------|------------------| +| GooID | Goo-Id | +| HTTPStatusCode | Http-Status-Code | +| FooBAR | Foo-Bar | +| URL | Url | +| ID | Id | +| hostIP | Host-Ip | +| JSON | Json | +| JSONName | Json-Name | +| NameJSON | Name-Json | +| UneTête | Une-Tête | ## Examples @@ -82,3 +96,82 @@ type Foo struct { Value string `json:"val,omitempty"`// must be "value" } ``` + +## What this tool is about + +This tool is about validating tags according to rules you define. +The tool also allows to fix tags according to the rules you defined. + +This tool is not intended to validate the fact a tag in valid or not. +To do that, you can use `go vet`, or use [golangci-lint](https://golangci-lint.run) ["go vet"](https://golangci-lint.run/usage/linters/#govet) linter. + +## How to use the tool + +### As a golangci-lint linter + +Define the rules, you want via your [golangci-lint](https://golangci-lint.run) configuration file: + +```yaml +linters-settings: + tagliatelle: + # Check the struck tag name case. + case: + # Use the struct field name to check the name of the struct tag. + # Default: false + use-field-name: true + rules: + # Any struct tag type can be used. + # Support string case: `camel`, `pascal`, `kebab`, `snake`, `upperSnake`, `goCamel`, `goPascal`, `goKebab`, `goSnake`, `upper`, `lower`, `header`. + json: camel + yaml: camel + xml: camel +``` + +More information here https://golangci-lint.run/usage/linters/#tagliatelle + +### Install and run it from the binary + +Not recommended. + +```shell +go install github.com/ldez/tagliatelle/cmd/tagliatelle@latest +``` + +then launch it manually. + +## Rules + +Here are the default rules for the well known and used tags, when using tagliatelle as a binary or [golangci-lint linter](https://golangci-lint.run/usage/linters/#tagliatelle): + +- `json`: `camel` +- `yaml`: `camel` +- `xml`: `camel` +- `bson`: `camel` +- `avro`: `snake` +- `header`: `header` +- `envconfig`: `upperSnake` + +### Custom Rules + +The tool is not limited to the tags used in example, you can use it to validate any tag. + +You can add your own tag, for example `whatever` and tells the tool you want to use `kebab`. 
+ +This option is only available via [golangci-lint](https://golangci-lint.run). + +```yaml +linters-settings: + tagliatelle: + # Check the struck tag name case. + case: + # Use the struct field name to check the name of the struct tag. + # Default: false + use-field-name: true + rules: + # Any struct tag type can be used. + # Support string case: `camel`, `pascal`, `kebab`, `snake`, `goCamel`, `goPascal`, `goKebab`, `goSnake`, `upper`, `lower` + json: camel + yaml: camel + xml: camel + whatever: kebab +``` diff --git a/vendor/github.com/ldez/tagliatelle/tagliatelle.go b/vendor/github.com/ldez/tagliatelle/tagliatelle.go index 53e77d1cb0..22c5feb3d8 100644 --- a/vendor/github.com/ldez/tagliatelle/tagliatelle.go +++ b/vendor/github.com/ldez/tagliatelle/tagliatelle.go @@ -200,11 +200,19 @@ func getConverter(c string) (func(s string) string, error) { return strcase.ToGoKebab, nil case "goSnake": return strcase.ToGoSnake, nil + case "header": + return toHeader, nil case "upper": return strings.ToUpper, nil + case "upperSnake": + return strcase.ToSNAKE, nil case "lower": return strings.ToLower, nil default: return nil, fmt.Errorf("unsupported case: %s", c) } } + +func toHeader(s string) string { + return strcase.ToCase(s, strcase.TitleCase, '-') +} diff --git a/vendor/github.com/leonklingele/grouper/LICENSE b/vendor/github.com/leonklingele/grouper/LICENSE new file mode 100644 index 0000000000..f288702d2f --- /dev/null +++ b/vendor/github.com/leonklingele/grouper/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. 
+ + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. 
If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. 
+ + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. 
+ + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. 
+ + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. 
If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). 
To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. 
+ + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. 
+ + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + <one line to give the program's name and a brief idea of what it does.> + Copyright (C) <year> <name of author> + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <https://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + <program> Copyright (C) <year> <name of author> + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +<https://www.gnu.org/licenses/>. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +<https://www.gnu.org/licenses/why-not-lgpl.html>.
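The grouper analyzer vendored in the files that follow decides whether a top-level declaration is "grouped" purely from GenDecl.Lparen != 0, so a parenthesized block counts as a group while a bare declaration does not. A minimal sketch of the two forms it distinguishes, with hypothetical names and assuming the var flags are enabled:

```go
package example

// Bare GenDecl (Lparen == 0): flagged when var-require-grouping is enabled.
var ungrouped = 1

// Parenthesized GenDecl (Lparen != 0): treated as grouped and not flagged by
// var-require-grouping. With var-require-single-var enabled, having two global
// 'var' declarations in this file would itself be reported.
var (
	first  = 2
	second = 3
)
```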
diff --git a/vendor/github.com/leonklingele/grouper/pkg/analyzer/analyzer.go b/vendor/github.com/leonklingele/grouper/pkg/analyzer/analyzer.go new file mode 100644 index 0000000000..9852c78380 --- /dev/null +++ b/vendor/github.com/leonklingele/grouper/pkg/analyzer/analyzer.go @@ -0,0 +1,89 @@ +package analyzer + +import ( + "fmt" + "go/ast" + + "github.com/leonklingele/grouper/pkg/analyzer/consts" + "github.com/leonklingele/grouper/pkg/analyzer/imports" + "github.com/leonklingele/grouper/pkg/analyzer/types" + "github.com/leonklingele/grouper/pkg/analyzer/vars" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" +) + +const ( + Name = "grouper" + Doc = `expression group analyzer: require 'import', 'const', 'var' and/or 'type' declaration groups` +) + +func New() *analysis.Analyzer { + return &analysis.Analyzer{ //nolint:exhaustivestruct // we do not need all fields + Name: Name, + Doc: Doc, + Flags: Flags(), + Run: run, + Requires: []*analysis.Analyzer{inspect.Analyzer}, + } +} + +func run(p *analysis.Pass) (interface{}, error) { + flagLookupBool := func(name string) bool { + return p.Analyzer.Flags.Lookup(name).Value.String() == "true" + } + + c := &Config{ + ConstsConfig: &consts.Config{ + RequireSingleConst: flagLookupBool(FlagNameConstRequireSingleConst), + RequireGrouping: flagLookupBool(FlagNameConstRequireGrouping), + }, + + ImportsConfig: &imports.Config{ + RequireSingleImport: flagLookupBool(FlagNameImportRequireSingleImport), + RequireGrouping: flagLookupBool(FlagNameImportRequireGrouping), + }, + + TypesConfig: &types.Config{ + RequireSingleType: flagLookupBool(FlagNameTypeRequireSingleType), + RequireGrouping: flagLookupBool(FlagNameTypeRequireGrouping), + }, + + VarsConfig: &vars.Config{ + RequireSingleVar: flagLookupBool(FlagNameVarRequireSingleVar), + RequireGrouping: flagLookupBool(FlagNameVarRequireGrouping), + }, + } + + return nil, pass(c, p) +} + +func pass(c *Config, p *analysis.Pass) error { + for _, f := range p.Files { + if err := filepass(c, p, f); err != nil { + return err + } + } + + return nil +} + +func filepass(c *Config, p *analysis.Pass, f *ast.File) error { + if err := consts.Filepass(c.ConstsConfig, p, f); err != nil { + return fmt.Errorf("failed to consts.Filepass: %w", err) + } + + if err := imports.Filepass(c.ImportsConfig, p, f); err != nil { + return fmt.Errorf("failed to imports.Filepass: %w", err) + } + + if err := types.Filepass(c.TypesConfig, p, f); err != nil { + return fmt.Errorf("failed to types.Filepass: %w", err) + } + + if err := vars.Filepass(c.VarsConfig, p, f); err != nil { + return fmt.Errorf("failed to vars.Filepass: %w", err) + } + + return nil +} diff --git a/vendor/github.com/leonklingele/grouper/pkg/analyzer/config.go b/vendor/github.com/leonklingele/grouper/pkg/analyzer/config.go new file mode 100644 index 0000000000..b00595f9a5 --- /dev/null +++ b/vendor/github.com/leonklingele/grouper/pkg/analyzer/config.go @@ -0,0 +1,15 @@ +package analyzer + +import ( + "github.com/leonklingele/grouper/pkg/analyzer/consts" + "github.com/leonklingele/grouper/pkg/analyzer/imports" + "github.com/leonklingele/grouper/pkg/analyzer/types" + "github.com/leonklingele/grouper/pkg/analyzer/vars" +) + +type Config struct { + ConstsConfig *consts.Config + ImportsConfig *imports.Config + TypesConfig *types.Config + VarsConfig *vars.Config +} diff --git a/vendor/github.com/leonklingele/grouper/pkg/analyzer/consts/analyzer.go b/vendor/github.com/leonklingele/grouper/pkg/analyzer/consts/analyzer.go new file mode 100644 index 
0000000000..e4e04c1270 --- /dev/null +++ b/vendor/github.com/leonklingele/grouper/pkg/analyzer/consts/analyzer.go @@ -0,0 +1,19 @@ +package consts + +import ( + "go/ast" + "go/token" + + "github.com/leonklingele/grouper/pkg/analyzer/globals" + + "golang.org/x/tools/go/analysis" +) + +// https://go.dev/ref/spec#Constant_declarations + +func Filepass(c *Config, p *analysis.Pass, f *ast.File) error { + return globals.Filepass( + p, f, + token.CONST, c.RequireSingleConst, c.RequireGrouping, + ) +} diff --git a/vendor/github.com/leonklingele/grouper/pkg/analyzer/consts/config.go b/vendor/github.com/leonklingele/grouper/pkg/analyzer/consts/config.go new file mode 100644 index 0000000000..aeeab40c78 --- /dev/null +++ b/vendor/github.com/leonklingele/grouper/pkg/analyzer/consts/config.go @@ -0,0 +1,6 @@ +package consts + +type Config struct { + RequireSingleConst bool // Require the use of a single global 'const' declaration only + RequireGrouping bool // Require the use of grouped global 'const' declarations +} diff --git a/vendor/github.com/leonklingele/grouper/pkg/analyzer/flags.go b/vendor/github.com/leonklingele/grouper/pkg/analyzer/flags.go new file mode 100644 index 0000000000..42447cbefe --- /dev/null +++ b/vendor/github.com/leonklingele/grouper/pkg/analyzer/flags.go @@ -0,0 +1,37 @@ +package analyzer + +import ( + "flag" +) + +const ( + FlagNameConstRequireSingleConst = "const-require-single-const" + FlagNameConstRequireGrouping = "const-require-grouping" + + FlagNameImportRequireSingleImport = "import-require-single-import" + FlagNameImportRequireGrouping = "import-require-grouping" + + FlagNameTypeRequireSingleType = "type-require-single-type" + FlagNameTypeRequireGrouping = "type-require-grouping" + + FlagNameVarRequireSingleVar = "var-require-single-var" + FlagNameVarRequireGrouping = "var-require-grouping" +) + +func Flags() flag.FlagSet { + fs := flag.NewFlagSet(Name, flag.ExitOnError) + + fs.Bool(FlagNameConstRequireSingleConst, false, "require the use of a single global 'const' declaration only") + fs.Bool(FlagNameConstRequireGrouping, false, "require the use of grouped global 'const' declarations") + + fs.Bool(FlagNameImportRequireSingleImport, false, "require the use of a single 'import' declaration only") + fs.Bool(FlagNameImportRequireGrouping, false, "require the use of grouped 'import' declarations") + + fs.Bool(FlagNameTypeRequireSingleType, false, "require the use of a single global 'type' declaration only") + fs.Bool(FlagNameTypeRequireGrouping, false, "require the use of grouped global 'type' declarations") + + fs.Bool(FlagNameVarRequireSingleVar, false, "require the use of a single global 'var' declaration only") + fs.Bool(FlagNameVarRequireGrouping, false, "require the use of grouped global 'var' declarations") + + return *fs +} diff --git a/vendor/github.com/leonklingele/grouper/pkg/analyzer/globals/analyzer.go b/vendor/github.com/leonklingele/grouper/pkg/analyzer/globals/analyzer.go new file mode 100644 index 0000000000..15940a4807 --- /dev/null +++ b/vendor/github.com/leonklingele/grouper/pkg/analyzer/globals/analyzer.go @@ -0,0 +1,105 @@ +package globals + +import ( + "fmt" + "go/ast" + "go/token" + + "golang.org/x/tools/go/analysis" +) + +type Global struct { + Decl *ast.GenDecl + IsGroup bool +} + +func Filepass( + p *analysis.Pass, f *ast.File, + tkn token.Token, requireSingle, requireGrouping bool, +) error { + var globals []*Global + for _, decl := range f.Decls { + genDecl, ok := decl.(*ast.GenDecl) + if !ok { + continue + } + + if genDecl.Tok == tkn { + 
globals = append(globals, &Global{ + Decl: genDecl, + IsGroup: genDecl.Lparen != 0, + }) + } + } + + numGlobals := len(globals) + if numGlobals == 0 { + // Bail out early + return nil + } + + if requireSingle && numGlobals > 1 { + msg := fmt.Sprintf("should only use a single global '%s' declaration, %d found", tkn.String(), numGlobals) + dups := globals[1:] + firstdup := dups[0] + decl := firstdup.Decl + + report := analysis.Diagnostic{ //nolint:exhaustivestruct // we do not need all fields + Pos: decl.Pos(), + End: decl.End(), + Message: msg, + // TODO(leon): Suggest fix + } + + if len(dups) > 1 { + report.Related = toRelated(dups[1:]) + } + + p.Report(report) + } + + if requireGrouping { + var ungrouped []*Global + for _, g := range globals { + if !g.IsGroup { + ungrouped = append(ungrouped, g) + } + } + + if numUngrouped := len(ungrouped); numUngrouped != 0 { + msg := fmt.Sprintf("should only use grouped global '%s' declarations", tkn.String()) + firstmatch := ungrouped[0] + decl := firstmatch.Decl + + report := analysis.Diagnostic{ //nolint:exhaustivestruct // we do not need all fields + Pos: decl.Pos(), + End: decl.End(), + Message: msg, + // TODO(leon): Suggest fix + } + + if numUngrouped > 1 { + report.Related = toRelated(ungrouped[1:]) + } + + p.Report(report) + } + } + + return nil +} + +func toRelated(globals []*Global) []analysis.RelatedInformation { + related := make([]analysis.RelatedInformation, 0, len(globals)) + for _, g := range globals { + decl := g.Decl + + related = append(related, analysis.RelatedInformation{ + Pos: decl.Pos(), + End: decl.End(), + Message: "found here", + }) + } + + return related +} diff --git a/vendor/github.com/leonklingele/grouper/pkg/analyzer/imports/analyzer.go b/vendor/github.com/leonklingele/grouper/pkg/analyzer/imports/analyzer.go new file mode 100644 index 0000000000..b545f00c05 --- /dev/null +++ b/vendor/github.com/leonklingele/grouper/pkg/analyzer/imports/analyzer.go @@ -0,0 +1,103 @@ +package imports + +import ( + "fmt" + "go/ast" + "go/token" + + "golang.org/x/tools/go/analysis" +) + +// https://go.dev/ref/spec#Import_declarations + +type Import struct { + Decl *ast.GenDecl + IsGroup bool +} + +func Filepass(c *Config, p *analysis.Pass, f *ast.File) error { + var imports []*Import + ast.Inspect(f, func(n ast.Node) bool { + if decl, ok := n.(*ast.GenDecl); ok { + if decl.Tok == token.IMPORT { + imports = append(imports, &Import{ + Decl: decl, + IsGroup: decl.Lparen != 0, + }) + } + } + + return true + }) + + numImports := len(imports) + if numImports == 0 { + // Bail out early + return nil + } + + if c.RequireSingleImport && numImports > 1 { + msg := fmt.Sprintf("should only use a single 'import' declaration, %d found", numImports) + dups := imports[1:] + firstdup := dups[0] + decl := firstdup.Decl + + report := analysis.Diagnostic{ //nolint:exhaustivestruct // we do not need all fields + Pos: decl.Pos(), + End: decl.End(), + Message: msg, + // TODO(leon): Suggest fix + } + + if len(dups) > 1 { + report.Related = toRelated(dups[1:]) + } + + p.Report(report) + } + + if c.RequireGrouping { + var ungroupedImports []*Import + for _, imp := range imports { + if !imp.IsGroup { + ungroupedImports = append(ungroupedImports, imp) + } + } + + if numUngroupedImports := len(ungroupedImports); numUngroupedImports != 0 { + msg := "should only use grouped 'import' declarations" + firstmatch := ungroupedImports[0] + decl := firstmatch.Decl + + report := analysis.Diagnostic{ //nolint:exhaustivestruct // we do not need all fields + Pos: decl.Pos(), + End: 
decl.End(), + Message: msg, + // TODO(leon): Suggest fix + } + + if numUngroupedImports > 1 { + report.Related = toRelated(ungroupedImports[1:]) + } + + p.Report(report) + } + } + + return nil +} + +func toRelated(imports []*Import) []analysis.RelatedInformation { + related := make([]analysis.RelatedInformation, 0, len(imports)) + for _, imp := range imports { + decl := imp.Decl + + related = append(related, analysis.RelatedInformation{ + Pos: decl.Pos(), + End: decl.End(), + Message: "found here", + }) + } + + return related +} diff --git a/vendor/github.com/leonklingele/grouper/pkg/analyzer/imports/config.go b/vendor/github.com/leonklingele/grouper/pkg/analyzer/imports/config.go new file mode 100644 index 0000000000..6a6971b4ad --- /dev/null +++ b/vendor/github.com/leonklingele/grouper/pkg/analyzer/imports/config.go @@ -0,0 +1,6 @@ +package imports + +type Config struct { + RequireSingleImport bool // Require the use of a single 'import' declaration only + RequireGrouping bool // Require the use of grouped 'import' declarations +} diff --git a/vendor/github.com/leonklingele/grouper/pkg/analyzer/types/analyzer.go b/vendor/github.com/leonklingele/grouper/pkg/analyzer/types/analyzer.go new file mode 100644 index 0000000000..63bbab33b7 --- /dev/null +++ b/vendor/github.com/leonklingele/grouper/pkg/analyzer/types/analyzer.go @@ -0,0 +1,19 @@ +package types + +import ( + "go/ast" + "go/token" + + "github.com/leonklingele/grouper/pkg/analyzer/globals" + + "golang.org/x/tools/go/analysis" +) + +// https://go.dev/ref/spec#Type_declarations + +func Filepass(c *Config, p *analysis.Pass, f *ast.File) error { + return globals.Filepass( + p, f, + token.TYPE, c.RequireSingleType, c.RequireGrouping, + ) +} diff --git a/vendor/github.com/leonklingele/grouper/pkg/analyzer/types/config.go b/vendor/github.com/leonklingele/grouper/pkg/analyzer/types/config.go new file mode 100644 index 0000000000..e24cef9dae --- /dev/null +++ b/vendor/github.com/leonklingele/grouper/pkg/analyzer/types/config.go @@ -0,0 +1,6 @@ +package types + +type Config struct { + RequireSingleType bool // Require the use of a single global 'type' declaration only + RequireGrouping bool // Require the use of grouped global 'type' declarations +} diff --git a/vendor/github.com/leonklingele/grouper/pkg/analyzer/vars/analyzer.go b/vendor/github.com/leonklingele/grouper/pkg/analyzer/vars/analyzer.go new file mode 100644 index 0000000000..20c7812233 --- /dev/null +++ b/vendor/github.com/leonklingele/grouper/pkg/analyzer/vars/analyzer.go @@ -0,0 +1,19 @@ +package vars + +import ( + "go/ast" + "go/token" + + "github.com/leonklingele/grouper/pkg/analyzer/globals" + + "golang.org/x/tools/go/analysis" +) + +// https://go.dev/ref/spec#Variable_declarations + +func Filepass(c *Config, p *analysis.Pass, f *ast.File) error { + return globals.Filepass( + p, f, + token.VAR, c.RequireSingleVar, c.RequireGrouping, + ) +} diff --git a/vendor/github.com/leonklingele/grouper/pkg/analyzer/vars/config.go b/vendor/github.com/leonklingele/grouper/pkg/analyzer/vars/config.go new file mode 100644 index 0000000000..4c7c1d8384 --- /dev/null +++ b/vendor/github.com/leonklingele/grouper/pkg/analyzer/vars/config.go @@ -0,0 +1,6 @@ +package vars + +type Config struct { + RequireSingleVar bool // Require the use of a single global 'var' declaration only + RequireGrouping bool // Require the use of grouped global 'var' declarations +} diff --git a/vendor/github.com/lufeee/execinquery/.gitignore b/vendor/github.com/lufeee/execinquery/.gitignore new file mode 100644 index 
0000000000..00e1abc31f --- /dev/null +++ b/vendor/github.com/lufeee/execinquery/.gitignore @@ -0,0 +1 @@ +execinquery diff --git a/vendor/github.com/lufeee/execinquery/LICENSE b/vendor/github.com/lufeee/execinquery/LICENSE new file mode 100644 index 0000000000..b6ab14aec3 --- /dev/null +++ b/vendor/github.com/lufeee/execinquery/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2022 lufe + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/lufeee/execinquery/README.md b/vendor/github.com/lufeee/execinquery/README.md new file mode 100644 index 0000000000..38fa7c8b96 --- /dev/null +++ b/vendor/github.com/lufeee/execinquery/README.md @@ -0,0 +1,76 @@ +# execinquery - a simple query string checker in Query function +[![Go Matrix](https://github.com/lufeee/execinquery/actions/workflows/go-cross.yml/badge.svg?branch=main)](https://github.com/lufeee/execinquery/actions/workflows/go-cross.yml) +[![Go lint](https://github.com/lufeee/execinquery/actions/workflows/lint.yml/badge.svg?branch=main)](https://github.com/lufeee/execinquery/actions/workflows/lint.yml) +[![MIT License](http://img.shields.io/badge/license-MIT-blue.svg?style=flat)](LICENSE) +## About + +execinquery is a linter about query string checker in Query function which reads your Go src files and +warnings it finds. + +## Installation + +```sh +go install github.com/lufeee/execinquery/cmd/execinquery +``` + +## Usage +```go +package main + +import ( + "database/sql" + "log" +) + +func main() { + db, err := sql.Open("mysql", "test:test@tcp(test:3306)/test") + if err != nil { + log.Fatal("Database Connect Error: ", err) + } + defer db.Close() + + test := "a" + _, err = db.Query("Update * FROM hoge where id = ?", test) + if err != nil { + log.Fatal("Query Error: ", err) + } + +} +``` + +```console +go vet -vettool=$(which execinquery) ./... + +# command-line-arguments +./a.go:16:11: Use Exec instead of Query to execute `UPDATE` query +``` + +## CI + +### CircleCI + +```yaml +- run: + name: install execinquery + command: go install github.com/lufeee/execinquery + +- run: + name: run execinquery + command: go vet -vettool=`which execinquery` ./... +``` + +### GitHub Actions + +```yaml +- name: install execinquery + run: go install github.com/lufeee/execinquery + +- name: run execinquery + run: go vet -vettool=`which execinquery` ./... +``` + +### License + +MIT license. + +
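Since the diagnostic execinquery emits is "Use Exec instead of Query", a minimal sketch of the corrected form of the README example above (same hypothetical DSN and table; the blank driver import is an assumption, as the README omits it):

```go
package main

import (
	"database/sql"
	"log"

	_ "github.com/go-sql-driver/mysql" // assumed driver for the mysql DSN below
)

func main() {
	db, err := sql.Open("mysql", "test:test@tcp(test:3306)/test")
	if err != nil {
		log.Fatal("Database Connect Error: ", err)
	}
	defer db.Close()

	// Exec, not Query, is what the linter expects for statements that do not
	// return rows, such as UPDATE.
	if _, err := db.Exec("UPDATE hoge SET name = ? WHERE id = ?", "new-name", "a"); err != nil {
		log.Fatal("Exec Error: ", err)
	}
}
```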
diff --git a/vendor/github.com/lufeee/execinquery/execinquery.go b/vendor/github.com/lufeee/execinquery/execinquery.go new file mode 100644 index 0000000000..c37dc17010 --- /dev/null +++ b/vendor/github.com/lufeee/execinquery/execinquery.go @@ -0,0 +1,135 @@ +package execinquery + +import ( + "go/ast" + "regexp" + "strings" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" +) + +const doc = "execinquery is a linter about query string checker in Query function which reads your Go src files and warning it finds" + +// Analyzer is checking database/sql pkg Query's function +var Analyzer = &analysis.Analyzer{ + Name: "execinquery", + Doc: doc, + Run: newLinter().run, + Requires: []*analysis.Analyzer{ + inspect.Analyzer, + }, +} + +type linter struct { + commentExp *regexp.Regexp + multilineCommentExp *regexp.Regexp +} + +func newLinter() *linter { + return &linter{ + commentExp: regexp.MustCompile(`--[^\n]*\n`), + multilineCommentExp: regexp.MustCompile(`(?s)/\*.*?\*/`), + } +} + +func (l linter) run(pass *analysis.Pass) (interface{}, error) { + result := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + + nodeFilter := []ast.Node{ + (*ast.CallExpr)(nil), + } + + result.Preorder(nodeFilter, func(n ast.Node) { + switch n := n.(type) { + case *ast.CallExpr: + selector, ok := n.Fun.(*ast.SelectorExpr) + if !ok { + return + } + + if pass.TypesInfo == nil || pass.TypesInfo.Uses[selector.Sel] == nil || pass.TypesInfo.Uses[selector.Sel].Pkg() == nil { + return + } + + if "database/sql" != pass.TypesInfo.Uses[selector.Sel].Pkg().Path() { + return + } + + if !strings.Contains(selector.Sel.Name, "Query") { + return + } + + replacement := "Exec" + var i int // the index of the query argument + if strings.Contains(selector.Sel.Name, "Context") { + replacement += "Context" + i = 1 + } + + if len(n.Args) <= i { + return + } + + query := l.getQueryString(n.Args[i]) + if query == "" { + return + } + + query = strings.TrimSpace(l.cleanValue(query)) + parts := strings.SplitN(query, " ", 2) + cmd := strings.ToUpper(parts[0]) + + if strings.HasPrefix(cmd, "SELECT") { + return + } + + pass.Reportf(n.Fun.Pos(), "Use %s instead of %s to execute `%s` query", replacement, selector.Sel.Name, cmd) + } + }) + + return nil, nil +} + +func (l linter) cleanValue(s string) string { + v := strings.NewReplacer(`"`, "", "`", "").Replace(s) + + v = l.multilineCommentExp.ReplaceAllString(v, "") + + return l.commentExp.ReplaceAllString(v, "") +} + +func (l linter) getQueryString(exp interface{}) string { + switch e := exp.(type) { + case *ast.AssignStmt: + var v string + for _, stmt := range e.Rhs { + v += l.cleanValue(l.getQueryString(stmt)) + } + return v + + case *ast.BasicLit: + return e.Value + + case *ast.ValueSpec: + var v string + for _, value := range e.Values { + v += l.cleanValue(l.getQueryString(value)) + } + return v + + case *ast.Ident: + if e.Obj == nil { + return "" + } + return l.getQueryString(e.Obj.Decl) + + case *ast.BinaryExpr: + v := l.cleanValue(l.getQueryString(e.X)) + v += l.cleanValue(l.getQueryString(e.Y)) + return v + } + + return "" +} diff --git a/vendor/github.com/macabu/inamedparam/.gitignore b/vendor/github.com/macabu/inamedparam/.gitignore new file mode 100644 index 0000000000..f8d51e94cb --- /dev/null +++ b/vendor/github.com/macabu/inamedparam/.gitignore @@ -0,0 +1,22 @@ +# If you prefer the allow list template instead of the deny list, see community template: +# 
https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore +# +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib +inamedparam + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +# vendor/ + +# Go workspace file +go.work diff --git a/vendor/github.com/macabu/inamedparam/.golangci.yml b/vendor/github.com/macabu/inamedparam/.golangci.yml new file mode 100644 index 0000000000..f0efa1cb6c --- /dev/null +++ b/vendor/github.com/macabu/inamedparam/.golangci.yml @@ -0,0 +1,33 @@ +run: + deadline: 30s + +linters: + enable-all: true + disable: + - cyclop + - deadcode + - depguard + - exhaustivestruct + - exhaustruct + - forcetypeassert + - gochecknoglobals + - gocognit + - golint + - ifshort + - interfacer + - maligned + - nilnil + - nosnakecase + - paralleltest + - scopelint + - structcheck + - varcheck + +linters-settings: + gci: + sections: + - standard + - default + - prefix(github.com/macabu/inamedparam) + section-separators: + - newLine diff --git a/vendor/github.com/macabu/inamedparam/LICENSE-MIT b/vendor/github.com/macabu/inamedparam/LICENSE-MIT new file mode 100644 index 0000000000..b95f480ee5 --- /dev/null +++ b/vendor/github.com/macabu/inamedparam/LICENSE-MIT @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2023 Matheus Macabu + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/macabu/inamedparam/README.md b/vendor/github.com/macabu/inamedparam/README.md new file mode 100644 index 0000000000..3336cb9504 --- /dev/null +++ b/vendor/github.com/macabu/inamedparam/README.md @@ -0,0 +1,38 @@ +# inamedparam + +A linter that reports interfaces with unnamed method parameters. + +## Flags/Config +```sh +-skip-single-param + skip interfaces with a single unnamed parameter +``` + +## Usage + +### Standalone +You can run it standalone through `go vet`. + +You must install the binary to your `$GOBIN` folder like so: +```sh +$ go install github.com/macabu/inamedparam/cmd/inamedparam +``` + +And then navigate to your Go project's root folder, where can run `go vet` in the following way: +```sh +$ go vet -vettool=$(which inamedparam) ./... +``` + +### golangci-lint +`inamedparam` was added as a linter to `golangci-lint` on version `v1.55.0`. It is disabled by default. 
+ +To enable it, you can add it to your `.golangci.yml` file, as such: +```yaml +run: + deadline: 30s + +linters: + disable-all: true + enable: + - inamedparam +``` diff --git a/vendor/github.com/macabu/inamedparam/inamedparam.go b/vendor/github.com/macabu/inamedparam/inamedparam.go new file mode 100644 index 0000000000..8ba7fe1882 --- /dev/null +++ b/vendor/github.com/macabu/inamedparam/inamedparam.go @@ -0,0 +1,94 @@ +package inamedparam + +import ( + "flag" + "go/ast" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" +) + +const ( + analyzerName = "inamedparam" + + flagSkipSingleParam = "skip-single-param" +) + +var Analyzer = &analysis.Analyzer{ + Name: analyzerName, + Doc: "reports interfaces with unnamed method parameters", + Run: run, + Flags: flags(), + Requires: []*analysis.Analyzer{ + inspect.Analyzer, + }, +} + +func flags() flag.FlagSet { + flags := flag.NewFlagSet(analyzerName, flag.ExitOnError) + + flags.Bool(flagSkipSingleParam, false, "skip interface methods with a single unnamed parameter") + + return *flags +} + +func run(pass *analysis.Pass) (interface{}, error) { + inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + + types := []ast.Node{ + &ast.InterfaceType{}, + } + + skipSingleParam := pass.Analyzer.Flags.Lookup(flagSkipSingleParam).Value.(flag.Getter).Get().(bool) + + inspect.Preorder(types, func(n ast.Node) { + interfaceType, ok := n.(*ast.InterfaceType) + if !ok || interfaceType == nil || interfaceType.Methods == nil { + return + } + + for _, method := range interfaceType.Methods.List { + interfaceFunc, ok := method.Type.(*ast.FuncType) + if !ok || interfaceFunc == nil || interfaceFunc.Params == nil { + continue + } + + // Improvement: add test case to reproduce this. Help wanted. + if len(method.Names) == 0 { + continue + } + + methodName := method.Names[0].Name + + if skipSingleParam && len(interfaceFunc.Params.List) == 1 { + continue + } + + for _, param := range interfaceFunc.Params.List { + if param.Names == nil { + var builtParamType string + + switch paramType := param.Type.(type) { + case *ast.SelectorExpr: + if ident := paramType.X.(*ast.Ident); ident != nil { + builtParamType += ident.Name + "." + } + + builtParamType += paramType.Sel.Name + case *ast.Ident: + builtParamType = paramType.Name + } + + if builtParamType != "" { + pass.Reportf(param.Pos(), "interface method %v must have named param for type %v", methodName, builtParamType) + } else { + pass.Reportf(param.Pos(), "interface method %v must have all named params", methodName) + } + } + } + } + }) + + return nil, nil +} diff --git a/vendor/github.com/maratori/testableexamples/LICENSE b/vendor/github.com/maratori/testableexamples/LICENSE new file mode 100644 index 0000000000..e8b68be3eb --- /dev/null +++ b/vendor/github.com/maratori/testableexamples/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2022 Marat Reymers + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/maratori/testableexamples/pkg/testableexamples/testableexamples.go b/vendor/github.com/maratori/testableexamples/pkg/testableexamples/testableexamples.go new file mode 100644 index 0000000000..26d22c703a --- /dev/null +++ b/vendor/github.com/maratori/testableexamples/pkg/testableexamples/testableexamples.go @@ -0,0 +1,34 @@ +package testableexamples + +import ( + "go/ast" + "go/doc" + "strings" + + "golang.org/x/tools/go/analysis" +) + +// NewAnalyzer returns Analyzer that checks if examples are testable. +func NewAnalyzer() *analysis.Analyzer { + return &analysis.Analyzer{ + Name: "testableexamples", + Doc: "linter checks if examples are testable (have an expected output)", + Run: func(pass *analysis.Pass) (interface{}, error) { + testFiles := make([]*ast.File, 0, len(pass.Files)) + for _, file := range pass.Files { + fileName := pass.Fset.File(file.Pos()).Name() + if strings.HasSuffix(fileName, "_test.go") { + testFiles = append(testFiles, file) + } + } + + for _, example := range doc.Examples(testFiles...) { + if example.Output == "" && !example.EmptyOutput { + pass.Reportf(example.Code.Pos(), "missing output for example, go test can't validate it") + } + } + + return nil, nil + }, + } +} diff --git a/vendor/github.com/matoous/godox/godox.go b/vendor/github.com/matoous/godox/godox.go index 6d7104b09d..3903525c80 100644 --- a/vendor/github.com/matoous/godox/godox.go +++ b/vendor/github.com/matoous/godox/godox.go @@ -8,31 +8,23 @@ import ( "go/token" "path/filepath" "strings" + "unicode" + "unicode/utf8" ) -var ( - defaultKeywords = []string{"TODO", "BUG", "FIXME"} -) +var defaultKeywords = []string{"TODO", "BUG", "FIXME"} -// Message contains a message and position +// Message contains a message and position. 
type Message struct { Pos token.Position Message string } -func getMessages(c *ast.Comment, fset *token.FileSet, keywords []string) []Message { - commentText := c.Text - switch commentText[1] { - case '/': - commentText = commentText[2:] - if len(commentText) > 0 && commentText[0] == ' ' { - commentText = commentText[1:] - } - case '*': - commentText = commentText[2 : len(commentText)-2] - } +func getMessages(comment *ast.Comment, fset *token.FileSet, keywords []string) []Message { + commentText := extractComment(comment.Text) b := bufio.NewReader(bytes.NewBufferString(commentText)) + var comments []Message for lineNum := 0; ; lineNum++ { @@ -40,45 +32,88 @@ func getMessages(c *ast.Comment, fset *token.FileSet, keywords []string) []Messa if err != nil { break } + + const minimumSize = 4 + sComment := bytes.TrimSpace(line) - if len(sComment) < 4 { + if len(sComment) < minimumSize { continue } + for _, kw := range keywords { - if bytes.EqualFold([]byte(kw), sComment[0:len(kw)]) { - pos := fset.Position(c.Pos()) - // trim the comment - if len(sComment) > 40 { - sComment = []byte(fmt.Sprintf("%.40s...", sComment)) - } - comments = append(comments, Message{ - Pos: pos, - Message: fmt.Sprintf( - "%s:%d: Line contains %s: \"%s\"", - filepath.Join(pos.Filename), - pos.Line+lineNum, - strings.Join(keywords, "/"), - sComment, - ), - }) - break + if lkw := len(kw); !(bytes.EqualFold([]byte(kw), sComment[0:lkw]) && + !hasAlphanumRuneAdjacent(sComment[lkw:])) { + continue + } + + pos := fset.Position(comment.Pos()) + // trim the comment + const commentLimit = 40 + if len(sComment) > commentLimit { + sComment = []byte(fmt.Sprintf("%.40s...", sComment)) } + + comments = append(comments, Message{ + Pos: pos, + Message: fmt.Sprintf( + "%s:%d: Line contains %s: %q", + filepath.Clean(pos.Filename), + pos.Line+lineNum, + strings.Join(keywords, "/"), + sComment, + ), + }) + + break } } + return comments } +func extractComment(commentText string) string { + switch commentText[1] { + case '/': + commentText = commentText[2:] + if len(commentText) > 0 && commentText[0] == ' ' { + commentText = commentText[1:] + } + case '*': + commentText = commentText[2 : len(commentText)-2] + } + + return commentText +} + +func hasAlphanumRuneAdjacent(rest []byte) bool { + if len(rest) == 0 { + return false + } + + switch rest[0] { // most common cases + case ':', ' ', '(': + return false + } + + r, _ := utf8.DecodeRune(rest) + + return unicode.IsLetter(r) || unicode.IsNumber(r) || unicode.IsDigit(r) +} + // Run runs the godox linter on given file. // Godox searches for comments starting with given keywords and reports them. func Run(file *ast.File, fset *token.FileSet, keywords ...string) []Message { if len(keywords) == 0 { keywords = defaultKeywords } + var messages []Message + for _, c := range file.Comments { for _, ci := range c.List { messages = append(messages, getMessages(ci, fset, keywords)...) 
} } + return messages } diff --git a/vendor/github.com/mattn/go-isatty/isatty_bsd.go b/vendor/github.com/mattn/go-isatty/isatty_bsd.go index 39bbcf00f0..d0ea68f408 100644 --- a/vendor/github.com/mattn/go-isatty/isatty_bsd.go +++ b/vendor/github.com/mattn/go-isatty/isatty_bsd.go @@ -1,6 +1,7 @@ -//go:build (darwin || freebsd || openbsd || netbsd || dragonfly) && !appengine -// +build darwin freebsd openbsd netbsd dragonfly +//go:build (darwin || freebsd || openbsd || netbsd || dragonfly || hurd) && !appengine && !tinygo +// +build darwin freebsd openbsd netbsd dragonfly hurd // +build !appengine +// +build !tinygo package isatty diff --git a/vendor/github.com/mattn/go-isatty/isatty_others.go b/vendor/github.com/mattn/go-isatty/isatty_others.go index 31503226f6..7402e0618a 100644 --- a/vendor/github.com/mattn/go-isatty/isatty_others.go +++ b/vendor/github.com/mattn/go-isatty/isatty_others.go @@ -1,5 +1,6 @@ -//go:build appengine || js || nacl || wasm -// +build appengine js nacl wasm +//go:build (appengine || js || nacl || tinygo || wasm) && !windows +// +build appengine js nacl tinygo wasm +// +build !windows package isatty diff --git a/vendor/github.com/mattn/go-isatty/isatty_tcgets.go b/vendor/github.com/mattn/go-isatty/isatty_tcgets.go index 67787657fb..0337d8cf6d 100644 --- a/vendor/github.com/mattn/go-isatty/isatty_tcgets.go +++ b/vendor/github.com/mattn/go-isatty/isatty_tcgets.go @@ -1,6 +1,7 @@ -//go:build (linux || aix || zos) && !appengine +//go:build (linux || aix || zos) && !appengine && !tinygo // +build linux aix zos // +build !appengine +// +build !tinygo package isatty diff --git a/vendor/github.com/mgechev/dots/.travis.yml b/vendor/github.com/mgechev/dots/.travis.yml deleted file mode 100644 index f4a4a7363c..0000000000 --- a/vendor/github.com/mgechev/dots/.travis.yml +++ /dev/null @@ -1,2 +0,0 @@ -language: go -go: master diff --git a/vendor/github.com/mgechev/dots/README.md b/vendor/github.com/mgechev/dots/README.md deleted file mode 100644 index 1203aef5f7..0000000000 --- a/vendor/github.com/mgechev/dots/README.md +++ /dev/null @@ -1,100 +0,0 @@ -[![Build Status](https://travis-ci.org/mgechev/dots.svg?branch=master)](https://travis-ci.org/mgechev/dots) - -# Dots - -Implements the wildcard file matching in Go used by golint, go test etc. 
- -## Usage - -```go -import "github.com/mgechev/dots" - -func main() { - result, err := dots.Resolve([]string{"./fixtures/..."}, []string{"./fixtures/foo"}) - for _, f := range result { - fmt.Println(f); - } -} -``` - -If we suppose that we have the following directory structure: - -```text -├── README.md -├── fixtures -│   ├── bar -│   │   ├── bar1.go -│   │   └── bar2.go -│   ├── baz -│   │   ├── baz1.go -│   │   ├── baz2.go -│   │   └── baz3.go -│   └── foo -│   ├── foo1.go -│   ├── foo2.go -│   └── foo3.go -└── main.go -``` - -The result will be: - -```text -fixtures/bar/bar1.go -fixtures/bar/bar2.go -fixtures/baz/baz1.go -fixtures/baz/baz2.go -fixtures/baz/baz3.go -``` - -`dots` supports wildcard in both - the first and the last argument of `Resolve`, which means that you can ignore files based on a wildcard: - -```go -dots.Resolve([]string{"github.com/mgechev/dots"}, []string{"./..."}) // empty list -dots.Resolve([]string{"./fixtures/bar/..."}, []string{"./fixture/foo/...", "./fixtures/baz/..."}) // bar1.go, bar2.go -``` - -## Preserve package structure - -`dots` allow you to receive a slice of slices where each nested slice represents an individual package: - -```go -dots.ResolvePackages([]string{"github.com/mgechev/dots/..."}, []string{}) -``` - -So we will get the result: - -```text -[ - [ - "$GOROOT/src/github.com/mgechev/dots/fixtures/dummy/bar/bar1.go", - "$GOROOT/src/github.com/mgechev/dots/fixtures/dummy/bar/bar2.go" - ], - [ - "$GOROOT/src/github.com/mgechev/dots/fixtures/dummy/baz/baz1.go", - "$GOROOT/src/github.com/mgechev/dots/fixtures/dummy/baz/baz2.go", - "$GOROOT/src/github.com/mgechev/dots/fixtures/dummy/baz/baz3.go" - ], - [ - "$GOROOT/src/github.com/mgechev/dots/fixtures/dummy/foo/foo1.go", - "$GOROOT/src/github.com/mgechev/dots/fixtures/dummy/foo/foo2.go", - "$GOROOT/src/github.com/mgechev/dots/fixtures/dummy/foo/foo3.go" - ], - [ - "$GOROOT/src/github.com/mgechev/dots/fixtures/pkg/baz/baz1.go", - "$GOROOT/src/github.com/mgechev/dots/fixtures/pkg/baz/baz2.go" - ], - [ - "$GOROOT/src/github.com/mgechev/dots/fixtures/pkg/foo/foo1.go", - "$GOROOT/src/github.com/mgechev/dots/fixtures/pkg/foo/foo2.go" - ], - [ - "$GOROOT/src/github.com/mgechev/dots/fixtures/pkg/foo/bar/bar1.go" - ] -] -``` - -This method is especially useful, when you want to perform type checking over given package from the result. 
- -## License - -MIT diff --git a/vendor/github.com/mgechev/revive/config/config.go b/vendor/github.com/mgechev/revive/config/config.go index 5c63f35b3e..50a2b8966f 100644 --- a/vendor/github.com/mgechev/revive/config/config.go +++ b/vendor/github.com/mgechev/revive/config/config.go @@ -1,13 +1,14 @@ +// Package config implements revive's configuration data structures and related methods package config import ( "errors" "fmt" - "io/ioutil" - - "github.com/mgechev/revive/formatter" + "os" "github.com/BurntSushi/toml" + + "github.com/mgechev/revive/formatter" "github.com/mgechev/revive/lint" "github.com/mgechev/revive/rule" ) @@ -31,21 +32,22 @@ var defaultRules = []lint.Rule{ &rule.TimeNamingRule{}, &rule.ContextKeysType{}, &rule.ContextAsArgumentRule{}, + &rule.EmptyBlockRule{}, + &rule.SuperfluousElseRule{}, + &rule.UnusedParamRule{}, + &rule.UnreachableCodeRule{}, + &rule.RedefinesBuiltinIDRule{}, } var allRules = append([]lint.Rule{ &rule.ArgumentsLimitRule{}, &rule.CyclomaticRule{}, &rule.FileHeaderRule{}, - &rule.EmptyBlockRule{}, - &rule.SuperfluousElseRule{}, &rule.ConfusingNamingRule{}, &rule.GetReturnRule{}, &rule.ModifiesParamRule{}, &rule.ConfusingResultsRule{}, &rule.DeepExitRule{}, - &rule.UnusedParamRule{}, - &rule.UnreachableCodeRule{}, &rule.AddConstantRule{}, &rule.FlagParamRule{}, &rule.UnnecessaryStmtRule{}, @@ -53,8 +55,7 @@ var allRules = append([]lint.Rule{ &rule.ModifiesValRecRule{}, &rule.ConstantLogicalExprRule{}, &rule.BoolLiteralRule{}, - &rule.RedefinesBuiltinIDRule{}, - &rule.ImportsBlacklistRule{}, + &rule.ImportsBlocklistRule{}, &rule.FunctionResultsLimitRule{}, &rule.MaxPublicStructsRule{}, &rule.RangeValInClosureRule{}, @@ -79,13 +80,21 @@ var allRules = append([]lint.Rule{ &rule.UnexportedNamingRule{}, &rule.FunctionLength{}, &rule.NestedStructs{}, - &rule.IfReturnRule{}, &rule.UselessBreak{}, + &rule.UncheckedTypeAssertionRule{}, &rule.TimeEqualRule{}, &rule.BannedCharsRule{}, &rule.OptimizeOperandsOrderRule{}, &rule.UseAnyRule{}, &rule.DataRaceRule{}, + &rule.CommentSpacingsRule{}, + &rule.IfReturnRule{}, + &rule.RedundantImportAlias{}, + &rule.ImportAliasNamingRule{}, + &rule.EnforceMapStyleRule{}, + &rule.EnforceRepeatedArgTypeStyleRule{}, + &rule.EnforceSliceStyleRule{}, + &rule.MaxControlNestingRule{}, }, defaultRules...) 
var allFormatters = []lint.Formatter{ @@ -123,7 +132,8 @@ func GetLintingRules(config *lint.Config, extraRules []lint.Rule) ([]lint.Rule, var lintingRules []lint.Rule for name, ruleConfig := range config.Rules { - r, ok := rulesMap[name] + actualName := actualRuleName(name) + r, ok := rulesMap[actualName] if !ok { return nil, fmt.Errorf("cannot find rule: %s", name) } @@ -138,8 +148,17 @@ func GetLintingRules(config *lint.Config, extraRules []lint.Rule) ([]lint.Rule, return lintingRules, nil } +func actualRuleName(name string) string { + switch name { + case "imports-blacklist": + return "imports-blocklist" + default: + return name + } +} + func parseConfig(path string, config *lint.Config) error { - file, err := ioutil.ReadFile(path) + file, err := os.ReadFile(path) if err != nil { return errors.New("cannot read the config file") } @@ -147,6 +166,14 @@ func parseConfig(path string, config *lint.Config) error { if err != nil { return fmt.Errorf("cannot parse the config file: %v", err) } + for k, r := range config.Rules { + err := r.Initialize() + if err != nil { + return fmt.Errorf("error in config of rule [%s] : [%v]", k, err) + } + config.Rules[k] = r + } + return nil } diff --git a/vendor/github.com/mgechev/revive/formatter/checkstyle.go b/vendor/github.com/mgechev/revive/formatter/checkstyle.go index 33a3b2ca17..f45b63c925 100644 --- a/vendor/github.com/mgechev/revive/formatter/checkstyle.go +++ b/vendor/github.com/mgechev/revive/formatter/checkstyle.go @@ -3,7 +3,7 @@ package formatter import ( "bytes" "encoding/xml" - plainTemplate "text/template" + plain "text/template" "github.com/mgechev/revive/lint" ) @@ -50,7 +50,7 @@ func (*Checkstyle) Format(failures <-chan lint.Failure, config lint.Config) (str issues[fn] = append(issues[fn], iss) } - t, err := plainTemplate.New("revive").Parse(checkstyleTemplate) + t, err := plain.New("revive").Parse(checkstyleTemplate) if err != nil { return "", err } diff --git a/vendor/github.com/mgechev/revive/formatter/default.go b/vendor/github.com/mgechev/revive/formatter/default.go index f76a7b29ab..2d5a04434f 100644 --- a/vendor/github.com/mgechev/revive/formatter/default.go +++ b/vendor/github.com/mgechev/revive/formatter/default.go @@ -1,6 +1,7 @@ package formatter import ( + "bytes" "fmt" "github.com/mgechev/revive/lint" @@ -19,8 +20,9 @@ func (*Default) Name() string { // Format formats the failures gotten from the lint. func (*Default) Format(failures <-chan lint.Failure, _ lint.Config) (string, error) { + var buf bytes.Buffer for failure := range failures { - fmt.Printf("%v: %s\n", failure.Position.Start, failure.Failure) + fmt.Fprintf(&buf, "%v: %s\n", failure.Position.Start, failure.Failure) } - return "", nil + return buf.String(), nil } diff --git a/vendor/github.com/mgechev/revive/formatter/doc.go b/vendor/github.com/mgechev/revive/formatter/doc.go new file mode 100644 index 0000000000..bb89f20ea6 --- /dev/null +++ b/vendor/github.com/mgechev/revive/formatter/doc.go @@ -0,0 +1,2 @@ +// Package formatter implements the linter output formatters. +package formatter diff --git a/vendor/github.com/mgechev/revive/formatter/friendly.go b/vendor/github.com/mgechev/revive/formatter/friendly.go index ced8fa46c9..5ff329a23c 100644 --- a/vendor/github.com/mgechev/revive/formatter/friendly.go +++ b/vendor/github.com/mgechev/revive/formatter/friendly.go @@ -3,6 +3,7 @@ package formatter import ( "bytes" "fmt" + "io" "sort" "github.com/fatih/color" @@ -31,13 +32,14 @@ func (*Friendly) Name() string { // Format formats the failures gotten from the lint. 
func (f *Friendly) Format(failures <-chan lint.Failure, config lint.Config) (string, error) { + var buf bytes.Buffer errorMap := map[string]int{} warningMap := map[string]int{} totalErrors := 0 totalWarnings := 0 for failure := range failures { sev := severity(config, failure) - f.printFriendlyFailure(failure, sev) + f.printFriendlyFailure(&buf, failure, sev) if sev == lint.SeverityWarning { warningMap[failure.RuleName]++ totalWarnings++ @@ -47,29 +49,29 @@ func (f *Friendly) Format(failures <-chan lint.Failure, config lint.Config) (str totalErrors++ } } - f.printSummary(totalErrors, totalWarnings) - f.printStatistics(color.RedString("Errors:"), errorMap) - f.printStatistics(color.YellowString("Warnings:"), warningMap) - return "", nil + f.printSummary(&buf, totalErrors, totalWarnings) + f.printStatistics(&buf, color.RedString("Errors:"), errorMap) + f.printStatistics(&buf, color.YellowString("Warnings:"), warningMap) + return buf.String(), nil } -func (f *Friendly) printFriendlyFailure(failure lint.Failure, severity lint.Severity) { - f.printHeaderRow(failure, severity) - f.printFilePosition(failure) - fmt.Println() - fmt.Println() +func (f *Friendly) printFriendlyFailure(w io.Writer, failure lint.Failure, severity lint.Severity) { + f.printHeaderRow(w, failure, severity) + f.printFilePosition(w, failure) + fmt.Fprintln(w) + fmt.Fprintln(w) } -func (f *Friendly) printHeaderRow(failure lint.Failure, severity lint.Severity) { +func (f *Friendly) printHeaderRow(w io.Writer, failure lint.Failure, severity lint.Severity) { emoji := getWarningEmoji() if severity == lint.SeverityError { emoji = getErrorEmoji() } - fmt.Print(f.table([][]string{{emoji, "https://revive.run/r#" + failure.RuleName, color.GreenString(failure.Failure)}})) + fmt.Fprint(w, f.table([][]string{{emoji, "https://revive.run/r#" + failure.RuleName, color.GreenString(failure.Failure)}})) } -func (*Friendly) printFilePosition(failure lint.Failure) { - fmt.Printf(" %s:%d:%d", failure.GetFilename(), failure.Position.Start.Line, failure.Position.Start.Column) +func (*Friendly) printFilePosition(w io.Writer, failure lint.Failure) { + fmt.Fprintf(w, " %s:%d:%d", failure.GetFilename(), failure.Position.Start.Line, failure.Position.Start.Column) } type statEntry struct { @@ -77,7 +79,7 @@ type statEntry struct { failures int } -func (*Friendly) printSummary(errors, warnings int) { +func (*Friendly) printSummary(w io.Writer, errors, warnings int) { emoji := getWarningEmoji() if errors > 0 { emoji = getErrorEmoji() @@ -96,18 +98,18 @@ func (*Friendly) printSummary(errors, warnings int) { } str := fmt.Sprintf("%d %s (%d %s, %d %s)", errors+warnings, problemsLabel, errors, errorsLabel, warnings, warningsLabel) if errors > 0 { - fmt.Printf("%s %s\n", emoji, color.RedString(str)) - fmt.Println() + fmt.Fprintf(w, "%s %s\n", emoji, color.RedString(str)) + fmt.Fprintln(w) return } if warnings > 0 { - fmt.Printf("%s %s\n", emoji, color.YellowString(str)) - fmt.Println() + fmt.Fprintf(w, "%s %s\n", emoji, color.YellowString(str)) + fmt.Fprintln(w) return } } -func (f *Friendly) printStatistics(header string, stats map[string]int) { +func (f *Friendly) printStatistics(w io.Writer, header string, stats map[string]int) { if len(stats) == 0 { return } @@ -122,8 +124,8 @@ func (f *Friendly) printStatistics(header string, stats map[string]int) { for _, entry := range data { formatted = append(formatted, []string{color.GreenString(fmt.Sprintf("%d", entry.failures)), entry.name}) } - fmt.Println(header) - fmt.Println(f.table(formatted)) + fmt.Fprintln(w, 
header) + fmt.Fprintln(w, f.table(formatted)) } func (*Friendly) table(rows [][]string) string { diff --git a/vendor/github.com/mgechev/revive/formatter/ndjson.go b/vendor/github.com/mgechev/revive/formatter/ndjson.go index a02d9c80fa..58b35dc44d 100644 --- a/vendor/github.com/mgechev/revive/formatter/ndjson.go +++ b/vendor/github.com/mgechev/revive/formatter/ndjson.go @@ -1,8 +1,8 @@ package formatter import ( + "bytes" "encoding/json" - "os" "github.com/mgechev/revive/lint" ) @@ -20,7 +20,8 @@ func (*NDJSON) Name() string { // Format formats the failures gotten from the lint. func (*NDJSON) Format(failures <-chan lint.Failure, config lint.Config) (string, error) { - enc := json.NewEncoder(os.Stdout) + var buf bytes.Buffer + enc := json.NewEncoder(&buf) for failure := range failures { obj := jsonObject{} obj.Severity = severity(config, failure) @@ -30,5 +31,5 @@ func (*NDJSON) Format(failures <-chan lint.Failure, config lint.Config) (string, return "", err } } - return "", nil + return buf.String(), nil } diff --git a/vendor/github.com/mgechev/revive/formatter/plain.go b/vendor/github.com/mgechev/revive/formatter/plain.go index 6e083bcfd0..09ebf6cdc8 100644 --- a/vendor/github.com/mgechev/revive/formatter/plain.go +++ b/vendor/github.com/mgechev/revive/formatter/plain.go @@ -1,6 +1,7 @@ package formatter import ( + "bytes" "fmt" "github.com/mgechev/revive/lint" @@ -19,8 +20,9 @@ func (*Plain) Name() string { // Format formats the failures gotten from the lint. func (*Plain) Format(failures <-chan lint.Failure, _ lint.Config) (string, error) { + var buf bytes.Buffer for failure := range failures { - fmt.Printf("%v: %s %s\n", failure.Position.Start, failure.Failure, "https://revive.run/r#"+failure.RuleName) + fmt.Fprintf(&buf, "%v: %s %s\n", failure.Position.Start, failure.Failure, "https://revive.run/r#"+failure.RuleName) } - return "", nil + return buf.String(), nil } diff --git a/vendor/github.com/mgechev/revive/formatter/sarif.go b/vendor/github.com/mgechev/revive/formatter/sarif.go index ee62adcc02..c42da73eb0 100644 --- a/vendor/github.com/mgechev/revive/formatter/sarif.go +++ b/vendor/github.com/mgechev/revive/formatter/sarif.go @@ -81,14 +81,14 @@ func (l *reviveRunLog) AddResult(failure lint.Failure) { } position := failure.Position filename := position.Start.Filename - line := positiveOrZero(position.Start.Line - 1) // https://docs.oasis-open.org/sarif/sarif/v2.1.0/csprd01/sarif-v2.1.0-csprd01.html#def_line - column := positiveOrZero(position.Start.Column - 1) // https://docs.oasis-open.org/sarif/sarif/v2.1.0/csprd01/sarif-v2.1.0-csprd01.html#def_column + line := positiveOrZero(position.Start.Line) // https://docs.oasis-open.org/sarif/sarif/v2.1.0/csprd01/sarif-v2.1.0-csprd01.html#def_line + column := positiveOrZero(position.Start.Column) // https://docs.oasis-open.org/sarif/sarif/v2.1.0/csprd01/sarif-v2.1.0-csprd01.html#def_column result := garif.NewResult(garif.NewMessageFromText(failure.Failure)) location := garif.NewLocation().WithURI(filename).WithLineColumn(line, column) result.Locations = append(result.Locations, location) result.RuleId = failure.RuleName - result.Level = l.rules[failure.RuleName].Severity + result.Level = garif.ResultLevel(l.rules[failure.RuleName].Severity) l.run.Results = append(l.run.Results, result) } diff --git a/vendor/github.com/mgechev/revive/formatter/unix.go b/vendor/github.com/mgechev/revive/formatter/unix.go index ef2f1613ac..e46f3c275f 100644 --- a/vendor/github.com/mgechev/revive/formatter/unix.go +++ 
b/vendor/github.com/mgechev/revive/formatter/unix.go @@ -1,6 +1,7 @@ package formatter import ( + "bytes" "fmt" "github.com/mgechev/revive/lint" @@ -8,7 +9,8 @@ import ( // Unix is an implementation of the Formatter interface // which formats the errors to a simple line based error format -// main.go:24:9: [errorf] should replace errors.New(fmt.Sprintf(...)) with fmt.Errorf(...) +// +// main.go:24:9: [errorf] should replace errors.New(fmt.Sprintf(...)) with fmt.Errorf(...) type Unix struct { Metadata lint.FormatterMetadata } @@ -20,8 +22,9 @@ func (*Unix) Name() string { // Format formats the failures gotten from the lint. func (*Unix) Format(failures <-chan lint.Failure, _ lint.Config) (string, error) { + var buf bytes.Buffer for failure := range failures { - fmt.Printf("%v: [%s] %s\n", failure.Position.Start, failure.RuleName, failure.Failure) + fmt.Fprintf(&buf, "%v: [%s] %s\n", failure.Position.Start, failure.RuleName, failure.Failure) } - return "", nil + return buf.String(), nil } diff --git a/vendor/github.com/mgechev/revive/internal/ifelse/args.go b/vendor/github.com/mgechev/revive/internal/ifelse/args.go new file mode 100644 index 0000000000..c6e647e697 --- /dev/null +++ b/vendor/github.com/mgechev/revive/internal/ifelse/args.go @@ -0,0 +1,11 @@ +package ifelse + +// PreserveScope is a configuration argument that prevents suggestions +// that would enlarge variable scope +const PreserveScope = "preserveScope" + +// Args contains arguments common to the early-return, indent-error-flow +// and superfluous-else rules (currently just preserveScope) +type Args struct { + PreserveScope bool +} diff --git a/vendor/github.com/mgechev/revive/internal/ifelse/branch.go b/vendor/github.com/mgechev/revive/internal/ifelse/branch.go new file mode 100644 index 0000000000..6e6036b899 --- /dev/null +++ b/vendor/github.com/mgechev/revive/internal/ifelse/branch.go @@ -0,0 +1,93 @@ +package ifelse + +import ( + "fmt" + "go/ast" + "go/token" +) + +// Branch contains information about a branch within an if-else chain. +type Branch struct { + BranchKind + Call // The function called at the end for kind Panic or Exit. + HasDecls bool // The branch has one or more declarations (at the top level block) +} + +// BlockBranch gets the Branch of an ast.BlockStmt. +func BlockBranch(block *ast.BlockStmt) Branch { + blockLen := len(block.List) + if blockLen == 0 { + return Empty.Branch() + } + + branch := StmtBranch(block.List[blockLen-1]) + branch.HasDecls = hasDecls(block) + return branch +} + +// StmtBranch gets the Branch of an ast.Stmt. +func StmtBranch(stmt ast.Stmt) Branch { + switch stmt := stmt.(type) { + case *ast.ReturnStmt: + return Return.Branch() + case *ast.BlockStmt: + return BlockBranch(stmt) + case *ast.BranchStmt: + switch stmt.Tok { + case token.BREAK: + return Break.Branch() + case token.CONTINUE: + return Continue.Branch() + case token.GOTO: + return Goto.Branch() + } + case *ast.ExprStmt: + fn, ok := ExprCall(stmt) + if !ok { + break + } + kind, ok := DeviatingFuncs[fn] + if ok { + return Branch{BranchKind: kind, Call: fn} + } + case *ast.EmptyStmt: + return Empty.Branch() + case *ast.LabeledStmt: + return StmtBranch(stmt.Stmt) + } + return Regular.Branch() +} + +// String returns a brief string representation +func (b Branch) String() string { + switch b.BranchKind { + case Panic, Exit: + return fmt.Sprintf("... 
%v()", b.Call) + default: + return b.BranchKind.String() + } +} + +// LongString returns a longer form string representation +func (b Branch) LongString() string { + switch b.BranchKind { + case Panic, Exit: + return fmt.Sprintf("call to %v function", b.Call) + default: + return b.BranchKind.LongString() + } +} + +func hasDecls(block *ast.BlockStmt) bool { + for _, stmt := range block.List { + switch stmt := stmt.(type) { + case *ast.DeclStmt: + return true + case *ast.AssignStmt: + if stmt.Tok == token.DEFINE { + return true + } + } + } + return false +} diff --git a/vendor/github.com/mgechev/revive/internal/ifelse/branch_kind.go b/vendor/github.com/mgechev/revive/internal/ifelse/branch_kind.go new file mode 100644 index 0000000000..41601d1e1d --- /dev/null +++ b/vendor/github.com/mgechev/revive/internal/ifelse/branch_kind.go @@ -0,0 +1,101 @@ +package ifelse + +// BranchKind is a classifier for if-else branches. It says whether the branch is empty, +// and whether the branch ends with a statement that deviates control flow. +type BranchKind int + +const ( + // Empty branches do nothing + Empty BranchKind = iota + + // Return branches return from the current function + Return + + // Continue branches continue a surrounding "for" loop + Continue + + // Break branches break a surrounding "for" loop + Break + + // Goto branches conclude with a "goto" statement + Goto + + // Panic branches panic the current function + Panic + + // Exit branches end the program + Exit + + // Regular branches do not fit any category above + Regular +) + +// IsEmpty tests if the branch is empty +func (k BranchKind) IsEmpty() bool { return k == Empty } + +// Returns tests if the branch returns from the current function +func (k BranchKind) Returns() bool { return k == Return } + +// Deviates tests if the control does not flow to the first +// statement following the if-else chain. +func (k BranchKind) Deviates() bool { + switch k { + case Empty, Regular: + return false + case Return, Continue, Break, Goto, Panic, Exit: + return true + default: + panic("invalid kind") + } +} + +// Branch returns a Branch with the given kind +func (k BranchKind) Branch() Branch { return Branch{BranchKind: k} } + +// String returns a brief string representation +func (k BranchKind) String() string { + switch k { + case Empty: + return "" + case Regular: + return "..." + case Return: + return "... return" + case Continue: + return "... continue" + case Break: + return "... break" + case Goto: + return "... goto" + case Panic: + return "... panic()" + case Exit: + return "... os.Exit()" + default: + panic("invalid kind") + } +} + +// LongString returns a longer form string representation +func (k BranchKind) LongString() string { + switch k { + case Empty: + return "an empty block" + case Regular: + return "a regular statement" + case Return: + return "a return statement" + case Continue: + return "a continue statement" + case Break: + return "a break statement" + case Goto: + return "a goto statement" + case Panic: + return "a function call that panics" + case Exit: + return "a function call that exits the program" + default: + panic("invalid kind") + } +} diff --git a/vendor/github.com/mgechev/revive/internal/ifelse/chain.go b/vendor/github.com/mgechev/revive/internal/ifelse/chain.go new file mode 100644 index 0000000000..9891635ee1 --- /dev/null +++ b/vendor/github.com/mgechev/revive/internal/ifelse/chain.go @@ -0,0 +1,10 @@ +package ifelse + +// Chain contains information about an if-else chain. 
+type Chain struct { + If Branch // what happens at the end of the "if" block + Else Branch // what happens at the end of the "else" block + HasInitializer bool // is there an "if"-initializer somewhere in the chain? + HasPriorNonDeviating bool // is there a prior "if" block that does NOT deviate control flow? + AtBlockEnd bool // whether the chain is placed at the end of the surrounding block +} diff --git a/vendor/github.com/mgechev/revive/internal/ifelse/doc.go b/vendor/github.com/mgechev/revive/internal/ifelse/doc.go new file mode 100644 index 0000000000..0aa2c98175 --- /dev/null +++ b/vendor/github.com/mgechev/revive/internal/ifelse/doc.go @@ -0,0 +1,6 @@ +// Package ifelse provides helpers for analysing the control flow in if-else chains, +// presently used by the following rules: +// - early-return +// - indent-error-flow +// - superfluous-else +package ifelse diff --git a/vendor/github.com/mgechev/revive/internal/ifelse/func.go b/vendor/github.com/mgechev/revive/internal/ifelse/func.go new file mode 100644 index 0000000000..7ba3519184 --- /dev/null +++ b/vendor/github.com/mgechev/revive/internal/ifelse/func.go @@ -0,0 +1,51 @@ +package ifelse + +import ( + "fmt" + "go/ast" +) + +// Call contains the name of a function that deviates control flow. +type Call struct { + Pkg string // The package qualifier of the function, if not built-in. + Name string // The function name. +} + +// DeviatingFuncs lists known control flow deviating function calls. +var DeviatingFuncs = map[Call]BranchKind{ + {"os", "Exit"}: Exit, + {"log", "Fatal"}: Exit, + {"log", "Fatalf"}: Exit, + {"log", "Fatalln"}: Exit, + {"", "panic"}: Panic, + {"log", "Panic"}: Panic, + {"log", "Panicf"}: Panic, + {"log", "Panicln"}: Panic, +} + +// ExprCall gets the Call of an ExprStmt, if any. +func ExprCall(expr *ast.ExprStmt) (Call, bool) { + call, ok := expr.X.(*ast.CallExpr) + if !ok { + return Call{}, false + } + switch v := call.Fun.(type) { + case *ast.Ident: + return Call{Name: v.Name}, true + case *ast.SelectorExpr: + if ident, ok := v.X.(*ast.Ident); ok { + return Call{Name: v.Sel.Name, Pkg: ident.Name}, true + } + } + return Call{}, false +} + +// String returns the function name with package qualifier (if any) +func (f Call) String() string { + switch { + case f.Pkg != "": + return fmt.Sprintf("%s.%s", f.Pkg, f.Name) + default: + return f.Name + } +} diff --git a/vendor/github.com/mgechev/revive/internal/ifelse/rule.go b/vendor/github.com/mgechev/revive/internal/ifelse/rule.go new file mode 100644 index 0000000000..07ad456b65 --- /dev/null +++ b/vendor/github.com/mgechev/revive/internal/ifelse/rule.go @@ -0,0 +1,105 @@ +package ifelse + +import ( + "go/ast" + "go/token" + + "github.com/mgechev/revive/lint" +) + +// Rule is an interface for linters operating on if-else chains +type Rule interface { + CheckIfElse(chain Chain, args Args) (failMsg string) +} + +// Apply evaluates the given Rule on if-else chains found within the given AST, +// and returns the failures. +// +// Note that in if-else chain with multiple "if" blocks, only the *last* one is checked, +// that is to say, given: +// +// if foo { +// ... +// } else if bar { +// ... +// } else { +// ... +// } +// +// Only the block following "bar" is linted. This is because the rules that use this function +// do not presently have anything to say about earlier blocks in the chain. 
+func Apply(rule Rule, node ast.Node, target Target, args lint.Arguments) []lint.Failure { + v := &visitor{rule: rule, target: target} + for _, arg := range args { + if arg == PreserveScope { + v.args.PreserveScope = true + } + } + ast.Walk(v, node) + return v.failures +} + +type visitor struct { + failures []lint.Failure + target Target + rule Rule + args Args +} + +func (v *visitor) Visit(node ast.Node) ast.Visitor { + block, ok := node.(*ast.BlockStmt) + if !ok { + return v + } + + for i, stmt := range block.List { + if ifStmt, ok := stmt.(*ast.IfStmt); ok { + v.visitChain(ifStmt, Chain{AtBlockEnd: i == len(block.List)-1}) + continue + } + ast.Walk(v, stmt) + } + return nil +} + +func (v *visitor) visitChain(ifStmt *ast.IfStmt, chain Chain) { + // look for other if-else chains nested inside this if { } block + ast.Walk(v, ifStmt.Body) + + if ifStmt.Else == nil { + // no else branch + return + } + + if as, ok := ifStmt.Init.(*ast.AssignStmt); ok && as.Tok == token.DEFINE { + chain.HasInitializer = true + } + chain.If = BlockBranch(ifStmt.Body) + + switch elseBlock := ifStmt.Else.(type) { + case *ast.IfStmt: + if !chain.If.Deviates() { + chain.HasPriorNonDeviating = true + } + v.visitChain(elseBlock, chain) + case *ast.BlockStmt: + // look for other if-else chains nested inside this else { } block + ast.Walk(v, elseBlock) + + chain.Else = BlockBranch(elseBlock) + if failMsg := v.rule.CheckIfElse(chain, v.args); failMsg != "" { + if chain.HasInitializer { + // if statement has a := initializer, so we might need to move the assignment + // onto its own line in case the body references it + failMsg += " (move short variable declaration to its own line if necessary)" + } + v.failures = append(v.failures, lint.Failure{ + Confidence: 1, + Node: v.target.node(ifStmt), + Failure: failMsg, + }) + } + default: + panic("invalid node type for else") + } +} diff --git a/vendor/github.com/mgechev/revive/internal/ifelse/target.go b/vendor/github.com/mgechev/revive/internal/ifelse/target.go new file mode 100644 index 0000000000..81ff1c3037 --- /dev/null +++ b/vendor/github.com/mgechev/revive/internal/ifelse/target.go @@ -0,0 +1,25 @@ +package ifelse + +import "go/ast" + +// Target decides what line/column should be indicated by the rule in question. +type Target int + +const ( + // TargetIf means the text refers to the "if" + TargetIf Target = iota + + // TargetElse means the text refers to the "else" + TargetElse +) + +func (t Target) node(ifStmt *ast.IfStmt) ast.Node { + switch t { + case TargetIf: + return ifStmt + case TargetElse: + return ifStmt.Else + default: + panic("bad target") + } +} diff --git a/vendor/github.com/mgechev/revive/lint/config.go b/vendor/github.com/mgechev/revive/lint/config.go index 2763058046..7e51a93c28 100644 --- a/vendor/github.com/mgechev/revive/lint/config.go +++ b/vendor/github.com/mgechev/revive/lint/config.go @@ -3,16 +3,45 @@ package lint // Arguments is type used for the arguments of a rule. type Arguments = []interface{} +// FileFilters is type used for modeling file filters to apply to rules. +type FileFilters = []*FileFilter + // RuleConfig is type used for the rule configuration. 
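The ifelse package added above gives early-return, indent-error-flow and superfluous-else a shared traversal: a rule only implements CheckIfElse and hands the AST walk to ifelse.Apply. A minimal sketch of that wiring, written as a hypothetical rule inside revive's own rule package (demoRule and its message are invented; every API it touches comes from the diff above):

```go
package rule

import (
	"github.com/mgechev/revive/internal/ifelse"
	"github.com/mgechev/revive/lint"
)

type demoRule struct{}

// Apply hands the AST walk to ifelse.Apply, which calls CheckIfElse once per
// if-else chain and reports at the position selected by the Target.
func (r demoRule) Apply(file *lint.File, args lint.Arguments) []lint.Failure {
	return ifelse.Apply(r, file.AST, ifelse.TargetIf, args)
}

// CheckIfElse returns a non-empty message to record a failure for the chain.
func (demoRule) CheckIfElse(chain ifelse.Chain, _ ifelse.Args) string {
	if chain.Else.Deviates() {
		return "else branch ends in " + chain.Else.LongString()
	}
	return ""
}

func (demoRule) Name() string { return "demo-rule" }
```

Because Apply only calls CheckIfElse for the last "if" of each chain, a rule built on it never has to reason about earlier else-if blocks itself.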
type RuleConfig struct { Arguments Arguments Severity Severity Disabled bool + // Exclude - rule-level file excludes, TOML related (strings) + Exclude []string + // excludeFilters - regex-based file filters, initialized from Exclude + excludeFilters []*FileFilter +} + +// Initialize - should be called after reading from TOML file +func (rc *RuleConfig) Initialize() error { + for _, f := range rc.Exclude { + ff, err := ParseFileFilter(f) + if err != nil { + return err + } + rc.excludeFilters = append(rc.excludeFilters, ff) + } + return nil } // RulesConfig defines the config for all rules. type RulesConfig = map[string]RuleConfig +// MustExclude - checks if given filename `name` must be excluded +func (rc *RuleConfig) MustExclude(name string) bool { + for _, exclude := range rc.excludeFilters { + if exclude.MatchFileName(name) { + return true + } + } + return false +} + // DirectiveConfig is type used for the linter directive configuration. type DirectiveConfig struct { Severity Severity diff --git a/vendor/github.com/mgechev/revive/lint/doc.go b/vendor/github.com/mgechev/revive/lint/doc.go new file mode 100644 index 0000000000..7048adf4b6 --- /dev/null +++ b/vendor/github.com/mgechev/revive/lint/doc.go @@ -0,0 +1,2 @@ +// Package lint implements the linting machinery. +package lint diff --git a/vendor/github.com/mgechev/revive/lint/file.go b/vendor/github.com/mgechev/revive/lint/file.go index dcf0e608f6..23255304c5 100644 --- a/vendor/github.com/mgechev/revive/lint/file.go +++ b/vendor/github.com/mgechev/revive/lint/file.go @@ -102,6 +102,9 @@ func (f *File) lint(rules []Rule, config Config, failures chan Failure) { disabledIntervals := f.disabledIntervals(rules, mustSpecifyDisableReason, failures) for _, currentRule := range rules { ruleConfig := rulesConfig[currentRule.Name()] + if ruleConfig.MustExclude(f.Name) { + continue + } currentFailures := currentRule.Apply(f, ruleConfig.Arguments) for idx, failure := range currentFailures { if failure.RuleName == "" { diff --git a/vendor/github.com/mgechev/revive/lint/filefilter.go b/vendor/github.com/mgechev/revive/lint/filefilter.go new file mode 100644 index 0000000000..8da090b9cc --- /dev/null +++ b/vendor/github.com/mgechev/revive/lint/filefilter.go @@ -0,0 +1,128 @@ +package lint + +import ( + "fmt" + "regexp" + "strings" +) + +// FileFilter - file filter to exclude some files for rule +// supports whole +// 1. file/dir names : pkg/mypkg/my.go, +// 2. globs: **/*.pb.go, +// 3. regexes (~ prefix) ~-tmp\.\d+\.go +// 4. 
special test marker `TEST` - treats as `~_test\.go` +type FileFilter struct { + // raw definition of filter inside config + raw string + // don't care what was at start, will use regexes inside + rx *regexp.Regexp + // marks filter as matching everything + matchesAll bool + // marks filter as matching nothing + matchesNothing bool +} + +// ParseFileFilter - creates [FileFilter] for given raw filter +// if empty string, it matches nothing +// if `*`, or `~`, it matches everything +// while regexp could be invalid, it could return it's compilation error +func ParseFileFilter(rawFilter string) (*FileFilter, error) { + rawFilter = strings.TrimSpace(rawFilter) + result := new(FileFilter) + result.raw = rawFilter + result.matchesNothing = len(result.raw) == 0 + result.matchesAll = result.raw == "*" || result.raw == "~" + if !result.matchesAll && !result.matchesNothing { + if err := result.prepareRegexp(); err != nil { + return nil, err + } + } + return result, nil +} + +func (ff *FileFilter) String() string { return ff.raw } + +// MatchFileName - checks if file name matches filter +func (ff *FileFilter) MatchFileName(name string) bool { + if ff.matchesAll { + return true + } + if ff.matchesNothing { + return false + } + name = strings.ReplaceAll(name, "\\", "/") + return ff.rx.MatchString(name) +} + +var fileFilterInvalidGlobRegexp = regexp.MustCompile(`[^/]\*\*[^/]`) +var escapeRegexSymbols = ".+{}()[]^$" + +func (ff *FileFilter) prepareRegexp() error { + var err error + var src = ff.raw + if src == "TEST" { + src = "~_test\\.go" + } + if strings.HasPrefix(src, "~") { + ff.rx, err = regexp.Compile(src[1:]) + if err != nil { + return fmt.Errorf("invalid file filter [%s], regexp compile error: [%v]", ff.raw, err) + } + return nil + } + /* globs */ + if strings.Contains(src, "*") { + if fileFilterInvalidGlobRegexp.MatchString(src) { + return fmt.Errorf("invalid file filter [%s], invalid glob pattern", ff.raw) + } + var rxBuild strings.Builder + rxBuild.WriteByte('^') + wasStar := false + justDirGlob := false + for _, c := range src { + if c == '*' { + if wasStar { + rxBuild.WriteString(`[\s\S]*`) + wasStar = false + justDirGlob = true + continue + } + wasStar = true + continue + } + if wasStar { + rxBuild.WriteString("[^/]*") + wasStar = false + } + if strings.ContainsRune(escapeRegexSymbols, c) { + rxBuild.WriteByte('\\') + } + rxBuild.WriteRune(c) + if c == '/' && justDirGlob { + rxBuild.WriteRune('?') + } + justDirGlob = false + } + if wasStar { + rxBuild.WriteString("[^/]*") + } + rxBuild.WriteByte('$') + ff.rx, err = regexp.Compile(rxBuild.String()) + if err != nil { + return fmt.Errorf("invalid file filter [%s], regexp compile error after glob expand: [%v]", ff.raw, err) + } + return nil + } + + // it's whole file mask, just escape dots and normilze separators + fillRx := src + fillRx = strings.ReplaceAll(fillRx, "\\", "/") + fillRx = strings.ReplaceAll(fillRx, ".", `\.`) + fillRx = "^" + fillRx + "$" + ff.rx, err = regexp.Compile(fillRx) + if err != nil { + return fmt.Errorf("invalid file filter [%s], regexp compile full path: [%v]", ff.raw, err) + } + return nil +} diff --git a/vendor/github.com/mgechev/revive/lint/utils.go b/vendor/github.com/mgechev/revive/lint/utils.go index 28657c6df0..6ccfb0ef29 100644 --- a/vendor/github.com/mgechev/revive/lint/utils.go +++ b/vendor/github.com/mgechev/revive/lint/utils.go @@ -6,7 +6,7 @@ import ( ) // Name returns a different name if it should be different. 
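Since ParseFileFilter and MatchFileName are exported from the lint package, the new per-rule Exclude matching can be exercised on its own. A rough standalone sketch (file names and patterns are made up) covering the three filter forms described above: globs, ~-prefixed regexes, and the TEST shorthand:

```go
package main

import (
	"fmt"

	"github.com/mgechev/revive/lint"
)

func main() {
	// The filter forms accepted by RuleConfig.Exclude: a glob, a regular
	// expression (leading "~"), and the "TEST" shorthand for ~_test\.go.
	filters := []string{"**/*.pb.go", `~-tmp\.\d+\.go`, "TEST"}
	names := []string{"pkg/api/user.pb.go", "lint/file_test.go", "cache-tmp.42.go"}

	for _, raw := range filters {
		ff, err := lint.ParseFileFilter(raw)
		if err != nil {
			panic(err)
		}
		for _, name := range names {
			fmt.Printf("%-18q matches %-22q: %v\n", raw, name, ff.MatchFileName(name))
		}
	}
}
```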
-func Name(name string, whitelist, blacklist []string) (should string) { +func Name(name string, allowlist, blocklist []string) (should string) { // Fast path for simple cases: "_" and all lowercase. if name == "_" { return name @@ -57,12 +57,12 @@ func Name(name string, whitelist, blacklist []string) (should string) { // [w,i) is a word. word := string(runes[w:i]) ignoreInitWarnings := map[string]bool{} - for _, i := range whitelist { + for _, i := range allowlist { ignoreInitWarnings[i] = true } extraInits := map[string]bool{} - for _, i := range blacklist { + for _, i := range blocklist { extraInits[i] = true } @@ -71,6 +71,10 @@ func Name(name string, whitelist, blacklist []string) (should string) { if w == 0 && unicode.IsLower(runes[w]) { u = strings.ToLower(u) } + // Keep lowercase s for IDs + if u == "IDS" { + u = "IDs" + } // All the common initialisms are ASCII, // so we can replace the bytes exactly. copy(runes[w:], []rune(u)) @@ -99,6 +103,7 @@ var commonInitialisms = map[string]bool{ "HTTP": true, "HTTPS": true, "ID": true, + "IDS": true, "IP": true, "JSON": true, "LHS": true, diff --git a/vendor/github.com/mgechev/revive/rule/add-constant.go b/vendor/github.com/mgechev/revive/rule/add-constant.go index 414be38c39..86182623a9 100644 --- a/vendor/github.com/mgechev/revive/rule/add-constant.go +++ b/vendor/github.com/mgechev/revive/rule/add-constant.go @@ -3,6 +3,7 @@ package rule import ( "fmt" "go/ast" + "regexp" "strconv" "strings" "sync" @@ -17,13 +18,13 @@ const ( kindSTRING = "STRING" ) -type whiteList map[string]map[string]bool +type allowList map[string]map[string]bool -func newWhiteList() whiteList { +func newAllowList() allowList { return map[string]map[string]bool{kindINT: {}, kindFLOAT: {}, kindSTRING: {}} } -func (wl whiteList) add(kind, list string) { +func (wl allowList) add(kind, list string) { elems := strings.Split(list, ",") for _, e := range elems { wl[kind][e] = true @@ -32,8 +33,9 @@ func (wl whiteList) add(kind, list string) { // AddConstantRule lints unused params in functions. 
type AddConstantRule struct { - whiteList whiteList - strLitLimit int + allowList allowList + ignoreFunctions []*regexp.Regexp + strLitLimit int sync.Mutex } @@ -47,7 +49,14 @@ func (r *AddConstantRule) Apply(file *lint.File, arguments lint.Arguments) []lin failures = append(failures, failure) } - w := lintAddConstantRule{onFailure: onFailure, strLits: make(map[string]int), strLitLimit: r.strLitLimit, whiteLst: r.whiteList} + w := &lintAddConstantRule{ + onFailure: onFailure, + strLits: make(map[string]int), + strLitLimit: r.strLitLimit, + allowList: r.allowList, + ignoreFunctions: r.ignoreFunctions, + structTags: make(map[*ast.BasicLit]struct{}), + } ast.Walk(w, file.AST) @@ -60,30 +69,93 @@ func (*AddConstantRule) Name() string { } type lintAddConstantRule struct { - onFailure func(lint.Failure) - strLits map[string]int - strLitLimit int - whiteLst whiteList + onFailure func(lint.Failure) + strLits map[string]int + strLitLimit int + allowList allowList + ignoreFunctions []*regexp.Regexp + structTags map[*ast.BasicLit]struct{} } -func (w lintAddConstantRule) Visit(node ast.Node) ast.Visitor { +func (w *lintAddConstantRule) Visit(node ast.Node) ast.Visitor { + if node == nil { + return nil + } + switch n := node.(type) { + case *ast.CallExpr: + w.checkFunc(n) + return nil case *ast.GenDecl: return nil // skip declarations case *ast.BasicLit: - switch kind := n.Kind.String(); kind { - case kindFLOAT, kindINT: - w.checkNumLit(kind, n) - case kindSTRING: - w.checkStrLit(n) + if !w.isStructTag(n) { + w.checkLit(n) + } + case *ast.StructType: + if n.Fields != nil { + for _, field := range n.Fields.List { + if field.Tag != nil { + w.structTags[field.Tag] = struct{}{} + } + } } } return w } -func (w lintAddConstantRule) checkStrLit(n *ast.BasicLit) { - if w.whiteLst[kindSTRING][n.Value] { +func (w *lintAddConstantRule) checkFunc(expr *ast.CallExpr) { + fName := w.getFuncName(expr) + + for _, arg := range expr.Args { + switch t := arg.(type) { + case *ast.CallExpr: + w.checkFunc(t) + case *ast.BasicLit: + if w.isIgnoredFunc(fName) { + continue + } + w.checkLit(t) + } + } +} + +func (*lintAddConstantRule) getFuncName(expr *ast.CallExpr) string { + switch f := expr.Fun.(type) { + case *ast.SelectorExpr: + switch prefix := f.X.(type) { + case *ast.Ident: + return prefix.Name + "." 
+ f.Sel.Name + } + case *ast.Ident: + return f.Name + } + + return "" +} + +func (w *lintAddConstantRule) checkLit(n *ast.BasicLit) { + switch kind := n.Kind.String(); kind { + case kindFLOAT, kindINT: + w.checkNumLit(kind, n) + case kindSTRING: + w.checkStrLit(n) + } +} + +func (w *lintAddConstantRule) isIgnoredFunc(fName string) bool { + for _, pattern := range w.ignoreFunctions { + if pattern.MatchString(fName) { + return true + } + } + + return false +} + +func (w *lintAddConstantRule) checkStrLit(n *ast.BasicLit) { + if w.allowList[kindSTRING][n.Value] { return } @@ -102,8 +174,8 @@ func (w lintAddConstantRule) checkStrLit(n *ast.BasicLit) { } } -func (w lintAddConstantRule) checkNumLit(kind string, n *ast.BasicLit) { - if w.whiteLst[kind][n.Value] { +func (w *lintAddConstantRule) checkNumLit(kind string, n *ast.BasicLit) { + if w.allowList[kind][n.Value] { return } @@ -115,15 +187,20 @@ func (w lintAddConstantRule) checkNumLit(kind string, n *ast.BasicLit) { }) } +func (w *lintAddConstantRule) isStructTag(n *ast.BasicLit) bool { + _, ok := w.structTags[n] + return ok +} + func (r *AddConstantRule) configure(arguments lint.Arguments) { r.Lock() defer r.Unlock() - if r.whiteList == nil { + if r.allowList == nil { r.strLitLimit = defaultStrLitLimit - r.whiteList = newWhiteList() + r.allowList = newAllowList() if len(arguments) > 0 { - args, ok := arguments[0].(map[string]interface{}) + args, ok := arguments[0].(map[string]any) if !ok { panic(fmt.Sprintf("Invalid argument to the add-constant rule. Expecting a k,v map, got %T", arguments[0])) } @@ -146,7 +223,7 @@ func (r *AddConstantRule) configure(arguments lint.Arguments) { if !ok { panic(fmt.Sprintf("Invalid argument to the add-constant rule, string expected. Got '%v' (%T)", v, v)) } - r.whiteList.add(kind, list) + r.allowList.add(kind, list) case "maxLitCount": sl, ok := v.(string) if !ok { @@ -158,6 +235,25 @@ func (r *AddConstantRule) configure(arguments lint.Arguments) { panic(fmt.Sprintf("Invalid argument to the add-constant rule, expecting string representation of an integer. Got '%v'", v)) } r.strLitLimit = limit + case "ignoreFuncs": + excludes, ok := v.(string) + if !ok { + panic(fmt.Sprintf("Invalid argument to the ignoreFuncs parameter of add-constant rule, string expected. Got '%v' (%T)", v, v)) + } + + for _, exclude := range strings.Split(excludes, ",") { + exclude = strings.Trim(exclude, " ") + if exclude == "" { + panic("Invalid argument to the ignoreFuncs parameter of add-constant rule, expected regular expression must not be empty.") + } + + exp, err := regexp.Compile(exclude) + if err != nil { + panic(fmt.Sprintf("Invalid argument to the ignoreFuncs parameter of add-constant rule: regexp %q does not compile: %v", exclude, err)) + } + + r.ignoreFunctions = append(r.ignoreFunctions, exp) + } } } } diff --git a/vendor/github.com/mgechev/revive/rule/argument-limit.go b/vendor/github.com/mgechev/revive/rule/argument-limit.go index 8042da15e3..8120288fd5 100644 --- a/vendor/github.com/mgechev/revive/rule/argument-limit.go +++ b/vendor/github.com/mgechev/revive/rule/argument-limit.go @@ -14,10 +14,16 @@ type ArgumentsLimitRule struct { sync.Mutex } +const defaultArgumentsLimit = 8 + func (r *ArgumentsLimitRule) configure(arguments lint.Arguments) { r.Lock() + defer r.Unlock() if r.total == 0 { - checkNumberOfArguments(1, arguments, r.Name()) + if len(arguments) < 1 { + r.total = defaultArgumentsLimit + return + } total, ok := arguments[0].(int64) // Alt. 
non panicking version if !ok { @@ -25,7 +31,6 @@ func (r *ArgumentsLimitRule) configure(arguments lint.Arguments) { } r.total = int(total) } - r.Unlock() } // Apply applies the rule to given file. diff --git a/vendor/github.com/mgechev/revive/rule/banned-characters.go b/vendor/github.com/mgechev/revive/rule/banned-characters.go index 76fa2235a9..12997bae11 100644 --- a/vendor/github.com/mgechev/revive/rule/banned-characters.go +++ b/vendor/github.com/mgechev/revive/rule/banned-characters.go @@ -19,11 +19,11 @@ const bannedCharsRuleName = "banned-characters" func (r *BannedCharsRule) configure(arguments lint.Arguments) { r.Lock() - if r.bannedCharList == nil { + defer r.Unlock() + if r.bannedCharList == nil && len(arguments) > 0 { checkNumberOfArguments(1, arguments, bannedCharsRuleName) r.bannedCharList = r.getBannedCharsList(arguments) } - r.Unlock() } // Apply applied the rule to the given file. diff --git a/vendor/github.com/mgechev/revive/rule/cognitive-complexity.go b/vendor/github.com/mgechev/revive/rule/cognitive-complexity.go index a9c11a7d0b..1973faef87 100644 --- a/vendor/github.com/mgechev/revive/rule/cognitive-complexity.go +++ b/vendor/github.com/mgechev/revive/rule/cognitive-complexity.go @@ -16,10 +16,17 @@ type CognitiveComplexityRule struct { sync.Mutex } +const defaultMaxCognitiveComplexity = 7 + func (r *CognitiveComplexityRule) configure(arguments lint.Arguments) { r.Lock() + defer r.Unlock() if r.maxComplexity == 0 { - checkNumberOfArguments(1, arguments, r.Name()) + + if len(arguments) < 1 { + r.maxComplexity = defaultMaxCognitiveComplexity + return + } complexity, ok := arguments[0].(int64) if !ok { @@ -27,7 +34,6 @@ func (r *CognitiveComplexityRule) configure(arguments lint.Arguments) { } r.maxComplexity = int(complexity) } - r.Unlock() } // Apply applies the rule to given file. diff --git a/vendor/github.com/mgechev/revive/rule/comment-spacings.go b/vendor/github.com/mgechev/revive/rule/comment-spacings.go new file mode 100644 index 0000000000..2b8240ca58 --- /dev/null +++ b/vendor/github.com/mgechev/revive/rule/comment-spacings.go @@ -0,0 +1,91 @@ +package rule + +import ( + "fmt" + "strings" + "sync" + + "github.com/mgechev/revive/lint" +) + +// CommentSpacingsRule check the whether there is a space between +// the comment symbol( // ) and the start of the comment text +type CommentSpacingsRule struct { + allowList []string + sync.Mutex +} + +func (r *CommentSpacingsRule) configure(arguments lint.Arguments) { + r.Lock() + defer r.Unlock() + + if r.allowList == nil { + r.allowList = []string{ + "//go:", + "//revive:", + "//nolint:", + } + + for _, arg := range arguments { + allow, ok := arg.(string) // Alt. non panicking version + if !ok { + panic(fmt.Sprintf("invalid argument %v for %s; expected string but got %T", arg, r.Name(), arg)) + } + r.allowList = append(r.allowList, `//`+allow+`:`) + } + } +} + +// Apply the rule. 
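Looking back at the add-constant changes, the new ignoreFuncs option takes a comma-separated list of regular expressions; literals passed directly to a matching call are skipped while other magic literals are still reported. An illustrative sketch (demo code, assuming the rule is configured with a pattern such as os\.Exit):

```go
package demo

import "os"

// Hypothetical example for add-constant with ignoreFuncs matching "os\.Exit".
func demoIgnoreFuncs(failed bool) {
	if failed {
		os.Exit(2) // not reported: "os.Exit" matches the ignoreFuncs pattern
	}
	retry(3) // still reported: magic number passed to a non-ignored call
}

func retry(attempts int) { _ = attempts }
```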
+func (r *CommentSpacingsRule) Apply(file *lint.File, args lint.Arguments) []lint.Failure { + r.configure(args) + + var failures []lint.Failure + + for _, cg := range file.AST.Comments { + for _, comment := range cg.List { + commentLine := comment.Text + if len(commentLine) < 3 { + continue // nothing to do + } + + isMultiLineComment := commentLine[1] == '*' + isOK := commentLine[2] == '\n' + if isMultiLineComment && isOK { + continue + } + + isOK = (commentLine[2] == ' ') || (commentLine[2] == '\t') + if isOK { + continue + } + + if r.isAllowed(commentLine) { + continue + } + + failures = append(failures, lint.Failure{ + Node: comment, + Confidence: 1, + Category: "style", + Failure: "no space between comment delimiter and comment text", + }) + } + } + return failures +} + +// Name yields this rule name. +func (*CommentSpacingsRule) Name() string { + return "comment-spacings" +} + +func (r *CommentSpacingsRule) isAllowed(line string) bool { + for _, allow := range r.allowList { + if strings.HasPrefix(line, allow) { + return true + } + } + + return false +} diff --git a/vendor/github.com/mgechev/revive/rule/confusing-naming.go b/vendor/github.com/mgechev/revive/rule/confusing-naming.go index 34cdb907a8..febfd88245 100644 --- a/vendor/github.com/mgechev/revive/rule/confusing-naming.go +++ b/vendor/github.com/mgechev/revive/rule/confusing-naming.go @@ -27,10 +27,10 @@ type packages struct { func (ps *packages) methodNames(lp *lint.Package) pkgMethods { ps.mu.Lock() + defer ps.mu.Unlock() for _, pkg := range ps.pkgs { if pkg.pkg == lp { - ps.mu.Unlock() return pkg } } @@ -38,7 +38,6 @@ func (ps *packages) methodNames(lp *lint.Package) pkgMethods { pkgm := pkgMethods{pkg: lp, methods: make(map[string]map[string]*referenceMethod), mu: &sync.Mutex{}} ps.pkgs = append(ps.pkgs, pkgm) - ps.mu.Unlock() return pkgm } @@ -112,7 +111,7 @@ func checkMethodName(holder string, id *ast.Ident, w *lintConfusingNames) { pkgm.methods[holder] = make(map[string]*referenceMethod, 1) } - // update the black list + // update the block list if pkgm.methods[holder] == nil { println("no entry for '", holder, "'") } @@ -137,8 +136,11 @@ func getStructName(r *ast.FieldList) string { t := r.List[0].Type - if p, _ := t.(*ast.StarExpr); p != nil { // if a pointer receiver => dereference pointer receiver types - t = p.X + switch v := t.(type) { + case *ast.StarExpr: + t = v.X + case *ast.IndexExpr: + t = v.X } if p, _ := t.(*ast.Ident); p != nil { diff --git a/vendor/github.com/mgechev/revive/rule/constant-logical-expr.go b/vendor/github.com/mgechev/revive/rule/constant-logical-expr.go index 9abc95d67c..36cd641f74 100644 --- a/vendor/github.com/mgechev/revive/rule/constant-logical-expr.go +++ b/vendor/github.com/mgechev/revive/rule/constant-logical-expr.go @@ -11,7 +11,7 @@ import ( type ConstantLogicalExprRule struct{} // Apply applies the rule to given file. 
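To make the comment-spacings behaviour concrete, here is an invented snippet showing comments the rule accepts and the kind it flags; string arguments passed to the rule become additional //&lt;arg&gt;: prefixes in the allow list:

```go
package demo

// Accepted: a space or tab follows the "//" delimiter.
// Also accepted: directives starting with an allowed prefix, by default
// "//go:", "//revive:" and "//nolint:".

//go:generate echo generated
//nolint:errcheck
//badly-spaced comment: flagged, no space after "//" and no allowed prefix
func demoCommentSpacings() {}
```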
-func (r *ConstantLogicalExprRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { +func (*ConstantLogicalExprRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { var failures []lint.Failure onFailure := func(failure lint.Failure) { @@ -63,7 +63,7 @@ func (w *lintConstantLogicalExpr) Visit(node ast.Node) ast.Visitor { return w } -func (w *lintConstantLogicalExpr) isOperatorWithLogicalResult(t token.Token) bool { +func (*lintConstantLogicalExpr) isOperatorWithLogicalResult(t token.Token) bool { switch t { case token.LAND, token.LOR, token.EQL, token.LSS, token.GTR, token.NEQ, token.LEQ, token.GEQ: return true @@ -72,7 +72,7 @@ func (w *lintConstantLogicalExpr) isOperatorWithLogicalResult(t token.Token) boo return false } -func (w *lintConstantLogicalExpr) isEqualityOperator(t token.Token) bool { +func (*lintConstantLogicalExpr) isEqualityOperator(t token.Token) bool { switch t { case token.EQL, token.LEQ, token.GEQ: return true @@ -81,7 +81,7 @@ func (w *lintConstantLogicalExpr) isEqualityOperator(t token.Token) bool { return false } -func (w *lintConstantLogicalExpr) isInequalityOperator(t token.Token) bool { +func (*lintConstantLogicalExpr) isInequalityOperator(t token.Token) bool { switch t { case token.LSS, token.GTR, token.NEQ: return true diff --git a/vendor/github.com/mgechev/revive/rule/context-as-argument.go b/vendor/github.com/mgechev/revive/rule/context-as-argument.go index 3c400065e1..e0c8cfa5e9 100644 --- a/vendor/github.com/mgechev/revive/rule/context-as-argument.go +++ b/vendor/github.com/mgechev/revive/rule/context-as-argument.go @@ -82,7 +82,7 @@ func (w lintContextArguments) Visit(n ast.Node) ast.Visitor { func getAllowTypesFromArguments(args lint.Arguments) map[string]struct{} { allowTypesBefore := []string{} if len(args) >= 1 { - argKV, ok := args[0].(map[string]interface{}) + argKV, ok := args[0].(map[string]any) if !ok { panic(fmt.Sprintf("Invalid argument to the context-as-argument rule. Expecting a k,v map, got %T", args[0])) } diff --git a/vendor/github.com/mgechev/revive/rule/cyclomatic.go b/vendor/github.com/mgechev/revive/rule/cyclomatic.go index afd41818b8..9f6d50043d 100644 --- a/vendor/github.com/mgechev/revive/rule/cyclomatic.go +++ b/vendor/github.com/mgechev/revive/rule/cyclomatic.go @@ -17,10 +17,16 @@ type CyclomaticRule struct { sync.Mutex } +const defaultMaxCyclomaticComplexity = 10 + func (r *CyclomaticRule) configure(arguments lint.Arguments) { r.Lock() + defer r.Unlock() if r.maxComplexity == 0 { - checkNumberOfArguments(1, arguments, r.Name()) + if len(arguments) < 1 { + r.maxComplexity = defaultMaxCyclomaticComplexity + return + } complexity, ok := arguments[0].(int64) // Alt. non panicking version if !ok { @@ -28,7 +34,6 @@ func (r *CyclomaticRule) configure(arguments lint.Arguments) { } r.maxComplexity = int(complexity) } - r.Unlock() } // Apply applies the rule to given file. 
diff --git a/vendor/github.com/mgechev/revive/rule/datarace.go b/vendor/github.com/mgechev/revive/rule/datarace.go index 26fcadcdc9..39e96696ad 100644 --- a/vendor/github.com/mgechev/revive/rule/datarace.go +++ b/vendor/github.com/mgechev/revive/rule/datarace.go @@ -53,7 +53,7 @@ func (w lintDataRaces) Visit(n ast.Node) ast.Visitor { return nil } -func (w lintDataRaces) ExtractReturnIDs(fields []*ast.Field) map[*ast.Object]struct{} { +func (lintDataRaces) ExtractReturnIDs(fields []*ast.Field) map[*ast.Object]struct{} { r := map[*ast.Object]struct{}{} for _, f := range fields { for _, id := range f.Names { @@ -111,7 +111,7 @@ func (w lintFunctionForDataRaces) Visit(node ast.Node) ast.Visitor { return ok } - ids := pick(funcLit.Body, selectIDs, nil) + ids := pick(funcLit.Body, selectIDs) for _, id := range ids { id := id.(*ast.Ident) _, isRangeID := w.rangeIDs[id.Obj] diff --git a/vendor/github.com/mgechev/revive/rule/defer.go b/vendor/github.com/mgechev/revive/rule/defer.go index f8224fd4d1..adc6478aee 100644 --- a/vendor/github.com/mgechev/revive/rule/defer.go +++ b/vendor/github.com/mgechev/revive/rule/defer.go @@ -56,7 +56,7 @@ func (*DeferRule) allowFromArgs(args lint.Arguments) map[string]bool { return allow } - aa, ok := args[0].([]interface{}) + aa, ok := args[0].([]any) if !ok { panic(fmt.Sprintf("Invalid argument '%v' for 'defer' rule. Expecting []string, got %T", args[0], args[0])) } @@ -97,18 +97,21 @@ func (w lintDeferRule) Visit(node ast.Node) ast.Visitor { w.newFailure("return in a defer function has no effect", n, 1.0, "logic", "return") } case *ast.CallExpr: - if !w.inADefer && isIdent(n.Fun, "recover") { + isCallToRecover := isIdent(n.Fun, "recover") + switch { + case !w.inADefer && isCallToRecover: // func fn() { recover() } // // confidence is not 1 because recover can be in a function that is deferred elsewhere w.newFailure("recover must be called inside a deferred function", n, 0.8, "logic", "recover") - } else if w.inADefer && !w.inAFuncLit && isIdent(n.Fun, "recover") { + case w.inADefer && !w.inAFuncLit && isCallToRecover: // defer helper(recover()) // // confidence is not truly 1 because this could be in a correctly-deferred func, // but it is very likely to be a misunderstanding of defer's behavior around arguments. w.newFailure("recover must be called inside a deferred function, this is executing recover immediately", n, 1, "logic", "immediate-recover") } + case *ast.DeferStmt: if isIdent(n.Call.Fun, "recover") { // defer recover() @@ -119,7 +122,12 @@ func (w lintDeferRule) Visit(node ast.Node) ast.Visitor { } w.visitSubtree(n.Call.Fun, true, false, false) for _, a := range n.Call.Args { - w.visitSubtree(a, true, false, false) // check arguments, they should not contain recover() + switch a.(type) { + case *ast.FuncLit: + continue // too hard to analyze deferred calls with func literals args + default: + w.visitSubtree(a, true, false, false) // check arguments, they should not contain recover() + } } if w.inALoop { @@ -137,6 +145,7 @@ func (w lintDeferRule) Visit(node ast.Node) ast.Visitor { } } } + return nil } diff --git a/vendor/github.com/mgechev/revive/rule/doc.go b/vendor/github.com/mgechev/revive/rule/doc.go new file mode 100644 index 0000000000..55bf6caa6f --- /dev/null +++ b/vendor/github.com/mgechev/revive/rule/doc.go @@ -0,0 +1,2 @@ +// Package rule implements revive's linting rules. 
+package rule diff --git a/vendor/github.com/mgechev/revive/rule/dot-imports.go b/vendor/github.com/mgechev/revive/rule/dot-imports.go index 25ff526cb5..6b877677db 100644 --- a/vendor/github.com/mgechev/revive/rule/dot-imports.go +++ b/vendor/github.com/mgechev/revive/rule/dot-imports.go @@ -1,16 +1,23 @@ package rule import ( + "fmt" "go/ast" + "sync" "github.com/mgechev/revive/lint" ) // DotImportsRule lints given else constructs. -type DotImportsRule struct{} +type DotImportsRule struct { + sync.Mutex + allowedPackages allowPackages +} // Apply applies the rule to given file. -func (*DotImportsRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { +func (r *DotImportsRule) Apply(file *lint.File, arguments lint.Arguments) []lint.Failure { + r.configure(arguments) + var failures []lint.Failure fileAst := file.AST @@ -20,6 +27,7 @@ func (*DotImportsRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { onFailure: func(failure lint.Failure) { failures = append(failures, failure) }, + allowPackages: r.allowedPackages, } ast.Walk(walker, fileAst) @@ -32,16 +40,49 @@ func (*DotImportsRule) Name() string { return "dot-imports" } +func (r *DotImportsRule) configure(arguments lint.Arguments) { + r.Lock() + defer r.Unlock() + + if r.allowedPackages != nil { + return + } + + r.allowedPackages = make(allowPackages) + if len(arguments) == 0 { + return + } + + args, ok := arguments[0].(map[string]any) + if !ok { + panic(fmt.Sprintf("Invalid argument to the dot-imports rule. Expecting a k,v map, got %T", arguments[0])) + } + + if allowedPkgArg, ok := args["allowedPackages"]; ok { + if pkgs, ok := allowedPkgArg.([]any); ok { + for _, p := range pkgs { + if pkg, ok := p.(string); ok { + r.allowedPackages.add(pkg) + } else { + panic(fmt.Sprintf("Invalid argument to the dot-imports rule, string expected. Got '%v' (%T)", p, p)) + } + } + } else { + panic(fmt.Sprintf("Invalid argument to the dot-imports rule, []string expected. Got '%v' (%T)", allowedPkgArg, allowedPkgArg)) + } + } +} + type lintImports struct { - file *lint.File - fileAst *ast.File - onFailure func(lint.Failure) + file *lint.File + fileAst *ast.File + onFailure func(lint.Failure) + allowPackages allowPackages } func (w lintImports) Visit(_ ast.Node) ast.Visitor { - for i, is := range w.fileAst.Imports { - _ = i - if is.Name != nil && is.Name.Name == "." && !w.file.IsTest() { + for _, is := range w.fileAst.Imports { + if is.Name != nil && is.Name.Name == "." && !w.allowPackages.isAllowedPackage(is.Path.Value) { w.onFailure(lint.Failure{ Confidence: 1, Failure: "should not use dot imports", @@ -52,3 +93,14 @@ func (w lintImports) Visit(_ ast.Node) ast.Visitor { } return nil } + +type allowPackages map[string]struct{} + +func (ap allowPackages) add(pkg string) { + ap[fmt.Sprintf(`"%s"`, pkg)] = struct{}{} // import path strings are with double quotes +} + +func (ap allowPackages) isAllowedPackage(pkg string) bool { + _, allowed := ap[pkg] + return allowed +} diff --git a/vendor/github.com/mgechev/revive/rule/early-return.go b/vendor/github.com/mgechev/revive/rule/early-return.go index bfbf6717c8..9c04a1dbe9 100644 --- a/vendor/github.com/mgechev/revive/rule/early-return.go +++ b/vendor/github.com/mgechev/revive/rule/early-return.go @@ -1,25 +1,19 @@ package rule import ( - "go/ast" + "fmt" + "github.com/mgechev/revive/internal/ifelse" "github.com/mgechev/revive/lint" ) -// EarlyReturnRule lints given else constructs. +// EarlyReturnRule finds opportunities to reduce nesting by inverting +// the condition of an "if" block. 
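The reworked dot-imports rule accepts an allowedPackages argument (a map carrying a list of import paths); only listed packages may be dot-imported, and the old blanket exemption for _test.go files is gone. A small sketch of code the rule would now tolerate if "math" were placed on the allow list:

```go
// Hypothetical demo: with the dot-imports rule configured to allow the "math"
// package, this dot import is not reported; any unlisted dot import still is,
// including in _test.go files.
package demo

import . "math"

var circumference = 2 * Pi * 10 // Pi resolves through the allowed dot import
```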
type EarlyReturnRule struct{} // Apply applies the rule to given file. -func (*EarlyReturnRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { - var failures []lint.Failure - - onFailure := func(failure lint.Failure) { - failures = append(failures, failure) - } - - w := lintEarlyReturnRule{onFailure: onFailure} - ast.Walk(w, file.AST) - return failures +func (e *EarlyReturnRule) Apply(file *lint.File, args lint.Arguments) []lint.Failure { + return ifelse.Apply(e, file.AST, ifelse.TargetIf, args) } // Name returns the rule name. @@ -27,52 +21,31 @@ func (*EarlyReturnRule) Name() string { return "early-return" } -type lintEarlyReturnRule struct { - onFailure func(lint.Failure) -} - -func (w lintEarlyReturnRule) Visit(node ast.Node) ast.Visitor { - switch n := node.(type) { - case *ast.IfStmt: - if n.Else == nil { - // no else branch - return w - } - - elseBlock, ok := n.Else.(*ast.BlockStmt) - if !ok { - // is if-else-if - return w - } - - lenElseBlock := len(elseBlock.List) - if lenElseBlock < 1 { - // empty else block, continue (there is another rule that warns on empty blocks) - return w - } +// CheckIfElse evaluates the rule against an ifelse.Chain. +func (*EarlyReturnRule) CheckIfElse(chain ifelse.Chain, args ifelse.Args) (failMsg string) { + if !chain.Else.Deviates() { + // this rule only applies if the else-block deviates control flow + return + } - lenThenBlock := len(n.Body.List) - if lenThenBlock < 1 { - // then block is empty thus the stmt can be simplified - w.onFailure(lint.Failure{ - Confidence: 1, - Node: n, - Failure: "if c { } else {... return} can be simplified to if !c { ... return }", - }) + if chain.HasPriorNonDeviating && !chain.If.IsEmpty() { + // if we de-indent this block then a previous branch + // might flow into it, affecting program behaviour + return + } - return w - } + if chain.If.Deviates() { + // avoid overlapping with superfluous-else + return + } - _, lastThenStmtIsReturn := n.Body.List[lenThenBlock-1].(*ast.ReturnStmt) - _, lastElseStmtIsReturn := elseBlock.List[lenElseBlock-1].(*ast.ReturnStmt) - if lastElseStmtIsReturn && !lastThenStmtIsReturn { - w.onFailure(lint.Failure{ - Confidence: 1, - Node: n, - Failure: "if c {...} else {... return } can be simplified to if !c { ... return } ...", - }) - } + if args.PreserveScope && !chain.AtBlockEnd && (chain.HasInitializer || chain.If.HasDecls) { + // avoid increasing variable scope + return } - return w + if chain.If.IsEmpty() { + return fmt.Sprintf("if c { } else { %[1]v } can be simplified to if !c { %[1]v }", chain.Else) + } + return fmt.Sprintf("if c { ... 
} else { %[1]v } can be simplified to if !c { %[1]v } ...", chain.Else) } diff --git a/vendor/github.com/mgechev/revive/rule/empty-block.go b/vendor/github.com/mgechev/revive/rule/empty-block.go index 8a4a0fef19..25a052a0ef 100644 --- a/vendor/github.com/mgechev/revive/rule/empty-block.go +++ b/vendor/github.com/mgechev/revive/rule/empty-block.go @@ -40,6 +40,16 @@ func (w lintEmptyBlock) Visit(node ast.Node) ast.Visitor { case *ast.FuncLit: w.ignore[n.Body] = true return w + case *ast.SelectStmt: + w.ignore[n.Body] = true + return w + case *ast.ForStmt: + if len(n.Body.List) == 0 && n.Init == nil && n.Post == nil && n.Cond != nil { + if _, isCall := n.Cond.(*ast.CallExpr); isCall { + w.ignore[n.Body] = true + return w + } + } case *ast.RangeStmt: if len(n.Body.List) == 0 { w.onFailure(lint.Failure{ diff --git a/vendor/github.com/mgechev/revive/rule/enforce-map-style.go b/vendor/github.com/mgechev/revive/rule/enforce-map-style.go new file mode 100644 index 0000000000..36ac2374c2 --- /dev/null +++ b/vendor/github.com/mgechev/revive/rule/enforce-map-style.go @@ -0,0 +1,164 @@ +package rule + +import ( + "fmt" + "go/ast" + "sync" + + "github.com/mgechev/revive/lint" +) + +type enforceMapStyleType string + +const ( + enforceMapStyleTypeAny enforceMapStyleType = "any" + enforceMapStyleTypeMake enforceMapStyleType = "make" + enforceMapStyleTypeLiteral enforceMapStyleType = "literal" +) + +func mapStyleFromString(s string) (enforceMapStyleType, error) { + switch s { + case string(enforceMapStyleTypeAny), "": + return enforceMapStyleTypeAny, nil + case string(enforceMapStyleTypeMake): + return enforceMapStyleTypeMake, nil + case string(enforceMapStyleTypeLiteral): + return enforceMapStyleTypeLiteral, nil + default: + return enforceMapStyleTypeAny, fmt.Errorf( + "invalid map style: %s (expecting one of %v)", + s, + []enforceMapStyleType{ + enforceMapStyleTypeAny, + enforceMapStyleTypeMake, + enforceMapStyleTypeLiteral, + }, + ) + } +} + +// EnforceMapStyleRule implements a rule to enforce `make(map[type]type)` over `map[type]type{}`. +type EnforceMapStyleRule struct { + configured bool + enforceMapStyle enforceMapStyleType + sync.Mutex +} + +func (r *EnforceMapStyleRule) configure(arguments lint.Arguments) { + r.Lock() + defer r.Unlock() + + if r.configured { + return + } + r.configured = true + + if len(arguments) < 1 { + r.enforceMapStyle = enforceMapStyleTypeAny + return + } + + enforceMapStyle, ok := arguments[0].(string) + if !ok { + panic(fmt.Sprintf("Invalid argument '%v' for 'enforce-map-style' rule. Expecting string, got %T", arguments[0], arguments[0])) + } + + var err error + r.enforceMapStyle, err = mapStyleFromString(enforceMapStyle) + + if err != nil { + panic(fmt.Sprintf("Invalid argument to the enforce-map-style rule: %v", err)) + } +} + +// Apply applies the rule to given file. 
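A minimal illustrative sketch (not part of the vendored revive sources; identifiers are hypothetical) of the empty-map declarations the new enforce-map-style rule distinguishes, matching the failure messages emitted by the Apply method below:

    package main

    func main() {
        // Reported when the rule is configured with "make":
        // use make(map[type]type) instead of map[type]type{}
        byLiteral := map[string]int{}

        // Reported when the rule is configured with "literal":
        // use map[type]type{} instead of make(map[type]type)
        byMake := make(map[string]int)

        _, _ = byLiteral, byMake
    }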
+func (r *EnforceMapStyleRule) Apply(file *lint.File, arguments lint.Arguments) []lint.Failure { + r.configure(arguments) + + if r.enforceMapStyle == enforceMapStyleTypeAny { + // this linter is not configured + return nil + } + + var failures []lint.Failure + + astFile := file.AST + ast.Inspect(astFile, func(n ast.Node) bool { + switch v := n.(type) { + case *ast.CompositeLit: + if r.enforceMapStyle != enforceMapStyleTypeMake { + return true + } + + if !r.isMapType(v.Type) { + return true + } + + if len(v.Elts) > 0 { + // not an empty map + return true + } + + failures = append(failures, lint.Failure{ + Confidence: 1, + Node: v, + Category: "style", + Failure: "use make(map[type]type) instead of map[type]type{}", + }) + case *ast.CallExpr: + if r.enforceMapStyle != enforceMapStyleTypeLiteral { + // skip any function calls, even if it's make(map[type]type) + // we don't want to report it if literals are not enforced + return true + } + + ident, ok := v.Fun.(*ast.Ident) + if !ok || ident.Name != "make" { + return true + } + + if len(v.Args) != 1 { + // skip make(map[type]type, size) and invalid empty declarations + return true + } + + if !r.isMapType(v.Args[0]) { + // not a map type + return true + } + + failures = append(failures, lint.Failure{ + Confidence: 1, + Node: v.Args[0], + Category: "style", + Failure: "use map[type]type{} instead of make(map[type]type)", + }) + } + return true + }) + + return failures +} + +// Name returns the rule name. +func (*EnforceMapStyleRule) Name() string { + return "enforce-map-style" +} + +func (r *EnforceMapStyleRule) isMapType(v ast.Expr) bool { + switch t := v.(type) { + case *ast.MapType: + return true + case *ast.Ident: + if t.Obj == nil { + return false + } + typeSpec, ok := t.Obj.Decl.(*ast.TypeSpec) + if !ok { + return false + } + return r.isMapType(typeSpec.Type) + default: + return false + } +} diff --git a/vendor/github.com/mgechev/revive/rule/enforce-repeated-arg-type-style.go b/vendor/github.com/mgechev/revive/rule/enforce-repeated-arg-type-style.go new file mode 100644 index 0000000000..067082b1b0 --- /dev/null +++ b/vendor/github.com/mgechev/revive/rule/enforce-repeated-arg-type-style.go @@ -0,0 +1,191 @@ +package rule + +import ( + "fmt" + "go/ast" + "go/types" + "sync" + + "github.com/mgechev/revive/lint" +) + +type enforceRepeatedArgTypeStyleType string + +const ( + enforceRepeatedArgTypeStyleTypeAny enforceRepeatedArgTypeStyleType = "any" + enforceRepeatedArgTypeStyleTypeShort enforceRepeatedArgTypeStyleType = "short" + enforceRepeatedArgTypeStyleTypeFull enforceRepeatedArgTypeStyleType = "full" +) + +func repeatedArgTypeStyleFromString(s string) enforceRepeatedArgTypeStyleType { + switch s { + case string(enforceRepeatedArgTypeStyleTypeAny), "": + return enforceRepeatedArgTypeStyleTypeAny + case string(enforceRepeatedArgTypeStyleTypeShort): + return enforceRepeatedArgTypeStyleTypeShort + case string(enforceRepeatedArgTypeStyleTypeFull): + return enforceRepeatedArgTypeStyleTypeFull + default: + err := fmt.Errorf( + "invalid repeated arg type style: %s (expecting one of %v)", + s, + []enforceRepeatedArgTypeStyleType{ + enforceRepeatedArgTypeStyleTypeAny, + enforceRepeatedArgTypeStyleTypeShort, + enforceRepeatedArgTypeStyleTypeFull, + }, + ) + + panic(fmt.Sprintf("Invalid argument to the enforce-repeated-arg-type-style rule: %v", err)) + } +} + +// EnforceRepeatedArgTypeStyleRule implements a rule to enforce repeated argument type style. 
+type EnforceRepeatedArgTypeStyleRule struct { + configured bool + funcArgStyle enforceRepeatedArgTypeStyleType + funcRetValStyle enforceRepeatedArgTypeStyleType + + sync.Mutex +} + +func (r *EnforceRepeatedArgTypeStyleRule) configure(arguments lint.Arguments) { + r.Lock() + defer r.Unlock() + + if r.configured { + return + } + r.configured = true + + r.funcArgStyle = enforceRepeatedArgTypeStyleTypeAny + r.funcRetValStyle = enforceRepeatedArgTypeStyleTypeAny + + if len(arguments) == 0 { + return + } + + switch funcArgStyle := arguments[0].(type) { + case string: + r.funcArgStyle = repeatedArgTypeStyleFromString(funcArgStyle) + r.funcRetValStyle = repeatedArgTypeStyleFromString(funcArgStyle) + case map[string]any: // expecting map[string]string + for k, v := range funcArgStyle { + switch k { + case "funcArgStyle": + val, ok := v.(string) + if !ok { + panic(fmt.Sprintf("Invalid map value type for 'enforce-repeated-arg-type-style' rule. Expecting string, got %T", v)) + } + r.funcArgStyle = repeatedArgTypeStyleFromString(val) + case "funcRetValStyle": + val, ok := v.(string) + if !ok { + panic(fmt.Sprintf("Invalid map value '%v' for 'enforce-repeated-arg-type-style' rule. Expecting string, got %T", v, v)) + } + r.funcRetValStyle = repeatedArgTypeStyleFromString(val) + default: + panic(fmt.Sprintf("Invalid map key for 'enforce-repeated-arg-type-style' rule. Expecting 'funcArgStyle' or 'funcRetValStyle', got %v", k)) + } + } + default: + panic(fmt.Sprintf("Invalid argument '%v' for 'import-alias-naming' rule. Expecting string or map[string]string, got %T", arguments[0], arguments[0])) + } +} + +// Apply applies the rule to a given file. +func (r *EnforceRepeatedArgTypeStyleRule) Apply(file *lint.File, arguments lint.Arguments) []lint.Failure { + r.configure(arguments) + + if r.funcArgStyle == enforceRepeatedArgTypeStyleTypeAny && r.funcRetValStyle == enforceRepeatedArgTypeStyleTypeAny { + // This linter is not configured, return no failures. 
+ return nil + } + + var failures []lint.Failure + + err := file.Pkg.TypeCheck() + if err != nil { + // the file has other issues + return nil + } + typesInfo := file.Pkg.TypesInfo() + + astFile := file.AST + ast.Inspect(astFile, func(n ast.Node) bool { + switch fn := n.(type) { + case *ast.FuncDecl: + if r.funcArgStyle == enforceRepeatedArgTypeStyleTypeFull { + if fn.Type.Params != nil { + for _, field := range fn.Type.Params.List { + if len(field.Names) > 1 { + failures = append(failures, lint.Failure{ + Confidence: 1, + Node: field, + Category: "style", + Failure: "argument types should not be omitted", + }) + } + } + } + } + + if r.funcArgStyle == enforceRepeatedArgTypeStyleTypeShort { + var prevType ast.Expr + if fn.Type.Params != nil { + for _, field := range fn.Type.Params.List { + if types.Identical(typesInfo.Types[field.Type].Type, typesInfo.Types[prevType].Type) { + failures = append(failures, lint.Failure{ + Confidence: 1, + Node: field, + Category: "style", + Failure: "repeated argument type can be omitted", + }) + } + prevType = field.Type + } + } + } + + if r.funcRetValStyle == enforceRepeatedArgTypeStyleTypeFull { + if fn.Type.Results != nil { + for _, field := range fn.Type.Results.List { + if len(field.Names) > 1 { + failures = append(failures, lint.Failure{ + Confidence: 1, + Node: field, + Category: "style", + Failure: "return types should not be omitted", + }) + } + } + } + } + + if r.funcRetValStyle == enforceRepeatedArgTypeStyleTypeShort { + var prevType ast.Expr + if fn.Type.Results != nil { + for _, field := range fn.Type.Results.List { + if field.Names != nil && types.Identical(typesInfo.Types[field.Type].Type, typesInfo.Types[prevType].Type) { + failures = append(failures, lint.Failure{ + Confidence: 1, + Node: field, + Category: "style", + Failure: "repeated return type can be omitted", + }) + } + prevType = field.Type + } + } + } + } + return true + }) + + return failures +} + +// Name returns the name of the linter rule. +func (*EnforceRepeatedArgTypeStyleRule) Name() string { + return "enforce-repeated-arg-type-style" +} diff --git a/vendor/github.com/mgechev/revive/rule/enforce-slice-style.go b/vendor/github.com/mgechev/revive/rule/enforce-slice-style.go new file mode 100644 index 0000000000..abaf20be0e --- /dev/null +++ b/vendor/github.com/mgechev/revive/rule/enforce-slice-style.go @@ -0,0 +1,193 @@ +package rule + +import ( + "fmt" + "go/ast" + "sync" + + "github.com/mgechev/revive/lint" +) + +type enforceSliceStyleType string + +const ( + enforceSliceStyleTypeAny enforceSliceStyleType = "any" + enforceSliceStyleTypeMake enforceSliceStyleType = "make" + enforceSliceStyleTypeLiteral enforceSliceStyleType = "literal" +) + +func sliceStyleFromString(s string) (enforceSliceStyleType, error) { + switch s { + case string(enforceSliceStyleTypeAny), "": + return enforceSliceStyleTypeAny, nil + case string(enforceSliceStyleTypeMake): + return enforceSliceStyleTypeMake, nil + case string(enforceSliceStyleTypeLiteral): + return enforceSliceStyleTypeLiteral, nil + default: + return enforceSliceStyleTypeAny, fmt.Errorf( + "invalid slice style: %s (expecting one of %v)", + s, + []enforceSliceStyleType{ + enforceSliceStyleTypeAny, + enforceSliceStyleTypeMake, + enforceSliceStyleTypeLiteral, + }, + ) + } +} + +// EnforceSliceStyleRule implements a rule to enforce `make([]type)` over `[]type{}`. 
+type EnforceSliceStyleRule struct { + configured bool + enforceSliceStyle enforceSliceStyleType + sync.Mutex +} + +func (r *EnforceSliceStyleRule) configure(arguments lint.Arguments) { + r.Lock() + defer r.Unlock() + + if r.configured { + return + } + r.configured = true + + if len(arguments) < 1 { + r.enforceSliceStyle = enforceSliceStyleTypeAny + return + } + + enforceSliceStyle, ok := arguments[0].(string) + if !ok { + panic(fmt.Sprintf("Invalid argument '%v' for 'enforce-slice-style' rule. Expecting string, got %T", arguments[0], arguments[0])) + } + + var err error + r.enforceSliceStyle, err = sliceStyleFromString(enforceSliceStyle) + + if err != nil { + panic(fmt.Sprintf("Invalid argument to the enforce-slice-style rule: %v", err)) + } +} + +// Apply applies the rule to given file. +func (r *EnforceSliceStyleRule) Apply(file *lint.File, arguments lint.Arguments) []lint.Failure { + r.configure(arguments) + + if r.enforceSliceStyle == enforceSliceStyleTypeAny { + // this linter is not configured + return nil + } + + var failures []lint.Failure + + astFile := file.AST + ast.Inspect(astFile, func(n ast.Node) bool { + switch v := n.(type) { + case *ast.CompositeLit: + if r.enforceSliceStyle != enforceSliceStyleTypeMake { + return true + } + + if !r.isSliceType(v.Type) { + return true + } + + if len(v.Elts) > 0 { + // not an empty slice + return true + } + + failures = append(failures, lint.Failure{ + Confidence: 1, + Node: v, + Category: "style", + Failure: "use make([]type) instead of []type{} (or declare nil slice)", + }) + case *ast.CallExpr: + if r.enforceSliceStyle != enforceSliceStyleTypeLiteral { + // skip any function calls, even if it's make([]type) + // we don't want to report it if literals are not enforced + return true + } + + ident, ok := v.Fun.(*ast.Ident) + if !ok || ident.Name != "make" { + return true + } + + if len(v.Args) < 2 { + // skip invalid make declarations + return true + } + + if !r.isSliceType(v.Args[0]) { + // not a slice type + return true + } + + arg, ok := v.Args[1].(*ast.BasicLit) + if !ok { + // skip invalid make declarations + return true + } + + if arg.Value != "0" { + // skip slice with non-zero size + return true + } + + if len(v.Args) > 2 { + arg, ok := v.Args[2].(*ast.BasicLit) + if !ok { + // skip invalid make declarations + return true + } + + if arg.Value != "0" { + // skip non-zero capacity slice + return true + } + } + + failures = append(failures, lint.Failure{ + Confidence: 1, + Node: v.Args[0], + Category: "style", + Failure: "use []type{} instead of make([]type, 0) (or declare nil slice)", + }) + } + return true + }) + + return failures +} + +// Name returns the rule name. 
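A minimal illustrative sketch (hypothetical identifiers, not taken from the patch) of the empty-slice forms the new enforce-slice-style rule reports, matching the failure messages above; a plain nil declaration is the alternative both messages suggest:

    package main

    func main() {
        // Reported under the "make" style:
        // use make([]type) instead of []type{} (or declare nil slice)
        byLiteral := []string{}

        // Reported under the "literal" style:
        // use []type{} instead of make([]type, 0) (or declare nil slice)
        byMake := make([]string, 0)

        // Neither style reports a nil slice declaration,
        // since only composite literals and make calls are inspected.
        var viaNil []string

        _, _, _ = byLiteral, byMake, viaNil
    }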
+func (*EnforceSliceStyleRule) Name() string { + return "enforce-slice-style" +} + +func (r *EnforceSliceStyleRule) isSliceType(v ast.Expr) bool { + switch t := v.(type) { + case *ast.ArrayType: + if t.Len != nil { + // array + return false + } + // slice + return true + case *ast.Ident: + if t.Obj == nil { + return false + } + typeSpec, ok := t.Obj.Decl.(*ast.TypeSpec) + if !ok { + return false + } + return r.isSliceType(typeSpec.Type) + default: + return false + } +} diff --git a/vendor/github.com/mgechev/revive/rule/file-header.go b/vendor/github.com/mgechev/revive/rule/file-header.go index 76f548f51f..a7d69ff2b1 100644 --- a/vendor/github.com/mgechev/revive/rule/file-header.go +++ b/vendor/github.com/mgechev/revive/rule/file-header.go @@ -21,21 +21,28 @@ var ( func (r *FileHeaderRule) configure(arguments lint.Arguments) { r.Lock() + defer r.Unlock() if r.header == "" { - checkNumberOfArguments(1, arguments, r.Name()) + if len(arguments) < 1 { + return + } + var ok bool r.header, ok = arguments[0].(string) if !ok { - panic(fmt.Sprintf("invalid argument for \"file-header\" rule: first argument should be a string, got %T", arguments[0])) + panic(fmt.Sprintf("invalid argument for \"file-header\" rule: argument should be a string, got %T", arguments[0])) } } - r.Unlock() } // Apply applies the rule to given file. func (r *FileHeaderRule) Apply(file *lint.File, arguments lint.Arguments) []lint.Failure { r.configure(arguments) + if r.header == "" { + return nil + } + failure := []lint.Failure{ { Node: file.AST, diff --git a/vendor/github.com/mgechev/revive/rule/flag-param.go b/vendor/github.com/mgechev/revive/rule/flag-param.go index 19a05f9fea..f9bfb712c4 100644 --- a/vendor/github.com/mgechev/revive/rule/flag-param.go +++ b/vendor/github.com/mgechev/revive/rule/flag-param.go @@ -88,7 +88,7 @@ func (w conditionVisitor) Visit(node ast.Node) ast.Visitor { return false } - uses := pick(ifStmt.Cond, fselect, nil) + uses := pick(ifStmt.Cond, fselect) if len(uses) < 1 { return w diff --git a/vendor/github.com/mgechev/revive/rule/function-length.go b/vendor/github.com/mgechev/revive/rule/function-length.go index d600d7a2a1..fd65884e97 100644 --- a/vendor/github.com/mgechev/revive/rule/function-length.go +++ b/vendor/github.com/mgechev/revive/rule/function-length.go @@ -19,13 +19,13 @@ type FunctionLength struct { func (r *FunctionLength) configure(arguments lint.Arguments) { r.Lock() + defer r.Unlock() if !r.configured { maxStmt, maxLines := r.parseArguments(arguments) r.maxStmt = int(maxStmt) r.maxLines = int(maxLines) r.configured = true } - r.Unlock() } // Apply applies the rule to given file. 
@@ -53,7 +53,14 @@ func (*FunctionLength) Name() string { return "function-length" } +const defaultFuncStmtsLimit = 50 +const defaultFuncLinesLimit = 75 + func (*FunctionLength) parseArguments(arguments lint.Arguments) (maxStmt, maxLines int64) { + if len(arguments) == 0 { + return defaultFuncStmtsLimit, defaultFuncLinesLimit + } + if len(arguments) != 2 { panic(fmt.Sprintf(`invalid configuration for "function-length" rule, expected 2 arguments but got %d`, len(arguments))) } @@ -164,7 +171,7 @@ func (w lintFuncLength) countFuncLitStmts(stmt ast.Expr) int { return 0 } -func (w lintFuncLength) countBodyListStmts(t interface{}) int { +func (w lintFuncLength) countBodyListStmts(t any) int { i := reflect.ValueOf(t).Elem().FieldByName(`Body`).Elem().FieldByName(`List`).Interface() return w.countStmts(i.([]ast.Stmt)) } diff --git a/vendor/github.com/mgechev/revive/rule/function-result-limit.go b/vendor/github.com/mgechev/revive/rule/function-result-limit.go index 5d2b87316a..6a0748011d 100644 --- a/vendor/github.com/mgechev/revive/rule/function-result-limit.go +++ b/vendor/github.com/mgechev/revive/rule/function-result-limit.go @@ -14,11 +14,16 @@ type FunctionResultsLimitRule struct { sync.Mutex } +const defaultResultsLimit = 3 + func (r *FunctionResultsLimitRule) configure(arguments lint.Arguments) { r.Lock() + defer r.Unlock() if r.max == 0 { - checkNumberOfArguments(1, arguments, r.Name()) - + if len(arguments) < 1 { + r.max = defaultResultsLimit + return + } max, ok := arguments[0].(int64) // Alt. non panicking version if !ok { panic(fmt.Sprintf(`invalid value passed as return results number to the "function-result-limit" rule; need int64 but got %T`, arguments[0])) @@ -28,7 +33,6 @@ func (r *FunctionResultsLimitRule) configure(arguments lint.Arguments) { } r.max = int(max) } - r.Unlock() } // Apply applies the rule to given file. diff --git a/vendor/github.com/mgechev/revive/rule/identical-branches.go b/vendor/github.com/mgechev/revive/rule/identical-branches.go index b1a69097f6..9222c8a9c5 100644 --- a/vendor/github.com/mgechev/revive/rule/identical-branches.go +++ b/vendor/github.com/mgechev/revive/rule/identical-branches.go @@ -63,8 +63,10 @@ func (lintIdenticalBranches) identicalBranches(branches []*ast.BlockStmt) bool { } ref := gofmt(branches[0]) + refSize := len(branches[0].List) for i := 1; i < len(branches); i++ { - if gofmt(branches[i]) != ref { + currentSize := len(branches[i].List) + if currentSize != refSize || gofmt(branches[i]) != ref { return false } } diff --git a/vendor/github.com/mgechev/revive/rule/import-alias-naming.go b/vendor/github.com/mgechev/revive/rule/import-alias-naming.go new file mode 100644 index 0000000000..a6d096c8b2 --- /dev/null +++ b/vendor/github.com/mgechev/revive/rule/import-alias-naming.go @@ -0,0 +1,126 @@ +package rule + +import ( + "fmt" + "regexp" + "sync" + + "github.com/mgechev/revive/lint" +) + +// ImportAliasNamingRule lints import alias naming. 
+type ImportAliasNamingRule struct { + configured bool + allowRegexp *regexp.Regexp + denyRegexp *regexp.Regexp + sync.Mutex +} + +const defaultImportAliasNamingAllowRule = "^[a-z][a-z0-9]{0,}$" + +var defaultImportAliasNamingAllowRegexp = regexp.MustCompile(defaultImportAliasNamingAllowRule) + +func (r *ImportAliasNamingRule) configure(arguments lint.Arguments) { + r.Lock() + defer r.Unlock() + if r.configured { + return + } + + if len(arguments) == 0 { + r.allowRegexp = defaultImportAliasNamingAllowRegexp + return + } + + switch namingRule := arguments[0].(type) { + case string: + r.setAllowRule(namingRule) + case map[string]any: // expecting map[string]string + for k, v := range namingRule { + switch k { + case "allowRegex": + r.setAllowRule(v) + case "denyRegex": + r.setDenyRule(v) + default: + panic(fmt.Sprintf("Invalid map key for 'import-alias-naming' rule. Expecting 'allowRegex' or 'denyRegex', got %v", k)) + } + } + default: + panic(fmt.Sprintf("Invalid argument '%v' for 'import-alias-naming' rule. Expecting string or map[string]string, got %T", arguments[0], arguments[0])) + } + + if r.allowRegexp == nil && r.denyRegexp == nil { + r.allowRegexp = defaultImportAliasNamingAllowRegexp + } +} + +// Apply applies the rule to given file. +func (r *ImportAliasNamingRule) Apply(file *lint.File, arguments lint.Arguments) []lint.Failure { + r.configure(arguments) + + var failures []lint.Failure + + for _, is := range file.AST.Imports { + path := is.Path + if path == nil { + continue + } + + alias := is.Name + if alias == nil || alias.Name == "_" || alias.Name == "." { // "_" and "." are special types of import aiases and should be processed by another linter rule + continue + } + + if r.allowRegexp != nil && !r.allowRegexp.MatchString(alias.Name) { + failures = append(failures, lint.Failure{ + Confidence: 1, + Failure: fmt.Sprintf("import name (%s) must match the regular expression: %s", alias.Name, r.allowRegexp.String()), + Node: alias, + Category: "imports", + }) + } + + if r.denyRegexp != nil && r.denyRegexp.MatchString(alias.Name) { + failures = append(failures, lint.Failure{ + Confidence: 1, + Failure: fmt.Sprintf("import name (%s) must NOT match the regular expression: %s", alias.Name, r.denyRegexp.String()), + Node: alias, + Category: "imports", + }) + } + } + + return failures +} + +// Name returns the rule name. +func (*ImportAliasNamingRule) Name() string { + return "import-alias-naming" +} + +func (r *ImportAliasNamingRule) setAllowRule(value any) { + namingRule, ok := value.(string) + if !ok { + panic(fmt.Sprintf("Invalid argument '%v' for import-alias-naming allowRegexp rule. Expecting string, got %T", value, value)) + } + + namingRuleRegexp, err := regexp.Compile(namingRule) + if err != nil { + panic(fmt.Sprintf("Invalid argument to the import-alias-naming allowRegexp rule. Expecting %q to be a valid regular expression, got: %v", namingRule, err)) + } + r.allowRegexp = namingRuleRegexp +} + +func (r *ImportAliasNamingRule) setDenyRule(value any) { + namingRule, ok := value.(string) + if !ok { + panic(fmt.Sprintf("Invalid argument '%v' for import-alias-naming denyRegexp rule. Expecting string, got %T", value, value)) + } + + namingRuleRegexp, err := regexp.Compile(namingRule) + if err != nil { + panic(fmt.Sprintf("Invalid argument to the import-alias-naming denyRegexp rule. 
Expecting %q to be a valid regular expression, got: %v", namingRule, err)) + } + r.denyRegexp = namingRuleRegexp +} diff --git a/vendor/github.com/mgechev/revive/rule/import-shadowing.go b/vendor/github.com/mgechev/revive/rule/import-shadowing.go index 2bab704d02..046aeb688e 100644 --- a/vendor/github.com/mgechev/revive/rule/import-shadowing.go +++ b/vendor/github.com/mgechev/revive/rule/import-shadowing.go @@ -29,6 +29,7 @@ func (*ImportShadowingRule) Apply(file *lint.File, _ lint.Arguments) []lint.Fail failures = append(failures, failure) }, alreadySeen: map[*ast.Object]struct{}{}, + skipIdents: map[*ast.Ident]struct{}{}, } ast.Walk(walker, fileAst) @@ -62,6 +63,7 @@ type importShadowing struct { importNames map[string]struct{} onFailure func(lint.Failure) alreadySeen map[*ast.Object]struct{} + skipIdents map[*ast.Ident]struct{} } // Visit visits AST nodes and checks if id nodes (ast.Ident) shadow an import name @@ -80,6 +82,10 @@ func (w importShadowing) Visit(n ast.Node) ast.Visitor { *ast.SelectorExpr, // skip analysis of selector expressions (anId.otherId): because if anId shadows an import name, it was already detected, and otherId does not shadows the import name *ast.StructType: // skip analysis of struct type because struct fields can not shadow an import name return nil + case *ast.FuncDecl: + if n.Recv != nil { + w.skipIdents[n.Name] = struct{}{} + } case *ast.Ident: if n == w.packageNameIdent { return nil // skip the ident corresponding to the package name of this file @@ -92,11 +98,12 @@ func (w importShadowing) Visit(n ast.Node) ast.Visitor { _, isImportName := w.importNames[id] _, alreadySeen := w.alreadySeen[n.Obj] - if isImportName && !alreadySeen { + _, skipIdent := w.skipIdents[n] + if isImportName && !alreadySeen && !skipIdent { w.onFailure(lint.Failure{ Confidence: 1, Node: n, - Category: "namming", + Category: "naming", Failure: fmt.Sprintf("The name '%s' shadows an import name", id), }) diff --git a/vendor/github.com/mgechev/revive/rule/imports-blacklist.go b/vendor/github.com/mgechev/revive/rule/imports-blacklist.go deleted file mode 100644 index 7106628155..0000000000 --- a/vendor/github.com/mgechev/revive/rule/imports-blacklist.go +++ /dev/null @@ -1,77 +0,0 @@ -package rule - -import ( - "fmt" - "regexp" - "sync" - - "github.com/mgechev/revive/lint" -) - -// ImportsBlacklistRule lints given else constructs. -type ImportsBlacklistRule struct { - blacklist []*regexp.Regexp - sync.Mutex -} - -var replaceRegexp = regexp.MustCompile(`/?\*\*/?`) - -func (r *ImportsBlacklistRule) configure(arguments lint.Arguments) { - r.Lock() - defer r.Unlock() - - if r.blacklist == nil { - r.blacklist = make([]*regexp.Regexp, 0) - - for _, arg := range arguments { - argStr, ok := arg.(string) - if !ok { - panic(fmt.Sprintf("Invalid argument to the imports-blacklist rule. Expecting a string, got %T", arg)) - } - regStr, err := regexp.Compile(fmt.Sprintf(`(?m)"%s"$`, replaceRegexp.ReplaceAllString(argStr, `(\W|\w)*`))) - if err != nil { - panic(fmt.Sprintf("Invalid argument to the imports-blacklist rule. Expecting %q to be a valid regular expression, got: %v", argStr, err)) - } - r.blacklist = append(r.blacklist, regStr) - } - } -} - -func (r *ImportsBlacklistRule) isBlacklisted(path string) bool { - for _, regex := range r.blacklist { - if regex.MatchString(path) { - return true - } - } - return false -} - -// Apply applies the rule to given file. 
-func (r *ImportsBlacklistRule) Apply(file *lint.File, arguments lint.Arguments) []lint.Failure { - r.configure(arguments) - - var failures []lint.Failure - - if file.IsTest() { - return failures // skip, test file - } - - for _, is := range file.AST.Imports { - path := is.Path - if path != nil && r.isBlacklisted(path.Value) { - failures = append(failures, lint.Failure{ - Confidence: 1, - Failure: "should not use the following blacklisted import: " + path.Value, - Node: is, - Category: "imports", - }) - } - } - - return failures -} - -// Name returns the rule name. -func (*ImportsBlacklistRule) Name() string { - return "imports-blacklist" -} diff --git a/vendor/github.com/mgechev/revive/rule/imports-blocklist.go b/vendor/github.com/mgechev/revive/rule/imports-blocklist.go new file mode 100644 index 0000000000..431066403a --- /dev/null +++ b/vendor/github.com/mgechev/revive/rule/imports-blocklist.go @@ -0,0 +1,73 @@ +package rule + +import ( + "fmt" + "regexp" + "sync" + + "github.com/mgechev/revive/lint" +) + +// ImportsBlocklistRule lints given else constructs. +type ImportsBlocklistRule struct { + blocklist []*regexp.Regexp + sync.Mutex +} + +var replaceImportRegexp = regexp.MustCompile(`/?\*\*/?`) + +func (r *ImportsBlocklistRule) configure(arguments lint.Arguments) { + r.Lock() + defer r.Unlock() + + if r.blocklist == nil { + r.blocklist = make([]*regexp.Regexp, 0) + + for _, arg := range arguments { + argStr, ok := arg.(string) + if !ok { + panic(fmt.Sprintf("Invalid argument to the imports-blocklist rule. Expecting a string, got %T", arg)) + } + regStr, err := regexp.Compile(fmt.Sprintf(`(?m)"%s"$`, replaceImportRegexp.ReplaceAllString(argStr, `(\W|\w)*`))) + if err != nil { + panic(fmt.Sprintf("Invalid argument to the imports-blocklist rule. Expecting %q to be a valid regular expression, got: %v", argStr, err)) + } + r.blocklist = append(r.blocklist, regStr) + } + } +} + +func (r *ImportsBlocklistRule) isBlocklisted(path string) bool { + for _, regex := range r.blocklist { + if regex.MatchString(path) { + return true + } + } + return false +} + +// Apply applies the rule to given file. +func (r *ImportsBlocklistRule) Apply(file *lint.File, arguments lint.Arguments) []lint.Failure { + r.configure(arguments) + + var failures []lint.Failure + + for _, is := range file.AST.Imports { + path := is.Path + if path != nil && r.isBlocklisted(path.Value) { + failures = append(failures, lint.Failure{ + Confidence: 1, + Failure: "should not use the following blocklisted import: " + path.Value, + Node: is, + Category: "imports", + }) + } + } + + return failures +} + +// Name returns the rule name. +func (*ImportsBlocklistRule) Name() string { + return "imports-blocklist" +} diff --git a/vendor/github.com/mgechev/revive/rule/indent-error-flow.go b/vendor/github.com/mgechev/revive/rule/indent-error-flow.go index e455801c47..294ceef842 100644 --- a/vendor/github.com/mgechev/revive/rule/indent-error-flow.go +++ b/vendor/github.com/mgechev/revive/rule/indent-error-flow.go @@ -1,9 +1,7 @@ package rule import ( - "go/ast" - "go/token" - + "github.com/mgechev/revive/internal/ifelse" "github.com/mgechev/revive/lint" ) @@ -11,16 +9,8 @@ import ( type IndentErrorFlowRule struct{} // Apply applies the rule to given file. 
-func (*IndentErrorFlowRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { - var failures []lint.Failure - - onFailure := func(failure lint.Failure) { - failures = append(failures, failure) - } - - w := lintElse{make(map[*ast.IfStmt]bool), onFailure} - ast.Walk(w, file.AST) - return failures +func (e *IndentErrorFlowRule) Apply(file *lint.File, args lint.Arguments) []lint.Failure { + return ifelse.Apply(e, file.AST, ifelse.TargetElse, args) } // Name returns the rule name. @@ -28,51 +18,28 @@ func (*IndentErrorFlowRule) Name() string { return "indent-error-flow" } -type lintElse struct { - ignore map[*ast.IfStmt]bool - onFailure func(lint.Failure) -} - -func (w lintElse) Visit(node ast.Node) ast.Visitor { - ifStmt, ok := node.(*ast.IfStmt) - if !ok || ifStmt.Else == nil { - return w - } - if w.ignore[ifStmt] { - if elseif, ok := ifStmt.Else.(*ast.IfStmt); ok { - w.ignore[elseif] = true - } - return w +// CheckIfElse evaluates the rule against an ifelse.Chain. +func (*IndentErrorFlowRule) CheckIfElse(chain ifelse.Chain, args ifelse.Args) (failMsg string) { + if !chain.If.Deviates() { + // this rule only applies if the if-block deviates control flow + return } - if elseif, ok := ifStmt.Else.(*ast.IfStmt); ok { - w.ignore[elseif] = true - return w - } - if _, ok := ifStmt.Else.(*ast.BlockStmt); !ok { - // only care about elses without conditions - return w - } - if len(ifStmt.Body.List) == 0 { - return w + + if chain.HasPriorNonDeviating { + // if we de-indent the "else" block then a previous branch + // might flow into it, affecting program behaviour + return } - shortDecl := false // does the if statement have a ":=" initialization statement? - if ifStmt.Init != nil { - if as, ok := ifStmt.Init.(*ast.AssignStmt); ok && as.Tok == token.DEFINE { - shortDecl = true - } + + if !chain.If.Returns() { + // avoid overlapping with superfluous-else + return } - lastStmt := ifStmt.Body.List[len(ifStmt.Body.List)-1] - if _, ok := lastStmt.(*ast.ReturnStmt); ok { - extra := "" - if shortDecl { - extra = " (move short variable declaration to its own line if necessary)" - } - w.onFailure(lint.Failure{ - Confidence: 1, - Node: ifStmt.Else, - Category: "indent", - Failure: "if block ends with a return statement, so drop this else and outdent its block" + extra, - }) + + if args.PreserveScope && !chain.AtBlockEnd && (chain.HasInitializer || chain.Else.HasDecls) { + // avoid increasing variable scope + return } - return w + + return "if block ends with a return statement, so drop this else and outdent its block" } diff --git a/vendor/github.com/mgechev/revive/rule/line-length-limit.go b/vendor/github.com/mgechev/revive/rule/line-length-limit.go index 9e512c1c2c..1a414f6914 100644 --- a/vendor/github.com/mgechev/revive/rule/line-length-limit.go +++ b/vendor/github.com/mgechev/revive/rule/line-length-limit.go @@ -18,10 +18,16 @@ type LineLengthLimitRule struct { sync.Mutex } +const defaultLineLengthLimit = 80 + func (r *LineLengthLimitRule) configure(arguments lint.Arguments) { r.Lock() + defer r.Unlock() if r.max == 0 { - checkNumberOfArguments(1, arguments, r.Name()) + if len(arguments) < 1 { + r.max = defaultLineLengthLimit + return + } max, ok := arguments[0].(int64) // Alt. non panicking version if !ok || max < 0 { @@ -30,7 +36,6 @@ func (r *LineLengthLimitRule) configure(arguments lint.Arguments) { r.max = int(max) } - r.Unlock() } // Apply applies the rule to given file. 
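A minimal sketch (hypothetical function, not from the vendored code) of the if/else shape the rewritten indent-error-flow rule above still targets; the failure message is the string returned by its CheckIfElse:

    package main

    import "errors"

    // parse triggers: "if block ends with a return statement,
    // so drop this else and outdent its block"
    func parse(s string) (int, error) {
        if s == "" {
            return 0, errors.New("empty input")
        } else {
            return len(s), nil
        }
    }

    func main() {
        n, err := parse("abc")
        _, _ = n, err
    }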
diff --git a/vendor/github.com/mgechev/revive/rule/max-control-nesting.go b/vendor/github.com/mgechev/revive/rule/max-control-nesting.go new file mode 100644 index 0000000000..c4eb361937 --- /dev/null +++ b/vendor/github.com/mgechev/revive/rule/max-control-nesting.go @@ -0,0 +1,128 @@ +package rule + +import ( + "fmt" + "go/ast" + "sync" + + "github.com/mgechev/revive/lint" +) + +// MaxControlNestingRule lints given else constructs. +type MaxControlNestingRule struct { + max int64 + sync.Mutex +} + +const defaultMaxControlNesting = 5 + +// Apply applies the rule to given file. +func (r *MaxControlNestingRule) Apply(file *lint.File, arguments lint.Arguments) []lint.Failure { + r.configure(arguments) + + var failures []lint.Failure + + fileAst := file.AST + + walker := &lintMaxControlNesting{ + onFailure: func(failure lint.Failure) { + failures = append(failures, failure) + }, + max: int(r.max), + } + + ast.Walk(walker, fileAst) + + return failures +} + +// Name returns the rule name. +func (*MaxControlNestingRule) Name() string { + return "max-control-nesting" +} + +type lintMaxControlNesting struct { + max int + onFailure func(lint.Failure) + nestingLevelAcc int + lastCtrlStmt ast.Node +} + +func (w *lintMaxControlNesting) Visit(n ast.Node) ast.Visitor { + if w.nestingLevelAcc > w.max { // we are visiting a node beyond the max nesting level + w.onFailure(lint.Failure{ + Failure: fmt.Sprintf("control flow nesting exceeds %d", w.max), + Confidence: 1, + Node: w.lastCtrlStmt, + Category: "complexity", + }) + return nil // stop visiting deeper + } + + switch v := n.(type) { + case *ast.IfStmt: + w.lastCtrlStmt = v + w.walkControlledBlock(v.Body) // "then" branch block + if v.Else != nil { + w.walkControlledBlock(v.Else) // "else" branch block + } + return nil // stop re-visiting nesting blocks (already visited by w.walkControlledBlock) + + case *ast.ForStmt: + w.lastCtrlStmt = v + w.walkControlledBlock(v.Body) + return nil // stop re-visiting nesting blocks (already visited by w.walkControlledBlock) + + case *ast.CaseClause: // switch case + w.lastCtrlStmt = v + for _, s := range v.Body { // visit each statement in the case clause + w.walkControlledBlock(s) + } + return nil // stop re-visiting nesting blocks (already visited by w.walkControlledBlock) + + case *ast.CommClause: // select case + w.lastCtrlStmt = v + for _, s := range v.Body { // visit each statement in the select case clause + w.walkControlledBlock(s) + } + return nil // stop re-visiting nesting blocks (already visited by w.walkControlledBlock) + + case *ast.FuncLit: + walker := &lintMaxControlNesting{ + onFailure: w.onFailure, + max: w.max, + } + ast.Walk(walker, v.Body) + return nil + } + + return w +} + +func (w *lintMaxControlNesting) walkControlledBlock(b ast.Node) { + oldNestingLevel := w.nestingLevelAcc + w.nestingLevelAcc++ + ast.Walk(w, b) + w.nestingLevelAcc = oldNestingLevel +} + +func (r *MaxControlNestingRule) configure(arguments lint.Arguments) { + r.Lock() + defer r.Unlock() + if !(r.max < 1) { + return // max already set + } + + if len(arguments) < 1 { + r.max = defaultMaxControlNesting + return + } + + checkNumberOfArguments(1, arguments, r.Name()) + + max, ok := arguments[0].(int64) // Alt. 
non panicking version + if !ok { + panic(`invalid value passed as argument number to the "max-control-nesting" rule`) + } + r.max = max +} diff --git a/vendor/github.com/mgechev/revive/rule/max-public-structs.go b/vendor/github.com/mgechev/revive/rule/max-public-structs.go index e39f49c698..25be3e676f 100644 --- a/vendor/github.com/mgechev/revive/rule/max-public-structs.go +++ b/vendor/github.com/mgechev/revive/rule/max-public-structs.go @@ -14,9 +14,17 @@ type MaxPublicStructsRule struct { sync.Mutex } +const defaultMaxPublicStructs = 5 + func (r *MaxPublicStructsRule) configure(arguments lint.Arguments) { r.Lock() + defer r.Unlock() if r.max < 1 { + if len(arguments) < 1 { + r.max = defaultMaxPublicStructs + return + } + checkNumberOfArguments(1, arguments, r.Name()) max, ok := arguments[0].(int64) // Alt. non panicking version @@ -25,7 +33,6 @@ func (r *MaxPublicStructsRule) configure(arguments lint.Arguments) { } r.max = max } - r.Unlock() } // Apply applies the rule to given file. diff --git a/vendor/github.com/mgechev/revive/rule/modifies-value-receiver.go b/vendor/github.com/mgechev/revive/rule/modifies-value-receiver.go index 34e6515570..e9e64b9a6a 100644 --- a/vendor/github.com/mgechev/revive/rule/modifies-value-receiver.go +++ b/vendor/github.com/mgechev/revive/rule/modifies-value-receiver.go @@ -78,11 +78,6 @@ func (w lintModifiesValRecRule) Visit(node ast.Node) ast.Visitor { if name == "" || name != receiverName { continue } - - if w.skipType(ast.Expr(e.Sel)) { - continue - } - case *ast.Ident: // receiver := ... if e.Name != receiverName { continue @@ -97,7 +92,7 @@ func (w lintModifiesValRecRule) Visit(node ast.Node) ast.Visitor { return false } - assignmentsToReceiver := pick(n.Body, fselect, nil) + assignmentsToReceiver := pick(n.Body, fselect) for _, assignment := range assignmentsToReceiver { w.onFailure(lint.Failure{ diff --git a/vendor/github.com/mgechev/revive/rule/nested-structs.go b/vendor/github.com/mgechev/revive/rule/nested-structs.go index 968511f2ec..147bd482b1 100644 --- a/vendor/github.com/mgechev/revive/rule/nested-structs.go +++ b/vendor/github.com/mgechev/revive/rule/nested-structs.go @@ -14,7 +14,6 @@ func (*NestedStructs) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { var failures []lint.Failure walker := &lintNestedStructs{ - fileAST: file.AST, onFailure: func(failure lint.Failure) { failures = append(failures, failure) }, @@ -31,42 +30,46 @@ func (*NestedStructs) Name() string { } type lintNestedStructs struct { - fileAST *ast.File onFailure func(lint.Failure) } func (l *lintNestedStructs) Visit(n ast.Node) ast.Visitor { - switch v := n.(type) { - case *ast.FuncDecl: - if v.Body != nil { - ast.Walk(l, v.Body) - } - return nil - case *ast.Field: - _, isChannelField := v.Type.(*ast.ChanType) - if isChannelField { - return nil - } + if v, ok := n.(*ast.StructType); ok { + ls := &lintStruct{l.onFailure} + ast.Walk(ls, v.Fields) + } - filter := func(n ast.Node) bool { - switch n.(type) { - case *ast.StructType: - return true - default: - return false - } - } - structs := pick(v, filter, nil) - for _, s := range structs { - l.onFailure(lint.Failure{ - Failure: "no nested structs are allowed", - Category: "style", - Node: s, - Confidence: 1, - }) + return l +} + +type lintStruct struct { + onFailure func(lint.Failure) +} + +func (l *lintStruct) Visit(n ast.Node) ast.Visitor { + switch s := n.(type) { + case *ast.StructType: + l.fail(s) + return nil + case *ast.ArrayType: + if _, ok := s.Elt.(*ast.StructType); ok { + l.fail(s) } - return nil // no 
need to visit (again) the field + return nil + case *ast.ChanType: + return nil + case *ast.MapType: + return nil + default: + return l } +} - return l +func (l *lintStruct) fail(n ast.Node) { + l.onFailure(lint.Failure{ + Failure: "no nested structs are allowed", + Category: "style", + Node: n, + Confidence: 1, + }) } diff --git a/vendor/github.com/mgechev/revive/rule/optimize-operands-order.go b/vendor/github.com/mgechev/revive/rule/optimize-operands-order.go index 88928bb98c..841bde56c0 100644 --- a/vendor/github.com/mgechev/revive/rule/optimize-operands-order.go +++ b/vendor/github.com/mgechev/revive/rule/optimize-operands-order.go @@ -54,13 +54,13 @@ func (w lintOptimizeOperandsOrderlExpr) Visit(node ast.Node) ast.Visitor { } // check if the left sub-expression contains a function call - nodes := pick(binExpr.X, isCaller, nil) + nodes := pick(binExpr.X, isCaller) if len(nodes) < 1 { return w } // check if the right sub-expression does not contain a function call - nodes = pick(binExpr.Y, isCaller, nil) + nodes = pick(binExpr.Y, isCaller) if len(nodes) > 0 { return w } diff --git a/vendor/github.com/mgechev/revive/rule/package-comments.go b/vendor/github.com/mgechev/revive/rule/package-comments.go index 33963ab976..02f246be08 100644 --- a/vendor/github.com/mgechev/revive/rule/package-comments.go +++ b/vendor/github.com/mgechev/revive/rule/package-comments.go @@ -58,12 +58,14 @@ func (l *lintPackageComments) checkPackageComment() []lint.Failure { var packageFile *ast.File // which name is $package.go var firstFile *ast.File var firstFileName string + var fileSource string for name, file := range l.file.Pkg.Files() { if file.AST.Doc != nil { return nil } if name == "doc.go" { docFile = file.AST + fileSource = "doc.go" } if name == file.AST.Name.String()+".go" { packageFile = file.AST @@ -76,14 +78,21 @@ func (l *lintPackageComments) checkPackageComment() []lint.Failure { // prefer warning on doc.go, $package.go over first file if docFile == nil { docFile = packageFile + fileSource = l.fileAst.Name.String() + ".go" } if docFile == nil { docFile = firstFile + fileSource = firstFileName } + if docFile != nil { + pkgFile := l.file.Pkg.Files()[fileSource] return []lint.Failure{{ - Category: "comments", - Node: docFile, + Category: "comments", + Position: lint.FailurePosition{ + Start: pkgFile.ToPosition(docFile.Pos()), + End: pkgFile.ToPosition(docFile.Name.End()), + }, Confidence: 1, Failure: "should have a package comment", }} diff --git a/vendor/github.com/mgechev/revive/rule/redefines-builtin-id.go b/vendor/github.com/mgechev/revive/rule/redefines-builtin-id.go index 23dd85a7ac..b3ff084563 100644 --- a/vendor/github.com/mgechev/revive/rule/redefines-builtin-id.go +++ b/vendor/github.com/mgechev/revive/rule/redefines-builtin-id.go @@ -89,6 +89,9 @@ func (w *lintRedefinesBuiltinID) Visit(node ast.Node) ast.Visitor { case *ast.GenDecl: switch n.Tok { case token.TYPE: + if len(n.Specs) < 1 { + return nil + } typeSpec, ok := n.Specs[0].(*ast.TypeSpec) if !ok { return nil diff --git a/vendor/github.com/mgechev/revive/rule/redundant-import-alias.go b/vendor/github.com/mgechev/revive/rule/redundant-import-alias.go new file mode 100644 index 0000000000..fa5281f24b --- /dev/null +++ b/vendor/github.com/mgechev/revive/rule/redundant-import-alias.go @@ -0,0 +1,52 @@ +package rule + +import ( + "fmt" + "go/ast" + "strings" + + "github.com/mgechev/revive/lint" +) + +// RedundantImportAlias lints given else constructs. +type RedundantImportAlias struct{} + +// Apply applies the rule to given file. 
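A minimal sketch (a standard-library import chosen purely for illustration) of the finding produced by the new redundant-import-alias rule: an alias equal to the package's own name, as computed by getImportPackageName below, is reported:

    package main

    import (
        // Reported: Import alias "strings" is redundant
        strings "strings"
    )

    func main() {
        _ = strings.TrimSpace(" x ")
    }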
+func (*RedundantImportAlias) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { + var failures []lint.Failure + + for _, imp := range file.AST.Imports { + if imp.Name == nil { + continue + } + + if getImportPackageName(imp) == imp.Name.Name { + failures = append(failures, lint.Failure{ + Confidence: 1, + Failure: fmt.Sprintf("Import alias \"%s\" is redundant", imp.Name.Name), + Node: imp, + Category: "imports", + }) + } + } + + return failures +} + +// Name returns the rule name. +func (*RedundantImportAlias) Name() string { + return "redundant-import-alias" +} + +func getImportPackageName(imp *ast.ImportSpec) string { + const pathSep = "/" + const strDelim = `"` + + path := imp.Path.Value + i := strings.LastIndex(path, pathSep) + if i == -1 { + return strings.Trim(path, strDelim) + } + + return strings.Trim(path[i+1:], strDelim) +} diff --git a/vendor/github.com/mgechev/revive/rule/string-format.go b/vendor/github.com/mgechev/revive/rule/string-format.go index e7841e8c3d..70edf7387c 100644 --- a/vendor/github.com/mgechev/revive/rule/string-format.go +++ b/vendor/github.com/mgechev/revive/rule/string-format.go @@ -38,7 +38,7 @@ func (*StringFormatRule) Name() string { // ParseArgumentsTest is a public wrapper around w.parseArguments used for testing. Returns the error message provided to panic, or nil if no error was encountered func (StringFormatRule) ParseArgumentsTest(arguments lint.Arguments) *string { w := lintStringFormatRule{} - c := make(chan interface{}) + c := make(chan any) // Parse the arguments in a goroutine, defer a recover() call, return the error encountered (or nil if there was no error) go func() { defer func() { @@ -68,6 +68,7 @@ type stringFormatSubrule struct { parent *lintStringFormatRule scope stringFormatSubruleScope regexp *regexp.Regexp + negated bool errorMessage string } @@ -89,18 +90,19 @@ var parseStringFormatScope = regexp.MustCompile( func (w *lintStringFormatRule) parseArguments(arguments lint.Arguments) { for i, argument := range arguments { - scope, regex, errorMessage := w.parseArgument(argument, i) + scope, regex, negated, errorMessage := w.parseArgument(argument, i) w.rules = append(w.rules, stringFormatSubrule{ parent: w, scope: scope, regexp: regex, + negated: negated, errorMessage: errorMessage, }) } } -func (w lintStringFormatRule) parseArgument(argument interface{}, ruleNum int) (scope stringFormatSubruleScope, regex *regexp.Regexp, errorMessage string) { - g, ok := argument.([]interface{}) // Cast to generic slice first +func (w lintStringFormatRule) parseArgument(argument any, ruleNum int) (scope stringFormatSubruleScope, regex *regexp.Regexp, negated bool, errorMessage string) { + g, ok := argument.([]any) // Cast to generic slice first if !ok { w.configError("argument is not a slice", ruleNum, 0) } @@ -146,7 +148,12 @@ func (w lintStringFormatRule) parseArgument(argument interface{}, ruleNum int) ( } // Strip / characters from the beginning and end of rule[1] before compiling - regex, err := regexp.Compile(rule[1][1 : len(rule[1])-1]) + negated = rule[1][0] == '!' 
+ offset := 1 + if negated { + offset++ + } + regex, err := regexp.Compile(rule[1][offset : len(rule[1])-1]) if err != nil { w.parseError(fmt.Sprintf("unable to compile %s as regexp", rule[1]), ruleNum, 1) } @@ -155,7 +162,7 @@ func (w lintStringFormatRule) parseArgument(argument interface{}, ruleNum int) ( if len(rule) == 3 { errorMessage = rule[2] } - return scope, regex, errorMessage + return scope, regex, negated, errorMessage } // Report an invalid config, this is specifically the user's fault @@ -204,10 +211,14 @@ func (lintStringFormatRule) getCallName(call *ast.CallExpr) (callName string, ok if selector, ok := call.Fun.(*ast.SelectorExpr); ok { // Scoped function call scope, ok := selector.X.(*ast.Ident) - if !ok { - return "", false + if ok { + return scope.Name + "." + selector.Sel.Name, true + } + // Scoped function call inside structure + recv, ok := selector.X.(*ast.SelectorExpr) + if ok { + return recv.Sel.Name + "." + selector.Sel.Name, true } - return scope.Name + "." + selector.Sel.Name, true } return "", false @@ -261,7 +272,26 @@ func (r *stringFormatSubrule) Apply(call *ast.CallExpr) { } func (r *stringFormatSubrule) lintMessage(s string, node ast.Node) { - // Fail if the string doesn't match the user's regex + if r.negated { + if !r.regexp.MatchString(s) { + return + } + // Fail if the string does match the user's regex + var failure string + if len(r.errorMessage) > 0 { + failure = r.errorMessage + } else { + failure = fmt.Sprintf("string literal matches user defined regex /%s/", r.regexp.String()) + } + r.parent.onFailure(lint.Failure{ + Confidence: 1, + Failure: failure, + Node: node, + }) + return + } + + // Fail if the string does NOT match the user's regex if r.regexp.MatchString(s) { return } diff --git a/vendor/github.com/mgechev/revive/rule/struct-tag.go b/vendor/github.com/mgechev/revive/rule/struct-tag.go index 3accf58fb6..f6ee47a731 100644 --- a/vendor/github.com/mgechev/revive/rule/struct-tag.go +++ b/vendor/github.com/mgechev/revive/rule/struct-tag.go @@ -5,23 +5,55 @@ import ( "go/ast" "strconv" "strings" + "sync" "github.com/fatih/structtag" "github.com/mgechev/revive/lint" ) // StructTagRule lints struct tags. -type StructTagRule struct{} +type StructTagRule struct { + userDefined map[string][]string // map: key -> []option + sync.Mutex +} + +func (r *StructTagRule) configure(arguments lint.Arguments) { + r.Lock() + defer r.Unlock() + if r.userDefined == nil && len(arguments) > 0 { + checkNumberOfArguments(1, arguments, r.Name()) + r.userDefined = make(map[string][]string, len(arguments)) + for _, arg := range arguments { + item, ok := arg.(string) + if !ok { + panic(fmt.Sprintf("Invalid argument to the %s rule. Expecting a string, got %v (of type %T)", r.Name(), arg, arg)) + } + parts := strings.Split(item, ",") + if len(parts) < 2 { + panic(fmt.Sprintf("Invalid argument to the %s rule. Expecting a string of the form key[,option]+, got %s", r.Name(), item)) + } + key := strings.TrimSpace(parts[0]) + for i := 1; i < len(parts); i++ { + option := strings.TrimSpace(parts[i]) + r.userDefined[key] = append(r.userDefined[key], option) + } + } + } +} // Apply applies the rule to given file. 
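A minimal sketch (the option name "mycustom" is hypothetical) of how the user-defined tag options parsed by the configure method above are used: an argument of the form "json,mycustom" makes an otherwise unknown option pass the JSON tag check:

    package main

    // Without the "json,mycustom" argument this tag would be reported as:
    // unknown option 'mycustom' in JSON tag
    type payload struct {
        Name string `json:"name,mycustom"`
    }

    func main() {
        _ = payload{Name: "x"}
    }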
-func (*StructTagRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { - var failures []lint.Failure +func (r *StructTagRule) Apply(file *lint.File, args lint.Arguments) []lint.Failure { + r.configure(args) + var failures []lint.Failure onFailure := func(failure lint.Failure) { failures = append(failures, failure) } - w := lintStructTagRule{onFailure: onFailure} + w := lintStructTagRule{ + onFailure: onFailure, + userDefined: r.userDefined, + } ast.Walk(w, file.AST) @@ -35,8 +67,9 @@ func (*StructTagRule) Name() string { type lintStructTagRule struct { onFailure func(lint.Failure) - usedTagNbr map[int]bool // list of used tag numbers - usedTagName map[string]bool // list of used tag keys + userDefined map[string][]string // map: key -> []option + usedTagNbr map[int]bool // list of used tag numbers + usedTagName map[string]bool // list of used tag keys } func (w lintStructTagRule) Visit(node ast.Node) ast.Visitor { @@ -57,17 +90,26 @@ func (w lintStructTagRule) Visit(node ast.Node) ast.Visitor { return w } +const keyASN1 = "asn1" +const keyBSON = "bson" +const keyDefault = "default" +const keyJSON = "json" +const keyProtobuf = "protobuf" +const keyRequired = "required" +const keyXML = "xml" +const keyYAML = "yaml" + func (w lintStructTagRule) checkTagNameIfNeed(tag *structtag.Tag) (string, bool) { isUnnamedTag := tag.Name == "" || tag.Name == "-" if isUnnamedTag { return "", true } - needsToCheckTagName := tag.Key == "bson" || - tag.Key == "json" || - tag.Key == "xml" || - tag.Key == "yaml" || - tag.Key == "protobuf" + needsToCheckTagName := tag.Key == keyBSON || + tag.Key == keyJSON || + tag.Key == keyXML || + tag.Key == keyYAML || + tag.Key == keyProtobuf if !needsToCheckTagName { return "", true @@ -92,13 +134,13 @@ func (w lintStructTagRule) checkTagNameIfNeed(tag *structtag.Tag) (string, bool) func (lintStructTagRule) getTagName(tag *structtag.Tag) string { switch tag.Key { - case "protobuf": + case keyProtobuf: for _, option := range tag.Options { if strings.HasPrefix(option, "name=") { - return strings.TrimLeft(option, "name=") + return strings.TrimPrefix(option, "name=") } } - return "" //protobuf tag lacks 'name' option + return "" // protobuf tag lacks 'name' option default: return tag.Name } @@ -123,40 +165,40 @@ func (w lintStructTagRule) checkTaggedField(f *ast.Field) { } switch key := tag.Key; key { - case "asn1": + case keyASN1: msg, ok := w.checkASN1Tag(f.Type, tag) if !ok { w.addFailure(f.Tag, msg) } - case "bson": + case keyBSON: msg, ok := w.checkBSONTag(tag.Options) if !ok { w.addFailure(f.Tag, msg) } - case "default": + case keyDefault: if !w.typeValueMatch(f.Type, tag.Name) { w.addFailure(f.Tag, "field's type and default value's type mismatch") } - case "json": + case keyJSON: msg, ok := w.checkJSONTag(tag.Name, tag.Options) if !ok { w.addFailure(f.Tag, msg) } - case "protobuf": + case keyProtobuf: msg, ok := w.checkProtobufTag(tag) if !ok { w.addFailure(f.Tag, msg) } - case "required": + case keyRequired: if tag.Name != "true" && tag.Name != "false" { w.addFailure(f.Tag, "required should be 'true' or 'false'") } - case "xml": + case keyXML: msg, ok := w.checkXMLTag(tag.Options) if !ok { w.addFailure(f.Tag, msg) } - case "yaml": + case keyYAML: msg, ok := w.checkYAMLTag(tag.Options) if !ok { w.addFailure(f.Tag, msg) @@ -201,6 +243,10 @@ func (w lintStructTagRule) checkASN1Tag(t ast.Expr, tag *structtag.Tag) (string, continue } + if w.isUserDefined(keyASN1, opt) { + continue + } + return fmt.Sprintf("unknown option '%s' in ASN1 tag", opt), false } } @@ -208,11 
+254,14 @@ func (w lintStructTagRule) checkASN1Tag(t ast.Expr, tag *structtag.Tag) (string, return "", true } -func (lintStructTagRule) checkBSONTag(options []string) (string, bool) { +func (w lintStructTagRule) checkBSONTag(options []string) (string, bool) { for _, opt := range options { switch opt { case "inline", "minsize", "omitempty": default: + if w.isUserDefined(keyBSON, opt) { + continue + } return fmt.Sprintf("unknown option '%s' in BSON tag", opt), false } } @@ -220,7 +269,7 @@ func (lintStructTagRule) checkBSONTag(options []string) (string, bool) { return "", true } -func (lintStructTagRule) checkJSONTag(name string, options []string) (string, bool) { +func (w lintStructTagRule) checkJSONTag(name string, options []string) (string, bool) { for _, opt := range options { switch opt { case "omitempty", "string": @@ -230,6 +279,9 @@ func (lintStructTagRule) checkJSONTag(name string, options []string) (string, bo return "option can not be empty in JSON tag", false } default: + if w.isUserDefined(keyJSON, opt) { + continue + } return fmt.Sprintf("unknown option '%s' in JSON tag", opt), false } } @@ -237,11 +289,14 @@ func (lintStructTagRule) checkJSONTag(name string, options []string) (string, bo return "", true } -func (lintStructTagRule) checkXMLTag(options []string) (string, bool) { +func (w lintStructTagRule) checkXMLTag(options []string) (string, bool) { for _, opt := range options { switch opt { case "any", "attr", "cdata", "chardata", "comment", "innerxml", "omitempty", "typeattr": default: + if w.isUserDefined(keyXML, opt) { + continue + } return fmt.Sprintf("unknown option '%s' in XML tag", opt), false } } @@ -249,11 +304,14 @@ func (lintStructTagRule) checkXMLTag(options []string) (string, bool) { return "", true } -func (lintStructTagRule) checkYAMLTag(options []string) (string, bool) { +func (w lintStructTagRule) checkYAMLTag(options []string) (string, bool) { for _, opt := range options { switch opt { case "flow", "inline", "omitempty": default: + if w.isUserDefined(keyYAML, opt) { + continue + } return fmt.Sprintf("unknown option '%s' in YAML tag", opt), false } } @@ -330,6 +388,9 @@ func (w lintStructTagRule) checkProtobufTag(tag *structtag.Tag) (string, bool) { case "name", "json": // do nothing default: + if w.isUserDefined(keyProtobuf, k) { + continue + } return fmt.Sprintf("unknown option '%s' in protobuf tag", k), false } } @@ -344,3 +405,17 @@ func (w lintStructTagRule) addFailure(n ast.Node, msg string) { Confidence: 1, }) } + +func (w lintStructTagRule) isUserDefined(key, opt string) bool { + if w.userDefined == nil { + return false + } + + options := w.userDefined[key] + for _, o := range options { + if opt == o { + return true + } + } + return false +} diff --git a/vendor/github.com/mgechev/revive/rule/superfluous-else.go b/vendor/github.com/mgechev/revive/rule/superfluous-else.go index a9e4380c90..2aa1b6b2ca 100644 --- a/vendor/github.com/mgechev/revive/rule/superfluous-else.go +++ b/vendor/github.com/mgechev/revive/rule/superfluous-else.go @@ -2,9 +2,7 @@ package rule import ( "fmt" - "go/ast" - "go/token" - + "github.com/mgechev/revive/internal/ifelse" "github.com/mgechev/revive/lint" ) @@ -12,27 +10,8 @@ import ( type SuperfluousElseRule struct{} // Apply applies the rule to given file. 
-func (*SuperfluousElseRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { - var failures []lint.Failure - onFailure := func(failure lint.Failure) { - failures = append(failures, failure) - } - - branchingFunctions := map[string]map[string]bool{ - "os": {"Exit": true}, - "log": { - "Fatal": true, - "Fatalf": true, - "Fatalln": true, - "Panic": true, - "Panicf": true, - "Panicln": true, - }, - } - - w := lintSuperfluousElse{make(map[*ast.IfStmt]bool), onFailure, branchingFunctions} - ast.Walk(w, file.AST) - return failures +func (e *SuperfluousElseRule) Apply(file *lint.File, args lint.Arguments) []lint.Failure { + return ifelse.Apply(e, file.AST, ifelse.TargetElse, args) } // Name returns the rule name. @@ -40,75 +19,28 @@ func (*SuperfluousElseRule) Name() string { return "superfluous-else" } -type lintSuperfluousElse struct { - ignore map[*ast.IfStmt]bool - onFailure func(lint.Failure) - branchingFunctions map[string]map[string]bool -} - -func (w lintSuperfluousElse) Visit(node ast.Node) ast.Visitor { - ifStmt, ok := node.(*ast.IfStmt) - if !ok || ifStmt.Else == nil { - return w - } - if w.ignore[ifStmt] { - if elseif, ok := ifStmt.Else.(*ast.IfStmt); ok { - w.ignore[elseif] = true - } - return w - } - if elseif, ok := ifStmt.Else.(*ast.IfStmt); ok { - w.ignore[elseif] = true - return w - } - if _, ok := ifStmt.Else.(*ast.BlockStmt); !ok { - // only care about elses without conditions - return w - } - if len(ifStmt.Body.List) == 0 { - return w - } - shortDecl := false // does the if statement have a ":=" initialization statement? - if ifStmt.Init != nil { - if as, ok := ifStmt.Init.(*ast.AssignStmt); ok && as.Tok == token.DEFINE { - shortDecl = true - } - } - extra := "" - if shortDecl { - extra = " (move short variable declaration to its own line if necessary)" +// CheckIfElse evaluates the rule against an ifelse.Chain. 
+func (*SuperfluousElseRule) CheckIfElse(chain ifelse.Chain, args ifelse.Args) (failMsg string) { + if !chain.If.Deviates() { + // this rule only applies if the if-block deviates control flow + return } - lastStmt := ifStmt.Body.List[len(ifStmt.Body.List)-1] - switch stmt := lastStmt.(type) { - case *ast.BranchStmt: - tok := stmt.Tok.String() - if tok != "fallthrough" { - w.onFailure(newFailure(ifStmt.Else, "if block ends with a "+tok+" statement, so drop this else and outdent its block"+extra)) - } - case *ast.ExprStmt: - if ce, ok := stmt.X.(*ast.CallExpr); ok { // it's a function call - if fc, ok := ce.Fun.(*ast.SelectorExpr); ok { - if id, ok := fc.X.(*ast.Ident); ok { - fn := fc.Sel.Name - pkg := id.Name - if w.branchingFunctions[pkg][fn] { // it's a call to a branching function - w.onFailure( - newFailure(ifStmt.Else, fmt.Sprintf("if block ends with call to %s.%s function, so drop this else and outdent its block%s", pkg, fn, extra))) - } - } - } - } + if chain.HasPriorNonDeviating { + // if we de-indent the "else" block then a previous branch + // might flow into it, affecting program behaviour + return } - return w -} + if chain.If.Returns() { + // avoid overlapping with indent-error-flow + return + } -func newFailure(node ast.Node, msg string) lint.Failure { - return lint.Failure{ - Confidence: 1, - Node: node, - Category: "indent", - Failure: msg, + if args.PreserveScope && !chain.AtBlockEnd && (chain.HasInitializer || chain.Else.HasDecls) { + // avoid increasing variable scope + return } + + return fmt.Sprintf("if block ends with %v, so drop this else and outdent its block", chain.If.LongString()) } diff --git a/vendor/github.com/mgechev/revive/rule/time-equal.go b/vendor/github.com/mgechev/revive/rule/time-equal.go index 72ecf26fe4..3b85e18a8e 100644 --- a/vendor/github.com/mgechev/revive/rule/time-equal.go +++ b/vendor/github.com/mgechev/revive/rule/time-equal.go @@ -60,9 +60,9 @@ func (l *lintTimeEqual) Visit(node ast.Node) ast.Visitor { var failure string switch expr.Op { case token.EQL: - failure = fmt.Sprintf("use %s.Equal(%s) instead of %q operator", expr.X, expr.Y, expr.Op) + failure = fmt.Sprintf("use %s.Equal(%s) instead of %q operator", gofmt(expr.X), gofmt(expr.Y), expr.Op) case token.NEQ: - failure = fmt.Sprintf("use !%s.Equal(%s) instead of %q operator", expr.X, expr.Y, expr.Op) + failure = fmt.Sprintf("use !%s.Equal(%s) instead of %q operator", gofmt(expr.X), gofmt(expr.Y), expr.Op) } l.onFailure(lint.Failure{ diff --git a/vendor/github.com/mgechev/revive/rule/unchecked-type-assertion.go b/vendor/github.com/mgechev/revive/rule/unchecked-type-assertion.go new file mode 100644 index 0000000000..df27743cbd --- /dev/null +++ b/vendor/github.com/mgechev/revive/rule/unchecked-type-assertion.go @@ -0,0 +1,194 @@ +package rule + +import ( + "fmt" + "go/ast" + "sync" + + "github.com/mgechev/revive/lint" +) + +const ( + ruleUTAMessagePanic = "type assertion will panic if not matched" + ruleUTAMessageIgnored = "type assertion result ignored" +) + +// UncheckedTypeAssertionRule lints missing or ignored `ok`-value in danymic type casts. +type UncheckedTypeAssertionRule struct { + sync.Mutex + acceptIgnoredAssertionResult bool + configured bool +} + +func (u *UncheckedTypeAssertionRule) configure(arguments lint.Arguments) { + u.Lock() + defer u.Unlock() + + if len(arguments) == 0 || u.configured { + return + } + + u.configured = true + + args, ok := arguments[0].(map[string]any) + if !ok { + panic("Unable to get arguments. 
Expected object of key-value-pairs.") + } + + for k, v := range args { + switch k { + case "acceptIgnoredAssertionResult": + u.acceptIgnoredAssertionResult, ok = v.(bool) + if !ok { + panic(fmt.Sprintf("Unable to parse argument '%s'. Expected boolean.", k)) + } + default: + panic(fmt.Sprintf("Unknown argument: %s", k)) + } + } +} + +// Apply applies the rule to given file. +func (u *UncheckedTypeAssertionRule) Apply(file *lint.File, args lint.Arguments) []lint.Failure { + u.configure(args) + + var failures []lint.Failure + + walker := &lintUnchekedTypeAssertion{ + onFailure: func(failure lint.Failure) { + failures = append(failures, failure) + }, + acceptIgnoredTypeAssertionResult: u.acceptIgnoredAssertionResult, + } + + ast.Walk(walker, file.AST) + + return failures +} + +// Name returns the rule name. +func (*UncheckedTypeAssertionRule) Name() string { + return "unchecked-type-assertion" +} + +type lintUnchekedTypeAssertion struct { + onFailure func(lint.Failure) + acceptIgnoredTypeAssertionResult bool +} + +func isIgnored(e ast.Expr) bool { + ident, ok := e.(*ast.Ident) + if !ok { + return false + } + + return ident.Name == "_" +} + +func isTypeSwitch(e *ast.TypeAssertExpr) bool { + return e.Type == nil +} + +func (w *lintUnchekedTypeAssertion) requireNoTypeAssert(expr ast.Expr) { + e, ok := expr.(*ast.TypeAssertExpr) + if ok && !isTypeSwitch(e) { + w.addFailure(e, ruleUTAMessagePanic) + } +} + +func (w *lintUnchekedTypeAssertion) handleIfStmt(n *ast.IfStmt) { + ifCondition, ok := n.Cond.(*ast.BinaryExpr) + if ok { + w.requireNoTypeAssert(ifCondition.X) + w.requireNoTypeAssert(ifCondition.Y) + } +} + +func (w *lintUnchekedTypeAssertion) requireBinaryExpressionWithoutTypeAssertion(expr ast.Expr) { + binaryExpr, ok := expr.(*ast.BinaryExpr) + if ok { + w.requireNoTypeAssert(binaryExpr.X) + w.requireNoTypeAssert(binaryExpr.Y) + } +} + +func (w *lintUnchekedTypeAssertion) handleCaseClause(n *ast.CaseClause) { + for _, expr := range n.List { + w.requireNoTypeAssert(expr) + w.requireBinaryExpressionWithoutTypeAssertion(expr) + } +} + +func (w *lintUnchekedTypeAssertion) handleSwitch(n *ast.SwitchStmt) { + w.requireNoTypeAssert(n.Tag) + w.requireBinaryExpressionWithoutTypeAssertion(n.Tag) +} + +func (w *lintUnchekedTypeAssertion) handleAssignment(n *ast.AssignStmt) { + if len(n.Rhs) == 0 { + return + } + + e, ok := n.Rhs[0].(*ast.TypeAssertExpr) + if !ok || e == nil { + return + } + + if isTypeSwitch(e) { + return + } + + if len(n.Lhs) == 1 { + w.addFailure(e, ruleUTAMessagePanic) + } + + if !w.acceptIgnoredTypeAssertionResult && len(n.Lhs) == 2 && isIgnored(n.Lhs[1]) { + w.addFailure(e, ruleUTAMessageIgnored) + } +} + +// handles "return foo(.*bar)" - one of them is enough to fail as golang does not forward the type cast tuples in return statements +func (w *lintUnchekedTypeAssertion) handleReturn(n *ast.ReturnStmt) { + for _, r := range n.Results { + w.requireNoTypeAssert(r) + } +} + +func (w *lintUnchekedTypeAssertion) handleRange(n *ast.RangeStmt) { + w.requireNoTypeAssert(n.X) +} + +func (w *lintUnchekedTypeAssertion) handleChannelSend(n *ast.SendStmt) { + w.requireNoTypeAssert(n.Value) +} + +func (w *lintUnchekedTypeAssertion) Visit(node ast.Node) ast.Visitor { + switch n := node.(type) { + case *ast.RangeStmt: + w.handleRange(n) + case *ast.SwitchStmt: + w.handleSwitch(n) + case *ast.ReturnStmt: + w.handleReturn(n) + case *ast.AssignStmt: + w.handleAssignment(n) + case *ast.IfStmt: + w.handleIfStmt(n) + case *ast.CaseClause: + w.handleCaseClause(n) + case *ast.SendStmt: + 
w.handleChannelSend(n) + } + + return w +} + +func (w *lintUnchekedTypeAssertion) addFailure(n *ast.TypeAssertExpr, why string) { + s := fmt.Sprintf("type cast result is unchecked in %v - %s", gofmt(n), why) + w.onFailure(lint.Failure{ + Category: "bad practice", + Confidence: 1, + Node: n, + Failure: s, + }) +} diff --git a/vendor/github.com/mgechev/revive/rule/unconditional-recursion.go b/vendor/github.com/mgechev/revive/rule/unconditional-recursion.go index bad9075338..9ac2648cdd 100644 --- a/vendor/github.com/mgechev/revive/rule/unconditional-recursion.go +++ b/vendor/github.com/mgechev/revive/rule/unconditional-recursion.go @@ -45,8 +45,9 @@ type funcStatus struct { } type lintUnconditionalRecursionRule struct { - onFailure func(lint.Failure) - currentFunc *funcStatus + onFailure func(lint.Failure) + currentFunc *funcStatus + inGoStatement bool } // Visit will traverse the file AST. @@ -68,9 +69,13 @@ func (w lintUnconditionalRecursionRule) Visit(node ast.Node) ast.Visitor { default: rec = n.Recv.List[0].Names[0] } - w.currentFunc = &funcStatus{&funcDesc{rec, n.Name}, false} case *ast.CallExpr: + // check if call arguments has a recursive call + for _, arg := range n.Args { + ast.Walk(w, arg) + } + var funcID *ast.Ident var selector *ast.Ident switch c := n.Fun.(type) { @@ -84,6 +89,9 @@ func (w lintUnconditionalRecursionRule) Visit(node ast.Node) ast.Visitor { return nil } funcID = c.Sel + case *ast.FuncLit: + ast.Walk(w, c.Body) // analyze the body of the function literal + return nil default: return w } @@ -93,11 +101,12 @@ func (w lintUnconditionalRecursionRule) Visit(node ast.Node) ast.Visitor { w.currentFunc.funcDesc.equal(&funcDesc{selector, funcID}) { w.onFailure(lint.Failure{ Category: "logic", - Confidence: 1, + Confidence: 0.8, Node: n, Failure: "unconditional recursive call", }) } + return nil case *ast.IfStmt: w.updateFuncStatus(n.Body) w.updateFuncStatus(n.Else) @@ -115,16 +124,21 @@ func (w lintUnconditionalRecursionRule) Visit(node ast.Node) ast.Visitor { w.updateFuncStatus(n.Body) return nil case *ast.GoStmt: - for _, a := range n.Call.Args { - ast.Walk(w, a) // check if arguments have a recursive call - } - return nil // recursive async call is not an issue + w.inGoStatement = true + ast.Walk(w, n.Call) + w.inGoStatement = false + return nil case *ast.ForStmt: if n.Cond != nil { return nil } // unconditional loop return w + case *ast.FuncLit: + if w.inGoStatement { + return w + } + return nil // literal call (closure) is not necessarily an issue } return w @@ -181,5 +195,5 @@ func (lintUnconditionalRecursionRule) hasControlExit(node ast.Node) bool { return false } - return len(pick(node, isExit, nil)) != 0 + return len(pick(node, isExit)) != 0 } diff --git a/vendor/github.com/mgechev/revive/rule/unhandled-error.go b/vendor/github.com/mgechev/revive/rule/unhandled-error.go index 6cde24b7f7..ce6fa38641 100644 --- a/vendor/github.com/mgechev/revive/rule/unhandled-error.go +++ b/vendor/github.com/mgechev/revive/rule/unhandled-error.go @@ -4,6 +4,8 @@ import ( "fmt" "go/ast" "go/types" + "regexp" + "strings" "sync" "github.com/mgechev/revive/lint" @@ -11,24 +13,30 @@ import ( // UnhandledErrorRule lints given else constructs. 
type UnhandledErrorRule struct { - ignoreList ignoreListType + ignoreList []*regexp.Regexp sync.Mutex } -type ignoreListType map[string]struct{} - func (r *UnhandledErrorRule) configure(arguments lint.Arguments) { r.Lock() if r.ignoreList == nil { - r.ignoreList = make(ignoreListType, len(arguments)) - for _, arg := range arguments { argStr, ok := arg.(string) if !ok { panic(fmt.Sprintf("Invalid argument to the unhandled-error rule. Expecting a string, got %T", arg)) } - r.ignoreList[argStr] = struct{}{} + argStr = strings.Trim(argStr, " ") + if argStr == "" { + panic("Invalid argument to the unhandled-error rule, expected regular expression must not be empty.") + } + + exp, err := regexp.Compile(argStr) + if err != nil { + panic(fmt.Sprintf("Invalid argument to the unhandled-error rule: regexp %q does not compile: %v", argStr, err)) + } + + r.ignoreList = append(r.ignoreList, exp) } } r.Unlock() @@ -60,7 +68,7 @@ func (*UnhandledErrorRule) Name() string { } type lintUnhandledErrors struct { - ignoreList ignoreListType + ignoreList []*regexp.Regexp pkg *lint.Package onFailure func(lint.Failure) } @@ -102,8 +110,8 @@ func (w *lintUnhandledErrors) Visit(node ast.Node) ast.Visitor { } func (w *lintUnhandledErrors) addFailure(n *ast.CallExpr) { - funcName := gofmt(n.Fun) - if _, mustIgnore := w.ignoreList[funcName]; mustIgnore { + name := w.funcName(n) + if w.isIgnoredFunc(name) { return } @@ -111,10 +119,34 @@ func (w *lintUnhandledErrors) addFailure(n *ast.CallExpr) { Category: "bad practice", Confidence: 1, Node: n, - Failure: fmt.Sprintf("Unhandled error in call to function %v", funcName), + Failure: fmt.Sprintf("Unhandled error in call to function %v", name), }) } +func (w *lintUnhandledErrors) funcName(call *ast.CallExpr) string { + fn, ok := w.getFunc(call) + if !ok { + return gofmt(call.Fun) + } + + name := fn.FullName() + name = strings.Replace(name, "(", "", -1) + name = strings.Replace(name, ")", "", -1) + name = strings.Replace(name, "*", "", -1) + + return name +} + +func (w *lintUnhandledErrors) isIgnoredFunc(funcName string) bool { + for _, pattern := range w.ignoreList { + if len(pattern.FindString(funcName)) == len(funcName) { + return true + } + } + + return false +} + func (*lintUnhandledErrors) isTypeError(t *types.Named) bool { const errorTypeName = "_.error" @@ -130,3 +162,17 @@ func (w *lintUnhandledErrors) returnsAnError(tt *types.Tuple) bool { } return false } + +func (w *lintUnhandledErrors) getFunc(call *ast.CallExpr) (*types.Func, bool) { + sel, ok := call.Fun.(*ast.SelectorExpr) + if !ok { + return nil, false + } + + fn, ok := w.pkg.TypesInfo().ObjectOf(sel.Sel).(*types.Func) + if !ok { + return nil, false + } + + return fn, true +} diff --git a/vendor/github.com/mgechev/revive/rule/unused-param.go b/vendor/github.com/mgechev/revive/rule/unused-param.go index ab3da453ee..4b04ee916b 100644 --- a/vendor/github.com/mgechev/revive/rule/unused-param.go +++ b/vendor/github.com/mgechev/revive/rule/unused-param.go @@ -3,22 +3,72 @@ package rule import ( "fmt" "go/ast" + "regexp" + "sync" "github.com/mgechev/revive/lint" ) // UnusedParamRule lints unused params in functions. 
-type UnusedParamRule struct{} +type UnusedParamRule struct { + configured bool + // regex to check if some name is valid for unused parameter, "^_$" by default + allowRegex *regexp.Regexp + failureMsg string + sync.Mutex +} + +func (r *UnusedParamRule) configure(args lint.Arguments) { + r.Lock() + defer r.Unlock() + + if r.configured { + return + } + r.configured = true + + // while by default args is an array, i think it's good to provide structures inside it by default, not arrays or primitives + // it's more compatible to JSON nature of configurations + var allowedRegexStr string + if len(args) == 0 { + allowedRegexStr = "^_$" + r.failureMsg = "parameter '%s' seems to be unused, consider removing or renaming it as _" + } else { + // Arguments = [{}] + options := args[0].(map[string]any) + // Arguments = [{allowedRegex="^_"}] + + if allowedRegexParam, ok := options["allowRegex"]; ok { + allowedRegexStr, ok = allowedRegexParam.(string) + if !ok { + panic(fmt.Errorf("error configuring %s rule: allowedRegex is not string but [%T]", r.Name(), allowedRegexParam)) + } + } + } + var err error + r.allowRegex, err = regexp.Compile(allowedRegexStr) + if err != nil { + panic(fmt.Errorf("error configuring %s rule: allowedRegex is not valid regex [%s]: %v", r.Name(), allowedRegexStr, err)) + } + + if r.failureMsg == "" { + r.failureMsg = "parameter '%s' seems to be unused, consider removing or renaming it to match " + r.allowRegex.String() + } +} // Apply applies the rule to given file. -func (*UnusedParamRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { +func (r *UnusedParamRule) Apply(file *lint.File, args lint.Arguments) []lint.Failure { + r.configure(args) var failures []lint.Failure onFailure := func(failure lint.Failure) { failures = append(failures, failure) } - - w := lintUnusedParamRule{onFailure: onFailure} + w := lintUnusedParamRule{ + onFailure: onFailure, + allowRegex: r.allowRegex, + failureMsg: r.failureMsg, + } ast.Walk(w, file.AST) @@ -31,55 +81,70 @@ func (*UnusedParamRule) Name() string { } type lintUnusedParamRule struct { - onFailure func(lint.Failure) + onFailure func(lint.Failure) + allowRegex *regexp.Regexp + failureMsg string } func (w lintUnusedParamRule) Visit(node ast.Node) ast.Visitor { + var ( + funcType *ast.FuncType + funcBody *ast.BlockStmt + ) switch n := node.(type) { + case *ast.FuncLit: + funcType = n.Type + funcBody = n.Body case *ast.FuncDecl: - params := retrieveNamedParams(n.Type.Params) - if len(params) < 1 { - return nil // skip, func without parameters - } - if n.Body == nil { return nil // skip, is a function prototype } - // inspect the func body looking for references to parameters - fselect := func(n ast.Node) bool { - ident, isAnID := n.(*ast.Ident) + funcType = n.Type + funcBody = n.Body + default: + return w // skip, not a function + } - if !isAnID { - return false - } + params := retrieveNamedParams(funcType.Params) + if len(params) < 1 { + return w // skip, func without parameters + } - _, isAParam := params[ident.Obj] - if isAParam { - params[ident.Obj] = false // mark as used - } + // inspect the func body looking for references to parameters + fselect := func(n ast.Node) bool { + ident, isAnID := n.(*ast.Ident) + if !isAnID { return false } - _ = pick(n.Body, fselect, nil) - - for _, p := range n.Type.Params.List { - for _, n := range p.Names { - if params[n.Obj] { - w.onFailure(lint.Failure{ - Confidence: 1, - Node: n, - Category: "bad practice", - Failure: fmt.Sprintf("parameter '%s' seems to be unused, consider removing or 
renaming it as _", n.Name), - }) - } - } + + _, isAParam := params[ident.Obj] + if isAParam { + params[ident.Obj] = false // mark as used } - return nil // full method body already inspected + return false + } + _ = pick(funcBody, fselect) + + for _, p := range funcType.Params.List { + for _, n := range p.Names { + if w.allowRegex.FindStringIndex(n.Name) != nil { + continue + } + if params[n.Obj] { + w.onFailure(lint.Failure{ + Confidence: 1, + Node: n, + Category: "bad practice", + Failure: fmt.Sprintf(w.failureMsg, n.Name), + }) + } + } } - return w + return w // full method body was inspected } func retrieveNamedParams(params *ast.FieldList) map[*ast.Object]bool { diff --git a/vendor/github.com/mgechev/revive/rule/unused-receiver.go b/vendor/github.com/mgechev/revive/rule/unused-receiver.go index 2289a517e5..715dba3383 100644 --- a/vendor/github.com/mgechev/revive/rule/unused-receiver.go +++ b/vendor/github.com/mgechev/revive/rule/unused-receiver.go @@ -3,22 +3,72 @@ package rule import ( "fmt" "go/ast" + "regexp" + "sync" "github.com/mgechev/revive/lint" ) // UnusedReceiverRule lints unused params in functions. -type UnusedReceiverRule struct{} +type UnusedReceiverRule struct { + configured bool + // regex to check if some name is valid for unused parameter, "^_$" by default + allowRegex *regexp.Regexp + failureMsg string + sync.Mutex +} + +func (r *UnusedReceiverRule) configure(args lint.Arguments) { + r.Lock() + defer r.Unlock() + + if r.configured { + return + } + r.configured = true + + // while by default args is an array, i think it's good to provide structures inside it by default, not arrays or primitives + // it's more compatible to JSON nature of configurations + var allowedRegexStr string + if len(args) == 0 { + allowedRegexStr = "^_$" + r.failureMsg = "method receiver '%s' is not referenced in method's body, consider removing or renaming it as _" + } else { + // Arguments = [{}] + options := args[0].(map[string]any) + // Arguments = [{allowedRegex="^_"}] + + if allowedRegexParam, ok := options["allowRegex"]; ok { + allowedRegexStr, ok = allowedRegexParam.(string) + if !ok { + panic(fmt.Errorf("error configuring [unused-receiver] rule: allowedRegex is not string but [%T]", allowedRegexParam)) + } + } + } + var err error + r.allowRegex, err = regexp.Compile(allowedRegexStr) + if err != nil { + panic(fmt.Errorf("error configuring [unused-receiver] rule: allowedRegex is not valid regex [%s]: %v", allowedRegexStr, err)) + } + if r.failureMsg == "" { + r.failureMsg = "method receiver '%s' is not referenced in method's body, consider removing or renaming it to match " + r.allowRegex.String() + } +} // Apply applies the rule to given file. 
-func (*UnusedReceiverRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure { +func (r *UnusedReceiverRule) Apply(file *lint.File, args lint.Arguments) []lint.Failure { + r.configure(args) var failures []lint.Failure onFailure := func(failure lint.Failure) { failures = append(failures, failure) } - w := lintUnusedReceiverRule{onFailure: onFailure} + w := lintUnusedReceiverRule{ + onFailure: onFailure, + allowRegex: r.allowRegex, + failureMsg: r.failureMsg, + } ast.Walk(w, file.AST) @@ -31,7 +81,9 @@ func (*UnusedReceiverRule) Name() string { } type lintUnusedReceiverRule struct { - onFailure func(lint.Failure) + onFailure func(lint.Failure) + allowRegex *regexp.Regexp + failureMsg string } func (w lintUnusedReceiverRule) Visit(node ast.Node) ast.Visitor { @@ -51,13 +103,17 @@ func (w lintUnusedReceiverRule) Visit(node ast.Node) ast.Visitor { return nil // the receiver is already named _ } + if w.allowRegex != nil && w.allowRegex.FindStringIndex(recID.Name) != nil { + return nil + } + // inspect the func body looking for references to the receiver id fselect := func(n ast.Node) bool { ident, isAnID := n.(*ast.Ident) return isAnID && ident.Obj == recID.Obj } - refs2recID := pick(n.Body, fselect, nil) + refs2recID := pick(n.Body, fselect) if len(refs2recID) > 0 { return nil // the receiver is referenced in the func body @@ -67,7 +123,7 @@ func (w lintUnusedReceiverRule) Visit(node ast.Node) ast.Visitor { Confidence: 1, Node: recID, Category: "bad practice", - Failure: fmt.Sprintf("method receiver '%s' is not referenced in method's body, consider removing or renaming it as _", recID.Name), + Failure: fmt.Sprintf(w.failureMsg, recID.Name), }) return nil // full method body already inspected diff --git a/vendor/github.com/mgechev/revive/rule/utils.go b/vendor/github.com/mgechev/revive/rule/utils.go index dca1674ca5..5778e76963 100644 --- a/vendor/github.com/mgechev/revive/rule/utils.go +++ b/vendor/github.com/mgechev/revive/rule/utils.go @@ -93,21 +93,15 @@ func srcLine(src []byte, p token.Position) string { // pick yields a list of nodes by picking them from a sub-ast with root node n. // Nodes are selected by applying the fselect function -// f function is applied to each selected node before inserting it in the final result. -// If f==nil then it defaults to the identity function (ie it returns the node itself) -func pick(n ast.Node, fselect func(n ast.Node) bool, f func(n ast.Node) []ast.Node) []ast.Node { +func pick(n ast.Node, fselect func(n ast.Node) bool) []ast.Node { var result []ast.Node if n == nil { return result } - if f == nil { - f = func(n ast.Node) []ast.Node { return []ast.Node{n} } - } - onSelect := func(n ast.Node) { - result = append(result, f(n)...) + result = append(result, n) } p := picker{fselect: fselect, onSelect: onSelect} ast.Walk(p, n) @@ -158,7 +152,7 @@ func isExprABooleanLit(n ast.Node) (lexeme string, ok bool) { } // gofmt returns a string representation of an AST subtree. 
-func gofmt(x interface{}) string { +func gofmt(x any) string { buf := bytes.Buffer{} fs := token.NewFileSet() printer.Fprint(&buf, fs, x) diff --git a/vendor/github.com/mgechev/revive/rule/var-naming.go b/vendor/github.com/mgechev/revive/rule/var-naming.go index 3c0c19cdf3..e91c22dc21 100644 --- a/vendor/github.com/mgechev/revive/rule/var-naming.go +++ b/vendor/github.com/mgechev/revive/rule/var-naming.go @@ -4,33 +4,82 @@ import ( "fmt" "go/ast" "go/token" + "regexp" "strings" "sync" "github.com/mgechev/revive/lint" ) +var anyCapsRE = regexp.MustCompile(`[A-Z]`) + +// regexp for constant names like `SOME_CONST`, `SOME_CONST_2`, `X123_3`, `_SOME_PRIVATE_CONST` (#851, #865) +var upperCaseConstRE = regexp.MustCompile(`^_?[A-Z][A-Z\d]*(_[A-Z\d]+)*$`) + // VarNamingRule lints given else constructs. type VarNamingRule struct { - configured bool - whitelist []string - blacklist []string + configured bool + allowlist []string + blocklist []string + upperCaseConst bool // if true - allows to use UPPER_SOME_NAMES for constants + skipPackageNameChecks bool sync.Mutex } func (r *VarNamingRule) configure(arguments lint.Arguments) { r.Lock() - if !r.configured { - if len(arguments) >= 1 { - r.whitelist = getList(arguments[0], "whitelist") - } + defer r.Unlock() + if r.configured { + return + } + + r.configured = true + if len(arguments) >= 1 { + r.allowlist = getList(arguments[0], "allowlist") + } + + if len(arguments) >= 2 { + r.blocklist = getList(arguments[1], "blocklist") + } - if len(arguments) >= 2 { - r.blacklist = getList(arguments[1], "blacklist") + if len(arguments) >= 3 { + // not pretty code because should keep compatibility with TOML (no mixed array types) and new map parameters + thirdArgument := arguments[2] + asSlice, ok := thirdArgument.([]any) + if !ok { + panic(fmt.Sprintf("Invalid third argument to the var-naming rule. Expecting a %s of type slice, got %T", "options", arguments[2])) } - r.configured = true + if len(asSlice) != 1 { + panic(fmt.Sprintf("Invalid third argument to the var-naming rule. Expecting a %s of type slice, of len==1, but %d", "options", len(asSlice))) + } + args, ok := asSlice[0].(map[string]any) + if !ok { + panic(fmt.Sprintf("Invalid third argument to the var-naming rule. Expecting a %s of type slice, of len==1, with map, but %T", "options", asSlice[0])) + } + r.upperCaseConst = fmt.Sprint(args["upperCaseConst"]) == "true" + r.skipPackageNameChecks = fmt.Sprint(args["skipPackageNameChecks"]) == "true" + } +} + +func (r *VarNamingRule) applyPackageCheckRules(walker *lintNames) { + // Package names need slightly different handling than other names. + if strings.Contains(walker.fileAst.Name.Name, "_") && !strings.HasSuffix(walker.fileAst.Name.Name, "_test") { + walker.onFailure(lint.Failure{ + Failure: "don't use an underscore in package name", + Confidence: 1, + Node: walker.fileAst.Name, + Category: "naming", + }) + } + if anyCapsRE.MatchString(walker.fileAst.Name.Name) { + walker.onFailure(lint.Failure{ + Failure: fmt.Sprintf("don't use MixedCaps in package name; %s should be %s", walker.fileAst.Name.Name, strings.ToLower(walker.fileAst.Name.Name)), + Confidence: 1, + Node: walker.fileAst.Name, + Category: "naming", + }) } - r.Unlock() + } // Apply applies the rule to given file. @@ -44,21 +93,16 @@ func (r *VarNamingRule) Apply(file *lint.File, arguments lint.Arguments) []lint. 
walker := lintNames{ file: file, fileAst: fileAst, - whitelist: r.whitelist, - blacklist: r.blacklist, + allowlist: r.allowlist, + blocklist: r.blocklist, onFailure: func(failure lint.Failure) { failures = append(failures, failure) }, + upperCaseConst: r.upperCaseConst, } - // Package names need slightly different handling than other names. - if strings.Contains(walker.fileAst.Name.Name, "_") && !strings.HasSuffix(walker.fileAst.Name.Name, "_test") { - walker.onFailure(lint.Failure{ - Failure: "don't use an underscore in package name", - Confidence: 1, - Node: walker.fileAst.Name, - Category: "naming", - }) + if !r.skipPackageNameChecks { + r.applyPackageCheckRules(&walker) } ast.Walk(&walker, fileAst) @@ -71,18 +115,18 @@ func (*VarNamingRule) Name() string { return "var-naming" } -func checkList(fl *ast.FieldList, thing string, w *lintNames) { +func (w *lintNames) checkList(fl *ast.FieldList, thing string) { if fl == nil { return } for _, f := range fl.List { for _, id := range f.Names { - check(id, thing, w) + w.check(id, thing) } } } -func check(id *ast.Ident, thing string, w *lintNames) { +func (w *lintNames) check(id *ast.Ident, thing string) { if id.Name == "_" { return } @@ -90,6 +134,12 @@ func check(id *ast.Ident, thing string, w *lintNames) { return } + // #851 upperCaseConst support + // if it's const + if thing == token.CONST.String() && w.upperCaseConst && upperCaseConstRE.MatchString(id.Name) { + return + } + // Handle two common styles from other languages that don't belong in Go. if len(id.Name) >= 5 && allCapsRE.MatchString(id.Name) && strings.Contains(id.Name, "_") { w.onFailure(lint.Failure{ @@ -100,17 +150,8 @@ func check(id *ast.Ident, thing string, w *lintNames) { }) return } - if len(id.Name) > 2 && id.Name[0] == 'k' && id.Name[1] >= 'A' && id.Name[1] <= 'Z' { - should := string(id.Name[1]+'a'-'A') + id.Name[2:] - w.onFailure(lint.Failure{ - Failure: fmt.Sprintf("don't use leading k in Go names; %s %s should be %s", thing, id.Name, should), - Confidence: 0.8, - Node: id, - Category: "naming", - }) - } - should := lint.Name(id.Name, w.whitelist, w.blacklist) + should := lint.Name(id.Name, w.allowlist, w.blocklist) if id.Name == should { return } @@ -133,11 +174,12 @@ func check(id *ast.Ident, thing string, w *lintNames) { } type lintNames struct { - file *lint.File - fileAst *ast.File - onFailure func(lint.Failure) - whitelist []string - blacklist []string + file *lint.File + fileAst *ast.File + onFailure func(lint.Failure) + allowlist []string + blocklist []string + upperCaseConst bool } func (w *lintNames) Visit(n ast.Node) ast.Visitor { @@ -148,7 +190,7 @@ func (w *lintNames) Visit(n ast.Node) ast.Visitor { } for _, exp := range v.Lhs { if id, ok := exp.(*ast.Ident); ok { - check(id, "var", w) + w.check(id, "var") } } case *ast.FuncDecl: @@ -170,31 +212,24 @@ func (w *lintNames) Visit(n ast.Node) ast.Visitor { // not exported in the Go API. // See https://github.com/golang/lint/issues/144. 
if ast.IsExported(v.Name.Name) || !isCgoExported(v) { - check(v.Name, thing, w) + w.check(v.Name, thing) } - checkList(v.Type.Params, thing+" parameter", w) - checkList(v.Type.Results, thing+" result", w) + w.checkList(v.Type.Params, thing+" parameter") + w.checkList(v.Type.Results, thing+" result") case *ast.GenDecl: if v.Tok == token.IMPORT { return w } - var thing string - switch v.Tok { - case token.CONST: - thing = "const" - case token.TYPE: - thing = "type" - case token.VAR: - thing = "var" - } + + thing := v.Tok.String() for _, spec := range v.Specs { switch s := spec.(type) { case *ast.TypeSpec: - check(s.Name, thing, w) + w.check(s.Name, thing) case *ast.ValueSpec: for _, id := range s.Names { - check(id, thing, w) + w.check(id, thing) } } } @@ -206,31 +241,31 @@ func (w *lintNames) Visit(n ast.Node) ast.Visitor { if !ok { // might be an embedded interface name continue } - checkList(ft.Params, "interface method parameter", w) - checkList(ft.Results, "interface method result", w) + w.checkList(ft.Params, "interface method parameter") + w.checkList(ft.Results, "interface method result") } case *ast.RangeStmt: if v.Tok == token.ASSIGN { return w } if id, ok := v.Key.(*ast.Ident); ok { - check(id, "range var", w) + w.check(id, "range var") } if id, ok := v.Value.(*ast.Ident); ok { - check(id, "range var", w) + w.check(id, "range var") } case *ast.StructType: for _, f := range v.Fields.List { for _, id := range f.Names { - check(id, "struct field", w) + w.check(id, "struct field") } } } return w } -func getList(arg interface{}, argName string) []string { - temp, ok := arg.([]interface{}) +func getList(arg any, argName string) []string { + temp, ok := arg.([]any) if !ok { panic(fmt.Sprintf("Invalid argument to the var-naming rule. Expecting a %s of type slice with initialisms, got %T", argName, arg)) } diff --git a/vendor/github.com/moricho/tparallel/.goreleaser.yml b/vendor/github.com/moricho/tparallel/.goreleaser.yaml similarity index 54% rename from vendor/github.com/moricho/tparallel/.goreleaser.yml rename to vendor/github.com/moricho/tparallel/.goreleaser.yaml index e9f6d727e7..4a04fe25b5 100644 --- a/vendor/github.com/moricho/tparallel/.goreleaser.yml +++ b/vendor/github.com/moricho/tparallel/.goreleaser.yaml @@ -1,6 +1,4 @@ project_name: tparallel -env: - - GO111MODULE=on before: hooks: - go mod tidy @@ -13,17 +11,33 @@ builds: - -X main.Revision={{.ShortCommit}} env: - CGO_ENABLED=0 + goos: + - linux + - windows + - darwin + archives: - - name_template: '{{ .ProjectName }}_{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}' - replacements: - darwin: darwin - linux: linux - windows: windows - 386: i386 - amd64: x86_64 + - format: tar.gz + name_template: >- + {{ .ProjectName }}_ + {{- title .Os }}_ + {{- if eq .Arch "amd64" }}x86_64 + {{- else if eq .Arch "386" }}i386 + {{- else }}{{ .Arch }}{{ end }} + {{- if .Arm }}v{{ .Arm }}{{ end }} format_overrides: - - goos: windows - format: zip + - goos: windows + format: zip +checksum: + name_template: 'checksums.txt' +snapshot: + name_template: "{{ incpatch .Version }}-next" +changelog: + sort: asc + filters: + exclude: + - '^docs:' + - '^test:' release: prerelease: auto brews: diff --git a/vendor/github.com/moricho/tparallel/README.md b/vendor/github.com/moricho/tparallel/README.md index cd358d1554..65ed46c422 100644 --- a/vendor/github.com/moricho/tparallel/README.md +++ b/vendor/github.com/moricho/tparallel/README.md @@ -1,37 +1,49 @@ # tparallel + 
[![tparallel](https://github.com/moricho/tparallel/workflows/tparallel/badge.svg?branch=master)](https://github.com/moricho/tparallel/actions) [![Go Report Card](https://goreportcard.com/badge/github.com/moricho/tparallel)](https://goreportcard.com/report/github.com/moricho/tparallel) [![MIT License](http://img.shields.io/badge/license-MIT-blue.svg?style=flat)](LICENSE) `tparallel` finds inappropriate usage of `t.Parallel()` method in your Go test codes. -It detects the following: +It detects the following: + - `t.Parallel()` is called in either a top-level test function or a sub-test function only - Although `t.Parallel()` is called in the sub-test function, it is post-processed by `defer` instead of `t.Cleanup()` - -This tool was inspired by this blog: [Go言語でのテストの並列化 〜t.Parallel()メソッドを理解する〜](https://engineering.mercari.com/blog/entry/how_to_use_t_parallel/) + +This tool was inspired by this blog: [Go 言語でのテストの並列化 〜t.Parallel()メソッドを理解する〜](https://engineering.mercari.com/blog/entry/how_to_use_t_parallel/) ## Installation ### From GitHub Releases + Please see [GitHub Releases](https://github.com/moricho/tparallel/releases). Available binaries are: + - macOS - Linux - Windows ### macOS -``` sh + +```sh $ brew tap moricho/tparallel $ brew install tparallel ``` ### go get + ```sh $ go get -u github.com/moricho/tparallel/cmd/tparallel ``` ## Usage +### golangci-lint + +[golangci-lint](https://github.com/golangci/golangci-lint) now supports `tparallel`, so you can enable this linter and use in it. + +### shell + ```sh $ go vet -vettool=`which tparallel` ``` diff --git a/vendor/github.com/nbutton23/zxcvbn-go/.gitignore b/vendor/github.com/nbutton23/zxcvbn-go/.gitignore deleted file mode 100644 index 4bff1a28e4..0000000000 --- a/vendor/github.com/nbutton23/zxcvbn-go/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -zxcvbn -debug.test diff --git a/vendor/github.com/nbutton23/zxcvbn-go/Makefile b/vendor/github.com/nbutton23/zxcvbn-go/Makefile deleted file mode 100644 index 6aa13e0067..0000000000 --- a/vendor/github.com/nbutton23/zxcvbn-go/Makefile +++ /dev/null @@ -1,15 +0,0 @@ -PKG_LIST = $$( go list ./... | grep -v /vendor/ | grep -v "zxcvbn-go/data" ) - -.DEFAULT_GOAL := help - -.PHONY: help -help: - @grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' - -.PHONY: test -test: ## Run `go test {Package list}` on the packages - go test $(PKG_LIST) - -.PHONY: lint -lint: ## Run `golint {Package list}` - golint $(PKG_LIST) \ No newline at end of file diff --git a/vendor/github.com/nishanths/exhaustive/.gitignore b/vendor/github.com/nishanths/exhaustive/.gitignore index 10acec6e16..94c13223bc 100644 --- a/vendor/github.com/nishanths/exhaustive/.gitignore +++ b/vendor/github.com/nishanths/exhaustive/.gitignore @@ -3,8 +3,10 @@ tags # binary -cmd/exhaustive/exhaustive -exhaustive +/cmd/exhaustive/exhaustive +/exhaustive # testing artifacts -coverage.out +/coverage.out + +/CHANGELOG.md diff --git a/vendor/github.com/nishanths/exhaustive/Makefile b/vendor/github.com/nishanths/exhaustive/Makefile index 981a7ebe99..868f7fce29 100644 --- a/vendor/github.com/nishanths/exhaustive/Makefile +++ b/vendor/github.com/nishanths/exhaustive/Makefile @@ -1,19 +1,33 @@ .PHONY: default default: build +.PHONY: all +all: build vet test + .PHONY: build build: go build ./... + go build ./cmd/exhaustive .PHONY: test test: - go test -cover ./... + go test -count=1 ./... + +.PHONY: testshort +testshort: + go test -short -count=1 ./... 
+ +.PHONY: cover +cover: + go test -cover -coverprofile=coverage.out ./... + go tool cover -html=coverage.out .PHONY: install-vet install-vet: go install github.com/nishanths/exhaustive/cmd/exhaustive@latest go install github.com/gordonklaus/ineffassign@latest go install github.com/kisielk/errcheck@latest + go install honnef.co/go/tools/cmd/staticcheck@latest .PHONY: vet vet: @@ -21,6 +35,7 @@ vet: exhaustive ./... ineffassign ./... errcheck ./... + staticcheck -checks="inherit,-S1034" ./... .PHONY: upgrade-deps upgrade-deps: diff --git a/vendor/github.com/nishanths/exhaustive/README.md b/vendor/github.com/nishanths/exhaustive/README.md index a65d9de2f2..dbb41ab9d0 100644 --- a/vendor/github.com/nishanths/exhaustive/README.md +++ b/vendor/github.com/nishanths/exhaustive/README.md @@ -1,32 +1,45 @@ -## exhaustive [![Godoc][2]][1] +# exhaustive -Check exhaustiveness of enum switch statements and map literals in Go source code. +[![Godoc][godoc-svg]][godoc] + +`exhaustive` checks exhaustiveness of enum switch statements in Go source code. + +For the definition of enum and the definition of exhaustiveness used by this +program, see [godoc][godoc-doc]. For the changelog, see [CHANGELOG][changelog] +in the GitHub wiki. The program can be configured to additionally check +exhaustiveness of keys in map literals whose key type is an enum. + +## Usage + +Command: ``` go install github.com/nishanths/exhaustive/cmd/exhaustive@latest + +exhaustive [flags] [packages] ``` -For docs on the flags, the definition of enum, and the definition of -exhaustiveness, see [godocs.io][4]. +For available flags, refer to the [Flags][godoc-flags] section in godoc or run +`exhaustive -h`. -For the changelog, see [CHANGELOG][changelog] in the wiki. +Package: -The package provides an `Analyzer` that follows the guidelines in the -[`go/analysis`][3] package; this should make it possible to integrate -exhaustive with your own analysis driver program. +``` +go get github.com/nishanths/exhaustive -## Bugs +import "github.com/nishanths/exhaustive" +``` -`exhaustive` does not report missing cases if the switch statement -switches on a type parameterized type. See [this -issue](https://github.com/nishanths/exhaustive/issues/31) for details. +The `exhaustive.Analyzer` variable follows guidelines in the +[`golang.org/x/tools/go/analysis`][xanalysis] package. This should make it +possible to integrate `exhaustive` with your own analysis driver program. ## Example -Given the enum +Given an enum: -```go -package token +``` +package token // import "example.org/token" type Token int @@ -39,45 +52,53 @@ const ( ) ``` -and the code +and code that switches on the enum: -```go +``` package calc -import "token" +import "example.org/token" -func f(t token.Token) { +func x(t token.Token) { switch t { case token.Add: case token.Subtract: - case token.Multiply: + case token.Remainder: default: } } +``` -func g(t token.Token) string { - return map[token.Token]string{ - token.Add: "add", - token.Subtract: "subtract", - token.Multiply: "multiply", - }[t] -} +running `exhaustive` with default flags will produce: + +``` +calc.go:6:2: missing cases in switch of type token.Token: token.Multiply, token.Quotient ``` -running exhaustive will print +Specify flag `-check=switch,map` to additionally check exhaustiveness of keys +in map literals. 
For example: + +``` +var m = map[token.Token]rune{ + token.Add: '+', + token.Subtract: '-', + token.Multiply: '*', + token.Quotient: '/', +} +``` ``` -calc.go:6:2: missing cases in switch of type token.Token: Quotient, Remainder -calc.go:15:9: missing map keys of type token.Token: Quotient, Remainder +calc.go:14:9: missing keys in map of key type token.Token: token.Remainder ``` ## Contributing -Issues and pull requests are welcome. Before making a substantial -change, please discuss it in an issue. +Issues and changes are welcome. Please discuss substantial changes in an issue +first. -[1]: https://godocs.io/github.com/nishanths/exhaustive -[2]: https://godocs.io/github.com/nishanths/exhaustive?status.svg -[3]: https://pkg.go.dev/golang.org/x/tools/go/analysis -[4]: https://godocs.io/github.com/nishanths/exhaustive +[godoc]: https://pkg.go.dev/github.com/nishanths/exhaustive +[godoc-svg]: https://pkg.go.dev/badge/github.com/nishanths/exhaustive.svg +[godoc-doc]: https://pkg.go.dev/github.com/nishanths/exhaustive#section-documentation +[godoc-flags]: https://pkg.go.dev/github.com/nishanths/exhaustive#hdr-Flags +[xanalysis]: https://pkg.go.dev/golang.org/x/tools/go/analysis [changelog]: https://github.com/nishanths/exhaustive/wiki/CHANGELOG diff --git a/vendor/github.com/nishanths/exhaustive/comment.go b/vendor/github.com/nishanths/exhaustive/comment.go index 1232df1162..123e0181ba 100644 --- a/vendor/github.com/nishanths/exhaustive/comment.go +++ b/vendor/github.com/nishanths/exhaustive/comment.go @@ -3,80 +3,20 @@ package exhaustive import ( "go/ast" "go/token" - "regexp" "strings" ) -// Generated file definition -// http://golang.org/s/generatedcode -// -// To convey to humans and machine tools that code is generated, generated -// source should have a line that matches the following regular expression (in -// Go syntax): -// -// ^// Code generated .* DO NOT EDIT\.$ -// -// This line must appear before the first non-comment, non-blank -// text in the file. - -func isGeneratedFile(file *ast.File) bool { - // NOTE: file.Comments includes file.Doc as well, so no need - // to separately check file.Doc. - - for _, c := range file.Comments { - for _, cc := range c.List { - // This check is intended to handle "must appear before the - // first non-comment, non-blank text in the file". - // TODO: Is this check fully correct? Seems correct based - // on https://golang.org/ref/spec#Source_file_organization. - if c.Pos() >= file.Package { - return false - } - // According to the docs: - // '\r' has been removed. - // '\n' has been removed for //-style comments, which is what we care about. - // Also manually verified. - if isGeneratedFileComment(cc.Text) { - return true - } - } - } - - return false -} - -var generatedCodeRe = regexp.MustCompile(`^// Code generated .* DO NOT EDIT\.$`) - -func isGeneratedFileComment(s string) bool { - return generatedCodeRe.MatchString(s) -} - -type generatedCache map[*ast.File]bool - -func (c generatedCache) IsGenerated(file *ast.File) bool { - if _, ok := c[file]; !ok { - c[file] = isGeneratedFile(file) - } - return c[file] -} - -// ignoreDirective is used to exclude checking of specific switch statements. 
-const ignoreDirective = "//exhaustive:ignore" -const enforceDirective = "//exhaustive:enforce" - -type commentsCache map[*ast.File]ast.CommentMap - -func (c commentsCache) GetComments(file *ast.File, set *token.FileSet) ast.CommentMap { - if _, ok := c[file]; !ok { - c[file] = ast.NewCommentMap(set, file, file.Comments) - } - return c[file] -} +const ( + ignoreComment = "//exhaustive:ignore" + enforceComment = "//exhaustive:enforce" + ignoreDefaultCaseRequiredComment = "//exhaustive:ignore-default-case-required" + enforceDefaultCaseRequiredComment = "//exhaustive:enforce-default-case-required" +) -func containsDirective(comments []*ast.CommentGroup, directive string) bool { +func hasCommentPrefix(comments []*ast.CommentGroup, comment string) bool { for _, c := range comments { for _, cc := range c.List { - if strings.HasPrefix(cc.Text, directive) { + if strings.HasPrefix(cc.Text, comment) { return true } } @@ -84,10 +24,6 @@ func containsDirective(comments []*ast.CommentGroup, directive string) bool { return false } -func containsEnforceDirective(comments []*ast.CommentGroup) bool { - return containsDirective(comments, enforceDirective) -} - -func containsIgnoreDirective(comments []*ast.CommentGroup) bool { - return containsDirective(comments, ignoreDirective) +func fileCommentMap(fset *token.FileSet, file *ast.File) ast.CommentMap { + return ast.NewCommentMap(fset, file, file.Comments) } diff --git a/vendor/github.com/nishanths/exhaustive/comment_go121.go b/vendor/github.com/nishanths/exhaustive/comment_go121.go new file mode 100644 index 0000000000..a7bbc8881c --- /dev/null +++ b/vendor/github.com/nishanths/exhaustive/comment_go121.go @@ -0,0 +1,11 @@ +//go:build go1.21 + +package exhaustive + +import ( + "go/ast" +) + +func isGeneratedFile(file *ast.File) bool { + return ast.IsGenerated(file) +} diff --git a/vendor/github.com/nishanths/exhaustive/comment_pre_go121.go b/vendor/github.com/nishanths/exhaustive/comment_pre_go121.go new file mode 100644 index 0000000000..28d2ed493e --- /dev/null +++ b/vendor/github.com/nishanths/exhaustive/comment_pre_go121.go @@ -0,0 +1,27 @@ +//go:build !go1.21 + +package exhaustive + +import ( + "go/ast" + "regexp" +) + +// For definition of generated file see: +// http://golang.org/s/generatedcode + +var generatedCodeRe = regexp.MustCompile(`^// Code generated .* DO NOT EDIT\.$`) + +func isGeneratedFile(file *ast.File) bool { + for _, c := range file.Comments { + for _, cc := range c.List { + if cc.Pos() > file.Package { + break + } + if generatedCodeRe.MatchString(cc.Text) { + return true + } + } + } + return false +} diff --git a/vendor/github.com/nishanths/exhaustive/common.go b/vendor/github.com/nishanths/exhaustive/common.go new file mode 100644 index 0000000000..f22b0e1479 --- /dev/null +++ b/vendor/github.com/nishanths/exhaustive/common.go @@ -0,0 +1,474 @@ +package exhaustive + +import ( + "fmt" + "go/ast" + "go/token" + "go/types" + "regexp" + "sort" + "strings" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/ast/astutil" +) + +// enumTypeAndMembers combines an enumType and its members set. +type enumTypeAndMembers struct { + typ enumType + members enumMembers +} + +func fromNamed(pass *analysis.Pass, t *types.Named, typeparam bool) (result []enumTypeAndMembers, ok bool) { + if tpkg := t.Obj().Pkg(); tpkg == nil { + // go/types documentation says: nil for labels and + // objects in the Universe scope. This happens for the built-in + // error type for example. 
+ return nil, false // not a valid enum type, so ok == false + } + + et := enumType{t.Obj()} + if em, ok := importFact(pass, et); ok { + return []enumTypeAndMembers{{et, em}}, true + } + + if typeparam { + // is it a named interface? + if intf, ok := t.Underlying().(*types.Interface); ok { + return fromInterface(pass, intf, typeparam) + } + } + + return nil, false // not a valid enum type, so ok == false +} + +func fromInterface(pass *analysis.Pass, intf *types.Interface, typeparam bool) (result []enumTypeAndMembers, ok bool) { + allOk := true + for i := 0; i < intf.NumEmbeddeds(); i++ { + r, ok := fromType(pass, intf.EmbeddedType(i), typeparam) + result = append(result, r...) + allOk = allOk && ok + } + return result, allOk +} + +func fromUnion(pass *analysis.Pass, union *types.Union, typeparam bool) (result []enumTypeAndMembers, ok bool) { + allOk := true + // gather from each term in the union. + for i := 0; i < union.Len(); i++ { + r, ok := fromType(pass, union.Term(i).Type(), typeparam) + result = append(result, r...) + allOk = allOk && ok + } + return result, allOk +} + +func fromTypeParam(pass *analysis.Pass, tp *types.TypeParam, typeparam bool) (result []enumTypeAndMembers, ok bool) { + // Does not appear to be explicitly documented, but based on Go language + // spec (see section Type constraints) and Go standard library source code, + // we can expect constraints to have underlying type *types.Interface + // Regardless it will be handled in fromType. + return fromType(pass, tp.Constraint().Underlying(), typeparam) +} + +func fromType(pass *analysis.Pass, t types.Type, typeparam bool) (result []enumTypeAndMembers, ok bool) { + switch t := t.(type) { + case *types.Named: + return fromNamed(pass, t, typeparam) + + case *types.Union: + return fromUnion(pass, t, typeparam) + + case *types.TypeParam: + return fromTypeParam(pass, t, typeparam) + + case *types.Interface: + if !typeparam { + return nil, true + } + // anonymous interface. + // e.g. func foo[T interface { M } | interface { N }](v T) {} + return fromInterface(pass, t, typeparam) + + default: + // ignore these. + return nil, true + } +} + +func composingEnumTypes(pass *analysis.Pass, t types.Type) (result []enumTypeAndMembers, ok bool) { + _, typeparam := t.(*types.TypeParam) + result, ok = fromType(pass, t, typeparam) + + if typeparam { + var kind types.BasicKind + var kindSet bool + + // sameBasicKind reports whether each type t that the function is called + // with has the same underlying basic kind. + sameBasicKind := func(t types.Type) (ok bool) { + basic, ok := t.Underlying().(*types.Basic) + if !ok { + return false + } + if kindSet && kind != basic.Kind() { + return false + } + kind = basic.Kind() + kindSet = true + return true + } + + for _, rr := range result { + if !sameBasicKind(rr.typ.TypeName.Type()) { + ok = false + break + } + } + } + + return result, ok +} + +func denotesPackage(ident *ast.Ident, info *types.Info) bool { + obj := info.ObjectOf(ident) + if obj == nil { + return false + } + _, ok := obj.(*types.PkgName) + return ok +} + +// exprConstVal returns the constantValue for an expression if the +// expression is a constant value and if the expression is considered +// valid to satisfy exhaustiveness as defined by this program. +// Otherwise it returns (_, false). 
+func exprConstVal(e ast.Expr, info *types.Info) (constantValue, bool) { + handleIdent := func(ident *ast.Ident) (constantValue, bool) { + obj := info.Uses[ident] + if obj == nil { + return "", false + } + if _, ok := obj.(*types.Const); !ok { + return "", false + } + // There are two scenarios. + // See related test cases in typealias/quux/quux.go. + // + // # Scenario 1 + // + // Tag package and constant package are the same. This is + // simple; we just use fs.ModeDir's value. + // Example: + // + // var mode fs.FileMode + // switch mode { + // case fs.ModeDir: + // } + // + // # Scenario 2 + // + // Tag package and constant package are different. In this + // scenario, too, we accept the case clause expr constant value, + // as is. If the Go type checker is okay with the name being + // listed in the case clause, we don't care much further. + // + // Example: + // + // var mode fs.FileMode + // switch mode { + // case os.ModeDir: + // } + // + // Or equivalently: + // + // // The type of mode is effectively fs.FileMode, + // // due to type alias. + // var mode os.FileMode + // switch mode { + // case os.ModeDir: + // } + return determineConstVal(ident, info), true + } + + e = stripTypeConversions(astutil.Unparen(e), info) + + switch e := e.(type) { + case *ast.Ident: + return handleIdent(e) + + case *ast.SelectorExpr: + x := astutil.Unparen(e.X) + // Ensure we only see the form pkg.Const, and not e.g. + // structVal.f or structVal.inner.f. + // + // For this purpose, first we check that X, which is everything + // except the rightmost field selector *ast.Ident (the Sel + // field), is also an *ast.Ident. + xIdent, ok := x.(*ast.Ident) + if !ok { + return "", false + } + // Second, check that it's a package. It doesn't matter which + // package, just that it denotes some package. + if !denotesPackage(xIdent, info) { + return "", false + } + return handleIdent(e.Sel) + + default: + // e.g. literal + // these aren't considered towards satisfying exhaustiveness. + return "", false + } +} + +// stripTypeConversions removing type conversions from the experession. +func stripTypeConversions(e ast.Expr, info *types.Info) ast.Expr { + c, ok := e.(*ast.CallExpr) + if !ok { + return e + } + typ := info.TypeOf(c.Fun) + if typ == nil { + // can happen for built-ins. + return e + } + // do not allow function calls. + if _, ok := typ.Underlying().(*types.Signature); ok { + return e + } + // type conversions have exactly one arg. + if len(c.Args) != 1 { + return e + } + return stripTypeConversions(astutil.Unparen(c.Args[0]), info) +} + +// member is a single member of an enum type. +type member struct { + pos token.Pos + typ enumType + name string + val constantValue +} + +type checklist struct { + info map[enumType]enumMembers + checkl map[member]struct{} + ignoreConstantRe *regexp.Regexp + ignoreTypeRe *regexp.Regexp +} + +func (c *checklist) ignoreConstant(pattern *regexp.Regexp) { + c.ignoreConstantRe = pattern +} + +func (c *checklist) ignoreType(pattern *regexp.Regexp) { + c.ignoreTypeRe = pattern +} + +func (*checklist) reMatch(re *regexp.Regexp, s string) bool { + if re == nil { + return false + } + return re.MatchString(s) +} + +func (c *checklist) add(et enumType, em enumMembers, includeUnexported bool) { + addOne := func(name string) { + if isBlankIdentifier(name) { + // Blank identifier is often used to skip entries in iota + // lists. Also, it can't be referenced anywhere (e.g. 
can't + // be referenced in switch statement cases) It doesn't make + // sense to include it as required member to satisfy + // exhaustiveness. + return + } + if !ast.IsExported(name) && !includeUnexported { + return + } + if c.reMatch(c.ignoreConstantRe, fmt.Sprintf("%s.%s", et.Pkg().Path(), name)) { + return + } + if c.reMatch(c.ignoreTypeRe, fmt.Sprintf("%s.%s", et.Pkg().Path(), et.TypeName.Name())) { + return + } + mem := member{ + em.NameToPos[name], + et, + name, + em.NameToValue[name], + } + if c.checkl == nil { + c.checkl = make(map[member]struct{}) + } + c.checkl[mem] = struct{}{} + } + + if c.info == nil { + c.info = make(map[enumType]enumMembers) + } + c.info[et] = em + + for _, name := range em.Names { + addOne(name) + } +} + +func (c *checklist) found(val constantValue) { + // delete all same-valued items. + for et, em := range c.info { + for _, name := range em.ValueToNames[val] { + delete(c.checkl, member{ + em.NameToPos[name], + et, + name, + em.NameToValue[name], + }) + } + } +} + +func (c *checklist) remaining() map[member]struct{} { + return c.checkl +} + +// group is a collection of same-valued members, possibly from +// different enum types. +type group []member + +func groupify(items map[member]struct{}, types []enumType) []group { + // indices maps each element in the input slice to its index. + indices := func(vs []enumType) map[enumType]int { + ret := make(map[enumType]int, len(vs)) + for i, v := range vs { + ret[v] = i + } + return ret + } + + typesOrder := indices(types) // for quick lookup + astBefore := func(x, y member) bool { + if typesOrder[x.typ] < typesOrder[y.typ] { + return true + } + if typesOrder[x.typ] > typesOrder[y.typ] { + return false + } + return x.pos < y.pos + } + + // byConstVal groups member names by constant value. + byConstVal := func(items map[member]struct{}) map[constantValue][]member { + ret := make(map[constantValue][]member) + for m := range items { + ret[m.val] = append(ret[m.val], m) + } + return ret + } + + var groups []group + for _, ms := range byConstVal(items) { + groups = append(groups, group(ms)) + } + + // sort members within each group in AST order. + for i := range groups { + g := groups[i] + sort.Slice(g, func(i, j int) bool { return astBefore(g[i], g[j]) }) + groups[i] = g + } + // sort groups themselves in AST order. + // the index [0] access is safe, because there will be at least one + // element per group. + sort.Slice(groups, func(i, j int) bool { return astBefore(groups[i][0], groups[j][0]) }) + + return groups +} + +func diagnosticEnumType(enumType *types.TypeName) string { + return enumType.Pkg().Name() + "." + enumType.Name() +} + +func diagnosticEnumTypes(types []enumType) string { + var buf strings.Builder + for i := range types { + buf.WriteString(diagnosticEnumType(types[i].TypeName)) + if i != len(types)-1 { + buf.WriteByte('|') + } + } + return buf.String() +} + +func diagnosticMember(m member) string { + return m.typ.Pkg().Name() + "." 
+ m.name +} + +func diagnosticGroups(gs []group) string { + out := make([]string, len(gs)) + for i := range gs { + var buf strings.Builder + for j := range gs[i] { + buf.WriteString(diagnosticMember(gs[i][j])) + if j != len(gs[i])-1 { + buf.WriteByte('|') + } + } + out[i] = buf.String() + } + return strings.Join(out, ", ") +} + +func toEnumTypes(es []enumTypeAndMembers) []enumType { + out := make([]enumType, len(es)) + for i := range es { + out[i] = es[i].typ + } + return out +} + +func dedupEnumTypes(types []enumType) []enumType { + m := make(map[enumType]struct{}) + var ret []enumType + for _, t := range types { + _, ok := m[t] + if ok { + continue + } + m[t] = struct{}{} + ret = append(ret, t) + } + return ret +} + +type boolCache struct { + m map[*ast.File]bool + compute func(*ast.File) bool +} + +func (c *boolCache) get(file *ast.File) bool { + if _, ok := c.m[file]; !ok { + if c.m == nil { + c.m = make(map[*ast.File]bool) + } + c.m[file] = c.compute(file) + } + return c.m[file] +} + +type commentCache struct { + m map[*ast.File]ast.CommentMap + compute func(*token.FileSet, *ast.File) ast.CommentMap +} + +func (c *commentCache) get(fset *token.FileSet, file *ast.File) ast.CommentMap { + if _, ok := c.m[file]; !ok { + if c.m == nil { + c.m = make(map[*ast.File]ast.CommentMap) + } + c.m[file] = c.compute(fset, file) + } + return c.m[file] +} diff --git a/vendor/github.com/nishanths/exhaustive/doc.go b/vendor/github.com/nishanths/exhaustive/doc.go new file mode 100644 index 0000000000..a745247db3 --- /dev/null +++ b/vendor/github.com/nishanths/exhaustive/doc.go @@ -0,0 +1,216 @@ +/* +Package exhaustive defines an analyzer that checks exhaustiveness of switch +statements of enum-like constants in Go source code. The analyzer can +optionally also check exhaustiveness of keys in map literals whose key type +is enum-like. + +# Definition of enum + +The Go [language spec] does not have an explicit definition for enums. For +the purpose of this analyzer, and by convention, an enum type is any named +type that: + + - has an [underlying type] of float, string, or integer (includes byte + and rune); and + - has at least one constant of its type defined in the same [block]. + +In the example below, Biome is an enum type. The three constants are its +enum members. + + package eco + + type Biome int + + const ( + Tundra Biome = 1 + Savanna Biome = 2 + Desert Biome = 3 + ) + +Enum member constants for an enum type must be declared in the same block as +the type. The constant values may be specified using iota, literal values, or +any valid means for declaring a Go constant. It is allowed for multiple enum +member constants for an enum type to have the same constant value. + +# Definition of exhaustiveness + +A switch statement that switches on a value of an enum type is exhaustive if +all enum members are listed in the switch statement's cases. If multiple enum +members have the same constant value, it is sufficient for any one of these +same-valued members to be listed. + +For an enum type defined in the same package as the switch statement, both +exported and unexported enum members must be listed to satisfy exhaustiveness. +For an enum type defined in an external package, it is sufficient that only +exported enum members are listed. Only constant identifiers (e.g. 
Tundra, +eco.Desert) listed in a switch statement's case clause can contribute towards +satisfying exhaustiveness; other expressions, such as literal values and +function calls, listed in case clauses do not contribute towards satisfying +exhaustiveness. + +By default, the existence of a default case in a switch statement does not +unconditionally make a switch statement exhaustive. Use the +-default-signifies-exhaustive flag to adjust this behavior. + +For a map literal whose key type is an enum type, a similar definition of +exhaustiveness applies. The map literal is considered exhaustive if all enum +members are be listed in its keys. Empty map literals are never checked for +exhaustiveness. + +# Type parameters + +A switch statement that switches on a value whose type is a type parameter is +checked for exhaustiveness if and only if each type element in the type +constraint is an enum type and the type elements share the same underlying +[BasicKind]. + +For example, the switch statement below will be checked because each type +element (i.e. M and N) in the type constraint is an enum type and the type +elements share the same underlying BasicKind, namely int8. To satisfy +exhaustiveness, the enum members collectively belonging to the enum types M +and N (i.e. A, B, and C) must be listed in the switch statement's cases. + + func bar[T M | I](v T) { + switch v { + case T(A): + case T(B): + case T(C): + } + } + + type I interface{ N } + + type M int8 + const A M = 1 + + type N int8 + const B N = 2 + const C N = 3 + +# Type aliases + +The analyzer handles type aliases as shown in the example below. newpkg.M is +an enum type. oldpkg.M is an alias for newpkg.M. Note that oldpkg.M isn't +itself an enum type; oldpkg.M is simply an alias for the actual enum type +newpkg.M. + + package oldpkg + type M = newpkg.M + const ( + A = newpkg.A + B = newpkg.B + ) + + package newpkg + type M int + const ( + A M = 1 + B M = 2 + ) + +A switch statement that switches either on a value of type newpkg.M or of type +oldpkg.M (which, being an alias, is just an alternative spelling for newpkg.M) +is exhaustive if all of newpkg.M's enum members are listed in the switch +statement's cases. The following switch statement is exhaustive. + + func f(v newpkg.M) { + switch v { + case newpkg.A: // or equivalently oldpkg.A + case newpkg.B: // or equivalently oldpkg.B + } + } + +The analyzer guarantees that introducing a type alias (such as type M = +newpkg.M) will not result in new diagnostics if the set of enum member +constant values of the RHS type is a subset of the set of enum member constant +values of the LHS type. + +# Flags + +Summary: + + flag type default value + ---- ---- ------------- + -check comma-separated strings switch + -explicit-exhaustive-switch bool false + -explicit-exhaustive-map bool false + -check-generated bool false + -default-signifies-exhaustive bool false + -ignore-enum-members regexp pattern (none) + -ignore-enum-types regexp pattern (none) + -package-scope-only bool false + +Descriptions: + + -check + Comma-separated list of program elements to check for + exhaustiveness. Supported program element values are + "switch" and "map". The default value is "switch", which + means that only switch statements are checked. + + -explicit-exhaustive-switch + Check a switch statement only if it is associated with a + "//exhaustive:enforce" comment. By default the analyzer + checks every switch statement that isn't associated with a + "//exhaustive:ignore" comment. 
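As a hedged illustration of the opt-in mode described above for -explicit-exhaustive-switch, the sketch below reuses the Biome enum declared earlier in this documentation; only the "//exhaustive:enforce" comment text is taken from the flag description, and the rest is an assumed example.

```go
package eco

// Biome, Tundra, Savanna, and Desert are as declared in the example
// earlier in this documentation.
//
// With -explicit-exhaustive-switch, only the first switch below is
// checked, because it carries the enforce comment; it would be reported
// for the missing Desert member. The second switch is skipped entirely.
func describe(b Biome) string {
	//exhaustive:enforce
	switch b {
	case Tundra:
		return "cold and treeless"
	case Savanna:
		return "grassy"
	}

	switch b {
	case Tundra:
		return "cold and treeless"
	}
	return "unknown"
}
```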
+ + -explicit-exhaustive-map + Similar to -explicit-exhaustive-switch but for map literals. + + -check-generated + Check generated files. For the definition of a generated + file, see https://golang.org/s/generatedcode. + + -default-signifies-exhaustive + Consider a switch statement to be exhaustive + unconditionally if it has a default case. (In other words, + all enum members do not have to be listed in its cases if a + default case is present.) Setting this flag usually is + counter to the purpose of exhaustiveness checks, so it is + not recommended to set this flag. + + -ignore-enum-members + Constants that match the specified regular expression (in + package regexp syntax) are not considered enum members and + hence do not have to be listed to satisfy exhaustiveness. + The specified regular expression is matched against the + constant name inclusive of import path. For example, if the + import path for the constant is "example.org/eco" and the + constant name is "Tundra", then the specified regular + expression is matched against the string + "example.org/eco.Tundra". + + -ignore-enum-types + Similar to -ignore-enum-members but for types. + + -package-scope-only + Only discover enums declared in file-level blocks. By + default, the analyzer discovers enums defined in all + blocks. + +# Skip analysis + +To skip analysis of a switch statement or a map literal, associate it with a +comment that begins with "//exhaustive:ignore". For example: + + //exhaustive:ignore ... an optional explanation goes here ... + switch v { + case A: + case B: + } + +To ignore specific constants in exhaustiveness checks, specify the +-ignore-enum-members flag: + + exhaustive -ignore-enum-members '^example\.org/eco\.Tundra$' + +To ignore specific types, specify the -ignore-enum-types flag: + + exhaustive -ignore-enum-types '^time\.Duration$|^example\.org/measure\.Unit$' + +[language spec]: https://golang.org/ref/spec +[underlying type]: https://golang.org/ref/spec#Underlying_types +[block]: https://golang.org/ref/spec#Blocks +[BasicKind]: https://pkg.go.dev/go/types#BasicKind +*/ +package exhaustive diff --git a/vendor/github.com/nishanths/exhaustive/enum.go b/vendor/github.com/nishanths/exhaustive/enum.go index 2b287e39a0..cabf1d880d 100644 --- a/vendor/github.com/nishanths/exhaustive/enum.go +++ b/vendor/github.com/nishanths/exhaustive/enum.go @@ -10,41 +10,48 @@ import ( "golang.org/x/tools/go/ast/inspector" ) -// constantValue is a constant.Value.ExactString(). +// constantValue is a (constant.Value).ExactString value. type constantValue string -// Represents an enum type (or a potential enum type). -// It is a defined (named) type's name. +// enumType represents an enum type as defined by this program, which +// effectively is a defined (named) type. type enumType struct{ *types.TypeName } func (et enumType) String() string { return et.TypeName.String() } // for debugging func (et enumType) scope() *types.Scope { return et.TypeName.Parent() } // scope that the type is declared in func (et enumType) factObject() types.Object { return et.TypeName } // types.Object for fact export -// enumMembers is the members for a single enum type. +// enumMembers is the set of enum members for a single enum type. // The zero value is ready to use. 
type enumMembers struct { - Names []string // enum member names, AST order + Names []string // enum member names + NameToPos map[string]token.Pos // enum member name -> AST position NameToValue map[string]constantValue // enum member name -> constant value ValueToNames map[constantValue][]string // constant value -> enum member names } -func (em *enumMembers) add(name string, val constantValue) { +// add adds an enum member to the set. +func (em *enumMembers) add(name string, val constantValue, pos token.Pos) { + if em.NameToPos == nil { + em.NameToPos = make(map[string]token.Pos) + } if em.NameToValue == nil { em.NameToValue = make(map[string]constantValue) } if em.ValueToNames == nil { em.ValueToNames = make(map[constantValue][]string) } - em.Names = append(em.Names, name) + em.NameToPos[name] = pos em.NameToValue[name] = val em.ValueToNames[val] = append(em.ValueToNames[val], name) } -func (em enumMembers) String() string { return em.factString() } // for debugging +func (em *enumMembers) String() string { + return em.factString() +} -func (em enumMembers) factString() string { +func (em *enumMembers) factString() string { var buf strings.Builder for j, vv := range em.Names { buf.WriteString(vv) @@ -74,7 +81,7 @@ func findEnums(pkgScopeOnly bool, pkg *types.Package, inspect *inspector.Inspect continue } v := result[enumTyp] - v.add(memberName, val) + v.add(memberName, val, name.Pos()) result[enumTyp] = v } } @@ -84,6 +91,25 @@ func findEnums(pkgScopeOnly bool, pkg *types.Package, inspect *inspector.Inspect } func possibleEnumMember(constName *ast.Ident, info *types.Info) (et enumType, name string, val constantValue, ok bool) { + // Notes + // + // type T int + // const A T = iota // obj.Type() is T + // + // type R T + // const B R = iota // obj.Type() is R + // + // type T2 int + // type T1 = T2 + // const C T1 = iota // obj.Type() is T2 + // + // type T3 = T4 + // type T4 int + // type T5 = T3 + // const D T5 = iota // obj.Type() is T4 + // + // In all these cases, validNamedBasic(obj.Type()) == true. + obj := info.Defs[constName] if obj == nil { panic(fmt.Sprintf("info.Defs[%s] == nil", constName)) @@ -91,44 +117,22 @@ func possibleEnumMember(constName *ast.Ident, info *types.Info) (et enumType, na if _, ok = obj.(*types.Const); !ok { panic(fmt.Sprintf("obj must be *types.Const, got %T", obj)) } - if isBlankIdentifier(obj) { + if isBlankIdentifier(obj.Name()) { // These objects have a nil parent scope. // Also, we have no real purpose to record them. return enumType{}, "", "", false } - - /* - NOTE: - - type T int - const A T = iota // obj.Type() is T - - type R T - const B R = iota // obj.Type() is R - - type T2 int - type T1 = T2 - const C T1 = iota // obj.Type() is T2 - - type T3 = T4 - type T4 int - type T5 = T3 - const D T5 = iota // obj.Type() is T4 - - // And, in all these cases, validNamedBasic(obj.Type()) == true. - */ - if !validNamedBasic(obj.Type()) { return enumType{}, "", "", false } - named := obj.Type().(*types.Named) // guaranteed by validNamedBasic() + named := obj.Type().(*types.Named) // guaranteed by validNamedBasic tn := named.Obj() - // Enum type's scope and enum member's scope must be the same. If they're - // not, don't consider the const a member. Additionally, the enum type and - // the enum member must be in the same package (the scope check accounts for - // this, too). + // By definition, enum type's scope and enum member's scope must be the + // same. If they're not, don't consider the const a member. 
Additionally, + // the enum type and the enum member must be in the same package (the + // scope check accounts for this, too). if tn.Parent() != obj.Parent() { return enumType{}, "", "", false } @@ -141,8 +145,8 @@ func determineConstVal(name *ast.Ident, info *types.Info) constantValue { return constantValue(c.Val().ExactString()) } -func isBlankIdentifier(obj types.Object) bool { - return obj.Name() == "_" // NOTE: go/types/decl.go does a direct comparison like this +func isBlankIdentifier(name string) bool { + return name == "_" // NOTE: go/types/decl.go does a direct comparison like this } func validBasic(basic *types.Basic) bool { @@ -154,10 +158,12 @@ func validBasic(basic *types.Basic) bool { } // validNamedBasic returns whether the type t is a named type whose underlying -// type is a valid basic type to form an enum. -// A type that passes this check meets the definition of an enum type. -// Note that -// validNamedBasic(t) == true => t.(*types.Named) +// type is a valid basic type to form an enum. A type that passes this check +// meets the definition of an enum type. +// +// The following is guaranteed: +// +// validNamedBasic(t) == true => t.(*types.Named) func validNamedBasic(t types.Type) bool { named, ok := t.(*types.Named) if !ok { diff --git a/vendor/github.com/nishanths/exhaustive/exhaustive.go b/vendor/github.com/nishanths/exhaustive/exhaustive.go index 8ec80e066c..013ac47bb7 100644 --- a/vendor/github.com/nishanths/exhaustive/exhaustive.go +++ b/vendor/github.com/nishanths/exhaustive/exhaustive.go @@ -1,251 +1,35 @@ -/* -Package exhaustive provides an analyzer that checks exhaustiveness of enum -switch statements and map literals in Go source code. - -# Definition of enum - -The Go language spec does not provide an explicit definition for an enum. For -the purpose of this analyzer, an enum type is any named type (a.k.a. defined -type) whose underlying type is an integer (includes byte and rune), a float, -or a string type. An enum type has associated with it constants of this named -type; these constants constitute the enum members. - -In the example below, Biome is an enum type with 3 members. - - type Biome int - - const ( - Tundra Biome = 1 - Savanna Biome = 2 - Desert Biome = 3 - ) - -For a constant to be an enum member for an enum type, the constant must be -declared in the same scope as the enum type. Note that the scope requirement -implies that only constants declared in the same package as the enum type's -package can constitute the enum members for the enum type. - -Enum member constants for a given enum type don't necessarily have to all be -declared in the same const block. Constant values may be specified using iota, -using explicit values, or by any means of declaring a valid Go const. It is -allowed for multiple enum member constants for a given enum type to have the -same constant value. - -# Definition of exhaustiveness - -A switch statement that switches on a value of an enum type is exhaustive if -all of the enum type's members are listed in the switch statement's cases. If -multiple enum member constants have the same constant value, it is sufficient -for any one of these same-valued members to be listed. - -For an enum type defined in the same package as the switch statement, both -exported and unexported enum members must be listed to satisfy exhaustiveness. -For an enum type defined in an external package, it is sufficient that only -exported enum members are listed. - -Only identifiers denoting constants (e.g. 
Tundra) and qualified identifiers -denoting constants (e.g. somepkg.Grassland) listed in a switch statement's -cases can contribute towards satisfying exhaustiveness. Literal values, struct -fields, re-assignable variables, etc. will not. - -The analyzer will produce a diagnostic about unhandled enum members if the -required memebers are not listed in a switch statement's cases (this applies -even if the switch statement has a 'default' case). - -# Map literals - -All of the above also applies to map literals in which the key type is an enum -type. Empty map literals are never checked. The -check flag must include -"map" for map literals to be checked. - -# Type aliases - -The analyzer handles type aliases for an enum type in the following manner. -Consider the example below. T2 is a enum type, and T1 is an alias for T2. Note -that we don't term T1 itself an enum type; it is only an alias for an enum -type. - - package pkg - type T1 = newpkg.T2 - const ( - A = newpkg.A - B = newpkg.B - ) - - package newpkg - type T2 int - const ( - A T2 = 1 - B T2 = 2 - ) - -Then a switch statement that switches on a value of type T1 (which, in -reality, is just an alternate spelling for type T2) is exhaustive if all of -T2's enum members are listed in the switch statement's cases. The same -conditions described in the previous section for same-valued enum members and -for exported/unexported enum members apply here too. - -It is worth noting that, though T1 and T2 are identical types, only constants -declared in the same scope as type T2's scope can be T2's enum members. In the -example, newpkg.A and newpkg.B are T2's enum members. - -The analyzer guarantees that introducing a type alias (such as type T1 = -newpkg.T2) will never result in new diagnostics from the analyzer, as long as -the set of enum member constant values of the new RHS type (newpkg.T2) is a -subset of the set of enum member constant values of the old LHS type (T1). - -# Advanced notes - -Non-enum member constants in a switch statement's cases: Recall from an -earlier section that a constant must be declared in the same scope as the enum -type to be an enum member. It is valid, however, both to the Go type checker -and to this analyzer, for any constant of the right type to be listed in the -cases of an enum switch statement (it does not necessarily have to be an enum -member constant declared in the same scope/package as the enum type's -scope/package). This is particularly useful when a type alias is involved: A -forwarding constant declaration (such as pkg.A, in type T1's package) can take -the place of the actual enum member constant (newpkg.A, in type T2's package) -in the switch statement's cases to satisfy exhaustiveness. - - var v pkg.T1 = pkg.ReturnsT1() // v is effectively of type newpkg.T2 due to alias - switch v { - case pkg.A: // valid substitute for newpkg.A (same constant value) - case pkg.B: // valid substitute for newpkg.B (same constant value) - } - -# Flags - -Notable flags supported by the analyzer are described below. -All of these flags are optional. - - flag type default value - - -check string switch - -explicit-exhaustive-switch bool false - -explicit-exhaustive-map bool false - -check-generated bool false - -default-signifies-exhaustive bool false - -ignore-enum-members string (none) - -package-scope-only bool false - -The -check flag specifies the program elements that should be checked for -exhaustiveness. By default, only switch statements are checked. Specify --check=switch,map to also check map literals. 
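For instance, when the -check flag includes "map" (e.g. -check=switch,map), a map literal keyed by the Biome type from the example above is checked in the same way as a switch statement; a minimal hedged sketch:

```go
// Assumes the Biome enum (Tundra, Savanna, Desert) from the example
// above. With map checking enabled, this literal would be reported for
// the missing Desert key; adding a Desert entry (or excluding Desert via
// -ignore-enum-members) satisfies the check. Empty map literals are
// never checked.
var biomeNames = map[Biome]string{
	Tundra:  "tundra",
	Savanna: "savanna",
}
```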
- -If the -explicit-exhaustive-switch flag is enabled, the analyzer only runs on -switch statements explicitly marked with the comment text -("exhaustive:enforce"). Otherwise, it runs on every enum switch statement not -marked with the comment text ("exhaustive:ignore"). - -If the -explicit-exhaustive-map flag is enabled, the analyzer only runs on -map literals explicitly marked with the comment text -("exhaustive:enforce"). Otherwise, it runs on every enum map literal not -marked with the comment text ("exhaustive:ignore"). - -If the -check-generated flag is enabled, switch statements in generated Go -source files are also checked. Otherwise, by default, switch statements in -generated files are not checked. See https://golang.org/s/generatedcode for the -definition of generated file. - -If the -default-signifies-exhaustive flag is enabled, the presence of a -'default' case in a switch statement always satisfies exhaustiveness, even if -all enum members are not listed. It is not recommended that you enable this -flag; enabling it generally defeats the purpose of exhaustiveness checking. - -The -ignore-enum-members flag specifies a regular expression in Go syntax. Enum -members matching the regular expression don't have to be listed in switch -statement cases to satisfy exhaustiveness. The specified regular expression is -matched against an enum member name inclusive of the enum package import path: -for example, if the enum package import path is "example.com/pkg" and the member -name is "Tundra", the specified regular expression will be matched against the -string "example.com/pkg.Tundra". - -If the -package-scope-only flag is enabled, the analyzer only finds enums -defined in package scopes, and consequently only switch statements that switch -on package-scoped enums will be checked for exhaustiveness. By default, the -analyzer finds enums defined in all scopes, and checks switch statements that -switch on all these enums. - -# Skip analysis - -In implicitly exhaustive switch mode (-explicit-exhaustive-switch=false), skip -checking of a specific switch statement by associating the comment shown in -the example below with the switch statement. Note the lack of whitespace -between the comment marker ("//") and the comment text ("exhaustive:ignore"). - - //exhaustive:ignore - switch v { ... } - -In explicitly exhaustive switch mode (-explicit-exhaustive-switch=true), run -exhaustiveness checks on a specific switch statement by associating the -comment shown in the example below with the switch statement. - - //exhaustive:enforce - switch v { ... } - -To ignore specific enum members, see the -ignore-enum-members flag. - -Switch statements in generated Go source files are not checked by default. -Use the -check-generated flag to change this behavior. -*/ package exhaustive import ( - "flag" + "fmt" "go/ast" - "regexp" - "strings" "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/ast/inspector" ) -var _ flag.Value = (*regexpFlag)(nil) - -// regexpFlag implements the flag.Value interface for parsing -// regular expression flag values. 
-type regexpFlag struct{ r *regexp.Regexp } - -func (v *regexpFlag) String() string { - if v == nil || v.r == nil { - return "" - } - return v.r.String() -} - -func (v *regexpFlag) Set(expr string) error { - if expr == "" { - v.r = nil - return nil - } - - r, err := regexp.Compile(expr) - if err != nil { - return err - } - - v.r = r - return nil +func init() { + registerFlags() } -func (v *regexpFlag) value() *regexp.Regexp { return v.r } - -func init() { - Analyzer.Flags.StringVar(&fCheck, CheckFlag, checkSwitch, "program elements to check for exhaustiveness") - Analyzer.Flags.BoolVar(&fExplicitExhaustiveSwitch, ExplicitExhaustiveSwitchFlag, false, "only run exhaustive check on switches with \"//exhaustive:enforce\" comment") - Analyzer.Flags.BoolVar(&fExplicitExhaustiveMap, ExplicitExhaustiveMapFlag, false, "only run exhaustive check on map literals with \"//exhaustive:enforce\" comment") - Analyzer.Flags.BoolVar(&fCheckGenerated, CheckGeneratedFlag, false, "check switch statements in generated files") - Analyzer.Flags.BoolVar(&fDefaultSignifiesExhaustive, DefaultSignifiesExhaustiveFlag, false, "presence of \"default\" case in switch statements satisfies exhaustiveness, even if all enum members are not listed") - Analyzer.Flags.Var(&fIgnoreEnumMembers, IgnoreEnumMembersFlag, "enum members matching `regex` do not have to be listed in switch statements to satisfy exhaustiveness") - Analyzer.Flags.BoolVar(&fPackageScopeOnly, PackageScopeOnlyFlag, false, "consider enums only in package scopes, not in inner scopes") +func registerFlags() { + Analyzer.Flags.Var(&fCheck, CheckFlag, "comma-separated list of program `elements` to check for exhaustiveness; supported element values: switch, map") + Analyzer.Flags.BoolVar(&fExplicitExhaustiveSwitch, ExplicitExhaustiveSwitchFlag, false, `check switch statement only if associated with "//exhaustive:enforce" comment`) + Analyzer.Flags.BoolVar(&fExplicitExhaustiveMap, ExplicitExhaustiveMapFlag, false, `check map literal only if associated with "//exhaustive:enforce" comment`) + Analyzer.Flags.BoolVar(&fCheckGenerated, CheckGeneratedFlag, false, "check generated files") + Analyzer.Flags.BoolVar(&fDefaultSignifiesExhaustive, DefaultSignifiesExhaustiveFlag, false, "switch statement is unconditionally exhaustive if it has a default case") + Analyzer.Flags.BoolVar(&fDefaultCaseRequired, DefaultCaseRequiredFlag, false, "switch statement requires default case even if exhaustive") + Analyzer.Flags.Var(&fIgnoreEnumMembers, IgnoreEnumMembersFlag, "ignore constants matching `regexp`") + Analyzer.Flags.Var(&fIgnoreEnumTypes, IgnoreEnumTypesFlag, "ignore types matching `regexp`") + Analyzer.Flags.BoolVar(&fPackageScopeOnly, PackageScopeOnlyFlag, false, "only discover enums declared in file-level blocks") var unused string - Analyzer.Flags.StringVar(&unused, IgnorePatternFlag, "", "no effect (deprecated); see -"+IgnoreEnumMembersFlag+" instead") + Analyzer.Flags.StringVar(&unused, IgnorePatternFlag, "", "no effect (deprecated); use -"+IgnoreEnumMembersFlag) Analyzer.Flags.StringVar(&unused, CheckingStrategyFlag, "", "no effect (deprecated)") } -// Flag names used by the analyzer. They are exported for use by analyzer +// Flag names used by the analyzer. These are exported for use by analyzer // driver programs. 
const ( CheckFlag = "check" @@ -253,115 +37,116 @@ const ( ExplicitExhaustiveMapFlag = "explicit-exhaustive-map" CheckGeneratedFlag = "check-generated" DefaultSignifiesExhaustiveFlag = "default-signifies-exhaustive" + DefaultCaseRequiredFlag = "default-case-required" IgnoreEnumMembersFlag = "ignore-enum-members" + IgnoreEnumTypesFlag = "ignore-enum-types" PackageScopeOnlyFlag = "package-scope-only" - IgnorePatternFlag = "ignore-pattern" // Deprecated: see IgnoreEnumMembersFlag instead. - CheckingStrategyFlag = "checking-strategy" // Deprecated. + // Deprecated flag names. + IgnorePatternFlag = "ignore-pattern" // Deprecated: use IgnoreEnumMembersFlag. + CheckingStrategyFlag = "checking-strategy" // Deprecated: no longer applicable. ) +// Flag values. var ( - fCheck string + fCheck = stringsFlag{elements: defaultCheckElements, filter: validCheckElement} fExplicitExhaustiveSwitch bool fExplicitExhaustiveMap bool fCheckGenerated bool fDefaultSignifiesExhaustive bool + fDefaultCaseRequired bool fIgnoreEnumMembers regexpFlag + fIgnoreEnumTypes regexpFlag fPackageScopeOnly bool ) -const ( - checkSwitch = "switch" - checkMap = "map" -) - -// resetFlags resets the flag variables to their default values. +// resetFlags resets the flag variables to default values. // Useful in tests. func resetFlags() { - fCheck = checkSwitch + fCheck = stringsFlag{elements: defaultCheckElements, filter: validCheckElement} fExplicitExhaustiveSwitch = false fExplicitExhaustiveMap = false fCheckGenerated = false fDefaultSignifiesExhaustive = false + fDefaultCaseRequired = false fIgnoreEnumMembers = regexpFlag{} + fIgnoreEnumTypes = regexpFlag{} fPackageScopeOnly = false } +// checkElement is a program element supported by the -check flag. +type checkElement string + +const ( + elementSwitch checkElement = "switch" + elementMap checkElement = "map" +) + +func validCheckElement(s string) error { + switch checkElement(s) { + case elementSwitch: + return nil + case elementMap: + return nil + default: + return fmt.Errorf("invalid program element %q", s) + } +} + +var defaultCheckElements = []string{ + string(elementSwitch), +} + var Analyzer = &analysis.Analyzer{ Name: "exhaustive", - Doc: "check exhaustiveness of enum switch statements and map literals", + Doc: "check exhaustiveness of enum switch statements", Run: run, Requires: []*analysis.Analyzer{inspect.Analyzer}, FactTypes: []analysis.Fact{&enumMembersFact{}}, } func run(pass *analysis.Pass) (interface{}, error) { - checks := make(map[string]bool) - for _, v := range strings.Split(fCheck, ",") { - v = strings.TrimSpace(v) - switch v { - case checkSwitch: - checks[checkSwitch] = true - case checkMap: - checks[checkMap] = true - } - } - inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) - for typ, members := range findEnums( - fPackageScopeOnly, - pass.Pkg, - inspect, - pass.TypesInfo, - ) { + for typ, members := range findEnums(fPackageScopeOnly, pass.Pkg, inspect, pass.TypesInfo) { exportFact(pass, typ, members) } - generated := make(generatedCache) - comments := make(commentsCache) - - swChecker := switchChecker( - pass, - switchConfig{ - explicitExhaustiveSwitch: fExplicitExhaustiveSwitch, - defaultSignifiesExhaustive: fDefaultSignifiesExhaustive, - checkGeneratedFiles: fCheckGenerated, - ignoreEnumMembers: fIgnoreEnumMembers.value(), - }, - generated, - comments, - ) - - mapChecker := mapChecker( - pass, - mapConfig{ - explicitExhaustiveMap: fExplicitExhaustiveMap, - checkGeneratedFiles: fCheckGenerated, - ignoreEnumMembers: 
fIgnoreEnumMembers.value(), - }, - generated, - comments, - ) - - var types []ast.Node - if checks[checkSwitch] { - types = append(types, &ast.SwitchStmt{}) - } - if checks[checkMap] { - types = append(types, &ast.CompositeLit{}) - } - - inspect.WithStack(types, func(n ast.Node, push bool, stack []ast.Node) bool { - var proceed bool - switch n.(type) { - case *ast.SwitchStmt: - proceed, _ = swChecker(n, push, stack) - case *ast.CompositeLit: - proceed, _ = mapChecker(n, push, stack) + generated := boolCache{compute: isGeneratedFile} + comments := commentCache{compute: fileCommentMap} + + // NOTE: should not share the same inspect.WithStack call for different + // program elements: the visitor function for a program element may + // exit traversal early, but this shouldn't affect traversal for + // other program elements. + for _, e := range fCheck.elements { + switch checkElement(e) { + case elementSwitch: + conf := switchConfig{ + explicit: fExplicitExhaustiveSwitch, + defaultSignifiesExhaustive: fDefaultSignifiesExhaustive, + defaultCaseRequired: fDefaultCaseRequired, + checkGenerated: fCheckGenerated, + ignoreConstant: fIgnoreEnumMembers.re, + ignoreType: fIgnoreEnumTypes.re, + } + checker := switchChecker(pass, conf, generated, comments) + inspect.WithStack([]ast.Node{&ast.SwitchStmt{}}, toVisitor(checker)) + + case elementMap: + conf := mapConfig{ + explicit: fExplicitExhaustiveMap, + checkGenerated: fCheckGenerated, + ignoreConstant: fIgnoreEnumMembers.re, + ignoreType: fIgnoreEnumTypes.re, + } + checker := mapChecker(pass, conf, generated, comments) + inspect.WithStack([]ast.Node{&ast.CompositeLit{}}, toVisitor(checker)) + + default: + panic(fmt.Sprintf("unknown checkElement %v", e)) } - return proceed - }) + } return nil, nil } diff --git a/vendor/github.com/nishanths/exhaustive/flag.go b/vendor/github.com/nishanths/exhaustive/flag.go new file mode 100644 index 0000000000..49d3d3c6c0 --- /dev/null +++ b/vendor/github.com/nishanths/exhaustive/flag.go @@ -0,0 +1,75 @@ +package exhaustive + +import ( + "flag" + "regexp" + "strings" +) + +var _ flag.Value = (*regexpFlag)(nil) +var _ flag.Value = (*stringsFlag)(nil) + +// regexpFlag implements flag.Value for parsing +// regular expression flag inputs. +type regexpFlag struct{ re *regexp.Regexp } + +func (f *regexpFlag) String() string { + if f == nil || f.re == nil { + return "" + } + return f.re.String() +} + +func (f *regexpFlag) Set(expr string) error { + if expr == "" { + f.re = nil + return nil + } + + re, err := regexp.Compile(expr) + if err != nil { + return err + } + + f.re = re + return nil +} + +// stringsFlag implements flag.Value for parsing a comma-separated string +// list. Surrounding whitespace is stripped from the input and from each +// element. If filter is non-nil it is called for each element in the input. 
+type stringsFlag struct { + elements []string + filter func(string) error +} + +func (f *stringsFlag) String() string { + if f == nil { + return "" + } + return strings.Join(f.elements, ",") +} + +func (f *stringsFlag) filterFunc() func(string) error { + if f.filter != nil { + return f.filter + } + return func(_ string) error { return nil } +} + +func (f *stringsFlag) Set(input string) error { + input = strings.TrimSpace(input) + if input == "" { + f.elements = nil + return nil + } + + for _, el := range strings.Split(input, ",") { + el = strings.TrimSpace(el) + if err := f.filterFunc()(el); err != nil { + return err + } + f.elements = append(f.elements, el) + } + return nil +} diff --git a/vendor/github.com/nishanths/exhaustive/map.go b/vendor/github.com/nishanths/exhaustive/map.go index 1b86fe5bee..23b4bef1ce 100644 --- a/vendor/github.com/nishanths/exhaustive/map.go +++ b/vendor/github.com/nishanths/exhaustive/map.go @@ -5,35 +5,30 @@ import ( "go/ast" "go/types" "regexp" - "strings" "golang.org/x/tools/go/analysis" ) // mapConfig is configuration for mapChecker. type mapConfig struct { - explicitExhaustiveMap bool - checkGeneratedFiles bool - ignoreEnumMembers *regexp.Regexp // can be nil + explicit bool + checkGenerated bool + ignoreConstant *regexp.Regexp // can be nil + ignoreType *regexp.Regexp // can be nil } -// mapChecker returns a node visitor that checks exhaustiveness -// of enum keys in map literal for the supplied pass, and reports diagnostics if non-exhaustive. -// It expects to only see *ast.CompositeLit nodes. -func mapChecker(pass *analysis.Pass, cfg mapConfig, generated generatedCache, comments commentsCache) nodeVisitor { +// mapChecker returns a node visitor that checks for exhaustiveness of +// map literals for the supplied pass, and reports diagnostics. The +// node visitor expects only *ast.CompositeLit nodes. +func mapChecker(pass *analysis.Pass, cfg mapConfig, generated boolCache, comments commentCache) nodeVisitor { return func(n ast.Node, push bool, stack []ast.Node) (bool, string) { if !push { - // The proceed return value should not matter; it is ignored by - // inspector package for pop calls. - // Nevertheless, return true to be on the safe side for the future. return true, resultNotPush } file := stack[0].(*ast.File) - if !cfg.checkGeneratedFiles && generated.IsGenerated(file) { - // Don't check this file. - // Return false because the children nodes of node `n` don't have to be checked. + if !cfg.checkGenerated && generated.get(file) { return false, resultGeneratedFile } @@ -45,7 +40,6 @@ func mapChecker(pass *analysis.Pass, cfg mapConfig, generated generatedCache, co if !ok2 { return true, resultNotMapLiteral } - mapType, ok = namedType.Underlying().(*types.Map) if !ok { return true, resultNotMapLiteral @@ -53,23 +47,19 @@ func mapChecker(pass *analysis.Pass, cfg mapConfig, generated generatedCache, co } if len(lit.Elts) == 0 { - // because it may be used as an alternative for make(map[...]...) 
return false, resultEmptyMapLiteral } - keyType, ok := mapType.Key().(*types.Named) - if !ok { - return true, resultMapKeyIsNotNamedType - } - - fileComments := comments.GetComments(file, pass.Fset) + fileComments := comments.get(pass.Fset, file) var relatedComments []*ast.CommentGroup for i := range stack { - // iterate over stack in the reverse order (from bottom to top) + // iterate over stack in the reverse order (from inner + // node to outer node) node := stack[len(stack)-1-i] switch node.(type) { // need to check comments associated with following nodes, - // because logic of ast package doesn't allow to associate comment with *ast.CompositeLit + // because logic of ast package doesn't associate comment + // with *ast.CompositeLit as required. case *ast.CompositeLit, // stack[len(stack)-1] *ast.ReturnStmt, // return ... *ast.IndexExpr, // map[enum]...{...}[key] @@ -81,68 +71,64 @@ func mapChecker(pass *analysis.Pass, cfg mapConfig, generated generatedCache, co *ast.ValueSpec: // var declaration relatedComments = append(relatedComments, fileComments[node]...) continue + default: + // stop iteration on the first inappropriate node + break } - // stop iteration on the first inappropriate node - break } - if !cfg.explicitExhaustiveMap && containsIgnoreDirective(relatedComments) { - // Skip checking of this map literal due to ignore directive comment. - // Still return true because there may be nested map literals - // that are not to be ignored. - return true, resultMapIgnoreComment + if !cfg.explicit && hasCommentPrefix(relatedComments, ignoreComment) { + // Skip checking of this map literal due to ignore + // comment. Still return true because there may be nested + // map literals that are not to be ignored. + return true, resultIgnoreComment } - if cfg.explicitExhaustiveMap && !containsEnforceDirective(relatedComments) { - // Skip checking of this map literal due to missing enforce directive comment. - return true, resultMapNoEnforceComment + if cfg.explicit && !hasCommentPrefix(relatedComments, enforceComment) { + return true, resultNoEnforceComment } - keyPkg := keyType.Obj().Pkg() - if keyPkg == nil { - // The Go documentation says: nil for labels and objects in the Universe scope. - // This happens for the `error` type, for example. - return true, resultNilMapKeyTypePkg + es, ok := composingEnumTypes(pass, mapType.Key()) + if !ok || len(es) == 0 { + return true, resultEnumTypes } - enumTyp := enumType{keyType.Obj()} - members, ok := importFact(pass, enumTyp) - if !ok { - return true, resultMapKeyNotEnum - } + var checkl checklist + checkl.ignoreConstant(cfg.ignoreConstant) + checkl.ignoreType(cfg.ignoreType) - samePkg := keyPkg == pass.Pkg // do the map literal and the map key type (i.e. enum type) live in the same package? - checkUnexported := samePkg // we want to include unexported members in the exhaustiveness check only if we're in the same package - checklist := makeChecklist(members, keyPkg, checkUnexported, cfg.ignoreEnumMembers) - - for _, e := range lit.Elts { - expr, ok := e.(*ast.KeyValueExpr) - if !ok { - continue // is it possible for valid map literal? - } - analyzeCaseClauseExpr(expr.Key, pass.TypesInfo, checklist.found) + for _, e := range es { + checkl.add(e.typ, e.members, pass.Pkg == e.typ.Pkg()) } - if len(checklist.remaining()) == 0 { - // All enum members accounted for. - // Nothing to report. 
+ analyzeMapLiteral(lit, pass.TypesInfo, checkl.found) + if len(checkl.remaining()) == 0 { return true, resultEnumMembersAccounted } - - pass.Report(makeMapDiagnostic(lit, samePkg, enumTyp, members, checklist.remaining())) + pass.Report(makeMapDiagnostic(lit, dedupEnumTypes(toEnumTypes(es)), checkl.remaining())) return true, resultReportedDiagnostic } } -// Makes a "missing map keys" diagnostic. -// samePkg should be true if the enum type and the map literal are defined in the same package. -func makeMapDiagnostic(lit *ast.CompositeLit, samePkg bool, enumTyp enumType, allMembers enumMembers, missingMembers map[string]struct{}) analysis.Diagnostic { - message := fmt.Sprintf("missing map keys of type %s: %s", - diagnosticEnumTypeName(enumTyp.TypeName, samePkg), - strings.Join(diagnosticMissingMembers(missingMembers, allMembers), ", ")) +func analyzeMapLiteral(lit *ast.CompositeLit, info *types.Info, each func(constantValue)) { + for _, e := range lit.Elts { + expr, ok := e.(*ast.KeyValueExpr) + if !ok { + continue + } + if val, ok := exprConstVal(expr.Key, info); ok { + each(val) + } + } +} +func makeMapDiagnostic(lit *ast.CompositeLit, enumTypes []enumType, missing map[member]struct{}) analysis.Diagnostic { return analysis.Diagnostic{ - Pos: lit.Pos(), - End: lit.End(), - Message: message, + Pos: lit.Pos(), + End: lit.End(), + Message: fmt.Sprintf( + "missing keys in map of key type %s: %s", + diagnosticEnumTypes(enumTypes), + diagnosticGroups(groupify(missing, enumTypes)), + ), } } diff --git a/vendor/github.com/nishanths/exhaustive/switch.go b/vendor/github.com/nishanths/exhaustive/switch.go index 115c317e8e..d235fbec47 100644 --- a/vendor/github.com/nishanths/exhaustive/switch.go +++ b/vendor/github.com/nishanths/exhaustive/switch.go @@ -8,45 +8,97 @@ import ( "strings" "golang.org/x/tools/go/analysis" - "golang.org/x/tools/go/ast/astutil" ) -// nodeVisitor is like the visitor function used by Inspector.WithStack, +// nodeVisitor is like the visitor function used by inspector.WithStack, // except that it returns an additional value: a short description of // the result of this node visit. // -// The result is typically useful in debugging or in unit tests to check +// The result value is typically useful in debugging or in unit tests to check // that the nodeVisitor function took the expected code path. type nodeVisitor func(n ast.Node, push bool, stack []ast.Node) (proceed bool, result string) -// Result values returned by a node visitor constructed via switchChecker. +// toVisitor converts a nodeVisitor to a function suitable for use +// with inspector.WithStack. +func toVisitor(v nodeVisitor) func(ast.Node, bool, []ast.Node) bool { + return func(node ast.Node, push bool, stack []ast.Node) bool { + proceed, _ := v(node, push, stack) + return proceed + } +} + +// Result values returned by node visitors. 
const ( - resultNotPush = "not push" - resultGeneratedFile = "generated file" - resultNoSwitchTag = "no switch tag" - resultEmptyMapLiteral = "empty map literal" - resultNotMapLiteral = "not map literal" - resultMapKeyIsNotNamedType = "map key is not named type" - resultNilMapKeyTypePkg = "nil map key type package" - resultMapKeyNotEnum = "map key not known enum type" - resultMapIgnoreComment = "map literal has ignore comment" - resultMapNoEnforceComment = "map literal has no enforce comment" - resultTagNotValue = "switch tag not value type" - resultTagNotNamed = "switch tag not named type" - resultTagNoPkg = "switch tag does not belong to regular package" - resultTagNotEnum = "switch tag not known enum type" - resultSwitchIgnoreComment = "switch statement has ignore comment" - resultSwitchNoEnforceComment = "switch statement has no enforce comment" - resultEnumMembersAccounted = "requisite enum members accounted for" - resultDefaultCaseSuffices = "default case presence satisfies exhaustiveness" - resultReportedDiagnostic = "reported diagnostic" + resultEmptyMapLiteral = "empty map literal" + resultNotMapLiteral = "not map literal" + resultKeyNilPkg = "nil map key package" + resultKeyNotEnum = "not all map key type terms are known enum types" + + resultNoSwitchTag = "no switch tag" + resultTagNotValue = "switch tag not value type" + resultTagNilPkg = "nil switch tag package" + resultTagNotEnum = "not all switch tag terms are known enum types" + + resultNotPush = "not push" + resultGeneratedFile = "generated file" + resultIgnoreComment = "has ignore comment" + resultNoEnforceComment = "has no enforce comment" + resultEnumMembersAccounted = "required enum members accounted for" + resultDefaultCaseSuffices = "default case satisfies exhaustiveness" + resultMissingDefaultCase = "missing required default case" + resultReportedDiagnostic = "reported diagnostic" + resultEnumTypes = "invalid or empty composing enum types" ) -// switchChecker returns a node visitor that checks exhaustiveness -// of enum switch statements for the supplied pass, and reports diagnostics for -// switch statements that are non-exhaustive. -// It expects to only see *ast.SwitchStmt nodes. -func switchChecker(pass *analysis.Pass, cfg switchConfig, generated generatedCache, comments commentsCache) nodeVisitor { +// switchConfig is configuration for switchChecker. +type switchConfig struct { + explicit bool + defaultSignifiesExhaustive bool + defaultCaseRequired bool + checkGenerated bool + ignoreConstant *regexp.Regexp // can be nil + ignoreType *regexp.Regexp // can be nil +} + +// There are few possibilities, and often none, so we use a possibly-nil slice +func userDirectives(comments []*ast.CommentGroup) []string { + var directives []string + for _, c := range comments { + for _, cc := range c.List { + // The order matters here: we always want to check the longest first. + for _, d := range []string{ + enforceDefaultCaseRequiredComment, + ignoreDefaultCaseRequiredComment, + enforceComment, + ignoreComment, + } { + if strings.HasPrefix(cc.Text, d) { + directives = append(directives, d) + // The break here is important: once we associate a comment + // with a particular (longest-possible) directive, we don't want + // to map to another! 
+ break + } + } + } + } + return directives +} + +// Can be replaced with slices.Contains with go1.21 +func directivesIncludes(directives []string, d string) bool { + for _, ud := range directives { + if ud == d { + return true + } + } + return false +} + +// switchChecker returns a node visitor that checks exhaustiveness of +// enum switch statements for the supplied pass, and reports +// diagnostics. The node visitor expects only *ast.SwitchStmt nodes. +func switchChecker(pass *analysis.Pass, cfg switchConfig, generated boolCache, comments commentCache) nodeVisitor { return func(n ast.Node, push bool, stack []ast.Node) (bool, string) { if !push { // The proceed return value should not matter; it is ignored by @@ -57,7 +109,7 @@ func switchChecker(pass *analysis.Pass, cfg switchConfig, generated generatedCac file := stack[0].(*ast.File) - if !cfg.checkGeneratedFiles && generated.IsGenerated(file) { + if !cfg.checkGenerated && generated.get(file) { // Don't check this file. // Return false because the children nodes of node `n` don't have to be checked. return false, resultGeneratedFile @@ -65,20 +117,31 @@ func switchChecker(pass *analysis.Pass, cfg switchConfig, generated generatedCac sw := n.(*ast.SwitchStmt) - switchComments := comments.GetComments(file, pass.Fset)[sw] - if !cfg.explicitExhaustiveSwitch && containsIgnoreDirective(switchComments) { - // Skip checking of this switch statement due to ignore directive comment. - // Still return true because there may be nested switch statements - // that are not to be ignored. - return true, resultSwitchIgnoreComment + switchComments := comments.get(pass.Fset, file)[sw] + uDirectives := userDirectives(switchComments) + if !cfg.explicit && directivesIncludes(uDirectives, ignoreComment) { + // Skip checking of this switch statement due to ignore + // comment. Still return true because there may be nested + // switch statements that are not to be ignored. + return true, resultIgnoreComment + } + if cfg.explicit && !directivesIncludes(uDirectives, enforceComment) { + // Skip checking of this switch statement due to missing + // enforce comment. + return true, resultNoEnforceComment } - if cfg.explicitExhaustiveSwitch && !containsEnforceDirective(switchComments) { - // Skip checking of this switch statement due to missing enforce directive comment. - return true, resultSwitchNoEnforceComment + requireDefaultCase := cfg.defaultCaseRequired + if directivesIncludes(uDirectives, ignoreDefaultCaseRequiredComment) { + requireDefaultCase = false + } + if directivesIncludes(uDirectives, enforceDefaultCaseRequiredComment) { + // We have "if" instead of "else if" here in case of conflicting ignore/enforce directives. + // In that case, because this is second, we will default to enforcing. + requireDefaultCase = true } if sw.Tag == nil { - return true, resultNoSwitchTag + return true, resultNoSwitchTag // never possible for valid Go program? } t := pass.TypesInfo.Types[sw.Tag] @@ -86,264 +149,88 @@ func switchChecker(pass *analysis.Pass, cfg switchConfig, generated generatedCac return true, resultTagNotValue } - tagType, ok := t.Type.(*types.Named) - if !ok { - return true, resultTagNotNamed + es, ok := composingEnumTypes(pass, t.Type) + if !ok || len(es) == 0 { + return true, resultEnumTypes } - tagPkg := tagType.Obj().Pkg() - if tagPkg == nil { - // The Go documentation says: nil for labels and objects in the Universe scope. - // This happens for the `error` type, for example. 
- return true, resultTagNoPkg - } + var checkl checklist + checkl.ignoreConstant(cfg.ignoreConstant) + checkl.ignoreType(cfg.ignoreType) - enumTyp := enumType{tagType.Obj()} - members, ok := importFact(pass, enumTyp) - if !ok { - // switch tag's type is not a known enum type. - return true, resultTagNotEnum + for _, e := range es { + checkl.add(e.typ, e.members, pass.Pkg == e.typ.Pkg()) } - samePkg := tagPkg == pass.Pkg // do the switch statement and the switch tag type (i.e. enum type) live in the same package? - checkUnexported := samePkg // we want to include unexported members in the exhaustiveness check only if we're in the same package - checklist := makeChecklist(members, tagPkg, checkUnexported, cfg.ignoreEnumMembers) - - hasDefaultCase := analyzeSwitchClauses(sw, pass.TypesInfo, checklist.found) - - if len(checklist.remaining()) == 0 { + defaultCaseExists := analyzeSwitchClauses(sw, pass.TypesInfo, checkl.found) + if !defaultCaseExists && requireDefaultCase { + // Even if the switch explicitly enumerates all the + // enum values, the user has still required all switches + // to have a default case. We check this first to avoid + // early-outs + pass.Report(makeMissingDefaultDiagnostic(sw, dedupEnumTypes(toEnumTypes(es)))) + return true, resultMissingDefaultCase + } + if len(checkl.remaining()) == 0 { // All enum members accounted for. // Nothing to report. return true, resultEnumMembersAccounted } - if hasDefaultCase && cfg.defaultSignifiesExhaustive { - // Though enum members are not accounted for, - // the existence of the default case signifies exhaustiveness. - // So don't report. + if defaultCaseExists && cfg.defaultSignifiesExhaustive { + // Though enum members are not accounted for, the + // existence of the default case signifies + // exhaustiveness. So don't report. return true, resultDefaultCaseSuffices } - pass.Report(makeSwitchDiagnostic(sw, samePkg, enumTyp, members, checklist.remaining())) + pass.Report(makeSwitchDiagnostic(sw, dedupEnumTypes(toEnumTypes(es)), checkl.remaining())) return true, resultReportedDiagnostic } } -// switchConfig is configuration for switchChecker. -type switchConfig struct { - explicitExhaustiveSwitch bool - defaultSignifiesExhaustive bool - checkGeneratedFiles bool - ignoreEnumMembers *regexp.Regexp // can be nil -} - func isDefaultCase(c *ast.CaseClause) bool { return c.List == nil // see doc comment on List field } -func denotesPackage(ident *ast.Ident, info *types.Info) (*types.Package, bool) { - obj := info.ObjectOf(ident) - if obj == nil { - return nil, false - } - n, ok := obj.(*types.PkgName) - if !ok { - return nil, false - } - return n.Imported(), true -} - -// analyzeSwitchClauses analyzes the clauses in the supplied switch statement. -// The info param should typically be pass.TypesInfo. The found function is -// called for each enum member name found in the switch statement. -// The hasDefaultCase return value indicates whether the switch statement has a -// default clause. -func analyzeSwitchClauses(sw *ast.SwitchStmt, info *types.Info, found func(val constantValue)) (hasDefaultCase bool) { +// analyzeSwitchClauses analyzes the clauses in the supplied switch +// statement. The info param typically is pass.TypesInfo. The each +// function is called for each enum member name found in the switch +// statement. The hasDefaultCase return value indicates whether the +// switch statement has a default clause. 
+func analyzeSwitchClauses(sw *ast.SwitchStmt, info *types.Info, each func(val constantValue)) (hasDefaultCase bool) { for _, stmt := range sw.Body.List { caseCl := stmt.(*ast.CaseClause) if isDefaultCase(caseCl) { hasDefaultCase = true - continue // nothing more to do if it's the default case + continue } for _, expr := range caseCl.List { - analyzeCaseClauseExpr(expr, info, found) + if val, ok := exprConstVal(expr, info); ok { + each(val) + } } } return hasDefaultCase } -func analyzeCaseClauseExpr(e ast.Expr, info *types.Info, found func(val constantValue)) { - handleIdent := func(ident *ast.Ident) { - obj := info.Uses[ident] - if obj == nil { - return - } - if _, ok := obj.(*types.Const); !ok { - return - } - - // There are two scenarios. - // See related test cases in typealias/quux/quux.go. - // - // ### Scenario 1 - // - // Tag package and constant package are the same. - // - // For example: - // var mode fs.FileMode - // switch mode { - // case fs.ModeDir: - // } - // - // This is simple: we just use fs.ModeDir's value. - // - // ### Scenario 2 - // - // Tag package and constant package are different. - // - // For example: - // var mode fs.FileMode - // switch mode { - // case os.ModeDir: - // } - // - // Or equivalently: - // var mode os.FileMode // in effect, fs.FileMode because of type alias in package os - // switch mode { - // case os.ModeDir: - // } - // - // In this scenario, too, we accept the case clause expr constant - // value, as is. If the Go type checker is okay with the - // name being listed in the case clause, we don't care much further. - // - found(determineConstVal(ident, info)) - } - - e = astutil.Unparen(e) - switch e := e.(type) { - case *ast.Ident: - handleIdent(e) - - case *ast.SelectorExpr: - x := astutil.Unparen(e.X) - // Ensure we only see the form `pkg.Const`, and not e.g. `structVal.f` - // or `structVal.inner.f`. - // Check that X, which is everything except the rightmost *ast.Ident (or - // Sel), is also an *ast.Ident. - xIdent, ok := x.(*ast.Ident) - if !ok { - return - } - // Doesn't matter which package, just that it denotes a package. - if _, ok := denotesPackage(xIdent, info); !ok { - return - } - handleIdent(e.Sel) - } -} - -// diagnosticMissingMembers constructs the list of missing enum members, -// suitable for use in a reported diagnostic message. -// Order is the same as in enumMembers.Names. -func diagnosticMissingMembers(missingMembers map[string]struct{}, em enumMembers) []string { - missingNamesGroupedByValue := make([][]string, len(em.Names)) // empty groups will be filtered out later - firstIndex := make(map[constantValue]int, len(em.ValueToNames)) - for i, name := range em.Names { - value := em.NameToValue[name] - j, ok := firstIndex[value] - if !ok { - firstIndex[value] = i - j = i - } - - if _, missing := missingMembers[name]; missing { - missingNamesGroupedByValue[j] = append(missingNamesGroupedByValue[j], name) - } - } - - out := make([]string, 0, len(missingMembers)) - for _, names := range missingNamesGroupedByValue { - if len(names) == 0 { - continue - } - out = append(out, strings.Join(names, "|")) - } - return out -} - -// diagnosticEnumTypeName returns a string representation of an enum type for -// use in reported diagnostics. -func diagnosticEnumTypeName(enumType *types.TypeName, samePkg bool) string { - if samePkg { - return enumType.Name() - } - return enumType.Pkg().Name() + "." + enumType.Name() -} - -// Makes a "missing cases in switch" diagnostic. 
-// samePkg should be true if the enum type and the switch statement are defined -// in the same package. -func makeSwitchDiagnostic(sw *ast.SwitchStmt, samePkg bool, enumTyp enumType, allMembers enumMembers, missingMembers map[string]struct{}) analysis.Diagnostic { - message := fmt.Sprintf("missing cases in switch of type %s: %s", - diagnosticEnumTypeName(enumTyp.TypeName, samePkg), - strings.Join(diagnosticMissingMembers(missingMembers, allMembers), ", ")) - +func makeSwitchDiagnostic(sw *ast.SwitchStmt, enumTypes []enumType, missing map[member]struct{}) analysis.Diagnostic { return analysis.Diagnostic{ - Pos: sw.Pos(), - End: sw.End(), - Message: message, + Pos: sw.Pos(), + End: sw.End(), + Message: fmt.Sprintf( + "missing cases in switch of type %s: %s", + diagnosticEnumTypes(enumTypes), + diagnosticGroups(groupify(missing, enumTypes)), + ), } } -// A checklist holds a set of enum member names that have to be -// accounted for to satisfy exhaustiveness in an enum switch statement. -// -// The found method checks off member names from the set, based on -// constant value, when a constant value is encoutered in the switch -// statement's cases. -// -// The remaining method returns the member names not accounted for. -type checklist struct { - em enumMembers - checkl map[string]struct{} -} - -func makeChecklist(em enumMembers, enumPkg *types.Package, includeUnexported bool, ignore *regexp.Regexp) *checklist { - checkl := make(map[string]struct{}) - - add := func(memberName string) { - if memberName == "_" { - // Blank identifier is often used to skip entries in iota lists. - // Also, it can't be referenced anywhere (including in a switch - // statement's cases), so it doesn't make sense to include it - // as required member to satisfy exhaustiveness. - return - } - if !ast.IsExported(memberName) && !includeUnexported { - return - } - if ignore != nil && ignore.MatchString(enumPkg.Path()+"."+memberName) { - return - } - checkl[memberName] = struct{}{} - } - - for _, name := range em.Names { - add(name) - } - - return &checklist{ - em: em, - checkl: checkl, - } -} - -func (c *checklist) found(val constantValue) { - // Delete all of the same-valued names. 
- for _, name := range c.em.ValueToNames[val] { - delete(c.checkl, name) +func makeMissingDefaultDiagnostic(sw *ast.SwitchStmt, enumTypes []enumType) analysis.Diagnostic { + return analysis.Diagnostic{ + Pos: sw.Pos(), + End: sw.End(), + Message: fmt.Sprintf( + "missing default case in switch of type %s", + diagnosticEnumTypes(enumTypes), + ), } } - -func (c *checklist) remaining() map[string]struct{} { - return c.checkl -} diff --git a/vendor/github.com/nunnatsa/ginkgolinter/.gitignore b/vendor/github.com/nunnatsa/ginkgolinter/.gitignore new file mode 100644 index 0000000000..7d7f8b10ce --- /dev/null +++ b/vendor/github.com/nunnatsa/ginkgolinter/.gitignore @@ -0,0 +1,2 @@ +ginkgolinter +bin/ diff --git a/vendor/github.com/nunnatsa/ginkgolinter/.golangci.yml b/vendor/github.com/nunnatsa/ginkgolinter/.golangci.yml new file mode 100644 index 0000000000..71ff0d034c --- /dev/null +++ b/vendor/github.com/nunnatsa/ginkgolinter/.golangci.yml @@ -0,0 +1,3 @@ +linters: + enable: + - revive diff --git a/vendor/github.com/nunnatsa/ginkgolinter/LICENSE b/vendor/github.com/nunnatsa/ginkgolinter/LICENSE new file mode 100644 index 0000000000..11096c5c8a --- /dev/null +++ b/vendor/github.com/nunnatsa/ginkgolinter/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2022 Nahshon Unna Tsameret + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/nunnatsa/ginkgolinter/Makefile b/vendor/github.com/nunnatsa/ginkgolinter/Makefile new file mode 100644 index 0000000000..586633006a --- /dev/null +++ b/vendor/github.com/nunnatsa/ginkgolinter/Makefile @@ -0,0 +1,27 @@ +VERSION ?= "unknown" +VERSION_FLAG := -X github.com/nunnatsa/ginkgolinter/version.version=$(VERSION) +COMMIT_HASH := $(shell git rev-parse HEAD) +HASH_FLAG := -X github.com/nunnatsa/ginkgolinter/version.gitHash=$(COMMIT_HASH) + +BUILD_ARGS := -ldflags "$(VERSION_FLAG) $(HASH_FLAG)" + +build: unit-test + go build $(BUILD_ARGS) -o ginkgolinter ./cmd/ginkgolinter + +unit-test: + go test ./... 
+ +build-for-windows: + GOOS=windows GOARCH=amd64 go build $(BUILD_ARGS) -o bin/ginkgolinter-amd64.exe ./cmd/ginkgolinter + +build-for-mac: + GOOS=darwin GOARCH=amd64 go build $(BUILD_ARGS) -o bin/ginkgolinter-amd64-darwin ./cmd/ginkgolinter + +build-for-linux: + GOOS=linux GOARCH=amd64 go build $(BUILD_ARGS) -o bin/ginkgolinter-amd64-linux ./cmd/ginkgolinter + GOOS=linux GOARCH=386 go build $(BUILD_ARGS) -o bin/ginkgolinter-386-linux ./cmd/ginkgolinter + +build-all: build build-for-linux build-for-mac build-for-windows + +test: build + ./tests/e2e.sh diff --git a/vendor/github.com/nunnatsa/ginkgolinter/README.md b/vendor/github.com/nunnatsa/ginkgolinter/README.md new file mode 100644 index 0000000000..3832f610df --- /dev/null +++ b/vendor/github.com/nunnatsa/ginkgolinter/README.md @@ -0,0 +1,433 @@ +[![Go Report Card](https://goreportcard.com/badge/github.com/nunnatsa/ginkgolinter)](https://goreportcard.com/report/github.com/nunnatsa/ginkgolinter) +[![Coverage Status](https://coveralls.io/repos/github/nunnatsa/ginkgolinter/badge.svg?branch=main)](https://coveralls.io/github/nunnatsa/ginkgolinter?branch=main) +![Build Status](https://github.com/nunnatsa/ginkgolinter/workflows/CI/badge.svg) +[![License](https://img.shields.io/github/license/nunnatsa/ginkgolinter)](/LICENSE) +[![Release](https://img.shields.io/github/release/nunnatsa/ginkgolinter.svg)](https://github.com/nunnatsa/ginkgolinter/releases/latest) +[![GitHub Releases Stats of ginkgolinter](https://img.shields.io/github/downloads/nunnatsa/ginkgolinter/total.svg?logo=github)](https://somsubhra.github.io/github-release-stats/?username=nunnatsa&repository=ginkgolinter) + +# ginkgo-linter +[ginkgo](https://onsi.github.io/ginkgo/) is a popular testing framework and [gomega](https://onsi.github.io/gomega/) is its assertion package. + +This is a golang linter to enforce some standards while using the ginkgo and gomega packages. + +## Install the CLI +Download the right executable from the latest release, according to your OS. + +Another option is to use go: +```shell +go install github.com/nunnatsa/ginkgolinter/cmd/ginkgolinter@latest +``` +Then add the new executable to your PATH. + +## usage +```shell +ginkgolinter [-fix] ./... +``` + +Use the `-fix` flag to apply the fix suggestions to the source code. + +### Use ginkgolinter with golangci-lint +The ginkgolinter is now part of the popular [golangci-lint](https://golangci-lint.run/), starting from version `v1.51.1`. + +It is not enabled by default, though. There are two ways to run ginkgolinter with golangci-lint: + +* From command line: + ```shell + golangci-lint run -E ginkgolinter ./... + ``` +* From configuration: + + Add ginkgolinter to the enabled linters list in .golangci.reference.yml file in your project. For more details, see + the [golangci-lint documentation](https://golangci-lint.run/usage/configuration/); e.g. + ```yaml + linters: + enable: + - ginkgolinter + ``` +## Linter Rules +The linter checks the ginkgo and gomega assertions in golang test code. 
Gomega may be used together with ginkgo tests, for example:
+```go
+It("should test something", func() { // It is a ginkgo test case function
+    Expect("abcd").To(HaveLen(4), "the string should have a length of 4") // Expect is the gomega assertion
+})
+```
+or within classic golang test code, like this:
+```go
+func TestWithGomega(t *testing.T) {
+    g := NewWithT(t)
+    g.Expect("abcd").To(HaveLen(4), "the string should have a length of 4")
+}
+```
+
+In some cases, gomega will be passed as a variable to a function by ginkgo, for example:
+```go
+Eventually(func(g Gomega) error {
+    g.Expect("abcd").To(HaveLen(4), "the string should have a length of 4")
+    return nil
+}).Should(Succeed())
+```
+
+The linter checks the `Expect`, `ExpectWithOffset` and the `Ω` "actual" functions, with the `Should`, `ShouldNot`, `To`, `ToNot` and `NotTo` assertion functions.
+
+It also supports the embedded `Not()` matcher.
+
+Some checks find actual bugs, and some are more for style.
+
+### Using a function call in async assertion [BUG]
+This rule finds an actual bug in tests, where a function call is asserted in an async assertion such as `Eventually`. For
+example:
+```go
+func slowInt(val int) int {
+    time.Sleep(time.Second)
+    return val
+}
+
+...
+
+It("should test that slowInt returns 42, eventually", func() {
+    Eventually(slowInt(42)).WithPolling(time.Millisecond * 100).WithTimeout(time.Second * 2).Should(Equal(42))
+})
+```
+The problem with the above code is that it **should** poll (call the function) until it returns 42, but what actually
+happens is that the function is called first, and `42` is passed to `Eventually` instead of the function itself. This is not
+what we intended to do here.
+
+The linter will suggest replacing this code with:
+```go
+It("should test that slowInt returns 42, eventually", func() {
+    Eventually(slowInt).WithArguments(42).WithPolling(time.Millisecond * 100).WithTimeout(time.Second * 2).Should(Equal(42))
+})
+```
+
+The linter suggests replacing the function call with the function name.
+
+If function arguments are used, the linter will add the `WithArguments()` method to pass them.
+
+Please notice that `WithArguments()` is only supported from gomega v1.22.0.
+
+When using an older version of gomega, change the code manually. For example:
+
+```go
+It("should test that slowInt returns 42, eventually", func() {
+    Eventually(func() int {
+        return slowInt(42)
+    }).WithPolling(time.Millisecond * 100).WithTimeout(time.Second * 2).Should(Equal(42))
+})
+```
+
+### Comparing a pointer with a value [BUG]
+The linter warns when comparing a pointer with a value.
+These comparisons are always wrong and will always fail.
+
+In case of a positive assertion (`To()` or `Should()`), the test will just fail.
+
+But the main concern is false positive tests, when using a negative assertion (`NotTo()`, `ToNot()`, `ShouldNot()`,
+`Should(Not())` etc.); e.g.
+```go
+num := 5
+...
+pNum := &num
+...
+Expect(pNum).ShouldNot(Equal(6))
+```
+This assertion will pass, but for the wrong reason: pNum is not equal to 6 not because num == 5, but because pNum is
+a pointer, while `6` is an `int`.
+
+In the case above, the linter will suggest `Expect(pNum).ShouldNot(HaveValue(Equal(6)))`.
+
+The same applies to additional matchers: `BeTrue()` and `BeFalse()`, `BeIdenticalTo()`, `BeEquivalentTo()`
+and `BeNumerically()`.
+
+### Missing Assertion Method [BUG]
+The linter warns when calling an "actual" method (e.g. `Expect()`, `Eventually()` etc.) without an assertion method (e.g.
+`Should()`, `NotTo()` etc.)
+
+For example:
+```go
+// no assertion for the result
+Eventually(doSomething).WithTimeout(time.Second * 5).WithPolling(time.Millisecond * 100)
+```
+
+The linter will not suggest a fix for this warning.
+
+This rule cannot be suppressed.
+
+### Focus Container / Focus individual spec found [BUG]
+This rule finds ginkgo focus containers, or the `Focus` individual spec, in the code.
+
+ginkgo supports the `FDescribe`, `FContext`, `FWhen`, `FIt`, `FDescribeTable` and `FEntry`
+containers to allow the developer to focus
+on a specific test or set of tests during test development or debugging.
+
+For example:
+```go
+var _ = Describe("checking something", func() {
+    FIt("this test is the only one that will run", func(){
+        ...
+    })
+})
+```
+Alternatively, the `Focus` individual spec may be used for the same purpose, e.g.
+```go
+var _ = Describe("checking something", Focus, func() {
+    It("this test is the only one that will run", func(){
+        ...
+    })
+})
+```
+
+These containers, or the `Focus` spec, must not be part of the final source code, and should only be used locally by the
+developer.
+
+***This rule is disabled by default***. Use the `--forbid-focus-container=true` command line flag to enable it.
+
+### Comparing values from different types [BUG]
+
+The `Equal` and the `BeIdenticalTo` matchers also check the type, not only the value.
+
+The following code will fail at runtime:
+```go
+x := 5 // x is int
+Expect(x).Should(Equal(uint(5))) // x and uint(5) are of different types
+```
+When using negative checks, it's even worse, because we get a false positive:
+```go
+x := 5
+Expect(x).ShouldNot(Equal(uint(5)))
+```
+
+The linter suggests two options to solve this warning: either compare values of the same type, e.g.
+by casting, or use the `BeEquivalentTo` matcher.
+
+The linter can't guess the best solution for each case, so it won't auto-fix this warning.
+
+To suppress this warning entirely, use the `--suppress-type-compare-assertion=true` command line parameter.
+
+To suppress a specific file or line, use the `// ginkgo-linter:ignore-type-compare-warning` comment (see [below](#suppress-warning-from-the-code)).
+
+### Wrong Usage of the `MatchError` gomega Matcher [BUG]
+The `MatchError` gomega matcher asserts an error value (and that it's not nil).
+There are four valid formats for using this matcher:
+* an error value; e.g. `Expect(err).To(MatchError(anotherErr))`
+* a string, to be equal to the output of the `Error()` method; e.g. `Expect(err).To(MatchError("Not Found"))`
+* a gomega matcher that asserts strings; e.g. `Expect(err).To(MatchError(ContainSubstring("Found")))`
+* [from v0.29.0] a function that receives a single error parameter and returns a single boolean value.
+  In this format, an additional single string parameter, with the function description, is also required; e.g.
+  `Expect(err).To(MatchError(isNotFound, "is the error is a not found error"))`
+
+These four formats are checked at runtime, but sometimes it's too late. ginkgolinter performs static analysis and so it
+will find these issues at build time.
+
+ginkgolinter checks the following:
+* That the first parameter is one of the four options above.
+* That there are no additional parameters passed to the matcher; e.g.
+  `MatchError(isNotFoundFunc, "a valid description", "not used string")`. In this case, the matcher won't fail at run
+  time, but the additional parameters are not used and are ignored.
+* If the first parameter is a function with the signature `func(error) bool`, ginkgolinter makes sure that the second
+  parameter exists and its type is string.
+
+### Wrong Length Assertion [STYLE]
+The linter finds assertions on the golang built-in `len` function, with all kinds of matchers, while there are already
+gomega matchers for these use cases; we want to assert the item, rather than its length.
+
+There are several wrong patterns:
+```go
+Expect(len(x)).To(Equal(0)) // should be: Expect(x).To(BeEmpty())
+Expect(len(x)).To(BeZero()) // should be: Expect(x).To(BeEmpty())
+Expect(len(x)).To(BeNumerically(">", 0)) // should be: Expect(x).ToNot(BeEmpty())
+Expect(len(x)).To(BeNumerically(">=", 1)) // should be: Expect(x).ToNot(BeEmpty())
+Expect(len(x)).To(BeNumerically("==", 0)) // should be: Expect(x).To(BeEmpty())
+Expect(len(x)).To(BeNumerically("!=", 0)) // should be: Expect(x).ToNot(BeEmpty())
+
+Expect(len(x)).To(Equal(1)) // should be: Expect(x).To(HaveLen(1))
+Expect(len(x)).To(BeNumerically("==", 2)) // should be: Expect(x).To(HaveLen(2))
+Expect(len(x)).To(BeNumerically("!=", 3)) // should be: Expect(x).ToNot(HaveLen(3))
+```
+
+It also supports the embedded `Not()` matcher; e.g.
+
+`Ω(len(x)).Should(Not(Equal(4)))` => `Ω(x).ShouldNot(HaveLen(4))`
+
+Or even (double negative):
+
+`Ω(len(x)).To(Not(BeNumerically(">", 0)))` => `Ω(x).To(BeEmpty())`
+
+The output of the linter, when it finds issues, looks like this:
+```
+./testdata/src/a/a.go:14:5: ginkgo-linter: wrong length assertion; consider using `Expect("abcd").Should(HaveLen(4))` instead
+./testdata/src/a/a.go:18:5: ginkgo-linter: wrong length assertion; consider using `Expect("").Should(BeEmpty())` instead
+./testdata/src/a/a.go:22:5: ginkgo-linter: wrong length assertion; consider using `Expect("").Should(BeEmpty())` instead
+```
+#### Use of the `HaveLen(0)` matcher [STYLE]
+The linter will also warn about the `HaveLen(0)` matcher, and will suggest replacing it with `BeEmpty()`.
+
+### Wrong `nil` Assertion [STYLE]
+The linter finds assertions on comparisons to nil, with all kinds of matchers, instead of using the existing `BeNil()`
+matcher; we want to assert the item, rather than a comparison result.
+
+There are several wrong patterns:
+
+```go
+Expect(x == nil).To(Equal(true)) // should be: Expect(x).To(BeNil())
+Expect(nil == x).To(Equal(true)) // should be: Expect(x).To(BeNil())
+Expect(x != nil).To(Equal(true)) // should be: Expect(x).ToNot(BeNil())
+Expect(nil != x).To(Equal(true)) // should be: Expect(x).ToNot(BeNil())
+
+Expect(x == nil).To(BeTrue()) // should be: Expect(x).To(BeNil())
+Expect(x == nil).To(BeFalse()) // should be: Expect(x).ToNot(BeNil())
+```
+It also supports the embedded `Not()` matcher; e.g.
+
+`Ω(x == nil).Should(Not(BeTrue()))` => `Ω(x).ShouldNot(BeNil())`
+
+Or even (double negative):
+
+`Ω(x != nil).Should(Not(BeTrue()))` => `Ω(x).Should(BeNil())`
+
+### Wrong boolean Assertion [STYLE]
+The linter finds assertions using the `Equal` matcher with the values `true` or `false`, instead
+of using the existing `BeTrue()` or `BeFalse()` matchers.
+
+There are several wrong patterns:
+
+```go
+Expect(x).To(Equal(true)) // should be: Expect(x).To(BeTrue())
+Expect(x).To(Equal(false)) // should be: Expect(x).To(BeFalse())
+```
+It also supports the embedded `Not()` matcher; e.g.
+
+`Ω(x).Should(Not(Equal(true)))` => `Ω(x).ShouldNot(BeTrue())`
+
+### Wrong Error Assertion [STYLE]
+The linter finds assertions where an error is compared with nil, checked to be equal to nil, or asserted to be nil. The linter
+suggests using `Succeed` for functions or `HaveOccurred` for error values.
+
+There are several wrong patterns:
+
+```go
+Expect(err).To(BeNil()) // should be: Expect(err).ToNot(HaveOccurred())
+Expect(err == nil).To(Equal(true)) // should be: Expect(err).ToNot(HaveOccurred())
+Expect(err == nil).To(BeFalse()) // should be: Expect(err).To(HaveOccurred())
+Expect(err != nil).To(BeTrue()) // should be: Expect(err).To(HaveOccurred())
+Expect(funcReturnsError()).To(BeNil()) // should be: Expect(funcReturnsError()).To(Succeed())
+
+// and so on
+```
+It also supports the embedded `Not()` matcher; e.g.
+
+`Ω(err == nil).Should(Not(BeTrue()))` => `Ω(err).Should(HaveOccurred())`
+
+### Wrong Comparison Assertion [STYLE]
+The linter finds assertions of boolean comparisons, which are already supported by existing gomega matchers.
+
+The linter assumes that when something is compared to a literal or a constant, these values should be used for the assertion,
+and it will do its best to suggest the right assertion expression accordingly.
+
+There are several wrong patterns:
+```go
+var x = 10
+var s = "abcd"
+
+...
+
+Expect(x == 10).Should(BeTrue()) // should be Expect(x).Should(Equal(10))
+Expect(10 == x).Should(BeTrue()) // should be Expect(x).Should(Equal(10))
+Expect(x != 5).Should(Equal(true)) // should be Expect(x).ShouldNot(Equal(5))
+Expect(x != 0).Should(Equal(true)) // should be Expect(x).ShouldNot(BeZero())
+
+Expect(s != "abcd").Should(BeFalse()) // should be Expect(s).Should(Equal("abcd"))
+Expect("abcd" != s).Should(BeFalse()) // should be Expect(s).Should(Equal("abcd"))
+```
+Or non-equal comparisons:
+```go
+Expect(x > 10).To(BeTrue()) // ==> Expect(x).To(BeNumerically(">", 10))
+Expect(x >= 15).To(BeTrue()) // ==> Expect(x).To(BeNumerically(">=", 15))
+Expect(3 > y).To(BeTrue()) // ==> Expect(y).To(BeNumerically("<", 3))
+// and so on ...
+```
+
+This check includes limited support for constant values. For example:
+```go
+const c1 = 5
+
+...
+
+Expect(x1 == c1).Should(BeTrue()) // ==> Expect(x1).Should(Equal(c1))
+Expect(c1 == x1).Should(BeTrue()) // ==> Expect(x1).Should(Equal(c1))
+```
+
+## Suppress the linter
+### Suppress warning from command line
+* Use the `--suppress-len-assertion=true` flag to suppress the wrong length assertion warning
+* Use the `--suppress-nil-assertion=true` flag to suppress the wrong nil assertion warning
+* Use the `--suppress-err-assertion=true` flag to suppress the wrong error assertion warning
+* Use the `--suppress-compare-assertion=true` flag to suppress the wrong comparison assertion warning
+* Use the `--suppress-async-assertion=true` flag to suppress the function call in async assertion warning
+* Use the `--forbid-focus-container=true` flag to enable the focus container check (disabled by default)
+* Use the `--suppress-type-compare-assertion=true` flag to suppress the type compare assertion warning
+* Use the `--allow-havelen-0=true` flag to avoid warnings about `HaveLen(0)`; note: this parameter is only supported from the
+  command line, and not from a comment.
+
+### Suppress warning from the code
+To suppress the wrong length assertion warning, add a comment with (only)
+
+`ginkgo-linter:ignore-len-assert-warning`.
+
+To suppress the wrong nil assertion warning, add a comment with (only)
+
+`ginkgo-linter:ignore-nil-assert-warning`.
+
+To suppress the wrong error assertion warning, add a comment with (only)
+
+`ginkgo-linter:ignore-err-assert-warning`.
+
+To suppress the wrong comparison assertion warning, add a comment with (only)
+
+`ginkgo-linter:ignore-compare-assert-warning`.
+
+To suppress the wrong async assertion warning, add a comment with (only)
+
+`ginkgo-linter:ignore-async-assert-warning`.
+
+To suppress the focus container warning, add a comment with (only)
+
+`ginkgo-linter:ignore-focus-container-warning`.
+
+To suppress the different-type comparison warning, add a comment with (only)
+
+`ginkgo-linter:ignore-type-compare-warning`.
+
+Notice that this comment will not work for an anonymous variable container like
+```go
+// ginkgo-linter:ignore-focus-container-warning (not working!!)
+var _ = FDescribe(...)
+```
+In this case, use the file comment (see below).
+
+There are two ways to use these comments:
+1. If the comment is at the top of the file, it suppresses the warning for the whole file; e.g.:
+   ```go
+   package mypackage
+
+   // ginkgo-linter:ignore-len-assert-warning
+
+   import (
+       . "github.com/onsi/ginkgo/v2"
+       . "github.com/onsi/gomega"
+   )
+
+   var _ = Describe("my test", func() {
+       It("should do something", func() {
+           Expect(len("abc")).Should(Equal(3)) // nothing in this file will trigger the warning
+       })
+   })
+   ```
+
+2. If the comment is placed before a specific assertion expression, the warning is suppressed for this expression only; for example:
+   ```go
+   It("should test something", func() {
+       // ginkgo-linter:ignore-nil-assert-warning
+       Expect(x == nil).Should(BeTrue()) // this line will not trigger the warning
+       Expect(x == nil).Should(BeTrue()) // this line will trigger the warning
+   })
+   ```
diff --git a/vendor/github.com/nunnatsa/ginkgolinter/ginkgo_linter.go b/vendor/github.com/nunnatsa/ginkgolinter/ginkgo_linter.go
new file mode 100644
index 0000000000..a0aff1612d
--- /dev/null
+++ b/vendor/github.com/nunnatsa/ginkgolinter/ginkgo_linter.go
@@ -0,0 +1,1499 @@
+package ginkgolinter
+
+import (
+	"bytes"
+	"flag"
+	"fmt"
+	"go/ast"
+	"go/constant"
+	"go/printer"
+	"go/token"
+	gotypes "go/types"
+	"reflect"
+
+	"github.com/go-toolsmith/astcopy"
+	"golang.org/x/tools/go/analysis"
+
+	"github.com/nunnatsa/ginkgolinter/ginkgohandler"
+	"github.com/nunnatsa/ginkgolinter/gomegahandler"
+	"github.com/nunnatsa/ginkgolinter/interfaces"
+	"github.com/nunnatsa/ginkgolinter/reverseassertion"
+	"github.com/nunnatsa/ginkgolinter/types"
+	"github.com/nunnatsa/ginkgolinter/version"
+)
+
+// The ginkgolinter enforces standards of using ginkgo and gomega.
+//
+// For more details, look at the README.md file
+
+const (
+	linterName = "ginkgo-linter"
+	wrongLengthWarningTemplate = linterName + ": wrong length assertion; consider using `%s` instead"
+	wrongNilWarningTemplate = linterName + ": wrong nil assertion; consider using `%s` instead"
+	wrongBoolWarningTemplate = linterName + ": wrong boolean assertion; consider using `%s` instead"
+	wrongErrWarningTemplate = linterName + ": wrong error assertion; consider using `%s` instead"
+	wrongCompareWarningTemplate = linterName + ": wrong comparison assertion; consider using `%s` instead"
+	doubleNegativeWarningTemplate = linterName + ": avoid double negative assertion; consider using `%s` instead"
+	valueInEventually = linterName + ": use a function call in %s. This actually checks nothing, because %s receives the function returned value, instead of function itself, and this value is never changed"
+	comparePointerToValue = linterName + ": comparing a pointer to a value will always fail. consider using `%s` instead"
+	missingAssertionMessage = linterName + `: %q: missing assertion method.
Expected "Should()", "To()", "ShouldNot()", "ToNot()" or "NotTo()"` + missingAsyncAssertionMessage = linterName + `: %q: missing assertion method. Expected "Should()" or "ShouldNot()"` + focusContainerFound = linterName + ": Focus container found. This is used only for local debug and should not be part of the actual source code, consider to replace with %q" + focusSpecFound = linterName + ": Focus spec found. This is used only for local debug and should not be part of the actual source code, consider to remove it" + compareDifferentTypes = linterName + ": use %[1]s with different types: Comparing %[2]s with %[3]s; either change the expected value type if possible, or use the BeEquivalentTo() matcher, instead of %[1]s()" + matchErrorArgWrongType = linterName + ": the MatchError matcher used to assert a non error type (%s)" + matchErrorWrongTypeAssertion = linterName + ": MatchError first parameter (%s) must be error, string, GomegaMatcher or func(error)bool are allowed" + matchErrorMissingDescription = linterName + ": missing function description as second parameter of MatchError" + matchErrorRedundantArg = linterName + ": redundant MatchError arguments; consider removing them; consider using `%s` instead" + matchErrorNoFuncDescription = linterName + ": The second parameter of MatchError must be the function description (string)" +) + +const ( // gomega matchers + beEmpty = "BeEmpty" + beEquivalentTo = "BeEquivalentTo" + beFalse = "BeFalse" + beIdenticalTo = "BeIdenticalTo" + beNil = "BeNil" + beNumerically = "BeNumerically" + beTrue = "BeTrue" + beZero = "BeZero" + equal = "Equal" + haveLen = "HaveLen" + haveOccurred = "HaveOccurred" + haveValue = "HaveValue" + not = "Not" + omega = "Ω" + succeed = "Succeed" + and = "And" + or = "Or" + withTransform = "WithTransform" + matchError = "MatchError" +) + +const ( // gomega actuals + expect = "Expect" + expectWithOffset = "ExpectWithOffset" + eventually = "Eventually" + eventuallyWithOffset = "EventuallyWithOffset" + consistently = "Consistently" + consistentlyWithOffset = "ConsistentlyWithOffset" +) + +// Analyzer is the interface to go_vet +var Analyzer = NewAnalyzer() + +type ginkgoLinter struct { + config *types.Config +} + +// NewAnalyzer returns an Analyzer - the package interface with nogo +func NewAnalyzer() *analysis.Analyzer { + linter := ginkgoLinter{ + config: &types.Config{ + SuppressLen: false, + SuppressNil: false, + SuppressErr: false, + SuppressCompare: false, + ForbidFocus: false, + AllowHaveLen0: false, + }, + } + + a := &analysis.Analyzer{ + Name: "ginkgolinter", + Doc: fmt.Sprintf(doc, version.Version()), + Run: linter.run, + } + + var ignored bool + a.Flags.Init("ginkgolinter", flag.ExitOnError) + a.Flags.Var(&linter.config.SuppressLen, "suppress-len-assertion", "Suppress warning for wrong length assertions") + a.Flags.Var(&linter.config.SuppressNil, "suppress-nil-assertion", "Suppress warning for wrong nil assertions") + a.Flags.Var(&linter.config.SuppressErr, "suppress-err-assertion", "Suppress warning for wrong error assertions") + a.Flags.Var(&linter.config.SuppressCompare, "suppress-compare-assertion", "Suppress warning for wrong comparison assertions") + a.Flags.Var(&linter.config.SuppressAsync, "suppress-async-assertion", "Suppress warning for function call in async assertion, like Eventually") + a.Flags.Var(&linter.config.SuppressTypeCompare, "suppress-type-compare-assertion", "Suppress warning for comparing values from different types, like int32 and uint32") + a.Flags.Var(&linter.config.AllowHaveLen0, 
"allow-havelen-0", "Do not warn for HaveLen(0); default = false") + + a.Flags.BoolVar(&ignored, "suppress-focus-container", true, "Suppress warning for ginkgo focus containers like FDescribe, FContext, FWhen or FIt. Deprecated and ignored: use --forbid-focus-container instead") + a.Flags.Var(&linter.config.ForbidFocus, "forbid-focus-container", "trigger a warning for ginkgo focus containers like FDescribe, FContext, FWhen or FIt; default = false.") + + return a +} + +const doc = `enforces standards of using ginkgo and gomega + +or + ginkgolinter version + +version: %s + +currently, the linter searches for following: +* trigger a warning when using Eventually or Constantly with a function call. This is in order to prevent the case when + using a function call instead of a function. Function call returns a value only once, and so the original value + is tested again and again and is never changed. [Bug] + +* trigger a warning when comparing a pointer to a value. [Bug] + +* trigger a warning for missing assertion method: [Bug] + Eventually(checkSomething) + +* trigger a warning when a ginkgo focus container (FDescribe, FContext, FWhen or FIt) is found. [Bug] + +* validate the MatchError gomega matcher [Bug] + +* trigger a warning when using the Equal or the BeIdentical matcher with two different types, as these matchers will + fail in runtime. + +* wrong length assertions. We want to assert the item rather than its length. [Style] +For example: + Expect(len(x)).Should(Equal(1)) +This should be replaced with: + Expect(x)).Should(HavelLen(1)) + +* wrong nil assertions. We want to assert the item rather than a comparison result. [Style] +For example: + Expect(x == nil).Should(BeTrue()) +This should be replaced with: + Expect(x).Should(BeNil()) + +* wrong error assertions. For example: [Style] + Expect(err == nil).Should(BeTrue()) +This should be replaced with: + Expect(err).ShouldNot(HaveOccurred()) + +* wrong boolean comparison, for example: [Style] + Expect(x == 8).Should(BeTrue()) +This should be replaced with: + Expect(x).Should(BeEqual(8)) + +* replaces Equal(true/false) with BeTrue()/BeFalse() [Style] + +* replaces HaveLen(0) with BeEmpty() [Style] +` + +// main assertion function +func (l *ginkgoLinter) run(pass *analysis.Pass) (interface{}, error) { + for _, file := range pass.Files { + fileConfig := l.config.Clone() + + cm := ast.NewCommentMap(pass.Fset, file, file.Comments) + + fileConfig.UpdateFromFile(cm) + + gomegaHndlr := gomegahandler.GetGomegaHandler(file) + ginkgoHndlr := ginkgohandler.GetGinkgoHandler(file) + + if gomegaHndlr == nil && ginkgoHndlr == nil { // no gomega or ginkgo imports => no use in gomega in this file; nothing to do here + continue + } + + //gomegaMatcher = getMatcherInterface(pass, file) + + ast.Inspect(file, func(n ast.Node) bool { + if ginkgoHndlr != nil && fileConfig.ForbidFocus { + spec, ok := n.(*ast.ValueSpec) + if ok { + for _, val := range spec.Values { + if exp, ok := val.(*ast.CallExpr); ok { + if checkFocusContainer(pass, ginkgoHndlr, exp) { + return true + } + } + } + } + } + + stmt, ok := n.(*ast.ExprStmt) + if !ok { + return true + } + + config := fileConfig.Clone() + + if comments, ok := cm[stmt]; ok { + config.UpdateFromComment(comments) + } + + // search for function calls + assertionExp, ok := stmt.X.(*ast.CallExpr) + if !ok { + return true + } + + if ginkgoHndlr != nil && bool(config.ForbidFocus) && checkFocusContainer(pass, ginkgoHndlr, assertionExp) { + return true + } + + // no more ginkgo checks. From here it's only gomega. 
So if there is no gomega handler, exit here. This is + // mostly to prevent nil pointer error. + if gomegaHndlr == nil { + return true + } + + assertionFunc, ok := assertionExp.Fun.(*ast.SelectorExpr) + if !ok { + checkNoAssertion(pass, assertionExp, gomegaHndlr) + return true + } + + if !isAssertionFunc(assertionFunc.Sel.Name) { + checkNoAssertion(pass, assertionExp, gomegaHndlr) + return true + } + + actualExpr := gomegaHndlr.GetActualExpr(assertionFunc) + if actualExpr == nil { + return true + } + + return checkExpression(pass, config, assertionExp, actualExpr, gomegaHndlr) + }) + } + return nil, nil +} + +func checkFocusContainer(pass *analysis.Pass, ginkgoHndlr ginkgohandler.Handler, exp *ast.CallExpr) bool { + foundFocus := false + isFocus, id := ginkgoHndlr.GetFocusContainerName(exp) + if isFocus { + reportNewName(pass, id, id.Name[1:], focusContainerFound, id.Name) + foundFocus = true + } + + if id != nil && ginkgohandler.IsContainer(id) { + for _, arg := range exp.Args { + if ginkgoHndlr.IsFocusSpec(arg) { + reportNoFix(pass, arg.Pos(), focusSpecFound) + foundFocus = true + } else if callExp, ok := arg.(*ast.CallExpr); ok { + if checkFocusContainer(pass, ginkgoHndlr, callExp) { // handle table entries + foundFocus = true + } + } + } + } + + return foundFocus +} + +func checkExpression(pass *analysis.Pass, config types.Config, assertionExp *ast.CallExpr, actualExpr *ast.CallExpr, handler gomegahandler.Handler) bool { + expr := astcopy.CallExpr(assertionExp) + oldExpr := goFmt(pass.Fset, expr) + + if checkAsyncAssertion(pass, config, expr, actualExpr, handler, oldExpr) { + return true + } + + actualArg := getActualArg(actualExpr, handler) + if actualArg == nil { + return true + } + + if !bool(config.SuppressLen) && isActualIsLenFunc(actualArg) { + return checkLengthMatcher(expr, pass, handler, oldExpr) + } else if nilable, compOp := getNilableFromComparison(actualArg); nilable != nil { + if isExprError(pass, nilable) { + if config.SuppressErr { + return true + } + } else if config.SuppressNil { + return true + } + + return checkNilMatcher(expr, pass, nilable, handler, compOp == token.NEQ, oldExpr) + + } else if first, second, op, ok := isComparison(pass, actualArg); ok { + matcher, shouldContinue := startCheckComparison(expr, handler) + if !shouldContinue { + return false + } + if !bool(config.SuppressLen) && isActualIsLenFunc(first) { + if handleLenComparison(pass, expr, matcher, first, second, op, handler, oldExpr) { + return false + } + } + return bool(config.SuppressCompare) || checkComparison(expr, pass, matcher, handler, first, second, op, oldExpr) + + } else if checkMatchError(pass, assertionExp, actualArg, handler, oldExpr) { + return false + } else if isExprError(pass, actualArg) { + return bool(config.SuppressErr) || checkNilError(pass, expr, handler, actualArg, oldExpr) + + } else if checkPointerComparison(pass, config, assertionExp, expr, actualArg, handler, oldExpr) { + return false + } else if !handleAssertionOnly(pass, config, expr, handler, actualArg, oldExpr, true) { + return false + } else if !config.SuppressTypeCompare { + return !checkEqualWrongType(pass, assertionExp, actualArg, handler, oldExpr) + } + + return true +} + +func checkMatchError(pass *analysis.Pass, origExp *ast.CallExpr, actualArg ast.Expr, handler gomegahandler.Handler, oldExpr string) bool { + matcher, ok := origExp.Args[0].(*ast.CallExpr) + if !ok { + return false + } + + return doCheckMatchError(pass, origExp, matcher, actualArg, handler, oldExpr) +} + +func doCheckMatchError(pass 
*analysis.Pass, origExp *ast.CallExpr, matcher *ast.CallExpr, actualArg ast.Expr, handler gomegahandler.Handler, oldExpr string) bool { + name, ok := handler.GetActualFuncName(matcher) + if !ok { + return false + } + switch name { + case matchError: + case not: + nested, ok := matcher.Args[0].(*ast.CallExpr) + if !ok { + return false + } + + return doCheckMatchError(pass, origExp, nested, actualArg, handler, oldExpr) + case and, or: + res := true + for _, arg := range matcher.Args { + if nested, ok := arg.(*ast.CallExpr); ok { + if !doCheckMatchError(pass, origExp, nested, actualArg, handler, oldExpr) { + res = false + } + } + } + return res + default: + return false + } + + if !isExprError(pass, actualArg) { + reportNoFix(pass, origExp.Pos(), matchErrorArgWrongType, goFmt(pass.Fset, actualArg)) + } + + expr := astcopy.CallExpr(matcher) + + validAssertion, requiredParams := checkMatchErrorAssertion(pass, matcher) + if !validAssertion { + reportNoFix(pass, expr.Pos(), matchErrorWrongTypeAssertion, goFmt(pass.Fset, matcher.Args[0])) + return false + } + + numParams := len(matcher.Args) + if numParams == requiredParams { + if numParams == 2 { + t := pass.TypesInfo.TypeOf(matcher.Args[1]) + if !gotypes.Identical(t, gotypes.Typ[gotypes.String]) { + pass.Reportf(expr.Pos(), matchErrorNoFuncDescription) + return false + } + } + return true + } + + if requiredParams == 2 && numParams == 1 { + reportNoFix(pass, expr.Pos(), matchErrorMissingDescription) + return false + } + + var newArgsSuggestion = []ast.Expr{expr.Args[0]} + if requiredParams == 2 { + newArgsSuggestion = append(newArgsSuggestion, expr.Args[1]) + } + expr.Args = newArgsSuggestion + report(pass, expr, matchErrorRedundantArg, oldExpr) + return false +} + +func checkMatchErrorAssertion(pass *analysis.Pass, matcher *ast.CallExpr) (bool, int) { + if isErrorMatcherValidArg(pass, matcher.Args[0]) { + return true, 1 + } + + t1 := pass.TypesInfo.TypeOf(matcher.Args[0]) + if isFuncErrBool(t1) { + return true, 2 + } + + return false, 0 +} + +// isFuncErrBool checks if a function is with the signature `func(error) bool` +func isFuncErrBool(t gotypes.Type) bool { + sig, ok := t.(*gotypes.Signature) + if !ok { + return false + } + if sig.Params().Len() != 1 || sig.Results().Len() != 1 { + return false + } + + if !interfaces.ImplementsError(sig.Params().At(0).Type()) { + return false + } + + b, ok := sig.Results().At(0).Type().(*gotypes.Basic) + if ok && b.Name() == "bool" && b.Info() == gotypes.IsBoolean && b.Kind() == gotypes.Bool { + return true + } + + return false +} + +func isErrorMatcherValidArg(pass *analysis.Pass, arg ast.Expr) bool { + if isExprError(pass, arg) { + return true + } + + if t, ok := pass.TypesInfo.TypeOf(arg).(*gotypes.Basic); ok && t.Kind() == gotypes.String { + return true + } + + t := pass.TypesInfo.TypeOf(arg) + + return interfaces.ImplementsGomegaMatcher(t) +} + +func checkEqualWrongType(pass *analysis.Pass, origExp *ast.CallExpr, actualArg ast.Expr, handler gomegahandler.Handler, old string) bool { + matcher, ok := origExp.Args[0].(*ast.CallExpr) + if !ok { + return false + } + + return checkEqualDifferentTypes(pass, matcher, actualArg, handler, old, false) +} + +func checkEqualDifferentTypes(pass *analysis.Pass, matcher *ast.CallExpr, actualArg ast.Expr, handler gomegahandler.Handler, old string, parentPointer bool) bool { + matcherFuncName, ok := handler.GetActualFuncName(matcher) + if !ok { + return false + } + + actualType := pass.TypesInfo.TypeOf(actualArg) + + switch matcherFuncName { + case equal, 
beIdenticalTo: // continue + case and, or: + foundIssue := false + for _, nestedExp := range matcher.Args { + nested, ok := nestedExp.(*ast.CallExpr) + if !ok { + continue + } + if checkEqualDifferentTypes(pass, nested, actualArg, handler, old, parentPointer) { + foundIssue = true + } + } + + return foundIssue + case withTransform: + nested, ok := matcher.Args[1].(*ast.CallExpr) + if !ok { + return false + } + + matcherFuncName, ok = handler.GetActualFuncName(nested) + switch matcherFuncName { + case equal, beIdenticalTo: + case not: + return checkEqualDifferentTypes(pass, nested, actualArg, handler, old, parentPointer) + default: + return false + } + + if t := getFuncType(pass, matcher.Args[0]); t != nil { + actualType = t + matcher = nested + + if !ok { + return false + } + } else { + return checkEqualDifferentTypes(pass, nested, actualArg, handler, old, parentPointer) + } + + case not: + nested, ok := matcher.Args[0].(*ast.CallExpr) + if !ok { + return false + } + + return checkEqualDifferentTypes(pass, nested, actualArg, handler, old, parentPointer) + + case haveValue: + nested, ok := matcher.Args[0].(*ast.CallExpr) + if !ok { + return false + } + + return checkEqualDifferentTypes(pass, nested, actualArg, handler, old, true) + default: + return false + } + + matcherValue := matcher.Args[0] + + switch act := actualType.(type) { + case *gotypes.Tuple: + actualType = act.At(0).Type() + case *gotypes.Pointer: + if parentPointer { + actualType = act.Elem() + } + } + + matcherType := pass.TypesInfo.TypeOf(matcherValue) + + if !reflect.DeepEqual(matcherType, actualType) { + // Equal can handle comparison of interface and a value that implements it + if isImplementing(matcherType, actualType) || isImplementing(actualType, matcherType) { + return false + } + + reportNoFix(pass, matcher.Pos(), compareDifferentTypes, matcherFuncName, actualType, matcherType) + return true + } + + return false +} + +func getFuncType(pass *analysis.Pass, expr ast.Expr) gotypes.Type { + switch f := expr.(type) { + case *ast.FuncLit: + if f.Type != nil && f.Type.Results != nil && len(f.Type.Results.List) > 0 { + return pass.TypesInfo.TypeOf(f.Type.Results.List[0].Type) + } + case *ast.Ident: + a := pass.TypesInfo.TypeOf(f) + if sig, ok := a.(*gotypes.Signature); ok && sig.Results().Len() > 0 { + return sig.Results().At(0).Type() + } + } + + return nil +} + +func isImplementing(ifs, impl gotypes.Type) bool { + if gotypes.IsInterface(ifs) { + + var ( + theIfs *gotypes.Interface + ok bool + ) + + for { + theIfs, ok = ifs.(*gotypes.Interface) + if ok { + break + } + ifs = ifs.Underlying() + } + + return gotypes.Implements(impl, theIfs) + } + return false +} + +// be careful - never change origExp!!! only modify its clone, expr!!! +func checkPointerComparison(pass *analysis.Pass, config types.Config, origExp *ast.CallExpr, expr *ast.CallExpr, actualArg ast.Expr, handler gomegahandler.Handler, oldExpr string) bool { + if !isPointer(pass, actualArg) { + return false + } + matcher, ok := origExp.Args[0].(*ast.CallExpr) + if !ok { + return false + } + + matcherFuncName, ok := handler.GetActualFuncName(matcher) + if !ok { + return false + } + + // not using recurse here, since we need the original expression, in order to get the TypeInfo, while we should not + // modify it. 
+ for matcherFuncName == not { + reverseAssertionFuncLogic(expr) + expr.Args[0] = expr.Args[0].(*ast.CallExpr).Args[0] + matcher, ok = matcher.Args[0].(*ast.CallExpr) + if !ok { + return false + } + + matcherFuncName, ok = handler.GetActualFuncName(matcher) + if !ok { + return false + } + } + + switch matcherFuncName { + case equal, beIdenticalTo, beEquivalentTo: + arg := matcher.Args[0] + if isPointer(pass, arg) { + return false + } + if isNil(arg) { + return false + } + if isInterface(pass, arg) { + return false + } + case beFalse, beTrue, beNumerically: + default: + return false + } + + handleAssertionOnly(pass, config, expr, handler, actualArg, oldExpr, false) + + args := []ast.Expr{astcopy.CallExpr(expr.Args[0].(*ast.CallExpr))} + handler.ReplaceFunction(expr.Args[0].(*ast.CallExpr), ast.NewIdent(haveValue)) + expr.Args[0].(*ast.CallExpr).Args = args + report(pass, expr, comparePointerToValue, oldExpr) + + return true + +} + +// check async assertion does not assert function call. This is a real bug in the test. In this case, the assertion is +// done on the returned value, instead of polling the result of a function, for instance. +func checkAsyncAssertion(pass *analysis.Pass, config types.Config, expr *ast.CallExpr, actualExpr *ast.CallExpr, handler gomegahandler.Handler, oldExpr string) bool { + funcName, ok := handler.GetActualFuncName(actualExpr) + if !ok { + return false + } + + var funcIndex int + switch funcName { + case eventually, consistently: + funcIndex = 0 + case eventuallyWithOffset, consistentlyWithOffset: + funcIndex = 1 + default: + return false + } + + if !config.SuppressAsync && len(actualExpr.Args) > funcIndex { + t := pass.TypesInfo.TypeOf(actualExpr.Args[funcIndex]) + + // skip context variable, if used as first argument + if "context.Context" == t.String() { + funcIndex++ + } + + if len(actualExpr.Args) > funcIndex { + if fun, funcCall := actualExpr.Args[funcIndex].(*ast.CallExpr); funcCall { + t = pass.TypesInfo.TypeOf(fun) + if !isValidAsyncValueType(t) { + actualExpr = handler.GetActualExpr(expr.Fun.(*ast.SelectorExpr)) + + if len(fun.Args) > 0 { + origArgs := actualExpr.Args + origFunc := actualExpr.Fun + actualExpr.Args = fun.Args + + origArgs[funcIndex] = fun.Fun + call := &ast.SelectorExpr{ + Sel: ast.NewIdent("WithArguments"), + X: &ast.CallExpr{ + Fun: origFunc, + Args: origArgs, + }, + } + + actualExpr.Fun = call + actualExpr.Args = fun.Args + } else { + actualExpr.Args[funcIndex] = fun.Fun + } + + handleAssertionOnly(pass, config, expr, handler, actualExpr, oldExpr, false) + report(pass, expr, fmt.Sprintf(valueInEventually, funcName, funcName)+"; consider using `%s` instead", oldExpr) + return true + } + } + } + } + + handleAssertionOnly(pass, config, expr, handler, actualExpr, oldExpr, true) + return true +} + +func isValidAsyncValueType(t gotypes.Type) bool { + switch t.(type) { + // allow functions that return function or channel. 
+ case *gotypes.Signature, *gotypes.Chan, *gotypes.Pointer: + return true + case *gotypes.Named: + return isValidAsyncValueType(t.Underlying()) + } + + return false +} + +func startCheckComparison(exp *ast.CallExpr, handler gomegahandler.Handler) (*ast.CallExpr, bool) { + matcher, ok := exp.Args[0].(*ast.CallExpr) + if !ok { + return nil, false + } + + matcherFuncName, ok := handler.GetActualFuncName(matcher) + if !ok { + return nil, false + } + + switch matcherFuncName { + case beTrue: + case beFalse: + reverseAssertionFuncLogic(exp) + case equal: + boolean, found := matcher.Args[0].(*ast.Ident) + if !found { + return nil, false + } + + if boolean.Name == "false" { + reverseAssertionFuncLogic(exp) + } else if boolean.Name != "true" { + return nil, false + } + + case not: + reverseAssertionFuncLogic(exp) + exp.Args[0] = exp.Args[0].(*ast.CallExpr).Args[0] + return startCheckComparison(exp, handler) + + default: + return nil, false + } + + return matcher, true +} + +func checkComparison(exp *ast.CallExpr, pass *analysis.Pass, matcher *ast.CallExpr, handler gomegahandler.Handler, first ast.Expr, second ast.Expr, op token.Token, oldExp string) bool { + fun, ok := exp.Fun.(*ast.SelectorExpr) + if !ok { + return true + } + + call := handler.GetActualExpr(fun) + if call == nil { + return true + } + + switch op { + case token.EQL: + handleEqualComparison(pass, matcher, first, second, handler) + + case token.NEQ: + reverseAssertionFuncLogic(exp) + handleEqualComparison(pass, matcher, first, second, handler) + case token.GTR, token.GEQ, token.LSS, token.LEQ: + if !isNumeric(pass, first) { + return true + } + handler.ReplaceFunction(matcher, ast.NewIdent(beNumerically)) + matcher.Args = []ast.Expr{ + &ast.BasicLit{Kind: token.STRING, Value: fmt.Sprintf(`"%s"`, op.String())}, + second, + } + default: + return true + } + + call.Args = []ast.Expr{first} + report(pass, exp, wrongCompareWarningTemplate, oldExp) + return false +} + +func handleEqualComparison(pass *analysis.Pass, matcher *ast.CallExpr, first ast.Expr, second ast.Expr, handler gomegahandler.Handler) { + if isZero(pass, second) { + handler.ReplaceFunction(matcher, ast.NewIdent(beZero)) + matcher.Args = nil + } else { + t := pass.TypesInfo.TypeOf(first) + if gotypes.IsInterface(t) { + handler.ReplaceFunction(matcher, ast.NewIdent(beIdenticalTo)) + } else if _, ok := t.(*gotypes.Pointer); ok { + handler.ReplaceFunction(matcher, ast.NewIdent(beIdenticalTo)) + } else { + handler.ReplaceFunction(matcher, ast.NewIdent(equal)) + } + + matcher.Args = []ast.Expr{second} + } +} + +func handleLenComparison(pass *analysis.Pass, exp *ast.CallExpr, matcher *ast.CallExpr, first ast.Expr, second ast.Expr, op token.Token, handler gomegahandler.Handler, oldExpr string) bool { + switch op { + case token.EQL: + case token.NEQ: + reverseAssertionFuncLogic(exp) + default: + return false + } + + var eql *ast.Ident + if isZero(pass, second) { + eql = ast.NewIdent(beEmpty) + } else { + eql = ast.NewIdent(haveLen) + matcher.Args = []ast.Expr{second} + } + + handler.ReplaceFunction(matcher, eql) + firstLen, ok := first.(*ast.CallExpr) // assuming it's len() + if !ok { + return false // should never happen + } + + val := firstLen.Args[0] + fun := handler.GetActualExpr(exp.Fun.(*ast.SelectorExpr)) + fun.Args = []ast.Expr{val} + + report(pass, exp, wrongLengthWarningTemplate, oldExpr) + return true +} + +// Check if the "actual" argument is a call to the golang built-in len() function +func isActualIsLenFunc(actualArg ast.Expr) bool { + lenArgExp, ok := 
actualArg.(*ast.CallExpr) + if !ok { + return false + } + + lenFunc, ok := lenArgExp.Fun.(*ast.Ident) + return ok && lenFunc.Name == "len" +} + +// Check if matcher function is in one of the patterns we want to avoid +func checkLengthMatcher(exp *ast.CallExpr, pass *analysis.Pass, handler gomegahandler.Handler, oldExp string) bool { + matcher, ok := exp.Args[0].(*ast.CallExpr) + if !ok { + return true + } + + matcherFuncName, ok := handler.GetActualFuncName(matcher) + if !ok { + return true + } + + switch matcherFuncName { + case equal: + handleEqualMatcher(matcher, pass, exp, handler, oldExp) + return false + + case beZero: + handleBeZero(pass, exp, handler, oldExp) + return false + + case beNumerically: + return handleBeNumerically(matcher, pass, exp, handler, oldExp) + + case not: + reverseAssertionFuncLogic(exp) + exp.Args[0] = exp.Args[0].(*ast.CallExpr).Args[0] + return checkLengthMatcher(exp, pass, handler, oldExp) + + default: + return true + } +} + +// Check if matcher function is in one of the patterns we want to avoid +func checkNilMatcher(exp *ast.CallExpr, pass *analysis.Pass, nilable ast.Expr, handler gomegahandler.Handler, notEqual bool, oldExp string) bool { + matcher, ok := exp.Args[0].(*ast.CallExpr) + if !ok { + return true + } + + matcherFuncName, ok := handler.GetActualFuncName(matcher) + if !ok { + return true + } + + switch matcherFuncName { + case equal: + handleEqualNilMatcher(matcher, pass, exp, handler, nilable, notEqual, oldExp) + + case beTrue: + handleNilBeBoolMatcher(pass, exp, handler, nilable, notEqual, oldExp) + + case beFalse: + reverseAssertionFuncLogic(exp) + handleNilBeBoolMatcher(pass, exp, handler, nilable, notEqual, oldExp) + + case not: + reverseAssertionFuncLogic(exp) + exp.Args[0] = exp.Args[0].(*ast.CallExpr).Args[0] + return checkNilMatcher(exp, pass, nilable, handler, notEqual, oldExp) + + default: + return true + } + return false +} + +func checkNilError(pass *analysis.Pass, assertionExp *ast.CallExpr, handler gomegahandler.Handler, actualArg ast.Expr, oldExpr string) bool { + if len(assertionExp.Args) == 0 { + return true + } + + equalFuncExpr, ok := assertionExp.Args[0].(*ast.CallExpr) + if !ok { + return true + } + + funcName, ok := handler.GetActualFuncName(equalFuncExpr) + if !ok { + return true + } + + switch funcName { + case beNil: // no additional processing needed. 
+ case equal: + + if len(equalFuncExpr.Args) == 0 { + return true + } + + nilable, ok := equalFuncExpr.Args[0].(*ast.Ident) + if !ok || nilable.Name != "nil" { + return true + } + + case not: + reverseAssertionFuncLogic(assertionExp) + assertionExp.Args[0] = assertionExp.Args[0].(*ast.CallExpr).Args[0] + return checkNilError(pass, assertionExp, handler, actualArg, oldExpr) + default: + return true + } + + var newFuncName string + if _, ok := actualArg.(*ast.CallExpr); ok { + newFuncName = succeed + } else { + reverseAssertionFuncLogic(assertionExp) + newFuncName = haveOccurred + } + + handler.ReplaceFunction(equalFuncExpr, ast.NewIdent(newFuncName)) + equalFuncExpr.Args = nil + + report(pass, assertionExp, wrongErrWarningTemplate, oldExpr) + return false +} + +// handleAssertionOnly checks use-cases when the actual value is valid, but only the assertion should be fixed +// it handles: +// +// Equal(nil) => BeNil() +// Equal(true) => BeTrue() +// Equal(false) => BeFalse() +// HaveLen(0) => BeEmpty() +func handleAssertionOnly(pass *analysis.Pass, config types.Config, expr *ast.CallExpr, handler gomegahandler.Handler, actualArg ast.Expr, oldExpr string, shouldReport bool) bool { + if len(expr.Args) == 0 { + return true + } + + equalFuncExpr, ok := expr.Args[0].(*ast.CallExpr) + if !ok { + return true + } + + funcName, ok := handler.GetActualFuncName(equalFuncExpr) + if !ok { + return true + } + + switch funcName { + case equal: + if len(equalFuncExpr.Args) == 0 { + return true + } + + tkn, ok := equalFuncExpr.Args[0].(*ast.Ident) + if !ok { + return true + } + + var replacement string + var template string + switch tkn.Name { + case "nil": + if config.SuppressNil { + return true + } + replacement = beNil + template = wrongNilWarningTemplate + case "true": + replacement = beTrue + template = wrongBoolWarningTemplate + case "false": + if isNegativeAssertion(expr) { + reverseAssertionFuncLogic(expr) + replacement = beTrue + } else { + replacement = beFalse + } + template = wrongBoolWarningTemplate + default: + return true + } + + handler.ReplaceFunction(equalFuncExpr, ast.NewIdent(replacement)) + equalFuncExpr.Args = nil + + if shouldReport { + report(pass, expr, template, oldExpr) + } + + return false + + case beFalse: + if isNegativeAssertion(expr) { + reverseAssertionFuncLogic(expr) + handler.ReplaceFunction(equalFuncExpr, ast.NewIdent(beTrue)) + if shouldReport { + report(pass, expr, doubleNegativeWarningTemplate, oldExpr) + } + } + return false + + case haveLen: + if config.AllowHaveLen0 { + return true + } + + if len(equalFuncExpr.Args) > 0 { + if isZero(pass, equalFuncExpr.Args[0]) { + handler.ReplaceFunction(equalFuncExpr, ast.NewIdent(beEmpty)) + equalFuncExpr.Args = nil + if shouldReport { + report(pass, expr, wrongLengthWarningTemplate, oldExpr) + } + return false + } + } + + return true + + case not: + reverseAssertionFuncLogic(expr) + expr.Args[0] = expr.Args[0].(*ast.CallExpr).Args[0] + return handleAssertionOnly(pass, config, expr, handler, actualArg, oldExpr, shouldReport) + default: + return true + } +} + +func isZero(pass *analysis.Pass, arg ast.Expr) bool { + if val, ok := arg.(*ast.BasicLit); ok && val.Kind == token.INT && val.Value == "0" { + return true + } + info, ok := pass.TypesInfo.Types[arg] + if ok { + if t, ok := info.Type.(*gotypes.Basic); ok && t.Kind() == gotypes.Int && info.Value != nil { + if i, ok := constant.Int64Val(info.Value); ok && i == 0 { + return true + } + } + } else if val, ok := arg.(*ast.Ident); ok && val.Obj != nil && val.Obj.Kind == ast.Con { + if 
spec, ok := val.Obj.Decl.(*ast.ValueSpec); ok { + if len(spec.Values) == 1 { + if value, ok := spec.Values[0].(*ast.BasicLit); ok && value.Kind == token.INT && value.Value == "0" { + return true + } + } + } + } + + return false +} + +// getActualArg checks that the function is an assertion's actual function and return the "actual" parameter. If the +// function is not assertion's actual function, return nil. +func getActualArg(actualExpr *ast.CallExpr, handler gomegahandler.Handler) ast.Expr { + funcName, ok := handler.GetActualFuncName(actualExpr) + if !ok { + return nil + } + + switch funcName { + case expect, omega: + return actualExpr.Args[0] + case expectWithOffset: + return actualExpr.Args[1] + default: + return nil + } +} + +// Replace the len function call by its parameter, to create a fix suggestion +func replaceLenActualArg(actualExpr *ast.CallExpr, handler gomegahandler.Handler) { + name, ok := handler.GetActualFuncName(actualExpr) + if !ok { + return + } + + switch name { + case expect, omega: + arg := actualExpr.Args[0] + if isActualIsLenFunc(arg) { + // replace the len function call by its parameter, to create a fix suggestion + actualExpr.Args[0] = arg.(*ast.CallExpr).Args[0] + } + case expectWithOffset: + arg := actualExpr.Args[1] + if isActualIsLenFunc(arg) { + // replace the len function call by its parameter, to create a fix suggestion + actualExpr.Args[1] = arg.(*ast.CallExpr).Args[0] + } + } +} + +// Replace the nil comparison with the compared object, to create a fix suggestion +func replaceNilActualArg(actualExpr *ast.CallExpr, handler gomegahandler.Handler, nilable ast.Expr) bool { + actualFuncName, ok := handler.GetActualFuncName(actualExpr) + if !ok { + return false + } + + switch actualFuncName { + case expect, omega: + actualExpr.Args[0] = nilable + return true + + case expectWithOffset: + actualExpr.Args[1] = nilable + return true + + default: + return false + } +} + +// For the BeNumerically matcher, we want to avoid the assertion of length to be > 0 or >= 1, or just == number +func handleBeNumerically(matcher *ast.CallExpr, pass *analysis.Pass, exp *ast.CallExpr, handler gomegahandler.Handler, oldExp string) bool { + opExp, ok1 := matcher.Args[0].(*ast.BasicLit) + valExp, ok2 := matcher.Args[1].(*ast.BasicLit) + + if ok1 && ok2 { + op := opExp.Value + val := valExp.Value + + if (op == `">"` && val == "0") || (op == `">="` && val == "1") { + reverseAssertionFuncLogic(exp) + handler.ReplaceFunction(exp.Args[0].(*ast.CallExpr), ast.NewIdent(beEmpty)) + exp.Args[0].(*ast.CallExpr).Args = nil + reportLengthAssertion(pass, exp, handler, oldExp) + return false + } else if op == `"=="` { + chooseNumericMatcher(pass, exp, handler, valExp) + reportLengthAssertion(pass, exp, handler, oldExp) + + return false + } else if op == `"!="` { + reverseAssertionFuncLogic(exp) + chooseNumericMatcher(pass, exp, handler, valExp) + reportLengthAssertion(pass, exp, handler, oldExp) + + return false + } + } + return true +} + +func chooseNumericMatcher(pass *analysis.Pass, exp *ast.CallExpr, handler gomegahandler.Handler, valExp ast.Expr) { + caller := exp.Args[0].(*ast.CallExpr) + if isZero(pass, valExp) { + handler.ReplaceFunction(caller, ast.NewIdent(beEmpty)) + exp.Args[0].(*ast.CallExpr).Args = nil + } else { + handler.ReplaceFunction(caller, ast.NewIdent(haveLen)) + exp.Args[0].(*ast.CallExpr).Args = []ast.Expr{valExp} + } +} + +func reverseAssertionFuncLogic(exp *ast.CallExpr) { + assertionFunc := exp.Fun.(*ast.SelectorExpr).Sel + assertionFunc.Name = 
reverseassertion.ChangeAssertionLogic(assertionFunc.Name) +} + +func isNegativeAssertion(exp *ast.CallExpr) bool { + assertionFunc := exp.Fun.(*ast.SelectorExpr).Sel + return reverseassertion.IsNegativeLogic(assertionFunc.Name) +} + +func handleEqualMatcher(matcher *ast.CallExpr, pass *analysis.Pass, exp *ast.CallExpr, handler gomegahandler.Handler, oldExp string) { + equalTo, ok := matcher.Args[0].(*ast.BasicLit) + if ok { + chooseNumericMatcher(pass, exp, handler, equalTo) + } else { + handler.ReplaceFunction(exp.Args[0].(*ast.CallExpr), ast.NewIdent(haveLen)) + exp.Args[0].(*ast.CallExpr).Args = []ast.Expr{matcher.Args[0]} + } + reportLengthAssertion(pass, exp, handler, oldExp) +} + +func handleBeZero(pass *analysis.Pass, exp *ast.CallExpr, handler gomegahandler.Handler, oldExp string) { + exp.Args[0].(*ast.CallExpr).Args = nil + handler.ReplaceFunction(exp.Args[0].(*ast.CallExpr), ast.NewIdent(beEmpty)) + reportLengthAssertion(pass, exp, handler, oldExp) +} + +func handleEqualNilMatcher(matcher *ast.CallExpr, pass *analysis.Pass, exp *ast.CallExpr, handler gomegahandler.Handler, nilable ast.Expr, notEqual bool, oldExp string) { + equalTo, ok := matcher.Args[0].(*ast.Ident) + if !ok { + return + } + + if equalTo.Name == "false" { + reverseAssertionFuncLogic(exp) + } else if equalTo.Name != "true" { + return + } + + newFuncName, isItError := handleNilComparisonErr(pass, exp, nilable) + + handler.ReplaceFunction(exp.Args[0].(*ast.CallExpr), ast.NewIdent(newFuncName)) + exp.Args[0].(*ast.CallExpr).Args = nil + + reportNilAssertion(pass, exp, handler, nilable, notEqual, oldExp, isItError) +} + +func handleNilBeBoolMatcher(pass *analysis.Pass, exp *ast.CallExpr, handler gomegahandler.Handler, nilable ast.Expr, notEqual bool, oldExp string) { + newFuncName, isItError := handleNilComparisonErr(pass, exp, nilable) + handler.ReplaceFunction(exp.Args[0].(*ast.CallExpr), ast.NewIdent(newFuncName)) + exp.Args[0].(*ast.CallExpr).Args = nil + + reportNilAssertion(pass, exp, handler, nilable, notEqual, oldExp, isItError) +} + +func handleNilComparisonErr(pass *analysis.Pass, exp *ast.CallExpr, nilable ast.Expr) (string, bool) { + newFuncName := beNil + isItError := isExprError(pass, nilable) + if isItError { + if _, ok := nilable.(*ast.CallExpr); ok { + newFuncName = succeed + } else { + reverseAssertionFuncLogic(exp) + newFuncName = haveOccurred + } + } + + return newFuncName, isItError +} + +func isAssertionFunc(name string) bool { + switch name { + case "To", "ToNot", "NotTo", "Should", "ShouldNot": + return true + } + return false +} + +func reportLengthAssertion(pass *analysis.Pass, expr *ast.CallExpr, handler gomegahandler.Handler, oldExpr string) { + actualExpr := handler.GetActualExpr(expr.Fun.(*ast.SelectorExpr)) + replaceLenActualArg(actualExpr, handler) + + report(pass, expr, wrongLengthWarningTemplate, oldExpr) +} + +func reportNilAssertion(pass *analysis.Pass, expr *ast.CallExpr, handler gomegahandler.Handler, nilable ast.Expr, notEqual bool, oldExpr string, isItError bool) { + actualExpr := handler.GetActualExpr(expr.Fun.(*ast.SelectorExpr)) + changed := replaceNilActualArg(actualExpr, handler, nilable) + if !changed { + return + } + + if notEqual { + reverseAssertionFuncLogic(expr) + } + template := wrongNilWarningTemplate + if isItError { + template = wrongErrWarningTemplate + } + + report(pass, expr, template, oldExpr) +} + +func report(pass *analysis.Pass, expr ast.Expr, messageTemplate, oldExpr string) { + newExp := goFmt(pass.Fset, expr) + pass.Report(analysis.Diagnostic{ + Pos: 
expr.Pos(), + Message: fmt.Sprintf(messageTemplate, newExp), + SuggestedFixes: []analysis.SuggestedFix{ + { + Message: fmt.Sprintf("should replace %s with %s", oldExpr, newExp), + TextEdits: []analysis.TextEdit{ + { + Pos: expr.Pos(), + End: expr.End(), + NewText: []byte(newExp), + }, + }, + }, + }, + }) +} + +func reportNewName(pass *analysis.Pass, id *ast.Ident, newName string, messageTemplate, oldExpr string) { + pass.Report(analysis.Diagnostic{ + Pos: id.Pos(), + Message: fmt.Sprintf(messageTemplate, newName), + SuggestedFixes: []analysis.SuggestedFix{ + { + Message: fmt.Sprintf("should replace %s with %s", oldExpr, newName), + TextEdits: []analysis.TextEdit{ + { + Pos: id.Pos(), + End: id.End(), + NewText: []byte(newName), + }, + }, + }, + }, + }) +} + +func reportNoFix(pass *analysis.Pass, pos token.Pos, message string, args ...any) { + if len(args) > 0 { + message = fmt.Sprintf(message, args...) + } + + pass.Report(analysis.Diagnostic{ + Pos: pos, + Message: message, + }) +} + +func getNilableFromComparison(actualArg ast.Expr) (ast.Expr, token.Token) { + bin, ok := actualArg.(*ast.BinaryExpr) + if !ok { + return nil, token.ILLEGAL + } + + if bin.Op == token.EQL || bin.Op == token.NEQ { + if isNil(bin.Y) { + return bin.X, bin.Op + } else if isNil(bin.X) { + return bin.Y, bin.Op + } + } + + return nil, token.ILLEGAL +} + +func isNil(expr ast.Expr) bool { + nilObject, ok := expr.(*ast.Ident) + return ok && nilObject.Name == "nil" && nilObject.Obj == nil +} + +func isComparison(pass *analysis.Pass, actualArg ast.Expr) (ast.Expr, ast.Expr, token.Token, bool) { + bin, ok := actualArg.(*ast.BinaryExpr) + if !ok { + return nil, nil, token.ILLEGAL, false + } + + first, second, op := bin.X, bin.Y, bin.Op + replace := false + switch realFirst := first.(type) { + case *ast.Ident: // check if const + info, ok := pass.TypesInfo.Types[realFirst] + if ok { + if _, ok := info.Type.(*gotypes.Basic); ok && info.Value != nil { + replace = true + } + } + + case *ast.BasicLit: + replace = true + } + + if replace { + first, second = second, first + } + + switch op { + case token.EQL: + case token.NEQ: + case token.GTR, token.GEQ, token.LSS, token.LEQ: + if replace { + op = reverseassertion.ChangeCompareOperator(op) + } + default: + return nil, nil, token.ILLEGAL, false + } + return first, second, op, true +} + +func goFmt(fset *token.FileSet, x ast.Expr) string { + var b bytes.Buffer + _ = printer.Fprint(&b, fset, x) + return b.String() +} + +func isExprError(pass *analysis.Pass, expr ast.Expr) bool { + actualArgType := pass.TypesInfo.TypeOf(expr) + switch t := actualArgType.(type) { + case *gotypes.Named: + if interfaces.ImplementsError(actualArgType) { + return true + } + case *gotypes.Tuple: + if t.Len() > 0 { + switch t0 := t.At(0).Type().(type) { + case *gotypes.Named, *gotypes.Pointer: + if interfaces.ImplementsError(t0) { + return true + } + } + } + } + return false +} + +func isPointer(pass *analysis.Pass, expr ast.Expr) bool { + t := pass.TypesInfo.TypeOf(expr) + _, ok := t.(*gotypes.Pointer) + return ok +} + +func isInterface(pass *analysis.Pass, expr ast.Expr) bool { + t := pass.TypesInfo.TypeOf(expr) + return gotypes.IsInterface(t) +} + +func isNumeric(pass *analysis.Pass, node ast.Expr) bool { + t := pass.TypesInfo.TypeOf(node) + + switch t.String() { + case "int", "uint", "int8", "uint8", "int16", "uint16", "int32", "uint32", "int64", "uint64", "float32", "float64": + return true + } + return false +} + +func checkNoAssertion(pass *analysis.Pass, expr *ast.CallExpr, handler 
gomegahandler.Handler) { + funcName, ok := handler.GetActualFuncName(expr) + if ok { + if isActualFunc(funcName) { + reportNoFix(pass, expr.Pos(), missingAssertionMessage, funcName) + } else if isActualAsyncFunc(funcName) { + reportNoFix(pass, expr.Pos(), missingAsyncAssertionMessage, funcName) + } + } +} + +func isActualFunc(name string) bool { + return name == expect || name == expectWithOffset +} + +func isActualAsyncFunc(name string) bool { + switch name { + case eventually, eventuallyWithOffset, consistently, consistentlyWithOffset: + return true + } + return false +} diff --git a/vendor/github.com/nunnatsa/ginkgolinter/ginkgohandler/handler.go b/vendor/github.com/nunnatsa/ginkgolinter/ginkgohandler/handler.go new file mode 100644 index 0000000000..c0829c4695 --- /dev/null +++ b/vendor/github.com/nunnatsa/ginkgolinter/ginkgohandler/handler.go @@ -0,0 +1,97 @@ +package ginkgohandler + +import ( + "go/ast" +) + +const ( + importPath = `"github.com/onsi/ginkgo"` + importPathV2 = `"github.com/onsi/ginkgo/v2"` + + focusSpec = "Focus" +) + +// Handler provides different handling, depending on the way ginkgo was imported: whether +// it was imported with the "." name, a custom name, or without any name. +type Handler interface { + GetFocusContainerName(*ast.CallExpr) (bool, *ast.Ident) + IsFocusSpec(ident ast.Expr) bool +} + +// GetGinkgoHandler returns a ginkgo handler according to the way ginkgo was imported in the specific file +func GetGinkgoHandler(file *ast.File) Handler { + for _, imp := range file.Imports { + if imp.Path.Value != importPath && imp.Path.Value != importPathV2 { + continue + } + + switch name := imp.Name.String(); { + case name == ".": + return dotHandler{} + case name == "": // import with no local name + return nameHandler("ginkgo") + default: + return nameHandler(name) + } + } + + return nil // no ginkgo import; this file does not use ginkgo +} + +// dotHandler is used when importing ginkgo with dot; i.e. +// import . "github.com/onsi/ginkgo" +type dotHandler struct{} + +func (h dotHandler) GetFocusContainerName(exp *ast.CallExpr) (bool, *ast.Ident) { + if fun, ok := exp.Fun.(*ast.Ident); ok { + return isFocusContainer(fun.Name), fun + } + return false, nil +} + +func (h dotHandler) IsFocusSpec(exp ast.Expr) bool { + id, ok := exp.(*ast.Ident) + return ok && id.Name == focusSpec +} + +// nameHandler is used when importing ginkgo without a name; i.e. +// import "github.com/onsi/ginkgo" +// +// or with a custom name; e.g. 
+// import customname "github.com/onsi/ginkgo" +type nameHandler string + +func (h nameHandler) GetFocusContainerName(exp *ast.CallExpr) (bool, *ast.Ident) { + if sel, ok := exp.Fun.(*ast.SelectorExpr); ok { + if id, ok := sel.X.(*ast.Ident); ok && id.Name == string(h) { + return isFocusContainer(sel.Sel.Name), sel.Sel + } + } + return false, nil +} + +func (h nameHandler) IsFocusSpec(exp ast.Expr) bool { + if selExp, ok := exp.(*ast.SelectorExpr); ok { + if x, ok := selExp.X.(*ast.Ident); ok && x.Name == string(h) { + return selExp.Sel.Name == focusSpec + } + } + + return false +} + +func isFocusContainer(name string) bool { + switch name { + case "FDescribe", "FContext", "FWhen", "FIt", "FDescribeTable", "FEntry": + return true + } + return false +} + +func IsContainer(id *ast.Ident) bool { + switch id.Name { + case "It", "When", "Context", "Describe", "DescribeTable", "Entry": + return true + } + return isFocusContainer(id.Name) +} diff --git a/vendor/github.com/nunnatsa/ginkgolinter/gomegahandler/handler.go b/vendor/github.com/nunnatsa/ginkgolinter/gomegahandler/handler.go new file mode 100644 index 0000000000..419145b75a --- /dev/null +++ b/vendor/github.com/nunnatsa/ginkgolinter/gomegahandler/handler.go @@ -0,0 +1,236 @@ +package gomegahandler + +import ( + "go/ast" + "go/token" +) + +const ( + importPath = `"github.com/onsi/gomega"` +) + +// Handler provides different handling, depending on the way gomega was imported: whether +// it was imported with the "." name, a custom name, or without any name. +type Handler interface { + // GetActualFuncName returns the name of the gomega function, e.g. `Expect` + GetActualFuncName(*ast.CallExpr) (string, bool) + // ReplaceFunction replaces the function with another one, for fix suggestions + ReplaceFunction(*ast.CallExpr, *ast.Ident) + + getDefFuncName(expr *ast.CallExpr) string + + getFieldType(field *ast.Field) string + + GetActualExpr(assertionFunc *ast.SelectorExpr) *ast.CallExpr +} + +// GetGomegaHandler returns a gomega handler according to the way gomega was imported in the specific file +func GetGomegaHandler(file *ast.File) Handler { + for _, imp := range file.Imports { + if imp.Path.Value != importPath { + continue + } + + switch name := imp.Name.String(); { + case name == ".": + return dotHandler{} + case name == "": // import with no local name + return nameHandler("gomega") + default: + return nameHandler(name) + } + } + + return nil // no gomega import; this file does not use gomega +} + +// dotHandler is used when importing gomega with dot; i.e. +// import . "github.com/onsi/gomega" +type dotHandler struct{} + +// GetActualFuncName returns the name of the gomega function, e.g. 
`Expect` +func (h dotHandler) GetActualFuncName(expr *ast.CallExpr) (string, bool) { + switch actualFunc := expr.Fun.(type) { + case *ast.Ident: + return actualFunc.Name, true + case *ast.SelectorExpr: + if isGomegaVar(actualFunc.X, h) { + return actualFunc.Sel.Name, true + } + + if x, ok := actualFunc.X.(*ast.CallExpr); ok { + return h.GetActualFuncName(x) + } + + case *ast.CallExpr: + return h.GetActualFuncName(actualFunc) + } + return "", false +} + +// ReplaceFunction replaces the function with another one, for fix suggestions +func (dotHandler) ReplaceFunction(caller *ast.CallExpr, newExpr *ast.Ident) { + caller.Fun = newExpr +} + +func (dotHandler) getDefFuncName(expr *ast.CallExpr) string { + if f, ok := expr.Fun.(*ast.Ident); ok { + return f.Name + } + return "" +} + +func (dotHandler) getFieldType(field *ast.Field) string { + switch t := field.Type.(type) { + case *ast.Ident: + return t.Name + case *ast.StarExpr: + if name, ok := t.X.(*ast.Ident); ok { + return name.Name + } + } + return "" +} + +// nameHandler is used when importing gomega without name; i.e. +// import "github.com/onsi/gomega" +// +// or with a custom name; e.g. +// import customname "github.com/onsi/gomega" +type nameHandler string + +// GetActualFuncName returns the name of the gomega function, e.g. `Expect` +func (g nameHandler) GetActualFuncName(expr *ast.CallExpr) (string, bool) { + selector, ok := expr.Fun.(*ast.SelectorExpr) + if !ok { + return "", false + } + + switch x := selector.X.(type) { + case *ast.Ident: + if x.Name != string(g) { + if !isGomegaVar(x, g) { + return "", false + } + } + + return selector.Sel.Name, true + + case *ast.CallExpr: + return g.GetActualFuncName(x) + } + + return "", false +} + +// ReplaceFunction replaces the function with another one, for fix suggestions +func (nameHandler) ReplaceFunction(caller *ast.CallExpr, newExpr *ast.Ident) { + caller.Fun.(*ast.SelectorExpr).Sel = newExpr +} + +func (g nameHandler) getDefFuncName(expr *ast.CallExpr) string { + if sel, ok := expr.Fun.(*ast.SelectorExpr); ok { + if f, ok := sel.X.(*ast.Ident); ok && f.Name == string(g) { + return sel.Sel.Name + } + } + return "" +} + +func (g nameHandler) getFieldType(field *ast.Field) string { + switch t := field.Type.(type) { + case *ast.SelectorExpr: + if id, ok := t.X.(*ast.Ident); ok { + if id.Name == string(g) { + return t.Sel.Name + } + } + case *ast.StarExpr: + if sel, ok := t.X.(*ast.SelectorExpr); ok { + if x, ok := sel.X.(*ast.Ident); ok && x.Name == string(g) { + return sel.Sel.Name + } + } + + } + return "" +} + +func isGomegaVar(x ast.Expr, handler Handler) bool { + if i, ok := x.(*ast.Ident); ok { + if i.Obj != nil && i.Obj.Kind == ast.Var { + switch decl := i.Obj.Decl.(type) { + case *ast.AssignStmt: + if decl.Tok == token.DEFINE { + if defFunc, ok := decl.Rhs[0].(*ast.CallExpr); ok { + fName := handler.getDefFuncName(defFunc) + switch fName { + case "NewGomega", "NewWithT", "NewGomegaWithT": + return true + } + } + } + case *ast.Field: + name := handler.getFieldType(decl) + switch name { + case "Gomega", "WithT", "GomegaWithT": + return true + } + } + } + } + return false +} + +func (h dotHandler) GetActualExpr(assertionFunc *ast.SelectorExpr) *ast.CallExpr { + actualExpr, ok := assertionFunc.X.(*ast.CallExpr) + if !ok { + return nil + } + + switch fun := actualExpr.Fun.(type) { + case *ast.Ident: + return actualExpr + case *ast.SelectorExpr: + if isHelperMethods(fun.Sel.Name) { + return h.GetActualExpr(fun) + } + if isGomegaVar(fun.X, h) { + return actualExpr + } + } + return nil +} 
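+
+// Both handler flavors expose GetActualExpr: given the selector of an assertion
+// method (e.g. the `.Should` in `Expect(x).Should(...)`), it walks back to the
+// underlying "actual" call such as `Expect(x)`, skipping chained helper methods
+// like WithOffset or WithTimeout along the way.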
+ +func (g nameHandler) GetActualExpr(assertionFunc *ast.SelectorExpr) *ast.CallExpr { + actualExpr, ok := assertionFunc.X.(*ast.CallExpr) + if !ok { + return nil + } + + switch fun := actualExpr.Fun.(type) { + case *ast.Ident: + return actualExpr + case *ast.SelectorExpr: + if x, ok := fun.X.(*ast.Ident); ok && x.Name == string(g) { + return actualExpr + } + if isHelperMethods(fun.Sel.Name) { + return g.GetActualExpr(fun) + } + + if isGomegaVar(fun.X, g) { + return actualExpr + } + } + return nil +} + +func isHelperMethods(funcName string) bool { + switch funcName { + case "WithOffset", "WithTimeout", "WithPolling", "Within", "ProbeEvery", "WithContext", "WithArguments", "MustPassRepeatedly": + return true + } + + return false +} diff --git a/vendor/github.com/nunnatsa/ginkgolinter/interfaces/interfaces.go b/vendor/github.com/nunnatsa/ginkgolinter/interfaces/interfaces.go new file mode 100644 index 0000000000..dafeacd4ff --- /dev/null +++ b/vendor/github.com/nunnatsa/ginkgolinter/interfaces/interfaces.go @@ -0,0 +1,76 @@ +package interfaces + +import ( + "go/token" + gotypes "go/types" +) + +var ( + errorType *gotypes.Interface + gomegaMatcherType *gotypes.Interface +) + +func init() { + errorType = gotypes.Universe.Lookup("error").Type().Underlying().(*gotypes.Interface) + gomegaMatcherType = generateTheGomegaMatcherInfType() +} + +// generateTheGomegaMatcherInfType generates a types.Interface instance that represents the +// GomegaMatcher interface. +// The original code is (copied from https://github.com/nunnatsa/ginkgolinter/blob/8fdd05eee922578d4699f49d267001c01e0b9f1e/testdata/src/a/vendor/github.com/onsi/gomega/types/types.go) +// +// type GomegaMatcher interface { +// Match(actual interface{}) (success bool, err error) +// FailureMessage(actual interface{}) (message string) +// NegatedFailureMessage(actual interface{}) (message string) +// } +func generateTheGomegaMatcherInfType() *gotypes.Interface { + err := gotypes.Universe.Lookup("error").Type() + bl := gotypes.Typ[gotypes.Bool] + str := gotypes.Typ[gotypes.String] + anyType := gotypes.Universe.Lookup("any").Type() + + return gotypes.NewInterfaceType([]*gotypes.Func{ + // Match(actual interface{}) (success bool, err error) + gotypes.NewFunc(token.NoPos, nil, "Match", gotypes.NewSignatureType( + nil, nil, nil, + gotypes.NewTuple( + gotypes.NewVar(token.NoPos, nil, "actual", anyType), + ), + gotypes.NewTuple( + gotypes.NewVar(token.NoPos, nil, "", bl), + gotypes.NewVar(token.NoPos, nil, "", err), + ), false), + ), + // FailureMessage(actual interface{}) (message string) + gotypes.NewFunc(token.NoPos, nil, "FailureMessage", gotypes.NewSignatureType( + nil, nil, nil, + gotypes.NewTuple( + gotypes.NewVar(token.NoPos, nil, "", anyType), + ), + gotypes.NewTuple( + gotypes.NewVar(token.NoPos, nil, "", str), + ), + false), + ), + //NegatedFailureMessage(actual interface{}) (message string) + gotypes.NewFunc(token.NoPos, nil, "NegatedFailureMessage", gotypes.NewSignatureType( + nil, nil, nil, + gotypes.NewTuple( + gotypes.NewVar(token.NoPos, nil, "", anyType), + ), + gotypes.NewTuple( + gotypes.NewVar(token.NoPos, nil, "", str), + ), + false), + ), + }, nil) +} + +func ImplementsError(t gotypes.Type) bool { + return gotypes.Implements(t, errorType) +} + +func ImplementsGomegaMatcher(t gotypes.Type) bool { + return gotypes.Implements(t, gomegaMatcherType) +} diff --git a/vendor/github.com/nunnatsa/ginkgolinter/reverseassertion/reverse_assertion.go b/vendor/github.com/nunnatsa/ginkgolinter/reverseassertion/reverse_assertion.go new file 
mode 100644 index 0000000000..1dbd898106 --- /dev/null +++ b/vendor/github.com/nunnatsa/ginkgolinter/reverseassertion/reverse_assertion.go @@ -0,0 +1,42 @@ +package reverseassertion + +import "go/token" + +var reverseLogicAssertions = map[string]string{ + "To": "ToNot", + "ToNot": "To", + "NotTo": "To", + "Should": "ShouldNot", + "ShouldNot": "Should", +} + +// ChangeAssertionLogic get gomega assertion function name, and returns the reverse logic function name +func ChangeAssertionLogic(funcName string) string { + if revFunc, ok := reverseLogicAssertions[funcName]; ok { + return revFunc + } + return funcName +} + +func IsNegativeLogic(funcName string) bool { + switch funcName { + case "ToNot", "NotTo", "ShouldNot": + return true + } + return false +} + +var reverseCompareOperators = map[token.Token]token.Token{ + token.LSS: token.GTR, + token.GTR: token.LSS, + token.LEQ: token.GEQ, + token.GEQ: token.LEQ, +} + +// ChangeCompareOperator return the reversed comparison operator +func ChangeCompareOperator(op token.Token) token.Token { + if revOp, ok := reverseCompareOperators[op]; ok { + return revOp + } + return op +} diff --git a/vendor/github.com/nunnatsa/ginkgolinter/types/boolean.go b/vendor/github.com/nunnatsa/ginkgolinter/types/boolean.go new file mode 100644 index 0000000000..be510c4e95 --- /dev/null +++ b/vendor/github.com/nunnatsa/ginkgolinter/types/boolean.go @@ -0,0 +1,32 @@ +package types + +import ( + "errors" + "strings" +) + +// Boolean is a bool, implementing the flag.Value interface, to be used as a flag var. +type Boolean bool + +func (b *Boolean) Set(value string) error { + if b == nil { + return errors.New("trying to set nil parameter") + } + switch strings.ToLower(value) { + case "true": + *b = true + case "false": + *b = false + default: + return errors.New(value + " is not a Boolean value") + + } + return nil +} + +func (b Boolean) String() string { + if b { + return "true" + } + return "false" +} diff --git a/vendor/github.com/nunnatsa/ginkgolinter/types/config.go b/vendor/github.com/nunnatsa/ginkgolinter/types/config.go new file mode 100644 index 0000000000..6de22738dd --- /dev/null +++ b/vendor/github.com/nunnatsa/ginkgolinter/types/config.go @@ -0,0 +1,94 @@ +package types + +import ( + "strings" + + "go/ast" +) + +const ( + suppressPrefix = "ginkgo-linter:" + suppressLengthAssertionWarning = suppressPrefix + "ignore-len-assert-warning" + suppressNilAssertionWarning = suppressPrefix + "ignore-nil-assert-warning" + suppressErrAssertionWarning = suppressPrefix + "ignore-err-assert-warning" + suppressCompareAssertionWarning = suppressPrefix + "ignore-compare-assert-warning" + suppressAsyncAsertWarning = suppressPrefix + "ignore-async-assert-warning" + suppressFocusContainerWarning = suppressPrefix + "ignore-focus-container-warning" + suppressTypeCompareWarning = suppressPrefix + "ignore-type-compare-warning" +) + +type Config struct { + SuppressLen Boolean + SuppressNil Boolean + SuppressErr Boolean + SuppressCompare Boolean + SuppressAsync Boolean + ForbidFocus Boolean + SuppressTypeCompare Boolean + AllowHaveLen0 Boolean +} + +func (s *Config) AllTrue() bool { + return bool(s.SuppressLen && s.SuppressNil && s.SuppressErr && s.SuppressCompare && s.SuppressAsync && !s.ForbidFocus) +} + +func (s *Config) Clone() Config { + return Config{ + SuppressLen: s.SuppressLen, + SuppressNil: s.SuppressNil, + SuppressErr: s.SuppressErr, + SuppressCompare: s.SuppressCompare, + SuppressAsync: s.SuppressAsync, + ForbidFocus: s.ForbidFocus, + SuppressTypeCompare: 
s.SuppressTypeCompare, + AllowHaveLen0: s.AllowHaveLen0, + } +} + +func (s *Config) UpdateFromComment(commentGroup []*ast.CommentGroup) { + for _, cmntList := range commentGroup { + if s.AllTrue() { + break + } + + for _, cmnt := range cmntList.List { + commentLines := strings.Split(cmnt.Text, "\n") + for _, comment := range commentLines { + comment = strings.TrimPrefix(comment, "//") + comment = strings.TrimPrefix(comment, "/*") + comment = strings.TrimSuffix(comment, "*/") + comment = strings.TrimSpace(comment) + + switch comment { + case suppressLengthAssertionWarning: + s.SuppressLen = true + case suppressNilAssertionWarning: + s.SuppressNil = true + case suppressErrAssertionWarning: + s.SuppressErr = true + case suppressCompareAssertionWarning: + s.SuppressCompare = true + case suppressAsyncAsertWarning: + s.SuppressAsync = true + case suppressFocusContainerWarning: + s.ForbidFocus = false + case suppressTypeCompareWarning: + s.SuppressTypeCompare = true + } + } + } + } +} + +func (s *Config) UpdateFromFile(cm ast.CommentMap) { + + for key, commentGroup := range cm { + if s.AllTrue() { + break + } + + if _, ok := key.(*ast.GenDecl); ok { + s.UpdateFromComment(commentGroup) + } + } +} diff --git a/vendor/github.com/nunnatsa/ginkgolinter/version/version.go b/vendor/github.com/nunnatsa/ginkgolinter/version/version.go new file mode 100644 index 0000000000..7bf181a8e8 --- /dev/null +++ b/vendor/github.com/nunnatsa/ginkgolinter/version/version.go @@ -0,0 +1,14 @@ +package version + +var ( + version = "unknown" + gitHash = "unknown" +) + +func Version() string { + return version +} + +func GitHash() string { + return gitHash +} diff --git a/vendor/github.com/pelletier/go-toml/v2/.dockerignore b/vendor/github.com/pelletier/go-toml/v2/.dockerignore new file mode 100644 index 0000000000..7b5883475d --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/.dockerignore @@ -0,0 +1,2 @@ +cmd/tomll/tomll +cmd/tomljson/tomljson diff --git a/vendor/github.com/pelletier/go-toml/v2/.gitattributes b/vendor/github.com/pelletier/go-toml/v2/.gitattributes new file mode 100644 index 0000000000..34a0a21a36 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/.gitattributes @@ -0,0 +1,4 @@ +* text=auto + +benchmark/benchmark.toml text eol=lf +testdata/** text eol=lf diff --git a/vendor/github.com/pelletier/go-toml/v2/.gitignore b/vendor/github.com/pelletier/go-toml/v2/.gitignore new file mode 100644 index 0000000000..a69e2b0ebd --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/.gitignore @@ -0,0 +1,6 @@ +test_program/test_program_bin +fuzz/ +cmd/tomll/tomll +cmd/tomljson/tomljson +cmd/tomltestgen/tomltestgen +dist \ No newline at end of file diff --git a/vendor/github.com/pelletier/go-toml/v2/.golangci.toml b/vendor/github.com/pelletier/go-toml/v2/.golangci.toml new file mode 100644 index 0000000000..067db55174 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/.golangci.toml @@ -0,0 +1,84 @@ +[service] +golangci-lint-version = "1.39.0" + +[linters-settings.wsl] +allow-assign-and-anything = true + +[linters-settings.exhaustive] +default-signifies-exhaustive = true + +[linters] +disable-all = true +enable = [ + "asciicheck", + "bodyclose", + "cyclop", + "deadcode", + "depguard", + "dogsled", + "dupl", + "durationcheck", + "errcheck", + "errorlint", + "exhaustive", + # "exhaustivestruct", + "exportloopref", + "forbidigo", + # "forcetypeassert", + "funlen", + "gci", + # "gochecknoglobals", + "gochecknoinits", + "gocognit", + "goconst", + "gocritic", + "gocyclo", + "godot", + "godox", + # 
"goerr113", + "gofmt", + "gofumpt", + "goheader", + "goimports", + "golint", + "gomnd", + # "gomoddirectives", + "gomodguard", + "goprintffuncname", + "gosec", + "gosimple", + "govet", + # "ifshort", + "importas", + "ineffassign", + "lll", + "makezero", + "misspell", + "nakedret", + "nestif", + "nilerr", + # "nlreturn", + "noctx", + "nolintlint", + #"paralleltest", + "prealloc", + "predeclared", + "revive", + "rowserrcheck", + "sqlclosecheck", + "staticcheck", + "structcheck", + "stylecheck", + # "testpackage", + "thelper", + "tparallel", + "typecheck", + "unconvert", + "unparam", + "unused", + "varcheck", + "wastedassign", + "whitespace", + # "wrapcheck", + # "wsl" +] diff --git a/vendor/github.com/pelletier/go-toml/v2/.goreleaser.yaml b/vendor/github.com/pelletier/go-toml/v2/.goreleaser.yaml new file mode 100644 index 0000000000..3aa1840ec4 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/.goreleaser.yaml @@ -0,0 +1,123 @@ +before: + hooks: + - go mod tidy + - go fmt ./... + - go test ./... +builds: + - id: tomll + main: ./cmd/tomll + binary: tomll + env: + - CGO_ENABLED=0 + flags: + - -trimpath + ldflags: + - -X main.version={{.Version}} -X main.commit={{.Commit}} -X main.date={{.CommitDate}} + mod_timestamp: '{{ .CommitTimestamp }}' + targets: + - linux_amd64 + - linux_arm64 + - linux_arm + - windows_amd64 + - windows_arm64 + - windows_arm + - darwin_amd64 + - darwin_arm64 + - id: tomljson + main: ./cmd/tomljson + binary: tomljson + env: + - CGO_ENABLED=0 + flags: + - -trimpath + ldflags: + - -X main.version={{.Version}} -X main.commit={{.Commit}} -X main.date={{.CommitDate}} + mod_timestamp: '{{ .CommitTimestamp }}' + targets: + - linux_amd64 + - linux_arm64 + - linux_arm + - windows_amd64 + - windows_arm64 + - windows_arm + - darwin_amd64 + - darwin_arm64 + - id: jsontoml + main: ./cmd/jsontoml + binary: jsontoml + env: + - CGO_ENABLED=0 + flags: + - -trimpath + ldflags: + - -X main.version={{.Version}} -X main.commit={{.Commit}} -X main.date={{.CommitDate}} + mod_timestamp: '{{ .CommitTimestamp }}' + targets: + - linux_amd64 + - linux_arm64 + - linux_arm + - windows_amd64 + - windows_arm64 + - windows_arm + - darwin_amd64 + - darwin_arm64 +universal_binaries: + - id: tomll + replace: true + name_template: tomll + - id: tomljson + replace: true + name_template: tomljson + - id: jsontoml + replace: true + name_template: jsontoml +archives: +- id: jsontoml + format: tar.xz + builds: + - jsontoml + files: + - none* + name_template: "{{ .Binary }}_{{.Version}}_{{ .Os }}_{{ .Arch }}" +- id: tomljson + format: tar.xz + builds: + - tomljson + files: + - none* + name_template: "{{ .Binary }}_{{.Version}}_{{ .Os }}_{{ .Arch }}" +- id: tomll + format: tar.xz + builds: + - tomll + files: + - none* + name_template: "{{ .Binary }}_{{.Version}}_{{ .Os }}_{{ .Arch }}" +dockers: + - id: tools + goos: linux + goarch: amd64 + ids: + - jsontoml + - tomljson + - tomll + image_templates: + - "ghcr.io/pelletier/go-toml:latest" + - "ghcr.io/pelletier/go-toml:{{ .Tag }}" + - "ghcr.io/pelletier/go-toml:v{{ .Major }}" + skip_push: false +checksum: + name_template: 'sha256sums.txt' +snapshot: + name_template: "{{ incpatch .Version }}-next" +release: + github: + owner: pelletier + name: go-toml + draft: true + prerelease: auto + mode: replace +changelog: + use: github-native +announce: + skip: true diff --git a/vendor/github.com/pelletier/go-toml/v2/CONTRIBUTING.md b/vendor/github.com/pelletier/go-toml/v2/CONTRIBUTING.md new file mode 100644 index 0000000000..04dd12bcbc --- /dev/null +++ 
b/vendor/github.com/pelletier/go-toml/v2/CONTRIBUTING.md @@ -0,0 +1,196 @@ +# Contributing + +Thank you for your interest in go-toml! We appreciate you considering +contributing to go-toml! + +The main goal of the project is to provide an easy-to-use and efficient TOML +implementation for Go that gets the job done and gets out of your way – dealing +with TOML is probably not the central piece of your project. + +As the single maintainer of go-toml, time is scarce. All help, big or small, is +more than welcome! + +## Ask questions + +Any question you may have, somebody else might have it too. Always feel free to +ask them on the [discussion board][discussions]. We will try to answer them as +clearly and quickly as possible, time permitting. + +Asking questions also helps us identify areas where the documentation needs +improvement, or new features that weren't envisioned before. Sometimes, a +seemingly innocent question leads to the fix of a bug. Don't hesitate and ask +away! + +[discussions]: https://github.com/pelletier/go-toml/discussions + +## Improve the documentation + +The best way to share your knowledge and experience with go-toml is to improve +the documentation. Fix a typo, clarify an interface, add an example, anything +goes! + +The documentation is present in the [README][readme] and throughout the source +code. On release, it gets updated on [pkg.go.dev][pkg.go.dev]. To make a change +to the documentation, create a pull request with your proposed changes. For +simple changes like that, the easiest way to go is probably the "Fork this +project and edit the file" button on Github, displayed at the top right of the +file. Unless it's a trivial change (for example a typo), provide a little bit of +context in your pull request description or commit message. + +## Report a bug + +Found a bug! Sorry to hear that :(. Help us and others track them down and fix +them by reporting it. [File a new bug report][bug-report] on the [issues +tracker][issues-tracker]. The template should provide enough guidance on what to +include. When in doubt: add more details! By reducing ambiguity and providing +more information, it decreases back and forth and saves everyone time. + +## Code changes + +Want to contribute a patch? Very happy to hear that! + +First, some high-level rules: + +- A short proposal with some POC code is better than a lengthy piece of text + with no code. Code speaks louder than words. That being said, bigger changes + should probably start with a [discussion][discussions]. +- No backward-incompatible patch will be accepted unless discussed. Sometimes + it's hard, but we try not to break people's programs unless we absolutely have + to. +- If you are writing a new feature or extending an existing one, make sure to + write some documentation. +- Bug fixes need to be accompanied by regression tests. +- New code needs to be tested. +- Your commit messages need to explain why the change is needed, even if already + included in the PR description. + +It does sound like a lot, but those best practices are here to save time overall +and continuously improve the quality of the project, which is something everyone +benefits from. + +### Get started + +The fairly standard code contribution process looks like this: + +1. [Fork the project][fork]. +2. Make your changes, commit on any branch you like. +3. [Open up a pull request][pull-request] +4. Review, potentially ask for changes. +5. Merge. + +Feel free to ask for help! You can create draft pull requests to gather +some early feedback! 
+ +### Run the tests + +You can run tests for go-toml using Go's test tool: `go test -race ./...`. + +During the pull request process, all tests will be run on Linux, Windows, and +MacOS on the last two versions of Go. + +However, given GitHub's new policy to _not_ run Actions on pull requests until a +maintainer clicks on a button, it is highly recommended that you run them locally +as you make changes. + +### Check coverage + +We use `go tool cover` to compute test coverage. Most code editors have a way to +run and display code coverage, but at the end of the day, we do this: + +``` +go test -covermode=atomic -coverprofile=coverage.out +go tool cover -func=coverage.out +``` + +and verify that the overall percentage of tested code does not go down. This is +a requirement. As a rule of thumb, all lines of code touched by your changes +should be covered. On Unix you can use `./ci.sh coverage -d v2` to check if your +code lowers the coverage. + +### Verify performance + +Go-toml aims to stay efficient. We rely on a set of scenarios executed with Go's +builtin benchmark systems. Because of their noisy nature, containers provided by +Github Actions cannot be reliably used for benchmarking. As a result, you are +responsible for checking that your changes do not incur a performance penalty. +You can run the following to execute benchmarks: + +``` +go test ./... -bench=. -count=10 +``` + +Benchmark results should be compared against each other with +[benchstat][benchstat]. A typical flow looks like this: + +1. On the `v2` branch, run `go test ./... -bench=. -count 10` and save output to + a file (for example `old.txt`). +2. Make some code changes. +3. Run `go test ....` again, and save the output to another file (for example + `new.txt`). +4. Run `benchstat old.txt new.txt` to check that time/op does not go up in any + test. + +On Unix you can use `./ci.sh benchmark -d v2` to verify how your code impacts +performance. + +It is highly encouraged to add the benchstat results to your pull request +description. Pull requests that lower performance will receive more scrutiny. + +[benchstat]: https://pkg.go.dev/golang.org/x/perf/cmd/benchstat + +### Style + +Try to look around and follow the same format and structure as the rest of the +code. We enforce using `go fmt` on the whole code base. + +--- + +## Maintainers-only + +### Merge pull request + +Checklist: + +- Passing CI. +- Does not introduce backward-incompatible changes (unless discussed). +- Has relevant doc changes. +- Benchstat does not show performance regression. +- Pull request is [labeled appropriately][pr-labels]. +- Title will be understandable in the changelog. + +1. Merge using "squash and merge". +2. Make sure to edit the commit message to keep all the useful information + nice and clean. +3. Make sure the commit title is clear and contains the PR number (#123). + +### New release + +1. Decide on the next version number. Use semver. +2. Generate release notes using [`gh`][gh]. Example: +``` +$ gh api -X POST \ + -F tag_name='v2.0.0-beta.5' \ + -F target_commitish='v2' \ + -F previous_tag_name='v2.0.0-beta.4' \ + --jq '.body' \ + repos/pelletier/go-toml/releases/generate-notes +``` +3. Look for "Other changes". That would indicate a pull request not labeled + properly. Tweak labels and pull request titles until changelog looks good for + users. +4. [Draft new release][new-release]. +5. Fill tag and target with the same value used to generate the changelog. +6. Set title to the new tag value. +7. Paste the generated changelog. +8. 
Check "create discussion", in the "Releases" category. +9. Check pre-release if new version is an alpha or beta. + +[issues-tracker]: https://github.com/pelletier/go-toml/issues +[bug-report]: https://github.com/pelletier/go-toml/issues/new?template=bug_report.md +[pkg.go.dev]: https://pkg.go.dev/github.com/pelletier/go-toml +[readme]: ./README.md +[fork]: https://help.github.com/articles/fork-a-repo +[pull-request]: https://help.github.com/en/articles/creating-a-pull-request +[new-release]: https://github.com/pelletier/go-toml/releases/new +[gh]: https://github.com/cli/cli +[pr-labels]: https://github.com/pelletier/go-toml/blob/v2/.github/release.yml diff --git a/vendor/github.com/pelletier/go-toml/v2/Dockerfile b/vendor/github.com/pelletier/go-toml/v2/Dockerfile new file mode 100644 index 0000000000..b9e9332379 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/Dockerfile @@ -0,0 +1,5 @@ +FROM scratch +ENV PATH "$PATH:/bin" +COPY tomll /bin/tomll +COPY tomljson /bin/tomljson +COPY jsontoml /bin/jsontoml diff --git a/vendor/github.com/pelletier/go-toml/v2/LICENSE b/vendor/github.com/pelletier/go-toml/v2/LICENSE new file mode 100644 index 0000000000..6839d51cd4 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 - 2022 Thomas Pelletier, Eric Anderton + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/pelletier/go-toml/v2/README.md b/vendor/github.com/pelletier/go-toml/v2/README.md new file mode 100644 index 0000000000..a63c3a7967 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/README.md @@ -0,0 +1,552 @@ +# go-toml v2 + +Go library for the [TOML](https://toml.io/en/) format. + +This library supports [TOML v1.0.0](https://toml.io/en/v1.0.0). + +[🐞 Bug Reports](https://github.com/pelletier/go-toml/issues) + +[💬 Anything else](https://github.com/pelletier/go-toml/discussions) + +## Documentation + +Full API, examples, and implementation notes are available in the Go +documentation. + +[![Go Reference](https://pkg.go.dev/badge/github.com/pelletier/go-toml/v2.svg)](https://pkg.go.dev/github.com/pelletier/go-toml/v2) + +## Import + +```go +import "github.com/pelletier/go-toml/v2" +``` + +See [Modules](#Modules). + +## Features + +### Stdlib behavior + +As much as possible, this library is designed to behave similarly as the +standard library's `encoding/json`. + +### Performance + +While go-toml favors usability, it is written with performance in mind. Most +operations should not be shockingly slow. 
See [benchmarks](#benchmarks). + +### Strict mode + +`Decoder` can be set to "strict mode", which makes it error when some parts of +the TOML document was not present in the target structure. This is a great way +to check for typos. [See example in the documentation][strict]. + +[strict]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#example-Decoder.DisallowUnknownFields + +### Contextualized errors + +When most decoding errors occur, go-toml returns [`DecodeError`][decode-err]), +which contains a human readable contextualized version of the error. For +example: + +``` +2| key1 = "value1" +3| key2 = "missing2" + | ~~~~ missing field +4| key3 = "missing3" +5| key4 = "value4" +``` + +[decode-err]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#DecodeError + +### Local date and time support + +TOML supports native [local date/times][ldt]. It allows to represent a given +date, time, or date-time without relation to a timezone or offset. To support +this use-case, go-toml provides [`LocalDate`][tld], [`LocalTime`][tlt], and +[`LocalDateTime`][tldt]. Those types can be transformed to and from `time.Time`, +making them convenient yet unambiguous structures for their respective TOML +representation. + +[ldt]: https://toml.io/en/v1.0.0#local-date-time +[tld]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#LocalDate +[tlt]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#LocalTime +[tldt]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#LocalDateTime + +## Getting started + +Given the following struct, let's see how to read it and write it as TOML: + +```go +type MyConfig struct { + Version int + Name string + Tags []string +} +``` + +### Unmarshaling + +[`Unmarshal`][unmarshal] reads a TOML document and fills a Go structure with its +content. For example: + +```go +doc := ` +version = 2 +name = "go-toml" +tags = ["go", "toml"] +` + +var cfg MyConfig +err := toml.Unmarshal([]byte(doc), &cfg) +if err != nil { + panic(err) +} +fmt.Println("version:", cfg.Version) +fmt.Println("name:", cfg.Name) +fmt.Println("tags:", cfg.Tags) + +// Output: +// version: 2 +// name: go-toml +// tags: [go toml] +``` + +[unmarshal]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#Unmarshal + +### Marshaling + +[`Marshal`][marshal] is the opposite of Unmarshal: it represents a Go structure +as a TOML document: + +```go +cfg := MyConfig{ + Version: 2, + Name: "go-toml", + Tags: []string{"go", "toml"}, +} + +b, err := toml.Marshal(cfg) +if err != nil { + panic(err) +} +fmt.Println(string(b)) + +// Output: +// Version = 2 +// Name = 'go-toml' +// Tags = ['go', 'toml'] +``` + +[marshal]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#Marshal + +## Benchmarks + +Execution time speedup compared to other Go TOML libraries: + + + + + + + + + + + + + +
Benchmarkgo-toml v1BurntSushi/toml
Marshal/HugoFrontMatter-21.9x1.9x
Marshal/ReferenceFile/map-21.7x1.8x
Marshal/ReferenceFile/struct-22.2x2.5x
Unmarshal/HugoFrontMatter-22.9x2.9x
Unmarshal/ReferenceFile/map-22.6x2.9x
Unmarshal/ReferenceFile/struct-24.4x5.3x
+
See more +

The table above has the results of the most common use-cases. The table below +contains the results of all benchmarks, including unrealistic ones. It is +provided for completeness.

+ + + + + + + + + + + + + + + + + + +
Benchmarkgo-toml v1BurntSushi/toml
Marshal/SimpleDocument/map-21.8x2.9x
Marshal/SimpleDocument/struct-22.7x4.2x
Unmarshal/SimpleDocument/map-24.5x3.1x
Unmarshal/SimpleDocument/struct-26.2x3.9x
UnmarshalDataset/example-23.1x3.5x
UnmarshalDataset/code-22.3x3.1x
UnmarshalDataset/twitter-22.5x2.6x
UnmarshalDataset/citm_catalog-22.1x2.2x
UnmarshalDataset/canada-21.6x1.3x
UnmarshalDataset/config-24.3x3.2x
[Geo mean]2.7x2.8x
+

This table can be generated with ./ci.sh benchmark -a -html.

+
+ +## Modules + +go-toml uses Go's standard modules system. + +Installation instructions: + +- Go ≥ 1.16: Nothing to do. Use the import in your code. The `go` command deals + with it automatically. +- Go ≥ 1.13: `GO111MODULE=on go get github.com/pelletier/go-toml/v2`. + +In case of trouble: [Go Modules FAQ][mod-faq]. + +[mod-faq]: https://github.com/golang/go/wiki/Modules#why-does-installing-a-tool-via-go-get-fail-with-error-cannot-find-main-module + +## Tools + +Go-toml provides three handy command line tools: + + * `tomljson`: Reads a TOML file and outputs its JSON representation. + + ``` + $ go install github.com/pelletier/go-toml/v2/cmd/tomljson@latest + $ tomljson --help + ``` + + * `jsontoml`: Reads a JSON file and outputs a TOML representation. + + ``` + $ go install github.com/pelletier/go-toml/v2/cmd/jsontoml@latest + $ jsontoml --help + ``` + + * `tomll`: Lints and reformats a TOML file. + + ``` + $ go install github.com/pelletier/go-toml/v2/cmd/tomll@latest + $ tomll --help + ``` + +### Docker image + +Those tools are also available as a [Docker image][docker]. For example, to use +`tomljson`: + +``` +docker run -i ghcr.io/pelletier/go-toml:v2 tomljson < example.toml +``` + +Multiple versions are availble on [ghcr.io][docker]. + +[docker]: https://github.com/pelletier/go-toml/pkgs/container/go-toml + +## Migrating from v1 + +This section describes the differences between v1 and v2, with some pointers on +how to get the original behavior when possible. + +### Decoding / Unmarshal + +#### Automatic field name guessing + +When unmarshaling to a struct, if a key in the TOML document does not exactly +match the name of a struct field or any of the `toml`-tagged field, v1 tries +multiple variations of the key ([code][v1-keys]). + +V2 instead does a case-insensitive matching, like `encoding/json`. + +This could impact you if you are relying on casing to differentiate two fields, +and one of them is a not using the `toml` struct tag. The recommended solution +is to be specific about tag names for those fields using the `toml` struct tag. + +[v1-keys]: https://github.com/pelletier/go-toml/blob/a2e52561804c6cd9392ebf0048ca64fe4af67a43/marshal.go#L775-L781 + +#### Ignore preexisting value in interface + +When decoding into a non-nil `interface{}`, go-toml v1 uses the type of the +element in the interface to decode the object. For example: + +```go +type inner struct { + B interface{} +} +type doc struct { + A interface{} +} + +d := doc{ + A: inner{ + B: "Before", + }, +} + +data := ` +[A] +B = "After" +` + +toml.Unmarshal([]byte(data), &d) +fmt.Printf("toml v1: %#v\n", d) + +// toml v1: main.doc{A:main.inner{B:"After"}} +``` + +In this case, field `A` is of type `interface{}`, containing a `inner` struct. +V1 sees that type and uses it when decoding the object. + +When decoding an object into an `interface{}`, V2 instead disregards whatever +value the `interface{}` may contain and replaces it with a +`map[string]interface{}`. With the same data structure as above, here is what +the result looks like: + +```go +toml.Unmarshal([]byte(data), &d) +fmt.Printf("toml v2: %#v\n", d) + +// toml v2: main.doc{A:map[string]interface {}{"B":"After"}} +``` + +This is to match `encoding/json`'s behavior. There is no way to make the v2 +decoder behave like v1. + +#### Values out of array bounds ignored + +When decoding into an array, v1 returns an error when the number of elements +contained in the doc is superior to the capacity of the array. 
For example: + +```go +type doc struct { + A [2]string +} +d := doc{} +err := toml.Unmarshal([]byte(`A = ["one", "two", "many"]`), &d) +fmt.Println(err) + +// (1, 1): unmarshal: TOML array length (3) exceeds destination array length (2) +``` + +In the same situation, v2 ignores the last value: + +```go +err := toml.Unmarshal([]byte(`A = ["one", "two", "many"]`), &d) +fmt.Println("err:", err, "d:", d) +// err: d: {[one two]} +``` + +This is to match `encoding/json`'s behavior. There is no way to make the v2 +decoder behave like v1. + +#### Support for `toml.Unmarshaler` has been dropped + +This method was not widely used, poorly defined, and added a lot of complexity. +A similar effect can be achieved by implementing the `encoding.TextUnmarshaler` +interface and use strings. + +#### Support for `default` struct tag has been dropped + +This feature adds complexity and a poorly defined API for an effect that can be +accomplished outside of the library. + +It does not seem like other format parsers in Go support that feature (the +project referenced in the original ticket #202 has not been updated since 2017). +Given that go-toml v2 should not touch values not in the document, the same +effect can be achieved by pre-filling the struct with defaults (libraries like +[go-defaults][go-defaults] can help). Also, string representation is not well +defined for all types: it creates issues like #278. + +The recommended replacement is pre-filling the struct before unmarshaling. + +[go-defaults]: https://github.com/mcuadros/go-defaults + +#### `toml.Tree` replacement + +This structure was the initial attempt at providing a document model for +go-toml. It allows manipulating the structure of any document, encoding and +decoding from their TOML representation. While a more robust feature was +initially planned in go-toml v2, this has been ultimately [removed from +scope][nodoc] of this library, with no plan to add it back at the moment. The +closest equivalent at the moment would be to unmarshal into an `interface{}` and +use type assertions and/or reflection to manipulate the arbitrary +structure. However this would fall short of providing all of the TOML features +such as adding comments and be specific about whitespace. + + +#### `toml.Position` are not retrievable anymore + +The API for retrieving the position (line, column) of a specific TOML element do +not exist anymore. This was done to minimize the amount of concepts introduced +by the library (query path), and avoid the performance hit related to storing +positions in the absence of a document model, for a feature that seemed to have +little use. Errors however have gained more detailed position +information. Position retrieval seems better fitted for a document model, which +has been [removed from the scope][nodoc] of go-toml v2 at the moment. + +### Encoding / Marshal + +#### Default struct fields order + +V1 emits struct fields order alphabetically by default. V2 struct fields are +emitted in order they are defined. For example: + +```go +type S struct { + B string + A string +} + +data := S{ + B: "B", + A: "A", +} + +b, _ := tomlv1.Marshal(data) +fmt.Println("v1:\n" + string(b)) + +b, _ = tomlv2.Marshal(data) +fmt.Println("v2:\n" + string(b)) + +// Output: +// v1: +// A = "A" +// B = "B" + +// v2: +// B = 'B' +// A = 'A' +``` + +There is no way to make v2 encoder behave like v1. A workaround could be to +manually sort the fields alphabetically in the struct definition, or generate +struct types using `reflect.StructOf`. 
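+
+For illustration only, here is a minimal sketch of that `reflect.StructOf`
+workaround (the `buildSortedStruct` helper name is hypothetical, not a go-toml
+API):
+
+```go
+import (
+    "reflect"
+    "sort"
+)
+
+// buildSortedStruct copies the exported fields of a struct value into a new
+// struct type whose fields are ordered alphabetically, so that v2's
+// declaration-order output matches v1's alphabetical output.
+func buildSortedStruct(v interface{}) interface{} {
+    rv := reflect.ValueOf(v)
+    rt := rv.Type()
+
+    fields := make([]reflect.StructField, rt.NumField())
+    for i := range fields {
+        fields[i] = rt.Field(i)
+    }
+    sort.Slice(fields, func(i, j int) bool { return fields[i].Name < fields[j].Name })
+
+    sorted := reflect.New(reflect.StructOf(fields)).Elem()
+    for _, f := range fields {
+        sorted.FieldByName(f.Name).Set(rv.FieldByName(f.Name))
+    }
+    return sorted.Interface()
+}
+
+// b, _ := tomlv2.Marshal(buildSortedStruct(S{B: "B", A: "A"}))
+// // A = 'A'
+// // B = 'B'
+```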
+ +#### No indentation by default + +V1 automatically indents content of tables by default. V2 does not. However, the +same behavior can be obtained using [`Encoder.SetIndentTables`][sit]. For example: + +```go +data := map[string]interface{}{ + "table": map[string]string{ + "key": "value", + }, +} + +b, _ := tomlv1.Marshal(data) +fmt.Println("v1:\n" + string(b)) + +b, _ = tomlv2.Marshal(data) +fmt.Println("v2:\n" + string(b)) + +buf := bytes.Buffer{} +enc := tomlv2.NewEncoder(&buf) +enc.SetIndentTables(true) +enc.Encode(data) +fmt.Println("v2 Encoder:\n" + string(buf.Bytes())) + +// Output: +// v1: +// +// [table] +// key = "value" +// +// v2: +// [table] +// key = 'value' +// +// +// v2 Encoder: +// [table] +// key = 'value' +``` + +[sit]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#Encoder.SetIndentTables + +#### Keys and strings are single quoted + +V1 always uses double quotes (`"`) around strings and keys that cannot be +represented bare (unquoted). V2 uses single quotes instead by default (`'`), +unless a character cannot be represented, in which case it falls back to double +quotes. As a result of this change, `Encoder.QuoteMapKeys` has been removed, as +it is not useful anymore. + +There is no way to make v2 encoder behave like v1. + +#### `TextMarshaler` emits as a string, not TOML + +Types that implement [`encoding.TextMarshaler`][tm] can emit arbitrary TOML in +v1. The encoder would append the result to the output directly. In v2 the result +is wrapped in a string. As a result, this interface cannot be implemented by the +root object. + +There is no way to make v2 encoder behave like v1. + +[tm]: https://golang.org/pkg/encoding/#TextMarshaler + +#### `Encoder.CompactComments` has been removed + +Emitting compact comments is now the default behavior of go-toml. This option +is not necessary anymore. + +#### Struct tags have been merged + +V1 used to provide multiple struct tags: `comment`, `commented`, `multiline`, +`toml`, and `omitempty`. To behave more like the standard library, v2 has merged +`toml`, `multiline`, and `omitempty`. For example: + +```go +type doc struct { + // v1 + F string `toml:"field" multiline:"true" omitempty:"true"` + // v2 + F string `toml:"field,multiline,omitempty"` +} +``` + +As a result, the `Encoder.SetTag*` methods have been removed, as there is just +one tag now. + + +#### `commented` tag has been removed + +There is no replacement for the `commented` tag. This feature would be better +suited in a proper document model for go-toml v2, which has been [cut from +scope][nodoc] at the moment. + +#### `Encoder.ArraysWithOneElementPerLine` has been renamed + +The new name is `Encoder.SetArraysMultiline`. The behavior should be the same. + +#### `Encoder.Indentation` has been renamed + +The new name is `Encoder.SetIndentSymbol`. The behavior should be the same. + + +#### Embedded structs behave like stdlib + +V1 defaults to merging embedded struct fields into the embedding struct. This +behavior was unexpected because it does not follow the standard library. To +avoid breaking backward compatibility, the `Encoder.PromoteAnonymous` method was +added to make the encoder behave correctly. Given backward compatibility is not +a problem anymore, v2 does the right thing by default: it follows the behavior +of `encoding/json`. `Encoder.PromoteAnonymous` has been removed. + +[nodoc]: https://github.com/pelletier/go-toml/discussions/506#discussioncomment-1526038 + +### `query` + +go-toml v1 provided the [`go-toml/query`][query] package. 
It allowed to run +JSONPath-style queries on TOML files. This feature is not available in v2. For a +replacement, check out [dasel][dasel]. + +This package has been removed because it was essentially not supported anymore +(last commit May 2020), increased the complexity of the code base, and more +complete solutions exist out there. + +[query]: https://github.com/pelletier/go-toml/tree/f99d6bbca119636aeafcf351ee52b3d202782627/query +[dasel]: https://github.com/TomWright/dasel + +## Versioning + +Go-toml follows [Semantic Versioning](http://semver.org/). The supported version +of [TOML](https://github.com/toml-lang/toml) is indicated at the beginning of +this document. The last two major versions of Go are supported +(see [Go Release Policy](https://golang.org/doc/devel/release.html#policy)). + +## License + +The MIT License (MIT). Read [LICENSE](LICENSE). diff --git a/vendor/github.com/pelletier/go-toml/v2/SECURITY.md b/vendor/github.com/pelletier/go-toml/v2/SECURITY.md new file mode 100644 index 0000000000..b2f21cfc92 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/SECURITY.md @@ -0,0 +1,19 @@ +# Security Policy + +## Supported Versions + +Use this section to tell people about which versions of your project are +currently being supported with security updates. + +| Version | Supported | +| ---------- | ------------------ | +| Latest 2.x | :white_check_mark: | +| All 1.x | :x: | +| All 0.x | :x: | + +## Reporting a Vulnerability + +Email a vulnerability report to `security@pelletier.codes`. Make sure to include +as many details as possible to reproduce the vulnerability. This is a +side-project: I will try to get back to you as quickly as possible, time +permitting in my personal life. Providing a working patch helps very much! diff --git a/vendor/github.com/pelletier/go-toml/v2/ci.sh b/vendor/github.com/pelletier/go-toml/v2/ci.sh new file mode 100644 index 0000000000..d916c5f237 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/ci.sh @@ -0,0 +1,279 @@ +#!/usr/bin/env bash + + +stderr() { + echo "$@" 1>&2 +} + +usage() { + b=$(basename "$0") + echo $b: ERROR: "$@" 1>&2 + + cat 1>&2 < coverage.out + go tool cover -func=coverage.out + popd + + if [ "${branch}" != "HEAD" ]; then + git worktree remove --force "$dir" + fi +} + +coverage() { + case "$1" in + -d) + shift + target="${1?Need to provide a target branch argument}" + + output_dir="$(mktemp -d)" + target_out="${output_dir}/target.txt" + head_out="${output_dir}/head.txt" + + cover "${target}" > "${target_out}" + cover "HEAD" > "${head_out}" + + cat "${target_out}" + cat "${head_out}" + + echo "" + + target_pct="$(tail -n2 ${target_out} | head -n1 | sed -E 's/.*total.*\t([0-9.]+)%.*/\1/')" + head_pct="$(tail -n2 ${head_out} | head -n1 | sed -E 's/.*total.*\t([0-9.]+)%/\1/')" + echo "Results: ${target} ${target_pct}% HEAD ${head_pct}%" + + delta_pct=$(echo "$head_pct - $target_pct" | bc -l) + echo "Delta: ${delta_pct}" + + if [[ $delta_pct = \-* ]]; then + echo "Regression!"; + + target_diff="${output_dir}/target.diff.txt" + head_diff="${output_dir}/head.diff.txt" + cat "${target_out}" | grep -E '^github.com/pelletier/go-toml' | tr -s "\t " | cut -f 2,3 | sort > "${target_diff}" + cat "${head_out}" | grep -E '^github.com/pelletier/go-toml' | tr -s "\t " | cut -f 2,3 | sort > "${head_diff}" + + diff --side-by-side --suppress-common-lines "${target_diff}" "${head_diff}" + return 1 + fi + return 0 + ;; + esac + + cover "${1-HEAD}" +} + +bench() { + branch="${1}" + out="${2}" + replace="${3}" + dir="$(mktemp -d)" + + 
stderr "Executing benchmark for ${branch} at ${dir}" + + if [ "${branch}" = "HEAD" ]; then + cp -r . "${dir}/" + else + git worktree add "$dir" "$branch" + fi + + pushd "$dir" + + if [ "${replace}" != "" ]; then + find ./benchmark/ -iname '*.go' -exec sed -i -E "s|github.com/pelletier/go-toml/v2|${replace}|g" {} \; + go get "${replace}" + fi + + export GOMAXPROCS=2 + nice -n -19 taskset --cpu-list 0,1 go test '-bench=^Benchmark(Un)?[mM]arshal' -count=5 -run=Nothing ./... | tee "${out}" + popd + + if [ "${branch}" != "HEAD" ]; then + git worktree remove --force "$dir" + fi +} + +fmktemp() { + if mktemp --version|grep GNU >/dev/null; then + mktemp --suffix=-$1; + else + mktemp -t $1; + fi +} + +benchstathtml() { +python3 - $1 <<'EOF' +import sys + +lines = [] +stop = False + +with open(sys.argv[1]) as f: + for line in f.readlines(): + line = line.strip() + if line == "": + stop = True + if not stop: + lines.append(line.split(',')) + +results = [] +for line in reversed(lines[1:]): + v2 = float(line[1]) + results.append([ + line[0].replace("-32", ""), + "%.1fx" % (float(line[3])/v2), # v1 + "%.1fx" % (float(line[5])/v2), # bs + ]) +# move geomean to the end +results.append(results[0]) +del results[0] + + +def printtable(data): + print(""" + + + + + """) + + for r in data: + print(" ".format(*r)) + + print(""" +
Benchmarkgo-toml v1BurntSushi/toml
{}{}{}
""") + + +def match(x): + return "ReferenceFile" in x[0] or "HugoFrontMatter" in x[0] + +above = [x for x in results if match(x)] +below = [x for x in results if not match(x)] + +printtable(above) +print("
See more") +print("""

The table above has the results of the most common use-cases. The table below +contains the results of all benchmarks, including unrealistic ones. It is +provided for completeness.

""") +printtable(below) +print('

This table can be generated with ./ci.sh benchmark -a -html.

') +print("
") + +EOF +} + +benchmark() { + case "$1" in + -d) + shift + target="${1?Need to provide a target branch argument}" + + old=`fmktemp ${target}` + bench "${target}" "${old}" + + new=`fmktemp HEAD` + bench HEAD "${new}" + + benchstat "${old}" "${new}" + return 0 + ;; + -a) + shift + + v2stats=`fmktemp go-toml-v2` + bench HEAD "${v2stats}" "github.com/pelletier/go-toml/v2" + v1stats=`fmktemp go-toml-v1` + bench HEAD "${v1stats}" "github.com/pelletier/go-toml" + bsstats=`fmktemp bs-toml` + bench HEAD "${bsstats}" "github.com/BurntSushi/toml" + + cp "${v2stats}" go-toml-v2.txt + cp "${v1stats}" go-toml-v1.txt + cp "${bsstats}" bs-toml.txt + + if [ "$1" = "-html" ]; then + tmpcsv=`fmktemp csv` + benchstat -csv -geomean go-toml-v2.txt go-toml-v1.txt bs-toml.txt > $tmpcsv + benchstathtml $tmpcsv + else + benchstat -geomean go-toml-v2.txt go-toml-v1.txt bs-toml.txt + fi + + rm -f go-toml-v2.txt go-toml-v1.txt bs-toml.txt + return $? + esac + + bench "${1-HEAD}" `mktemp` +} + +case "$1" in + coverage) shift; coverage $@;; + benchmark) shift; benchmark $@;; + *) usage "bad argument $1";; +esac diff --git a/vendor/github.com/pelletier/go-toml/v2/decode.go b/vendor/github.com/pelletier/go-toml/v2/decode.go new file mode 100644 index 0000000000..4af965360a --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/decode.go @@ -0,0 +1,544 @@ +package toml + +import ( + "fmt" + "math" + "strconv" + "time" +) + +func parseInteger(b []byte) (int64, error) { + if len(b) > 2 && b[0] == '0' { + switch b[1] { + case 'x': + return parseIntHex(b) + case 'b': + return parseIntBin(b) + case 'o': + return parseIntOct(b) + default: + panic(fmt.Errorf("invalid base '%c', should have been checked by scanIntOrFloat", b[1])) + } + } + + return parseIntDec(b) +} + +func parseLocalDate(b []byte) (LocalDate, error) { + // full-date = date-fullyear "-" date-month "-" date-mday + // date-fullyear = 4DIGIT + // date-month = 2DIGIT ; 01-12 + // date-mday = 2DIGIT ; 01-28, 01-29, 01-30, 01-31 based on month/year + var date LocalDate + + if len(b) != 10 || b[4] != '-' || b[7] != '-' { + return date, newDecodeError(b, "dates are expected to have the format YYYY-MM-DD") + } + + var err error + + date.Year, err = parseDecimalDigits(b[0:4]) + if err != nil { + return LocalDate{}, err + } + + date.Month, err = parseDecimalDigits(b[5:7]) + if err != nil { + return LocalDate{}, err + } + + date.Day, err = parseDecimalDigits(b[8:10]) + if err != nil { + return LocalDate{}, err + } + + if !isValidDate(date.Year, date.Month, date.Day) { + return LocalDate{}, newDecodeError(b, "impossible date") + } + + return date, nil +} + +func parseDecimalDigits(b []byte) (int, error) { + v := 0 + + for i, c := range b { + if c < '0' || c > '9' { + return 0, newDecodeError(b[i:i+1], "expected digit (0-9)") + } + v *= 10 + v += int(c - '0') + } + + return v, nil +} + +func parseDateTime(b []byte) (time.Time, error) { + // offset-date-time = full-date time-delim full-time + // full-time = partial-time time-offset + // time-offset = "Z" / time-numoffset + // time-numoffset = ( "+" / "-" ) time-hour ":" time-minute + + dt, b, err := parseLocalDateTime(b) + if err != nil { + return time.Time{}, err + } + + var zone *time.Location + + if len(b) == 0 { + // parser should have checked that when assigning the date time node + panic("date time should have a timezone") + } + + if b[0] == 'Z' || b[0] == 'z' { + b = b[1:] + zone = time.UTC + } else { + const dateTimeByteLen = 6 + if len(b) != dateTimeByteLen { + return time.Time{}, newDecodeError(b, "invalid 
date-time timezone") + } + var direction int + switch b[0] { + case '-': + direction = -1 + case '+': + direction = +1 + default: + return time.Time{}, newDecodeError(b[:1], "invalid timezone offset character") + } + + if b[3] != ':' { + return time.Time{}, newDecodeError(b[3:4], "expected a : separator") + } + + hours, err := parseDecimalDigits(b[1:3]) + if err != nil { + return time.Time{}, err + } + if hours > 23 { + return time.Time{}, newDecodeError(b[:1], "invalid timezone offset hours") + } + + minutes, err := parseDecimalDigits(b[4:6]) + if err != nil { + return time.Time{}, err + } + if minutes > 59 { + return time.Time{}, newDecodeError(b[:1], "invalid timezone offset minutes") + } + + seconds := direction * (hours*3600 + minutes*60) + if seconds == 0 { + zone = time.UTC + } else { + zone = time.FixedZone("", seconds) + } + b = b[dateTimeByteLen:] + } + + if len(b) > 0 { + return time.Time{}, newDecodeError(b, "extra bytes at the end of the timezone") + } + + t := time.Date( + dt.Year, + time.Month(dt.Month), + dt.Day, + dt.Hour, + dt.Minute, + dt.Second, + dt.Nanosecond, + zone) + + return t, nil +} + +func parseLocalDateTime(b []byte) (LocalDateTime, []byte, error) { + var dt LocalDateTime + + const localDateTimeByteMinLen = 11 + if len(b) < localDateTimeByteMinLen { + return dt, nil, newDecodeError(b, "local datetimes are expected to have the format YYYY-MM-DDTHH:MM:SS[.NNNNNNNNN]") + } + + date, err := parseLocalDate(b[:10]) + if err != nil { + return dt, nil, err + } + dt.LocalDate = date + + sep := b[10] + if sep != 'T' && sep != ' ' && sep != 't' { + return dt, nil, newDecodeError(b[10:11], "datetime separator is expected to be T or a space") + } + + t, rest, err := parseLocalTime(b[11:]) + if err != nil { + return dt, nil, err + } + dt.LocalTime = t + + return dt, rest, nil +} + +// parseLocalTime is a bit different because it also returns the remaining +// []byte that is didn't need. This is to allow parseDateTime to parse those +// remaining bytes as a timezone. +func parseLocalTime(b []byte) (LocalTime, []byte, error) { + var ( + nspow = [10]int{0, 1e8, 1e7, 1e6, 1e5, 1e4, 1e3, 1e2, 1e1, 1e0} + t LocalTime + ) + + // check if b matches to have expected format HH:MM:SS[.NNNNNN] + const localTimeByteLen = 8 + if len(b) < localTimeByteLen { + return t, nil, newDecodeError(b, "times are expected to have the format HH:MM:SS[.NNNNNN]") + } + + var err error + + t.Hour, err = parseDecimalDigits(b[0:2]) + if err != nil { + return t, nil, err + } + + if t.Hour > 23 { + return t, nil, newDecodeError(b[0:2], "hour cannot be greater 23") + } + if b[2] != ':' { + return t, nil, newDecodeError(b[2:3], "expecting colon between hours and minutes") + } + + t.Minute, err = parseDecimalDigits(b[3:5]) + if err != nil { + return t, nil, err + } + if t.Minute > 59 { + return t, nil, newDecodeError(b[3:5], "minutes cannot be greater 59") + } + if b[5] != ':' { + return t, nil, newDecodeError(b[5:6], "expecting colon between minutes and seconds") + } + + t.Second, err = parseDecimalDigits(b[6:8]) + if err != nil { + return t, nil, err + } + + if t.Second > 60 { + return t, nil, newDecodeError(b[6:8], "seconds cannot be greater 60") + } + + b = b[8:] + + if len(b) >= 1 && b[0] == '.' 
{ + frac := 0 + precision := 0 + digits := 0 + + for i, c := range b[1:] { + if !isDigit(c) { + if i == 0 { + return t, nil, newDecodeError(b[0:1], "need at least one digit after fraction point") + } + break + } + digits++ + + const maxFracPrecision = 9 + if i >= maxFracPrecision { + // go-toml allows decoding fractional seconds + // beyond the supported precision of 9 + // digits. It truncates the fractional component + // to the supported precision and ignores the + // remaining digits. + // + // https://github.com/pelletier/go-toml/discussions/707 + continue + } + + frac *= 10 + frac += int(c - '0') + precision++ + } + + if precision == 0 { + return t, nil, newDecodeError(b[:1], "nanoseconds need at least one digit") + } + + t.Nanosecond = frac * nspow[precision] + t.Precision = precision + + return t, b[1+digits:], nil + } + return t, b, nil +} + +//nolint:cyclop +func parseFloat(b []byte) (float64, error) { + if len(b) == 4 && (b[0] == '+' || b[0] == '-') && b[1] == 'n' && b[2] == 'a' && b[3] == 'n' { + return math.NaN(), nil + } + + cleaned, err := checkAndRemoveUnderscoresFloats(b) + if err != nil { + return 0, err + } + + if cleaned[0] == '.' { + return 0, newDecodeError(b, "float cannot start with a dot") + } + + if cleaned[len(cleaned)-1] == '.' { + return 0, newDecodeError(b, "float cannot end with a dot") + } + + dotAlreadySeen := false + for i, c := range cleaned { + if c == '.' { + if dotAlreadySeen { + return 0, newDecodeError(b[i:i+1], "float can have at most one decimal point") + } + if !isDigit(cleaned[i-1]) { + return 0, newDecodeError(b[i-1:i+1], "float decimal point must be preceded by a digit") + } + if !isDigit(cleaned[i+1]) { + return 0, newDecodeError(b[i:i+2], "float decimal point must be followed by a digit") + } + dotAlreadySeen = true + } + } + + start := 0 + if cleaned[0] == '+' || cleaned[0] == '-' { + start = 1 + } + if cleaned[start] == '0' && isDigit(cleaned[start+1]) { + return 0, newDecodeError(b, "float integer part cannot have leading zeroes") + } + + f, err := strconv.ParseFloat(string(cleaned), 64) + if err != nil { + return 0, newDecodeError(b, "unable to parse float: %w", err) + } + + return f, nil +} + +func parseIntHex(b []byte) (int64, error) { + cleaned, err := checkAndRemoveUnderscoresIntegers(b[2:]) + if err != nil { + return 0, err + } + + i, err := strconv.ParseInt(string(cleaned), 16, 64) + if err != nil { + return 0, newDecodeError(b, "couldn't parse hexadecimal number: %w", err) + } + + return i, nil +} + +func parseIntOct(b []byte) (int64, error) { + cleaned, err := checkAndRemoveUnderscoresIntegers(b[2:]) + if err != nil { + return 0, err + } + + i, err := strconv.ParseInt(string(cleaned), 8, 64) + if err != nil { + return 0, newDecodeError(b, "couldn't parse octal number: %w", err) + } + + return i, nil +} + +func parseIntBin(b []byte) (int64, error) { + cleaned, err := checkAndRemoveUnderscoresIntegers(b[2:]) + if err != nil { + return 0, err + } + + i, err := strconv.ParseInt(string(cleaned), 2, 64) + if err != nil { + return 0, newDecodeError(b, "couldn't parse binary number: %w", err) + } + + return i, nil +} + +func isSign(b byte) bool { + return b == '+' || b == '-' +} + +func parseIntDec(b []byte) (int64, error) { + cleaned, err := checkAndRemoveUnderscoresIntegers(b) + if err != nil { + return 0, err + } + + startIdx := 0 + + if isSign(cleaned[0]) { + startIdx++ + } + + if len(cleaned) > startIdx+1 && cleaned[startIdx] == '0' { + return 0, newDecodeError(b, "leading zero not allowed on decimal number") + } + + i, err := 
strconv.ParseInt(string(cleaned), 10, 64) + if err != nil { + return 0, newDecodeError(b, "couldn't parse decimal number: %w", err) + } + + return i, nil +} + +func checkAndRemoveUnderscoresIntegers(b []byte) ([]byte, error) { + start := 0 + if b[start] == '+' || b[start] == '-' { + start++ + } + + if len(b) == start { + return b, nil + } + + if b[start] == '_' { + return nil, newDecodeError(b[start:start+1], "number cannot start with underscore") + } + + if b[len(b)-1] == '_' { + return nil, newDecodeError(b[len(b)-1:], "number cannot end with underscore") + } + + // fast path + i := 0 + for ; i < len(b); i++ { + if b[i] == '_' { + break + } + } + if i == len(b) { + return b, nil + } + + before := false + cleaned := make([]byte, i, len(b)) + copy(cleaned, b) + + for i++; i < len(b); i++ { + c := b[i] + if c == '_' { + if !before { + return nil, newDecodeError(b[i-1:i+1], "number must have at least one digit between underscores") + } + before = false + } else { + before = true + cleaned = append(cleaned, c) + } + } + + return cleaned, nil +} + +func checkAndRemoveUnderscoresFloats(b []byte) ([]byte, error) { + if b[0] == '_' { + return nil, newDecodeError(b[0:1], "number cannot start with underscore") + } + + if b[len(b)-1] == '_' { + return nil, newDecodeError(b[len(b)-1:], "number cannot end with underscore") + } + + // fast path + i := 0 + for ; i < len(b); i++ { + if b[i] == '_' { + break + } + } + if i == len(b) { + return b, nil + } + + before := false + cleaned := make([]byte, 0, len(b)) + + for i := 0; i < len(b); i++ { + c := b[i] + + switch c { + case '_': + if !before { + return nil, newDecodeError(b[i-1:i+1], "number must have at least one digit between underscores") + } + if i < len(b)-1 && (b[i+1] == 'e' || b[i+1] == 'E') { + return nil, newDecodeError(b[i+1:i+2], "cannot have underscore before exponent") + } + before = false + case '+', '-': + // signed exponents + cleaned = append(cleaned, c) + before = false + case 'e', 'E': + if i < len(b)-1 && b[i+1] == '_' { + return nil, newDecodeError(b[i+1:i+2], "cannot have underscore after exponent") + } + cleaned = append(cleaned, c) + case '.': + if i < len(b)-1 && b[i+1] == '_' { + return nil, newDecodeError(b[i+1:i+2], "cannot have underscore after decimal point") + } + if i > 0 && b[i-1] == '_' { + return nil, newDecodeError(b[i-1:i], "cannot have underscore before decimal point") + } + cleaned = append(cleaned, c) + default: + before = true + cleaned = append(cleaned, c) + } + } + + return cleaned, nil +} + +// isValidDate checks if a provided date is a date that exists. +func isValidDate(year int, month int, day int) bool { + return month > 0 && month < 13 && day > 0 && day <= daysIn(month, year) +} + +// daysBefore[m] counts the number of days in a non-leap year +// before month m begins. There is an entry for m=12, counting +// the number of days before January of next year (365). 
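+//
+// Illustrative note (editorial, not from upstream): daysBefore[2] = 31+28 = 59
+// and daysBefore[3] = 90, so daysIn(3, year) returns 90-59 = 31 days for March;
+// only February is special-cased for leap years.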
+var daysBefore = [...]int32{ + 0, + 31, + 31 + 28, + 31 + 28 + 31, + 31 + 28 + 31 + 30, + 31 + 28 + 31 + 30 + 31, + 31 + 28 + 31 + 30 + 31 + 30, + 31 + 28 + 31 + 30 + 31 + 30 + 31, + 31 + 28 + 31 + 30 + 31 + 30 + 31 + 31, + 31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30, + 31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30 + 31, + 31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30 + 31 + 30, + 31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30 + 31 + 30 + 31, +} + +func daysIn(m int, year int) int { + if m == 2 && isLeap(year) { + return 29 + } + return int(daysBefore[m] - daysBefore[m-1]) +} + +func isLeap(year int) bool { + return year%4 == 0 && (year%100 != 0 || year%400 == 0) +} diff --git a/vendor/github.com/pelletier/go-toml/v2/doc.go b/vendor/github.com/pelletier/go-toml/v2/doc.go new file mode 100644 index 0000000000..b7bc599bde --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/doc.go @@ -0,0 +1,2 @@ +// Package toml is a library to read and write TOML documents. +package toml diff --git a/vendor/github.com/pelletier/go-toml/v2/errors.go b/vendor/github.com/pelletier/go-toml/v2/errors.go new file mode 100644 index 0000000000..2e7f0ffdf8 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/errors.go @@ -0,0 +1,270 @@ +package toml + +import ( + "fmt" + "strconv" + "strings" + + "github.com/pelletier/go-toml/v2/internal/danger" +) + +// DecodeError represents an error encountered during the parsing or decoding +// of a TOML document. +// +// In addition to the error message, it contains the position in the document +// where it happened, as well as a human-readable representation that shows +// where the error occurred in the document. +type DecodeError struct { + message string + line int + column int + key Key + + human string +} + +// StrictMissingError occurs in a TOML document that does not have a +// corresponding field in the target value. It contains all the missing fields +// in Errors. +// +// Emitted by Decoder when DisallowUnknownFields() was called. +type StrictMissingError struct { + // One error per field that could not be found. + Errors []DecodeError +} + +// Error returns the canonical string for this error. +func (s *StrictMissingError) Error() string { + return "strict mode: fields in the document are missing in the target struct" +} + +// String returns a human readable description of all errors. +func (s *StrictMissingError) String() string { + var buf strings.Builder + + for i, e := range s.Errors { + if i > 0 { + buf.WriteString("\n---\n") + } + + buf.WriteString(e.String()) + } + + return buf.String() +} + +type Key []string + +// internal version of DecodeError that is used as the base to create a +// DecodeError with full context. +type decodeError struct { + highlight []byte + message string + key Key // optional +} + +func (de *decodeError) Error() string { + return de.message +} + +func newDecodeError(highlight []byte, format string, args ...interface{}) error { + return &decodeError{ + highlight: highlight, + message: fmt.Errorf(format, args...).Error(), + } +} + +// Error returns the error message contained in the DecodeError. +func (e *DecodeError) Error() string { + return "toml: " + e.message +} + +// String returns the human-readable contextualized error. This string is multi-line. +func (e *DecodeError) String() string { + return e.human +} + +// Position returns the (line, column) pair indicating where the error +// occurred in the document. Positions are 1-indexed. 
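+//
+// A hedged caller-side sketch (not upstream documentation): after a failed
+// Unmarshal, callers typically recover the position with errors.As:
+//
+//	var derr *toml.DecodeError
+//	if errors.As(err, &derr) {
+//		row, col := derr.Position()
+//		fmt.Printf("parse error at %d:%d\n%s\n", row, col, derr.String())
+//	}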
+func (e *DecodeError) Position() (row int, column int) { + return e.line, e.column +} + +// Key that was being processed when the error occurred. The key is present only +// if this DecodeError is part of a StrictMissingError. +func (e *DecodeError) Key() Key { + return e.key +} + +// decodeErrorFromHighlight creates a DecodeError referencing a highlighted +// range of bytes from document. +// +// highlight needs to be a sub-slice of document, or this function panics. +// +// The function copies all bytes used in DecodeError, so that document and +// highlight can be freely deallocated. +// +//nolint:funlen +func wrapDecodeError(document []byte, de *decodeError) *DecodeError { + offset := danger.SubsliceOffset(document, de.highlight) + + errMessage := de.Error() + errLine, errColumn := positionAtEnd(document[:offset]) + before, after := linesOfContext(document, de.highlight, offset, 3) + + var buf strings.Builder + + maxLine := errLine + len(after) - 1 + lineColumnWidth := len(strconv.Itoa(maxLine)) + + // Write the lines of context strictly before the error. + for i := len(before) - 1; i > 0; i-- { + line := errLine - i + buf.WriteString(formatLineNumber(line, lineColumnWidth)) + buf.WriteString("|") + + if len(before[i]) > 0 { + buf.WriteString(" ") + buf.Write(before[i]) + } + + buf.WriteRune('\n') + } + + // Write the document line that contains the error. + + buf.WriteString(formatLineNumber(errLine, lineColumnWidth)) + buf.WriteString("| ") + + if len(before) > 0 { + buf.Write(before[0]) + } + + buf.Write(de.highlight) + + if len(after) > 0 { + buf.Write(after[0]) + } + + buf.WriteRune('\n') + + // Write the line with the error message itself (so it does not have a line + // number). + + buf.WriteString(strings.Repeat(" ", lineColumnWidth)) + buf.WriteString("| ") + + if len(before) > 0 { + buf.WriteString(strings.Repeat(" ", len(before[0]))) + } + + buf.WriteString(strings.Repeat("~", len(de.highlight))) + + if len(errMessage) > 0 { + buf.WriteString(" ") + buf.WriteString(errMessage) + } + + // Write the lines of context strictly after the error. + + for i := 1; i < len(after); i++ { + buf.WriteRune('\n') + line := errLine + i + buf.WriteString(formatLineNumber(line, lineColumnWidth)) + buf.WriteString("|") + + if len(after[i]) > 0 { + buf.WriteString(" ") + buf.Write(after[i]) + } + } + + return &DecodeError{ + message: errMessage, + line: errLine, + column: errColumn, + key: de.key, + human: buf.String(), + } +} + +func formatLineNumber(line int, width int) string { + format := "%" + strconv.Itoa(width) + "d" + + return fmt.Sprintf(format, line) +} + +func linesOfContext(document []byte, highlight []byte, offset int, linesAround int) ([][]byte, [][]byte) { + return beforeLines(document, offset, linesAround), afterLines(document, highlight, offset, linesAround) +} + +func beforeLines(document []byte, offset int, linesAround int) [][]byte { + var beforeLines [][]byte + + // Walk the document backward from the highlight to find previous lines + // of context. 
+ rest := document[:offset] +backward: + for o := len(rest) - 1; o >= 0 && len(beforeLines) <= linesAround && len(rest) > 0; { + switch { + case rest[o] == '\n': + // handle individual lines + beforeLines = append(beforeLines, rest[o+1:]) + rest = rest[:o] + o = len(rest) - 1 + case o == 0: + // add the first line only if it's non-empty + beforeLines = append(beforeLines, rest) + + break backward + default: + o-- + } + } + + return beforeLines +} + +func afterLines(document []byte, highlight []byte, offset int, linesAround int) [][]byte { + var afterLines [][]byte + + // Walk the document forward from the highlight to find the following + // lines of context. + rest := document[offset+len(highlight):] +forward: + for o := 0; o < len(rest) && len(afterLines) <= linesAround; { + switch { + case rest[o] == '\n': + // handle individual lines + afterLines = append(afterLines, rest[:o]) + rest = rest[o+1:] + o = 0 + + case o == len(rest)-1: + // add last line only if it's non-empty + afterLines = append(afterLines, rest) + + break forward + default: + o++ + } + } + + return afterLines +} + +func positionAtEnd(b []byte) (row int, column int) { + row = 1 + column = 1 + + for _, c := range b { + if c == '\n' { + row++ + column = 1 + } else { + column++ + } + } + + return +} diff --git a/vendor/github.com/pelletier/go-toml/v2/internal/ast/ast.go b/vendor/github.com/pelletier/go-toml/v2/internal/ast/ast.go new file mode 100644 index 0000000000..9dec2e0007 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/internal/ast/ast.go @@ -0,0 +1,144 @@ +package ast + +import ( + "fmt" + "unsafe" + + "github.com/pelletier/go-toml/v2/internal/danger" +) + +// Iterator starts uninitialized, you need to call Next() first. +// +// For example: +// +// it := n.Children() +// for it.Next() { +// it.Node() +// } +type Iterator struct { + started bool + node *Node +} + +// Next moves the iterator forward and returns true if points to a +// node, false otherwise. +func (c *Iterator) Next() bool { + if !c.started { + c.started = true + } else if c.node.Valid() { + c.node = c.node.Next() + } + return c.node.Valid() +} + +// IsLast returns true if the current node of the iterator is the last +// one. Subsequent call to Next() will return false. +func (c *Iterator) IsLast() bool { + return c.node.next == 0 +} + +// Node returns a copy of the node pointed at by the iterator. +func (c *Iterator) Node() *Node { + return c.node +} + +// Root contains a full AST. +// +// It is immutable once constructed with Builder. +type Root struct { + nodes []Node +} + +// Iterator over the top level nodes. +func (r *Root) Iterator() Iterator { + it := Iterator{} + if len(r.nodes) > 0 { + it.node = &r.nodes[0] + } + return it +} + +func (r *Root) at(idx Reference) *Node { + return &r.nodes[idx] +} + +// Arrays have one child per element in the array. InlineTables have +// one child per key-value pair in the table. KeyValues have at least +// two children. The first one is the value. The rest make a +// potentially dotted key. Table and Array table have one child per +// element of the key they represent (same as KeyValue, but without +// the last node being the value). +type Node struct { + Kind Kind + Raw Range // Raw bytes from the input. + Data []byte // Node value (either allocated or referencing the input). + + // References to other nodes, as offsets in the backing array + // from this node. References can go backward, so those can be + // negative. 
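+ //
+ // Illustrative example (editorial, not upstream): next == 0 means this is
+ // the last sibling, while next == 3 means the next sibling sits 3 entries
+ // further along in the backing array.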
+ next int // 0 if last element + child int // 0 if no child +} + +type Range struct { + Offset uint32 + Length uint32 +} + +// Next returns a copy of the next node, or an invalid Node if there +// is no next node. +func (n *Node) Next() *Node { + if n.next == 0 { + return nil + } + ptr := unsafe.Pointer(n) + size := unsafe.Sizeof(Node{}) + return (*Node)(danger.Stride(ptr, size, n.next)) +} + +// Child returns a copy of the first child node of this node. Other +// children can be accessed calling Next on the first child. Returns +// an invalid Node if there is none. +func (n *Node) Child() *Node { + if n.child == 0 { + return nil + } + ptr := unsafe.Pointer(n) + size := unsafe.Sizeof(Node{}) + return (*Node)(danger.Stride(ptr, size, n.child)) +} + +// Valid returns true if the node's kind is set (not to Invalid). +func (n *Node) Valid() bool { + return n != nil +} + +// Key returns the child nodes making the Key on a supported +// node. Panics otherwise. They are guaranteed to be all be of the +// Kind Key. A simple key would return just one element. +func (n *Node) Key() Iterator { + switch n.Kind { + case KeyValue: + value := n.Child() + if !value.Valid() { + panic(fmt.Errorf("KeyValue should have at least two children")) + } + return Iterator{node: value.Next()} + case Table, ArrayTable: + return Iterator{node: n.Child()} + default: + panic(fmt.Errorf("Key() is not supported on a %s", n.Kind)) + } +} + +// Value returns a pointer to the value node of a KeyValue. +// Guaranteed to be non-nil. Panics if not called on a KeyValue node, +// or if the Children are malformed. +func (n *Node) Value() *Node { + return n.Child() +} + +// Children returns an iterator over a node's children. +func (n *Node) Children() Iterator { + return Iterator{node: n.Child()} +} diff --git a/vendor/github.com/pelletier/go-toml/v2/internal/ast/builder.go b/vendor/github.com/pelletier/go-toml/v2/internal/ast/builder.go new file mode 100644 index 0000000000..120f16e5ce --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/internal/ast/builder.go @@ -0,0 +1,51 @@ +package ast + +type Reference int + +const InvalidReference Reference = -1 + +func (r Reference) Valid() bool { + return r != InvalidReference +} + +type Builder struct { + tree Root + lastIdx int +} + +func (b *Builder) Tree() *Root { + return &b.tree +} + +func (b *Builder) NodeAt(ref Reference) *Node { + return b.tree.at(ref) +} + +func (b *Builder) Reset() { + b.tree.nodes = b.tree.nodes[:0] + b.lastIdx = 0 +} + +func (b *Builder) Push(n Node) Reference { + b.lastIdx = len(b.tree.nodes) + b.tree.nodes = append(b.tree.nodes, n) + return Reference(b.lastIdx) +} + +func (b *Builder) PushAndChain(n Node) Reference { + newIdx := len(b.tree.nodes) + b.tree.nodes = append(b.tree.nodes, n) + if b.lastIdx >= 0 { + b.tree.nodes[b.lastIdx].next = newIdx - b.lastIdx + } + b.lastIdx = newIdx + return Reference(b.lastIdx) +} + +func (b *Builder) AttachChild(parent Reference, child Reference) { + b.tree.nodes[parent].child = int(child) - int(parent) +} + +func (b *Builder) Chain(from Reference, to Reference) { + b.tree.nodes[from].next = int(to) - int(from) +} diff --git a/vendor/github.com/pelletier/go-toml/v2/internal/ast/kind.go b/vendor/github.com/pelletier/go-toml/v2/internal/ast/kind.go new file mode 100644 index 0000000000..2b50c67fce --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/internal/ast/kind.go @@ -0,0 +1,69 @@ +package ast + +import "fmt" + +type Kind int + +const ( + // meta + Invalid Kind = iota + Comment + Key + + // top level 
structures + Table + ArrayTable + KeyValue + + // containers values + Array + InlineTable + + // values + String + Bool + Float + Integer + LocalDate + LocalTime + LocalDateTime + DateTime +) + +func (k Kind) String() string { + switch k { + case Invalid: + return "Invalid" + case Comment: + return "Comment" + case Key: + return "Key" + case Table: + return "Table" + case ArrayTable: + return "ArrayTable" + case KeyValue: + return "KeyValue" + case Array: + return "Array" + case InlineTable: + return "InlineTable" + case String: + return "String" + case Bool: + return "Bool" + case Float: + return "Float" + case Integer: + return "Integer" + case LocalDate: + return "LocalDate" + case LocalTime: + return "LocalTime" + case LocalDateTime: + return "LocalDateTime" + case DateTime: + return "DateTime" + } + panic(fmt.Errorf("Kind.String() not implemented for '%d'", k)) +} diff --git a/vendor/github.com/pelletier/go-toml/v2/internal/danger/danger.go b/vendor/github.com/pelletier/go-toml/v2/internal/danger/danger.go new file mode 100644 index 0000000000..e38e1131b8 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/internal/danger/danger.go @@ -0,0 +1,65 @@ +package danger + +import ( + "fmt" + "reflect" + "unsafe" +) + +const maxInt = uintptr(int(^uint(0) >> 1)) + +func SubsliceOffset(data []byte, subslice []byte) int { + datap := (*reflect.SliceHeader)(unsafe.Pointer(&data)) + hlp := (*reflect.SliceHeader)(unsafe.Pointer(&subslice)) + + if hlp.Data < datap.Data { + panic(fmt.Errorf("subslice address (%d) is before data address (%d)", hlp.Data, datap.Data)) + } + offset := hlp.Data - datap.Data + + if offset > maxInt { + panic(fmt.Errorf("slice offset larger than int (%d)", offset)) + } + + intoffset := int(offset) + + if intoffset > datap.Len { + panic(fmt.Errorf("slice offset (%d) is farther than data length (%d)", intoffset, datap.Len)) + } + + if intoffset+hlp.Len > datap.Len { + panic(fmt.Errorf("slice ends (%d+%d) is farther than data length (%d)", intoffset, hlp.Len, datap.Len)) + } + + return intoffset +} + +func BytesRange(start []byte, end []byte) []byte { + if start == nil || end == nil { + panic("cannot call BytesRange with nil") + } + startp := (*reflect.SliceHeader)(unsafe.Pointer(&start)) + endp := (*reflect.SliceHeader)(unsafe.Pointer(&end)) + + if startp.Data > endp.Data { + panic(fmt.Errorf("start pointer address (%d) is after end pointer address (%d)", startp.Data, endp.Data)) + } + + l := startp.Len + endLen := int(endp.Data-startp.Data) + endp.Len + if endLen > l { + l = endLen + } + + if l > startp.Cap { + panic(fmt.Errorf("range length is larger than capacity")) + } + + return start[:l] +} + +func Stride(ptr unsafe.Pointer, size uintptr, offset int) unsafe.Pointer { + // TODO: replace with unsafe.Add when Go 1.17 is released + // https://github.com/golang/go/issues/40481 + return unsafe.Pointer(uintptr(ptr) + uintptr(int(size)*offset)) +} diff --git a/vendor/github.com/pelletier/go-toml/v2/internal/danger/typeid.go b/vendor/github.com/pelletier/go-toml/v2/internal/danger/typeid.go new file mode 100644 index 0000000000..9d41c28a2f --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/internal/danger/typeid.go @@ -0,0 +1,23 @@ +package danger + +import ( + "reflect" + "unsafe" +) + +// typeID is used as key in encoder and decoder caches to enable using +// the optimize runtime.mapaccess2_fast64 function instead of the more +// expensive lookup if we were to use reflect.Type as map key. 
+// +// typeID holds the pointer to the reflect.Type value, which is unique +// in the program. +// +// https://github.com/segmentio/encoding/blob/master/json/codec.go#L59-L61 +type TypeID unsafe.Pointer + +func MakeTypeID(t reflect.Type) TypeID { + // reflect.Type has the fields: + // typ unsafe.Pointer + // ptr unsafe.Pointer + return TypeID((*[2]unsafe.Pointer)(unsafe.Pointer(&t))[1]) +} diff --git a/vendor/github.com/pelletier/go-toml/v2/internal/tracker/key.go b/vendor/github.com/pelletier/go-toml/v2/internal/tracker/key.go new file mode 100644 index 0000000000..7c148f48d4 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/internal/tracker/key.go @@ -0,0 +1,50 @@ +package tracker + +import ( + "github.com/pelletier/go-toml/v2/internal/ast" +) + +// KeyTracker is a tracker that keeps track of the current Key as the AST is +// walked. +type KeyTracker struct { + k []string +} + +// UpdateTable sets the state of the tracker with the AST table node. +func (t *KeyTracker) UpdateTable(node *ast.Node) { + t.reset() + t.Push(node) +} + +// UpdateArrayTable sets the state of the tracker with the AST array table node. +func (t *KeyTracker) UpdateArrayTable(node *ast.Node) { + t.reset() + t.Push(node) +} + +// Push the given key on the stack. +func (t *KeyTracker) Push(node *ast.Node) { + it := node.Key() + for it.Next() { + t.k = append(t.k, string(it.Node().Data)) + } +} + +// Pop key from stack. +func (t *KeyTracker) Pop(node *ast.Node) { + it := node.Key() + for it.Next() { + t.k = t.k[:len(t.k)-1] + } +} + +// Key returns the current key +func (t *KeyTracker) Key() []string { + k := make([]string, len(t.k)) + copy(k, t.k) + return k +} + +func (t *KeyTracker) reset() { + t.k = t.k[:0] +} diff --git a/vendor/github.com/pelletier/go-toml/v2/internal/tracker/seen.go b/vendor/github.com/pelletier/go-toml/v2/internal/tracker/seen.go new file mode 100644 index 0000000000..a7ee05ba65 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/internal/tracker/seen.go @@ -0,0 +1,356 @@ +package tracker + +import ( + "bytes" + "fmt" + "sync" + + "github.com/pelletier/go-toml/v2/internal/ast" +) + +type keyKind uint8 + +const ( + invalidKind keyKind = iota + valueKind + tableKind + arrayTableKind +) + +func (k keyKind) String() string { + switch k { + case invalidKind: + return "invalid" + case valueKind: + return "value" + case tableKind: + return "table" + case arrayTableKind: + return "array table" + } + panic("missing keyKind string mapping") +} + +// SeenTracker tracks which keys have been seen with which TOML type to flag +// duplicates and mismatches according to the spec. +// +// Each node in the visited tree is represented by an entry. Each entry has an +// identifier, which is provided by a counter. Entries are stored in the array +// entries. As new nodes are discovered (referenced for the first time in the +// TOML document), entries are created and appended to the array. An entry +// points to its parent using its id. +// +// To find whether a given key (sequence of []byte) has already been visited, +// the entries are linearly searched, looking for one with the right name and +// parent id. +// +// Given that all keys appear in the document after their parent, it is +// guaranteed that all descendants of a node are stored after the node, this +// speeds up the search process. +// +// When encountering [[array tables]], the descendants of that node are removed +// to allow that branch of the tree to be "rediscovered". 
To maintain the +// invariant above, the deletion process needs to keep the order of entries. +// This results in more copies in that case. +type SeenTracker struct { + entries []entry + currentIdx int +} + +var pool sync.Pool + +func (s *SeenTracker) reset() { + // Always contains a root element at index 0. + s.currentIdx = 0 + if len(s.entries) == 0 { + s.entries = make([]entry, 1, 2) + } else { + s.entries = s.entries[:1] + } + s.entries[0].child = -1 + s.entries[0].next = -1 +} + +type entry struct { + // Use -1 to indicate no child or no sibling. + child int + next int + + name []byte + kind keyKind + explicit bool + kv bool +} + +// Find the index of the child of parentIdx with key k. Returns -1 if +// it does not exist. +func (s *SeenTracker) find(parentIdx int, k []byte) int { + for i := s.entries[parentIdx].child; i >= 0; i = s.entries[i].next { + if bytes.Equal(s.entries[i].name, k) { + return i + } + } + return -1 +} + +// Remove all descendants of node at position idx. +func (s *SeenTracker) clear(idx int) { + if idx >= len(s.entries) { + return + } + + for i := s.entries[idx].child; i >= 0; { + next := s.entries[i].next + n := s.entries[0].next + s.entries[0].next = i + s.entries[i].next = n + s.entries[i].name = nil + s.clear(i) + i = next + } + + s.entries[idx].child = -1 +} + +func (s *SeenTracker) create(parentIdx int, name []byte, kind keyKind, explicit bool, kv bool) int { + e := entry{ + child: -1, + next: s.entries[parentIdx].child, + + name: name, + kind: kind, + explicit: explicit, + kv: kv, + } + var idx int + if s.entries[0].next >= 0 { + idx = s.entries[0].next + s.entries[0].next = s.entries[idx].next + s.entries[idx] = e + } else { + idx = len(s.entries) + s.entries = append(s.entries, e) + } + + s.entries[parentIdx].child = idx + + return idx +} + +func (s *SeenTracker) setExplicitFlag(parentIdx int) { + for i := s.entries[parentIdx].child; i >= 0; i = s.entries[i].next { + if s.entries[i].kv { + s.entries[i].explicit = true + s.entries[i].kv = false + } + s.setExplicitFlag(i) + } +} + +// CheckExpression takes a top-level node and checks that it does not contain +// keys that have been seen in previous calls, and validates that types are +// consistent. +func (s *SeenTracker) CheckExpression(node *ast.Node) error { + if s.entries == nil { + s.reset() + } + switch node.Kind { + case ast.KeyValue: + return s.checkKeyValue(node) + case ast.Table: + return s.checkTable(node) + case ast.ArrayTable: + return s.checkArrayTable(node) + default: + panic(fmt.Errorf("this should not be a top level node type: %s", node.Kind)) + } +} + +func (s *SeenTracker) checkTable(node *ast.Node) error { + if s.currentIdx >= 0 { + s.setExplicitFlag(s.currentIdx) + } + + it := node.Key() + + parentIdx := 0 + + // This code is duplicated in checkArrayTable. This is because factoring + // it in a function requires to copy the iterator, or allocate it to the + // heap, which is not cheap. 
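+ // (Editorial note) The loop below walks every dotted key part except the
+ // last one, creating implicit table entries for intermediate keys that
+ // have not been seen yet.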
+ for it.Next() { + if it.IsLast() { + break + } + + k := it.Node().Data + + idx := s.find(parentIdx, k) + + if idx < 0 { + idx = s.create(parentIdx, k, tableKind, false, false) + } else { + entry := s.entries[idx] + if entry.kind == valueKind { + return fmt.Errorf("toml: expected %s to be a table, not a %s", string(k), entry.kind) + } + } + parentIdx = idx + } + + k := it.Node().Data + idx := s.find(parentIdx, k) + + if idx >= 0 { + kind := s.entries[idx].kind + if kind != tableKind { + return fmt.Errorf("toml: key %s should be a table, not a %s", string(k), kind) + } + if s.entries[idx].explicit { + return fmt.Errorf("toml: table %s already exists", string(k)) + } + s.entries[idx].explicit = true + } else { + idx = s.create(parentIdx, k, tableKind, true, false) + } + + s.currentIdx = idx + + return nil +} + +func (s *SeenTracker) checkArrayTable(node *ast.Node) error { + if s.currentIdx >= 0 { + s.setExplicitFlag(s.currentIdx) + } + + it := node.Key() + + parentIdx := 0 + + for it.Next() { + if it.IsLast() { + break + } + + k := it.Node().Data + + idx := s.find(parentIdx, k) + + if idx < 0 { + idx = s.create(parentIdx, k, tableKind, false, false) + } else { + entry := s.entries[idx] + if entry.kind == valueKind { + return fmt.Errorf("toml: expected %s to be a table, not a %s", string(k), entry.kind) + } + } + + parentIdx = idx + } + + k := it.Node().Data + idx := s.find(parentIdx, k) + + if idx >= 0 { + kind := s.entries[idx].kind + if kind != arrayTableKind { + return fmt.Errorf("toml: key %s already exists as a %s, but should be an array table", kind, string(k)) + } + s.clear(idx) + } else { + idx = s.create(parentIdx, k, arrayTableKind, true, false) + } + + s.currentIdx = idx + + return nil +} + +func (s *SeenTracker) checkKeyValue(node *ast.Node) error { + parentIdx := s.currentIdx + it := node.Key() + + for it.Next() { + k := it.Node().Data + + idx := s.find(parentIdx, k) + + if idx < 0 { + idx = s.create(parentIdx, k, tableKind, false, true) + } else { + entry := s.entries[idx] + if it.IsLast() { + return fmt.Errorf("toml: key %s is already defined", string(k)) + } else if entry.kind != tableKind { + return fmt.Errorf("toml: expected %s to be a table, not a %s", string(k), entry.kind) + } else if entry.explicit { + return fmt.Errorf("toml: cannot redefine table %s that has already been explicitly defined", string(k)) + } + } + + parentIdx = idx + } + + s.entries[parentIdx].kind = valueKind + + value := node.Value() + + switch value.Kind { + case ast.InlineTable: + return s.checkInlineTable(value) + case ast.Array: + return s.checkArray(value) + } + + return nil +} + +func (s *SeenTracker) checkArray(node *ast.Node) error { + it := node.Children() + for it.Next() { + n := it.Node() + switch n.Kind { + case ast.InlineTable: + err := s.checkInlineTable(n) + if err != nil { + return err + } + case ast.Array: + err := s.checkArray(n) + if err != nil { + return err + } + } + } + return nil +} + +func (s *SeenTracker) checkInlineTable(node *ast.Node) error { + if pool.New == nil { + pool.New = func() interface{} { + return &SeenTracker{} + } + } + + s = pool.Get().(*SeenTracker) + s.reset() + + it := node.Children() + for it.Next() { + n := it.Node() + err := s.checkKeyValue(n) + if err != nil { + return err + } + } + + // As inline tables are self-contained, the tracker does not + // need to retain the details of what they contain. 
The + // keyValue element that creates the inline table is kept to + // mark the presence of the inline table and prevent + // redefinition of its keys: check* functions cannot walk into + // a value. + pool.Put(s) + return nil +} diff --git a/vendor/github.com/pelletier/go-toml/v2/internal/tracker/tracker.go b/vendor/github.com/pelletier/go-toml/v2/internal/tracker/tracker.go new file mode 100644 index 0000000000..bf0317392f --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/internal/tracker/tracker.go @@ -0,0 +1 @@ +package tracker diff --git a/vendor/github.com/pelletier/go-toml/v2/localtime.go b/vendor/github.com/pelletier/go-toml/v2/localtime.go new file mode 100644 index 0000000000..30a31dcbde --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/localtime.go @@ -0,0 +1,120 @@ +package toml + +import ( + "fmt" + "strings" + "time" +) + +// LocalDate represents a calendar day in no specific timezone. +type LocalDate struct { + Year int + Month int + Day int +} + +// AsTime converts d into a specific time instance at midnight in zone. +func (d LocalDate) AsTime(zone *time.Location) time.Time { + return time.Date(d.Year, time.Month(d.Month), d.Day, 0, 0, 0, 0, zone) +} + +// String returns RFC 3339 representation of d. +func (d LocalDate) String() string { + return fmt.Sprintf("%04d-%02d-%02d", d.Year, d.Month, d.Day) +} + +// MarshalText returns RFC 3339 representation of d. +func (d LocalDate) MarshalText() ([]byte, error) { + return []byte(d.String()), nil +} + +// UnmarshalText parses b using RFC 3339 to fill d. +func (d *LocalDate) UnmarshalText(b []byte) error { + res, err := parseLocalDate(b) + if err != nil { + return err + } + *d = res + return nil +} + +// LocalTime represents a time of day of no specific day in no specific +// timezone. +type LocalTime struct { + Hour int // Hour of the day: [0; 24[ + Minute int // Minute of the hour: [0; 60[ + Second int // Second of the minute: [0; 60[ + Nanosecond int // Nanoseconds within the second: [0, 1000000000[ + Precision int // Number of digits to display for Nanosecond. +} + +// String returns RFC 3339 representation of d. +// If d.Nanosecond and d.Precision are zero, the time won't have a nanosecond +// component. If d.Nanosecond > 0 but d.Precision = 0, then the minimum number +// of digits for nanoseconds is provided. +func (d LocalTime) String() string { + s := fmt.Sprintf("%02d:%02d:%02d", d.Hour, d.Minute, d.Second) + + if d.Precision > 0 { + s += fmt.Sprintf(".%09d", d.Nanosecond)[:d.Precision+1] + } else if d.Nanosecond > 0 { + // Nanoseconds are specified, but precision is not provided. Use the + // minimum. + s += strings.Trim(fmt.Sprintf(".%09d", d.Nanosecond), "0") + } + + return s +} + +// MarshalText returns RFC 3339 representation of d. +func (d LocalTime) MarshalText() ([]byte, error) { + return []byte(d.String()), nil +} + +// UnmarshalText parses b using RFC 3339 to fill d. +func (d *LocalTime) UnmarshalText(b []byte) error { + res, left, err := parseLocalTime(b) + if err == nil && len(left) != 0 { + err = newDecodeError(left, "extra characters") + } + if err != nil { + return err + } + *d = res + return nil +} + +// LocalDateTime represents a time of a specific day in no specific timezone. +type LocalDateTime struct { + LocalDate + LocalTime +} + +// AsTime converts d into a specific time instance in zone. 
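+//
+// For example (illustrative, assuming dt holds a previously parsed value):
+//
+//	t := dt.AsTime(time.UTC) // combine the date and time parts into a time.Time in UTC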
+func (d LocalDateTime) AsTime(zone *time.Location) time.Time { + return time.Date(d.Year, time.Month(d.Month), d.Day, d.Hour, d.Minute, d.Second, d.Nanosecond, zone) +} + +// String returns RFC 3339 representation of d. +func (d LocalDateTime) String() string { + return d.LocalDate.String() + "T" + d.LocalTime.String() +} + +// MarshalText returns RFC 3339 representation of d. +func (d LocalDateTime) MarshalText() ([]byte, error) { + return []byte(d.String()), nil +} + +// UnmarshalText parses b using RFC 3339 to fill d. +func (d *LocalDateTime) UnmarshalText(data []byte) error { + res, left, err := parseLocalDateTime(data) + if err == nil && len(left) != 0 { + err = newDecodeError(left, "extra characters") + } + if err != nil { + return err + } + + *d = res + return nil +} diff --git a/vendor/github.com/pelletier/go-toml/v2/marshaler.go b/vendor/github.com/pelletier/go-toml/v2/marshaler.go new file mode 100644 index 0000000000..acb288315b --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/marshaler.go @@ -0,0 +1,1040 @@ +package toml + +import ( + "bytes" + "encoding" + "fmt" + "io" + "math" + "reflect" + "sort" + "strconv" + "strings" + "time" + "unicode" +) + +// Marshal serializes a Go value as a TOML document. +// +// It is a shortcut for Encoder.Encode() with the default options. +func Marshal(v interface{}) ([]byte, error) { + var buf bytes.Buffer + enc := NewEncoder(&buf) + + err := enc.Encode(v) + if err != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +// Encoder writes a TOML document to an output stream. +type Encoder struct { + // output + w io.Writer + + // global settings + tablesInline bool + arraysMultiline bool + indentSymbol string + indentTables bool +} + +// NewEncoder returns a new Encoder that writes to w. +func NewEncoder(w io.Writer) *Encoder { + return &Encoder{ + w: w, + indentSymbol: " ", + } +} + +// SetTablesInline forces the encoder to emit all tables inline. +// +// This behavior can be controlled on an individual struct field basis with the +// inline tag: +// +// MyField `toml:",inline"` +func (enc *Encoder) SetTablesInline(inline bool) *Encoder { + enc.tablesInline = inline + return enc +} + +// SetArraysMultiline forces the encoder to emit all arrays with one element per +// line. +// +// This behavior can be controlled on an individual struct field basis with the multiline tag: +// +// MyField `multiline:"true"` +func (enc *Encoder) SetArraysMultiline(multiline bool) *Encoder { + enc.arraysMultiline = multiline + return enc +} + +// SetIndentSymbol defines the string that should be used for indentation. The +// provided string is repeated for each indentation level. Defaults to two +// spaces. +func (enc *Encoder) SetIndentSymbol(s string) *Encoder { + enc.indentSymbol = s + return enc +} + +// SetIndentTables forces the encoder to intent tables and array tables. +func (enc *Encoder) SetIndentTables(indent bool) *Encoder { + enc.indentTables = indent + return enc +} + +// Encode writes a TOML representation of v to the stream. +// +// If v cannot be represented to TOML it returns an error. +// +// # Encoding rules +// +// A top level slice containing only maps or structs is encoded as [[table +// array]]. +// +// All slices not matching rule 1 are encoded as [array]. As a result, any map +// or struct they contain is encoded as an {inline table}. +// +// Nil interfaces and nil pointers are not supported. +// +// Keys in key-values always have one part. +// +// Intermediate tables are always printed. 
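+//
+// As a hedged illustration (editorial, not upstream documentation), a struct
+// such as
+//
+//	type Config struct {
+//		Name  string
+//		Ports []int
+//	}
+//
+// with Name set to "example" and Ports to {8080, 8443} would typically encode as
+//
+//	Name = 'example'
+//	Ports = [8080, 8443]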
+// +// By default, strings are encoded as literal string, unless they contain either +// a newline character or a single quote. In that case they are emitted as +// quoted strings. +// +// Unsigned integers larger than math.MaxInt64 cannot be encoded. Doing so +// results in an error. This rule exists because the TOML specification only +// requires parsers to support at least the 64 bits integer range. Allowing +// larger numbers would create non-standard TOML documents, which may not be +// readable (at best) by other implementations. To encode such numbers, a +// solution is a custom type that implements encoding.TextMarshaler. +// +// When encoding structs, fields are encoded in order of definition, with their +// exact name. +// +// Tables and array tables are separated by empty lines. However, consecutive +// subtables definitions are not. For example: +// +// [top1] +// +// [top2] +// [top2.child1] +// +// [[array]] +// +// [[array]] +// [array.child2] +// +// # Struct tags +// +// The encoding of each public struct field can be customized by the format +// string in the "toml" key of the struct field's tag. This follows +// encoding/json's convention. The format string starts with the name of the +// field, optionally followed by a comma-separated list of options. The name may +// be empty in order to provide options without overriding the default name. +// +// The "multiline" option emits strings as quoted multi-line TOML strings. It +// has no effect on fields that would not be encoded as strings. +// +// The "inline" option turns fields that would be emitted as tables into inline +// tables instead. It has no effect on other fields. +// +// The "omitempty" option prevents empty values or groups from being emitted. +// +// In addition to the "toml" tag struct tag, a "comment" tag can be used to emit +// a TOML comment before the value being annotated. Comments are ignored inside +// inline tables. For array tables, the comment is only present before the first +// element of the array. +func (enc *Encoder) Encode(v interface{}) error { + var ( + b []byte + ctx encoderCtx + ) + + ctx.inline = enc.tablesInline + + if v == nil { + return fmt.Errorf("toml: cannot encode a nil interface") + } + + b, err := enc.encode(b, ctx, reflect.ValueOf(v)) + if err != nil { + return err + } + + _, err = enc.w.Write(b) + if err != nil { + return fmt.Errorf("toml: cannot write: %w", err) + } + + return nil +} + +type valueOptions struct { + multiline bool + omitempty bool + comment string +} + +type encoderCtx struct { + // Current top-level key. + parentKey []string + + // Key that should be used for a KV. + key string + // Extra flag to account for the empty string + hasKey bool + + // Set to true to indicate that the encoder is inside a KV, so that all + // tables need to be inlined. + insideKv bool + + // Set to true to skip the first table header in an array table. 
+ skipTableHeader bool + + // Should the next table be encoded as inline + inline bool + + // Indentation level + indent int + + // Options coming from struct tags + options valueOptions +} + +func (ctx *encoderCtx) shiftKey() { + if ctx.hasKey { + ctx.parentKey = append(ctx.parentKey, ctx.key) + ctx.clearKey() + } +} + +func (ctx *encoderCtx) setKey(k string) { + ctx.key = k + ctx.hasKey = true +} + +func (ctx *encoderCtx) clearKey() { + ctx.key = "" + ctx.hasKey = false +} + +func (ctx *encoderCtx) isRoot() bool { + return len(ctx.parentKey) == 0 && !ctx.hasKey +} + +func (enc *Encoder) encode(b []byte, ctx encoderCtx, v reflect.Value) ([]byte, error) { + i := v.Interface() + + switch x := i.(type) { + case time.Time: + if x.Nanosecond() > 0 { + return x.AppendFormat(b, time.RFC3339Nano), nil + } + return x.AppendFormat(b, time.RFC3339), nil + case LocalTime: + return append(b, x.String()...), nil + case LocalDate: + return append(b, x.String()...), nil + case LocalDateTime: + return append(b, x.String()...), nil + } + + hasTextMarshaler := v.Type().Implements(textMarshalerType) + if hasTextMarshaler || (v.CanAddr() && reflect.PtrTo(v.Type()).Implements(textMarshalerType)) { + if !hasTextMarshaler { + v = v.Addr() + } + + if ctx.isRoot() { + return nil, fmt.Errorf("toml: type %s implementing the TextMarshaler interface cannot be a root element", v.Type()) + } + + text, err := v.Interface().(encoding.TextMarshaler).MarshalText() + if err != nil { + return nil, err + } + + b = enc.encodeString(b, string(text), ctx.options) + + return b, nil + } + + switch v.Kind() { + // containers + case reflect.Map: + return enc.encodeMap(b, ctx, v) + case reflect.Struct: + return enc.encodeStruct(b, ctx, v) + case reflect.Slice: + return enc.encodeSlice(b, ctx, v) + case reflect.Interface: + if v.IsNil() { + return nil, fmt.Errorf("toml: encoding a nil interface is not supported") + } + + return enc.encode(b, ctx, v.Elem()) + case reflect.Ptr: + if v.IsNil() { + return enc.encode(b, ctx, reflect.Zero(v.Type().Elem())) + } + + return enc.encode(b, ctx, v.Elem()) + + // values + case reflect.String: + b = enc.encodeString(b, v.String(), ctx.options) + case reflect.Float32: + f := v.Float() + + if math.IsNaN(f) { + b = append(b, "nan"...) + } else if f > math.MaxFloat32 { + b = append(b, "inf"...) + } else if f < -math.MaxFloat32 { + b = append(b, "-inf"...) + } else if math.Trunc(f) == f { + b = strconv.AppendFloat(b, f, 'f', 1, 32) + } else { + b = strconv.AppendFloat(b, f, 'f', -1, 32) + } + case reflect.Float64: + f := v.Float() + if math.IsNaN(f) { + b = append(b, "nan"...) + } else if f > math.MaxFloat64 { + b = append(b, "inf"...) + } else if f < -math.MaxFloat64 { + b = append(b, "-inf"...) + } else if math.Trunc(f) == f { + b = strconv.AppendFloat(b, f, 'f', 1, 64) + } else { + b = strconv.AppendFloat(b, f, 'f', -1, 64) + } + case reflect.Bool: + if v.Bool() { + b = append(b, "true"...) + } else { + b = append(b, "false"...) 
+ } + case reflect.Uint64, reflect.Uint32, reflect.Uint16, reflect.Uint8, reflect.Uint: + x := v.Uint() + if x > uint64(math.MaxInt64) { + return nil, fmt.Errorf("toml: not encoding uint (%d) greater than max int64 (%d)", x, int64(math.MaxInt64)) + } + b = strconv.AppendUint(b, x, 10) + case reflect.Int64, reflect.Int32, reflect.Int16, reflect.Int8, reflect.Int: + b = strconv.AppendInt(b, v.Int(), 10) + default: + return nil, fmt.Errorf("toml: cannot encode value of type %s", v.Kind()) + } + + return b, nil +} + +func isNil(v reflect.Value) bool { + switch v.Kind() { + case reflect.Ptr, reflect.Interface, reflect.Map: + return v.IsNil() + default: + return false + } +} + +func shouldOmitEmpty(options valueOptions, v reflect.Value) bool { + return options.omitempty && isEmptyValue(v) +} + +func (enc *Encoder) encodeKv(b []byte, ctx encoderCtx, options valueOptions, v reflect.Value) ([]byte, error) { + var err error + + if !ctx.inline { + b = enc.encodeComment(ctx.indent, options.comment, b) + } + + b = enc.indent(ctx.indent, b) + b = enc.encodeKey(b, ctx.key) + b = append(b, " = "...) + + // create a copy of the context because the value of a KV shouldn't + // modify the global context. + subctx := ctx + subctx.insideKv = true + subctx.shiftKey() + subctx.options = options + + b, err = enc.encode(b, subctx, v) + if err != nil { + return nil, err + } + + return b, nil +} + +func isEmptyValue(v reflect.Value) bool { + switch v.Kind() { + case reflect.Struct: + return isEmptyStruct(v) + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + } + return false +} + +func isEmptyStruct(v reflect.Value) bool { + // TODO: merge with walkStruct and cache. + typ := v.Type() + for i := 0; i < typ.NumField(); i++ { + fieldType := typ.Field(i) + + // only consider exported fields + if fieldType.PkgPath != "" { + continue + } + + tag := fieldType.Tag.Get("toml") + + // special field name to skip field + if tag == "-" { + continue + } + + f := v.Field(i) + + if !isEmptyValue(f) { + return false + } + } + + return true +} + +const literalQuote = '\'' + +func (enc *Encoder) encodeString(b []byte, v string, options valueOptions) []byte { + if needsQuoting(v) { + return enc.encodeQuotedString(options.multiline, b, v) + } + + return enc.encodeLiteralString(b, v) +} + +func needsQuoting(v string) bool { + // TODO: vectorize + for _, b := range []byte(v) { + if b == '\'' || b == '\r' || b == '\n' || invalidAscii(b) { + return true + } + } + return false +} + +// caller should have checked that the string does not contain new lines or ' . +func (enc *Encoder) encodeLiteralString(b []byte, v string) []byte { + b = append(b, literalQuote) + b = append(b, v...) + b = append(b, literalQuote) + + return b +} + +func (enc *Encoder) encodeQuotedString(multiline bool, b []byte, v string) []byte { + stringQuote := `"` + + if multiline { + stringQuote = `"""` + } + + b = append(b, stringQuote...) 
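+ // (Editorial note) Per the TOML spec, a newline immediately following the
+ // opening """ delimiter is trimmed, so multi-line strings can safely start
+ // their content on the next line.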
+ if multiline { + b = append(b, '\n') + } + + const ( + hextable = "0123456789ABCDEF" + // U+0000 to U+0008, U+000A to U+001F, U+007F + nul = 0x0 + bs = 0x8 + lf = 0xa + us = 0x1f + del = 0x7f + ) + + for _, r := range []byte(v) { + switch r { + case '\\': + b = append(b, `\\`...) + case '"': + b = append(b, `\"`...) + case '\b': + b = append(b, `\b`...) + case '\f': + b = append(b, `\f`...) + case '\n': + if multiline { + b = append(b, r) + } else { + b = append(b, `\n`...) + } + case '\r': + b = append(b, `\r`...) + case '\t': + b = append(b, `\t`...) + default: + switch { + case r >= nul && r <= bs, r >= lf && r <= us, r == del: + b = append(b, `\u00`...) + b = append(b, hextable[r>>4]) + b = append(b, hextable[r&0x0f]) + default: + b = append(b, r) + } + } + } + + b = append(b, stringQuote...) + + return b +} + +// caller should have checked that the string is in A-Z / a-z / 0-9 / - / _ . +func (enc *Encoder) encodeUnquotedKey(b []byte, v string) []byte { + return append(b, v...) +} + +func (enc *Encoder) encodeTableHeader(ctx encoderCtx, b []byte) ([]byte, error) { + if len(ctx.parentKey) == 0 { + return b, nil + } + + b = enc.encodeComment(ctx.indent, ctx.options.comment, b) + + b = enc.indent(ctx.indent, b) + + b = append(b, '[') + + b = enc.encodeKey(b, ctx.parentKey[0]) + + for _, k := range ctx.parentKey[1:] { + b = append(b, '.') + b = enc.encodeKey(b, k) + } + + b = append(b, "]\n"...) + + return b, nil +} + +//nolint:cyclop +func (enc *Encoder) encodeKey(b []byte, k string) []byte { + needsQuotation := false + cannotUseLiteral := false + + if len(k) == 0 { + return append(b, "''"...) + } + + for _, c := range k { + if (c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z') || (c >= '0' && c <= '9') || c == '-' || c == '_' { + continue + } + + if c == literalQuote { + cannotUseLiteral = true + } + + needsQuotation = true + } + + if needsQuotation && needsQuoting(k) { + cannotUseLiteral = true + } + + switch { + case cannotUseLiteral: + return enc.encodeQuotedString(false, b, k) + case needsQuotation: + return enc.encodeLiteralString(b, k) + default: + return enc.encodeUnquotedKey(b, k) + } +} + +func (enc *Encoder) encodeMap(b []byte, ctx encoderCtx, v reflect.Value) ([]byte, error) { + if v.Type().Key().Kind() != reflect.String { + return nil, fmt.Errorf("toml: type %s is not supported as a map key", v.Type().Key().Kind()) + } + + var ( + t table + emptyValueOptions valueOptions + ) + + iter := v.MapRange() + for iter.Next() { + k := iter.Key().String() + v := iter.Value() + + if isNil(v) { + continue + } + + if willConvertToTableOrArrayTable(ctx, v) { + t.pushTable(k, v, emptyValueOptions) + } else { + t.pushKV(k, v, emptyValueOptions) + } + } + + sortEntriesByKey(t.kvs) + sortEntriesByKey(t.tables) + + return enc.encodeTable(b, ctx, t) +} + +func sortEntriesByKey(e []entry) { + sort.Slice(e, func(i, j int) bool { + return e[i].Key < e[j].Key + }) +} + +type entry struct { + Key string + Value reflect.Value + Options valueOptions +} + +type table struct { + kvs []entry + tables []entry +} + +func (t *table) pushKV(k string, v reflect.Value, options valueOptions) { + for _, e := range t.kvs { + if e.Key == k { + return + } + } + + t.kvs = append(t.kvs, entry{Key: k, Value: v, Options: options}) +} + +func (t *table) pushTable(k string, v reflect.Value, options valueOptions) { + for _, e := range t.tables { + if e.Key == k { + return + } + } + t.tables = append(t.tables, entry{Key: k, Value: v, Options: options}) +} + +func walkStruct(ctx encoderCtx, t *table, v reflect.Value) { + // 
TODO: cache this + typ := v.Type() + for i := 0; i < typ.NumField(); i++ { + fieldType := typ.Field(i) + + // only consider exported fields + if fieldType.PkgPath != "" { + continue + } + + tag := fieldType.Tag.Get("toml") + + // special field name to skip field + if tag == "-" { + continue + } + + k, opts := parseTag(tag) + if !isValidName(k) { + k = "" + } + + f := v.Field(i) + + if k == "" { + if fieldType.Anonymous { + if fieldType.Type.Kind() == reflect.Struct { + walkStruct(ctx, t, f) + } + continue + } else { + k = fieldType.Name + } + } + + if isNil(f) { + continue + } + + options := valueOptions{ + multiline: opts.multiline, + omitempty: opts.omitempty, + comment: fieldType.Tag.Get("comment"), + } + + if opts.inline || !willConvertToTableOrArrayTable(ctx, f) { + t.pushKV(k, f, options) + } else { + t.pushTable(k, f, options) + } + } +} + +func (enc *Encoder) encodeStruct(b []byte, ctx encoderCtx, v reflect.Value) ([]byte, error) { + var t table + + walkStruct(ctx, &t, v) + + return enc.encodeTable(b, ctx, t) +} + +func (enc *Encoder) encodeComment(indent int, comment string, b []byte) []byte { + for len(comment) > 0 { + var line string + idx := strings.IndexByte(comment, '\n') + if idx >= 0 { + line = comment[:idx] + comment = comment[idx+1:] + } else { + line = comment + comment = "" + } + b = enc.indent(indent, b) + b = append(b, "# "...) + b = append(b, line...) + b = append(b, '\n') + } + return b +} + +func isValidName(s string) bool { + if s == "" { + return false + } + for _, c := range s { + switch { + case strings.ContainsRune("!#$%&()*+-./:;<=>?@[]^_{|}~ ", c): + // Backslash and quote chars are reserved, but + // otherwise any punctuation chars are allowed + // in a tag name. + case !unicode.IsLetter(c) && !unicode.IsDigit(c): + return false + } + } + return true +} + +type tagOptions struct { + multiline bool + inline bool + omitempty bool +} + +func parseTag(tag string) (string, tagOptions) { + opts := tagOptions{} + + idx := strings.Index(tag, ",") + if idx == -1 { + return tag, opts + } + + raw := tag[idx+1:] + tag = string(tag[:idx]) + for raw != "" { + var o string + i := strings.Index(raw, ",") + if i >= 0 { + o, raw = raw[:i], raw[i+1:] + } else { + o, raw = raw, "" + } + switch o { + case "multiline": + opts.multiline = true + case "inline": + opts.inline = true + case "omitempty": + opts.omitempty = true + } + } + + return tag, opts +} + +func (enc *Encoder) encodeTable(b []byte, ctx encoderCtx, t table) ([]byte, error) { + var err error + + ctx.shiftKey() + + if ctx.insideKv || (ctx.inline && !ctx.isRoot()) { + return enc.encodeTableInline(b, ctx, t) + } + + if !ctx.skipTableHeader { + b, err = enc.encodeTableHeader(ctx, b) + if err != nil { + return nil, err + } + + if enc.indentTables && len(ctx.parentKey) > 0 { + ctx.indent++ + } + } + ctx.skipTableHeader = false + + hasNonEmptyKV := false + for _, kv := range t.kvs { + if shouldOmitEmpty(kv.Options, kv.Value) { + continue + } + hasNonEmptyKV = true + + ctx.setKey(kv.Key) + + b, err = enc.encodeKv(b, ctx, kv.Options, kv.Value) + if err != nil { + return nil, err + } + + b = append(b, '\n') + } + + first := true + for _, table := range t.tables { + if shouldOmitEmpty(table.Options, table.Value) { + continue + } + if first { + first = false + if hasNonEmptyKV { + b = append(b, '\n') + } + } else { + b = append(b, "\n"...) 
+ } + + ctx.setKey(table.Key) + + ctx.options = table.Options + + b, err = enc.encode(b, ctx, table.Value) + if err != nil { + return nil, err + } + } + + return b, nil +} + +func (enc *Encoder) encodeTableInline(b []byte, ctx encoderCtx, t table) ([]byte, error) { + var err error + + b = append(b, '{') + + first := true + for _, kv := range t.kvs { + if shouldOmitEmpty(kv.Options, kv.Value) { + continue + } + + if first { + first = false + } else { + b = append(b, `, `...) + } + + ctx.setKey(kv.Key) + + b, err = enc.encodeKv(b, ctx, kv.Options, kv.Value) + if err != nil { + return nil, err + } + } + + if len(t.tables) > 0 { + panic("inline table cannot contain nested tables, only key-values") + } + + b = append(b, "}"...) + + return b, nil +} + +func willConvertToTable(ctx encoderCtx, v reflect.Value) bool { + if !v.IsValid() { + return false + } + if v.Type() == timeType || v.Type().Implements(textMarshalerType) || (v.Kind() != reflect.Ptr && v.CanAddr() && reflect.PtrTo(v.Type()).Implements(textMarshalerType)) { + return false + } + + t := v.Type() + switch t.Kind() { + case reflect.Map, reflect.Struct: + return !ctx.inline + case reflect.Interface: + return willConvertToTable(ctx, v.Elem()) + case reflect.Ptr: + if v.IsNil() { + return false + } + + return willConvertToTable(ctx, v.Elem()) + default: + return false + } +} + +func willConvertToTableOrArrayTable(ctx encoderCtx, v reflect.Value) bool { + if ctx.insideKv { + return false + } + t := v.Type() + + if t.Kind() == reflect.Interface { + return willConvertToTableOrArrayTable(ctx, v.Elem()) + } + + if t.Kind() == reflect.Slice { + if v.Len() == 0 { + // An empty slice should be a kv = []. + return false + } + + for i := 0; i < v.Len(); i++ { + t := willConvertToTable(ctx, v.Index(i)) + + if !t { + return false + } + } + + return true + } + + return willConvertToTable(ctx, v) +} + +func (enc *Encoder) encodeSlice(b []byte, ctx encoderCtx, v reflect.Value) ([]byte, error) { + if v.Len() == 0 { + b = append(b, "[]"...) + + return b, nil + } + + if willConvertToTableOrArrayTable(ctx, v) { + return enc.encodeSliceAsArrayTable(b, ctx, v) + } + + return enc.encodeSliceAsArray(b, ctx, v) +} + +// caller should have checked that v is a slice that only contains values that +// encode into tables. +func (enc *Encoder) encodeSliceAsArrayTable(b []byte, ctx encoderCtx, v reflect.Value) ([]byte, error) { + ctx.shiftKey() + + scratch := make([]byte, 0, 64) + scratch = append(scratch, "[["...) + + for i, k := range ctx.parentKey { + if i > 0 { + scratch = append(scratch, '.') + } + + scratch = enc.encodeKey(scratch, k) + } + + scratch = append(scratch, "]]\n"...) + ctx.skipTableHeader = true + + b = enc.encodeComment(ctx.indent, ctx.options.comment, b) + + for i := 0; i < v.Len(); i++ { + if i != 0 { + b = append(b, "\n"...) + } + + b = append(b, scratch...) + + var err error + b, err = enc.encode(b, ctx, v.Index(i)) + if err != nil { + return nil, err + } + } + + return b, nil +} + +func (enc *Encoder) encodeSliceAsArray(b []byte, ctx encoderCtx, v reflect.Value) ([]byte, error) { + multiline := ctx.options.multiline || enc.arraysMultiline + separator := ", " + + b = append(b, '[') + + subCtx := ctx + subCtx.options = valueOptions{} + + if multiline { + separator = ",\n" + + b = append(b, '\n') + + subCtx.indent++ + } + + var err error + first := true + + for i := 0; i < v.Len(); i++ { + if first { + first = false + } else { + b = append(b, separator...) 
+ } + + if multiline { + b = enc.indent(subCtx.indent, b) + } + + b, err = enc.encode(b, subCtx, v.Index(i)) + if err != nil { + return nil, err + } + } + + if multiline { + b = append(b, '\n') + b = enc.indent(ctx.indent, b) + } + + b = append(b, ']') + + return b, nil +} + +func (enc *Encoder) indent(level int, b []byte) []byte { + for i := 0; i < level; i++ { + b = append(b, enc.indentSymbol...) + } + + return b +} diff --git a/vendor/github.com/pelletier/go-toml/v2/parser.go b/vendor/github.com/pelletier/go-toml/v2/parser.go new file mode 100644 index 0000000000..9859a795bd --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/parser.go @@ -0,0 +1,1086 @@ +package toml + +import ( + "bytes" + "unicode" + + "github.com/pelletier/go-toml/v2/internal/ast" + "github.com/pelletier/go-toml/v2/internal/danger" +) + +type parser struct { + builder ast.Builder + ref ast.Reference + data []byte + left []byte + err error + first bool +} + +func (p *parser) Range(b []byte) ast.Range { + return ast.Range{ + Offset: uint32(danger.SubsliceOffset(p.data, b)), + Length: uint32(len(b)), + } +} + +func (p *parser) Raw(raw ast.Range) []byte { + return p.data[raw.Offset : raw.Offset+raw.Length] +} + +func (p *parser) Reset(b []byte) { + p.builder.Reset() + p.ref = ast.InvalidReference + p.data = b + p.left = b + p.err = nil + p.first = true +} + +//nolint:cyclop +func (p *parser) NextExpression() bool { + if len(p.left) == 0 || p.err != nil { + return false + } + + p.builder.Reset() + p.ref = ast.InvalidReference + + for { + if len(p.left) == 0 || p.err != nil { + return false + } + + if !p.first { + p.left, p.err = p.parseNewline(p.left) + } + + if len(p.left) == 0 || p.err != nil { + return false + } + + p.ref, p.left, p.err = p.parseExpression(p.left) + + if p.err != nil { + return false + } + + p.first = false + + if p.ref.Valid() { + return true + } + } +} + +func (p *parser) Expression() *ast.Node { + return p.builder.NodeAt(p.ref) +} + +func (p *parser) Error() error { + return p.err +} + +func (p *parser) parseNewline(b []byte) ([]byte, error) { + if b[0] == '\n' { + return b[1:], nil + } + + if b[0] == '\r' { + _, rest, err := scanWindowsNewline(b) + return rest, err + } + + return nil, newDecodeError(b[0:1], "expected newline but got %#U", b[0]) +} + +func (p *parser) parseExpression(b []byte) (ast.Reference, []byte, error) { + // expression = ws [ comment ] + // expression =/ ws keyval ws [ comment ] + // expression =/ ws table ws [ comment ] + ref := ast.InvalidReference + + b = p.parseWhitespace(b) + + if len(b) == 0 { + return ref, b, nil + } + + if b[0] == '#' { + _, rest, err := scanComment(b) + return ref, rest, err + } + + if b[0] == '\n' || b[0] == '\r' { + return ref, b, nil + } + + var err error + if b[0] == '[' { + ref, b, err = p.parseTable(b) + } else { + ref, b, err = p.parseKeyval(b) + } + + if err != nil { + return ref, nil, err + } + + b = p.parseWhitespace(b) + + if len(b) > 0 && b[0] == '#' { + _, rest, err := scanComment(b) + return ref, rest, err + } + + return ref, b, nil +} + +func (p *parser) parseTable(b []byte) (ast.Reference, []byte, error) { + // table = std-table / array-table + if len(b) > 1 && b[1] == '[' { + return p.parseArrayTable(b) + } + + return p.parseStdTable(b) +} + +func (p *parser) parseArrayTable(b []byte) (ast.Reference, []byte, error) { + // array-table = array-table-open key array-table-close + // array-table-open = %x5B.5B ws ; [[ Double left square bracket + // array-table-close = ws %x5D.5D ; ]] Double right square bracket + ref := 
p.builder.Push(ast.Node{ + Kind: ast.ArrayTable, + }) + + b = b[2:] + b = p.parseWhitespace(b) + + k, b, err := p.parseKey(b) + if err != nil { + return ref, nil, err + } + + p.builder.AttachChild(ref, k) + b = p.parseWhitespace(b) + + b, err = expect(']', b) + if err != nil { + return ref, nil, err + } + + b, err = expect(']', b) + + return ref, b, err +} + +func (p *parser) parseStdTable(b []byte) (ast.Reference, []byte, error) { + // std-table = std-table-open key std-table-close + // std-table-open = %x5B ws ; [ Left square bracket + // std-table-close = ws %x5D ; ] Right square bracket + ref := p.builder.Push(ast.Node{ + Kind: ast.Table, + }) + + b = b[1:] + b = p.parseWhitespace(b) + + key, b, err := p.parseKey(b) + if err != nil { + return ref, nil, err + } + + p.builder.AttachChild(ref, key) + + b = p.parseWhitespace(b) + + b, err = expect(']', b) + + return ref, b, err +} + +func (p *parser) parseKeyval(b []byte) (ast.Reference, []byte, error) { + // keyval = key keyval-sep val + ref := p.builder.Push(ast.Node{ + Kind: ast.KeyValue, + }) + + key, b, err := p.parseKey(b) + if err != nil { + return ast.InvalidReference, nil, err + } + + // keyval-sep = ws %x3D ws ; = + + b = p.parseWhitespace(b) + + if len(b) == 0 { + return ast.InvalidReference, nil, newDecodeError(b, "expected = after a key, but the document ends there") + } + + b, err = expect('=', b) + if err != nil { + return ast.InvalidReference, nil, err + } + + b = p.parseWhitespace(b) + + valRef, b, err := p.parseVal(b) + if err != nil { + return ref, b, err + } + + p.builder.Chain(valRef, key) + p.builder.AttachChild(ref, valRef) + + return ref, b, err +} + +//nolint:cyclop,funlen +func (p *parser) parseVal(b []byte) (ast.Reference, []byte, error) { + // val = string / boolean / array / inline-table / date-time / float / integer + ref := ast.InvalidReference + + if len(b) == 0 { + return ref, nil, newDecodeError(b, "expected value, not eof") + } + + var err error + c := b[0] + + switch c { + case '"': + var raw []byte + var v []byte + if scanFollowsMultilineBasicStringDelimiter(b) { + raw, v, b, err = p.parseMultilineBasicString(b) + } else { + raw, v, b, err = p.parseBasicString(b) + } + + if err == nil { + ref = p.builder.Push(ast.Node{ + Kind: ast.String, + Raw: p.Range(raw), + Data: v, + }) + } + + return ref, b, err + case '\'': + var raw []byte + var v []byte + if scanFollowsMultilineLiteralStringDelimiter(b) { + raw, v, b, err = p.parseMultilineLiteralString(b) + } else { + raw, v, b, err = p.parseLiteralString(b) + } + + if err == nil { + ref = p.builder.Push(ast.Node{ + Kind: ast.String, + Raw: p.Range(raw), + Data: v, + }) + } + + return ref, b, err + case 't': + if !scanFollowsTrue(b) { + return ref, nil, newDecodeError(atmost(b, 4), "expected 'true'") + } + + ref = p.builder.Push(ast.Node{ + Kind: ast.Bool, + Data: b[:4], + }) + + return ref, b[4:], nil + case 'f': + if !scanFollowsFalse(b) { + return ref, nil, newDecodeError(atmost(b, 5), "expected 'false'") + } + + ref = p.builder.Push(ast.Node{ + Kind: ast.Bool, + Data: b[:5], + }) + + return ref, b[5:], nil + case '[': + return p.parseValArray(b) + case '{': + return p.parseInlineTable(b) + default: + return p.parseIntOrFloatOrDateTime(b) + } +} + +func atmost(b []byte, n int) []byte { + if n >= len(b) { + return b + } + + return b[:n] +} + +func (p *parser) parseLiteralString(b []byte) ([]byte, []byte, []byte, error) { + v, rest, err := scanLiteralString(b) + if err != nil { + return nil, nil, nil, err + } + + return v, v[1 : len(v)-1], rest, nil +} + 
+func (p *parser) parseInlineTable(b []byte) (ast.Reference, []byte, error) { + // inline-table = inline-table-open [ inline-table-keyvals ] inline-table-close + // inline-table-open = %x7B ws ; { + // inline-table-close = ws %x7D ; } + // inline-table-sep = ws %x2C ws ; , Comma + // inline-table-keyvals = keyval [ inline-table-sep inline-table-keyvals ] + parent := p.builder.Push(ast.Node{ + Kind: ast.InlineTable, + }) + + first := true + + var child ast.Reference + + b = b[1:] + + var err error + + for len(b) > 0 { + previousB := b + b = p.parseWhitespace(b) + + if len(b) == 0 { + return parent, nil, newDecodeError(previousB[:1], "inline table is incomplete") + } + + if b[0] == '}' { + break + } + + if !first { + b, err = expect(',', b) + if err != nil { + return parent, nil, err + } + b = p.parseWhitespace(b) + } + + var kv ast.Reference + + kv, b, err = p.parseKeyval(b) + if err != nil { + return parent, nil, err + } + + if first { + p.builder.AttachChild(parent, kv) + } else { + p.builder.Chain(child, kv) + } + child = kv + + first = false + } + + rest, err := expect('}', b) + + return parent, rest, err +} + +//nolint:funlen,cyclop +func (p *parser) parseValArray(b []byte) (ast.Reference, []byte, error) { + // array = array-open [ array-values ] ws-comment-newline array-close + // array-open = %x5B ; [ + // array-close = %x5D ; ] + // array-values = ws-comment-newline val ws-comment-newline array-sep array-values + // array-values =/ ws-comment-newline val ws-comment-newline [ array-sep ] + // array-sep = %x2C ; , Comma + // ws-comment-newline = *( wschar / [ comment ] newline ) + arrayStart := b + b = b[1:] + + parent := p.builder.Push(ast.Node{ + Kind: ast.Array, + }) + + first := true + + var lastChild ast.Reference + + var err error + for len(b) > 0 { + b, err = p.parseOptionalWhitespaceCommentNewline(b) + if err != nil { + return parent, nil, err + } + + if len(b) == 0 { + return parent, nil, newDecodeError(arrayStart[:1], "array is incomplete") + } + + if b[0] == ']' { + break + } + + if b[0] == ',' { + if first { + return parent, nil, newDecodeError(b[0:1], "array cannot start with comma") + } + b = b[1:] + + b, err = p.parseOptionalWhitespaceCommentNewline(b) + if err != nil { + return parent, nil, err + } + } else if !first { + return parent, nil, newDecodeError(b[0:1], "array elements must be separated by commas") + } + + // TOML allows trailing commas in arrays. 
+ if len(b) > 0 && b[0] == ']' { + break + } + + var valueRef ast.Reference + valueRef, b, err = p.parseVal(b) + if err != nil { + return parent, nil, err + } + + if first { + p.builder.AttachChild(parent, valueRef) + } else { + p.builder.Chain(lastChild, valueRef) + } + lastChild = valueRef + + b, err = p.parseOptionalWhitespaceCommentNewline(b) + if err != nil { + return parent, nil, err + } + first = false + } + + rest, err := expect(']', b) + + return parent, rest, err +} + +func (p *parser) parseOptionalWhitespaceCommentNewline(b []byte) ([]byte, error) { + for len(b) > 0 { + var err error + b = p.parseWhitespace(b) + + if len(b) > 0 && b[0] == '#' { + _, b, err = scanComment(b) + if err != nil { + return nil, err + } + } + + if len(b) == 0 { + break + } + + if b[0] == '\n' || b[0] == '\r' { + b, err = p.parseNewline(b) + if err != nil { + return nil, err + } + } else { + break + } + } + + return b, nil +} + +func (p *parser) parseMultilineLiteralString(b []byte) ([]byte, []byte, []byte, error) { + token, rest, err := scanMultilineLiteralString(b) + if err != nil { + return nil, nil, nil, err + } + + i := 3 + + // skip the immediate new line + if token[i] == '\n' { + i++ + } else if token[i] == '\r' && token[i+1] == '\n' { + i += 2 + } + + return token, token[i : len(token)-3], rest, err +} + +//nolint:funlen,gocognit,cyclop +func (p *parser) parseMultilineBasicString(b []byte) ([]byte, []byte, []byte, error) { + // ml-basic-string = ml-basic-string-delim [ newline ] ml-basic-body + // ml-basic-string-delim + // ml-basic-string-delim = 3quotation-mark + // ml-basic-body = *mlb-content *( mlb-quotes 1*mlb-content ) [ mlb-quotes ] + // + // mlb-content = mlb-char / newline / mlb-escaped-nl + // mlb-char = mlb-unescaped / escaped + // mlb-quotes = 1*2quotation-mark + // mlb-unescaped = wschar / %x21 / %x23-5B / %x5D-7E / non-ascii + // mlb-escaped-nl = escape ws newline *( wschar / newline ) + token, escaped, rest, err := scanMultilineBasicString(b) + if err != nil { + return nil, nil, nil, err + } + + i := 3 + + // skip the immediate new line + if token[i] == '\n' { + i++ + } else if token[i] == '\r' && token[i+1] == '\n' { + i += 2 + } + + // fast path + startIdx := i + endIdx := len(token) - len(`"""`) + + if !escaped { + str := token[startIdx:endIdx] + verr := utf8TomlValidAlreadyEscaped(str) + if verr.Zero() { + return token, str, rest, nil + } + return nil, nil, nil, newDecodeError(str[verr.Index:verr.Index+verr.Size], "invalid UTF-8") + } + + var builder bytes.Buffer + + // The scanner ensures that the token starts and ends with quotes and that + // escapes are balanced. + for i < len(token)-3 { + c := token[i] + + //nolint:nestif + if c == '\\' { + // When the last non-whitespace character on a line is an unescaped \, + // it will be trimmed along with all whitespace (including newlines) up + // to the next non-whitespace character or closing delimiter. 
+ + isLastNonWhitespaceOnLine := false + j := 1 + findEOLLoop: + for ; j < len(token)-3-i; j++ { + switch token[i+j] { + case ' ', '\t': + continue + case '\r': + if token[i+j+1] == '\n' { + continue + } + case '\n': + isLastNonWhitespaceOnLine = true + } + break findEOLLoop + } + if isLastNonWhitespaceOnLine { + i += j + for ; i < len(token)-3; i++ { + c := token[i] + if !(c == '\n' || c == '\r' || c == ' ' || c == '\t') { + i-- + break + } + } + i++ + continue + } + + // handle escaping + i++ + c = token[i] + + switch c { + case '"', '\\': + builder.WriteByte(c) + case 'b': + builder.WriteByte('\b') + case 'f': + builder.WriteByte('\f') + case 'n': + builder.WriteByte('\n') + case 'r': + builder.WriteByte('\r') + case 't': + builder.WriteByte('\t') + case 'e': + builder.WriteByte(0x1B) + case 'u': + x, err := hexToRune(atmost(token[i+1:], 4), 4) + if err != nil { + return nil, nil, nil, err + } + builder.WriteRune(x) + i += 4 + case 'U': + x, err := hexToRune(atmost(token[i+1:], 8), 8) + if err != nil { + return nil, nil, nil, err + } + + builder.WriteRune(x) + i += 8 + default: + return nil, nil, nil, newDecodeError(token[i:i+1], "invalid escaped character %#U", c) + } + i++ + } else { + size := utf8ValidNext(token[i:]) + if size == 0 { + return nil, nil, nil, newDecodeError(token[i:i+1], "invalid character %#U", c) + } + builder.Write(token[i : i+size]) + i += size + } + } + + return token, builder.Bytes(), rest, nil +} + +func (p *parser) parseKey(b []byte) (ast.Reference, []byte, error) { + // key = simple-key / dotted-key + // simple-key = quoted-key / unquoted-key + // + // unquoted-key = 1*( ALPHA / DIGIT / %x2D / %x5F ) ; A-Z / a-z / 0-9 / - / _ + // quoted-key = basic-string / literal-string + // dotted-key = simple-key 1*( dot-sep simple-key ) + // + // dot-sep = ws %x2E ws ; . Period + raw, key, b, err := p.parseSimpleKey(b) + if err != nil { + return ast.InvalidReference, nil, err + } + + ref := p.builder.Push(ast.Node{ + Kind: ast.Key, + Raw: p.Range(raw), + Data: key, + }) + + for { + b = p.parseWhitespace(b) + if len(b) > 0 && b[0] == '.' 
{ + b = p.parseWhitespace(b[1:]) + + raw, key, b, err = p.parseSimpleKey(b) + if err != nil { + return ref, nil, err + } + + p.builder.PushAndChain(ast.Node{ + Kind: ast.Key, + Raw: p.Range(raw), + Data: key, + }) + } else { + break + } + } + + return ref, b, nil +} + +func (p *parser) parseSimpleKey(b []byte) (raw, key, rest []byte, err error) { + if len(b) == 0 { + return nil, nil, nil, newDecodeError(b, "expected key but found none") + } + + // simple-key = quoted-key / unquoted-key + // unquoted-key = 1*( ALPHA / DIGIT / %x2D / %x5F ) ; A-Z / a-z / 0-9 / - / _ + // quoted-key = basic-string / literal-string + switch { + case b[0] == '\'': + return p.parseLiteralString(b) + case b[0] == '"': + return p.parseBasicString(b) + case isUnquotedKeyChar(b[0]): + key, rest = scanUnquotedKey(b) + return key, key, rest, nil + default: + return nil, nil, nil, newDecodeError(b[0:1], "invalid character at start of key: %c", b[0]) + } +} + +//nolint:funlen,cyclop +func (p *parser) parseBasicString(b []byte) ([]byte, []byte, []byte, error) { + // basic-string = quotation-mark *basic-char quotation-mark + // quotation-mark = %x22 ; " + // basic-char = basic-unescaped / escaped + // basic-unescaped = wschar / %x21 / %x23-5B / %x5D-7E / non-ascii + // escaped = escape escape-seq-char + // escape-seq-char = %x22 ; " quotation mark U+0022 + // escape-seq-char =/ %x5C ; \ reverse solidus U+005C + // escape-seq-char =/ %x62 ; b backspace U+0008 + // escape-seq-char =/ %x66 ; f form feed U+000C + // escape-seq-char =/ %x6E ; n line feed U+000A + // escape-seq-char =/ %x72 ; r carriage return U+000D + // escape-seq-char =/ %x74 ; t tab U+0009 + // escape-seq-char =/ %x75 4HEXDIG ; uXXXX U+XXXX + // escape-seq-char =/ %x55 8HEXDIG ; UXXXXXXXX U+XXXXXXXX + token, escaped, rest, err := scanBasicString(b) + if err != nil { + return nil, nil, nil, err + } + + startIdx := len(`"`) + endIdx := len(token) - len(`"`) + + // Fast path. If there is no escape sequence, the string should just be + // an UTF-8 encoded string, which is the same as Go. In that case, + // validate the string and return a direct reference to the buffer. + if !escaped { + str := token[startIdx:endIdx] + verr := utf8TomlValidAlreadyEscaped(str) + if verr.Zero() { + return token, str, rest, nil + } + return nil, nil, nil, newDecodeError(str[verr.Index:verr.Index+verr.Size], "invalid UTF-8") + } + + i := startIdx + + var builder bytes.Buffer + + // The scanner ensures that the token starts and ends with quotes and that + // escapes are balanced. 
+ for i < len(token)-1 { + c := token[i] + if c == '\\' { + i++ + c = token[i] + + switch c { + case '"', '\\': + builder.WriteByte(c) + case 'b': + builder.WriteByte('\b') + case 'f': + builder.WriteByte('\f') + case 'n': + builder.WriteByte('\n') + case 'r': + builder.WriteByte('\r') + case 't': + builder.WriteByte('\t') + case 'e': + builder.WriteByte(0x1B) + case 'u': + x, err := hexToRune(token[i+1:len(token)-1], 4) + if err != nil { + return nil, nil, nil, err + } + + builder.WriteRune(x) + i += 4 + case 'U': + x, err := hexToRune(token[i+1:len(token)-1], 8) + if err != nil { + return nil, nil, nil, err + } + + builder.WriteRune(x) + i += 8 + default: + return nil, nil, nil, newDecodeError(token[i:i+1], "invalid escaped character %#U", c) + } + i++ + } else { + size := utf8ValidNext(token[i:]) + if size == 0 { + return nil, nil, nil, newDecodeError(token[i:i+1], "invalid character %#U", c) + } + builder.Write(token[i : i+size]) + i += size + } + } + + return token, builder.Bytes(), rest, nil +} + +func hexToRune(b []byte, length int) (rune, error) { + if len(b) < length { + return -1, newDecodeError(b, "unicode point needs %d character, not %d", length, len(b)) + } + b = b[:length] + + var r uint32 + for i, c := range b { + d := uint32(0) + switch { + case '0' <= c && c <= '9': + d = uint32(c - '0') + case 'a' <= c && c <= 'f': + d = uint32(c - 'a' + 10) + case 'A' <= c && c <= 'F': + d = uint32(c - 'A' + 10) + default: + return -1, newDecodeError(b[i:i+1], "non-hex character") + } + r = r*16 + d + } + + if r > unicode.MaxRune || 0xD800 <= r && r < 0xE000 { + return -1, newDecodeError(b, "escape sequence is invalid Unicode code point") + } + + return rune(r), nil +} + +func (p *parser) parseWhitespace(b []byte) []byte { + // ws = *wschar + // wschar = %x20 ; Space + // wschar =/ %x09 ; Horizontal tab + _, rest := scanWhitespace(b) + + return rest +} + +//nolint:cyclop +func (p *parser) parseIntOrFloatOrDateTime(b []byte) (ast.Reference, []byte, error) { + switch b[0] { + case 'i': + if !scanFollowsInf(b) { + return ast.InvalidReference, nil, newDecodeError(atmost(b, 3), "expected 'inf'") + } + + return p.builder.Push(ast.Node{ + Kind: ast.Float, + Data: b[:3], + }), b[3:], nil + case 'n': + if !scanFollowsNan(b) { + return ast.InvalidReference, nil, newDecodeError(atmost(b, 3), "expected 'nan'") + } + + return p.builder.Push(ast.Node{ + Kind: ast.Float, + Data: b[:3], + }), b[3:], nil + case '+', '-': + return p.scanIntOrFloat(b) + } + + if len(b) < 3 { + return p.scanIntOrFloat(b) + } + + s := 5 + if len(b) < s { + s = len(b) + } + + for idx, c := range b[:s] { + if isDigit(c) { + continue + } + + if idx == 2 && c == ':' || (idx == 4 && c == '-') { + return p.scanDateTime(b) + } + + break + } + + return p.scanIntOrFloat(b) +} + +func (p *parser) scanDateTime(b []byte) (ast.Reference, []byte, error) { + // scans for contiguous characters in [0-9T:Z.+-], and up to one space if + // followed by a digit. + hasDate := false + hasTime := false + hasTz := false + seenSpace := false + + i := 0 +byteLoop: + for ; i < len(b); i++ { + c := b[i] + + switch { + case isDigit(c): + case c == '-': + hasDate = true + const minOffsetOfTz = 8 + if i >= minOffsetOfTz { + hasTz = true + } + case c == 'T' || c == 't' || c == ':' || c == '.': + hasTime = true + case c == '+' || c == '-' || c == 'Z' || c == 'z': + hasTz = true + case c == ' ': + if !seenSpace && i+1 < len(b) && isDigit(b[i+1]) { + i += 2 + // Avoid reaching past the end of the document in case the time + // is malformed. See TestIssue585. 
+ if i >= len(b) { + i-- + } + seenSpace = true + hasTime = true + } else { + break byteLoop + } + default: + break byteLoop + } + } + + var kind ast.Kind + + if hasTime { + if hasDate { + if hasTz { + kind = ast.DateTime + } else { + kind = ast.LocalDateTime + } + } else { + kind = ast.LocalTime + } + } else { + kind = ast.LocalDate + } + + return p.builder.Push(ast.Node{ + Kind: kind, + Data: b[:i], + }), b[i:], nil +} + +//nolint:funlen,gocognit,cyclop +func (p *parser) scanIntOrFloat(b []byte) (ast.Reference, []byte, error) { + i := 0 + + if len(b) > 2 && b[0] == '0' && b[1] != '.' && b[1] != 'e' && b[1] != 'E' { + var isValidRune validRuneFn + + switch b[1] { + case 'x': + isValidRune = isValidHexRune + case 'o': + isValidRune = isValidOctalRune + case 'b': + isValidRune = isValidBinaryRune + default: + i++ + } + + if isValidRune != nil { + i += 2 + for ; i < len(b); i++ { + if !isValidRune(b[i]) { + break + } + } + } + + return p.builder.Push(ast.Node{ + Kind: ast.Integer, + Data: b[:i], + }), b[i:], nil + } + + isFloat := false + + for ; i < len(b); i++ { + c := b[i] + + if c >= '0' && c <= '9' || c == '+' || c == '-' || c == '_' { + continue + } + + if c == '.' || c == 'e' || c == 'E' { + isFloat = true + + continue + } + + if c == 'i' { + if scanFollowsInf(b[i:]) { + return p.builder.Push(ast.Node{ + Kind: ast.Float, + Data: b[:i+3], + }), b[i+3:], nil + } + + return ast.InvalidReference, nil, newDecodeError(b[i:i+1], "unexpected character 'i' while scanning for a number") + } + + if c == 'n' { + if scanFollowsNan(b[i:]) { + return p.builder.Push(ast.Node{ + Kind: ast.Float, + Data: b[:i+3], + }), b[i+3:], nil + } + + return ast.InvalidReference, nil, newDecodeError(b[i:i+1], "unexpected character 'n' while scanning for a number") + } + + break + } + + if i == 0 { + return ast.InvalidReference, b, newDecodeError(b, "incomplete number") + } + + kind := ast.Integer + + if isFloat { + kind = ast.Float + } + + return p.builder.Push(ast.Node{ + Kind: kind, + Data: b[:i], + }), b[i:], nil +} + +func isDigit(r byte) bool { + return r >= '0' && r <= '9' +} + +type validRuneFn func(r byte) bool + +func isValidHexRune(r byte) bool { + return r >= 'a' && r <= 'f' || + r >= 'A' && r <= 'F' || + r >= '0' && r <= '9' || + r == '_' +} + +func isValidOctalRune(r byte) bool { + return r >= '0' && r <= '7' || r == '_' +} + +func isValidBinaryRune(r byte) bool { + return r == '0' || r == '1' || r == '_' +} + +func expect(x byte, b []byte) ([]byte, error) { + if len(b) == 0 { + return nil, newDecodeError(b, "expected character %c but the document ended here", x) + } + + if b[0] != x { + return nil, newDecodeError(b[0:1], "expected character %c", x) + } + + return b[1:], nil +} diff --git a/vendor/github.com/pelletier/go-toml/v2/scanner.go b/vendor/github.com/pelletier/go-toml/v2/scanner.go new file mode 100644 index 0000000000..bb445fab4c --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/scanner.go @@ -0,0 +1,269 @@ +package toml + +func scanFollows(b []byte, pattern string) bool { + n := len(pattern) + + return len(b) >= n && string(b[:n]) == pattern +} + +func scanFollowsMultilineBasicStringDelimiter(b []byte) bool { + return scanFollows(b, `"""`) +} + +func scanFollowsMultilineLiteralStringDelimiter(b []byte) bool { + return scanFollows(b, `'''`) +} + +func scanFollowsTrue(b []byte) bool { + return scanFollows(b, `true`) +} + +func scanFollowsFalse(b []byte) bool { + return scanFollows(b, `false`) +} + +func scanFollowsInf(b []byte) bool { + return scanFollows(b, `inf`) +} + +func 
scanFollowsNan(b []byte) bool { + return scanFollows(b, `nan`) +} + +func scanUnquotedKey(b []byte) ([]byte, []byte) { + // unquoted-key = 1*( ALPHA / DIGIT / %x2D / %x5F ) ; A-Z / a-z / 0-9 / - / _ + for i := 0; i < len(b); i++ { + if !isUnquotedKeyChar(b[i]) { + return b[:i], b[i:] + } + } + + return b, b[len(b):] +} + +func isUnquotedKeyChar(r byte) bool { + return (r >= 'A' && r <= 'Z') || (r >= 'a' && r <= 'z') || (r >= '0' && r <= '9') || r == '-' || r == '_' +} + +func scanLiteralString(b []byte) ([]byte, []byte, error) { + // literal-string = apostrophe *literal-char apostrophe + // apostrophe = %x27 ; ' apostrophe + // literal-char = %x09 / %x20-26 / %x28-7E / non-ascii + for i := 1; i < len(b); { + switch b[i] { + case '\'': + return b[:i+1], b[i+1:], nil + case '\n', '\r': + return nil, nil, newDecodeError(b[i:i+1], "literal strings cannot have new lines") + } + size := utf8ValidNext(b[i:]) + if size == 0 { + return nil, nil, newDecodeError(b[i:i+1], "invalid character") + } + i += size + } + + return nil, nil, newDecodeError(b[len(b):], "unterminated literal string") +} + +func scanMultilineLiteralString(b []byte) ([]byte, []byte, error) { + // ml-literal-string = ml-literal-string-delim [ newline ] ml-literal-body + // ml-literal-string-delim + // ml-literal-string-delim = 3apostrophe + // ml-literal-body = *mll-content *( mll-quotes 1*mll-content ) [ mll-quotes ] + // + // mll-content = mll-char / newline + // mll-char = %x09 / %x20-26 / %x28-7E / non-ascii + // mll-quotes = 1*2apostrophe + for i := 3; i < len(b); { + switch b[i] { + case '\'': + if scanFollowsMultilineLiteralStringDelimiter(b[i:]) { + i += 3 + + // At that point we found 3 apostrophe, and i is the + // index of the byte after the third one. The scanner + // needs to be eager, because there can be an extra 2 + // apostrophe that can be accepted at the end of the + // string. 
+ + if i >= len(b) || b[i] != '\'' { + return b[:i], b[i:], nil + } + i++ + + if i >= len(b) || b[i] != '\'' { + return b[:i], b[i:], nil + } + i++ + + if i < len(b) && b[i] == '\'' { + return nil, nil, newDecodeError(b[i-3:i+1], "''' not allowed in multiline literal string") + } + + return b[:i], b[i:], nil + } + case '\r': + if len(b) < i+2 { + return nil, nil, newDecodeError(b[len(b):], `need a \n after \r`) + } + if b[i+1] != '\n' { + return nil, nil, newDecodeError(b[i:i+2], `need a \n after \r`) + } + i += 2 // skip the \n + continue + } + size := utf8ValidNext(b[i:]) + if size == 0 { + return nil, nil, newDecodeError(b[i:i+1], "invalid character") + } + i += size + } + + return nil, nil, newDecodeError(b[len(b):], `multiline literal string not terminated by '''`) +} + +func scanWindowsNewline(b []byte) ([]byte, []byte, error) { + const lenCRLF = 2 + if len(b) < lenCRLF { + return nil, nil, newDecodeError(b, "windows new line expected") + } + + if b[1] != '\n' { + return nil, nil, newDecodeError(b, `windows new line should be \r\n`) + } + + return b[:lenCRLF], b[lenCRLF:], nil +} + +func scanWhitespace(b []byte) ([]byte, []byte) { + for i := 0; i < len(b); i++ { + switch b[i] { + case ' ', '\t': + continue + default: + return b[:i], b[i:] + } + } + + return b, b[len(b):] +} + +//nolint:unparam +func scanComment(b []byte) ([]byte, []byte, error) { + // comment-start-symbol = %x23 ; # + // non-ascii = %x80-D7FF / %xE000-10FFFF + // non-eol = %x09 / %x20-7F / non-ascii + // + // comment = comment-start-symbol *non-eol + + for i := 1; i < len(b); { + if b[i] == '\n' { + return b[:i], b[i:], nil + } + if b[i] == '\r' { + if i+1 < len(b) && b[i+1] == '\n' { + return b[:i+1], b[i+1:], nil + } + return nil, nil, newDecodeError(b[i:i+1], "invalid character in comment") + } + size := utf8ValidNext(b[i:]) + if size == 0 { + return nil, nil, newDecodeError(b[i:i+1], "invalid character in comment") + } + + i += size + } + + return b, b[len(b):], nil +} + +func scanBasicString(b []byte) ([]byte, bool, []byte, error) { + // basic-string = quotation-mark *basic-char quotation-mark + // quotation-mark = %x22 ; " + // basic-char = basic-unescaped / escaped + // basic-unescaped = wschar / %x21 / %x23-5B / %x5D-7E / non-ascii + // escaped = escape escape-seq-char + escaped := false + i := 1 + + for ; i < len(b); i++ { + switch b[i] { + case '"': + return b[:i+1], escaped, b[i+1:], nil + case '\n', '\r': + return nil, escaped, nil, newDecodeError(b[i:i+1], "basic strings cannot have new lines") + case '\\': + if len(b) < i+2 { + return nil, escaped, nil, newDecodeError(b[i:i+1], "need a character after \\") + } + escaped = true + i++ // skip the next character + } + } + + return nil, escaped, nil, newDecodeError(b[len(b):], `basic string not terminated by "`) +} + +func scanMultilineBasicString(b []byte) ([]byte, bool, []byte, error) { + // ml-basic-string = ml-basic-string-delim [ newline ] ml-basic-body + // ml-basic-string-delim + // ml-basic-string-delim = 3quotation-mark + // ml-basic-body = *mlb-content *( mlb-quotes 1*mlb-content ) [ mlb-quotes ] + // + // mlb-content = mlb-char / newline / mlb-escaped-nl + // mlb-char = mlb-unescaped / escaped + // mlb-quotes = 1*2quotation-mark + // mlb-unescaped = wschar / %x21 / %x23-5B / %x5D-7E / non-ascii + // mlb-escaped-nl = escape ws newline *( wschar / newline ) + + escaped := false + i := 3 + + for ; i < len(b); i++ { + switch b[i] { + case '"': + if scanFollowsMultilineBasicStringDelimiter(b[i:]) { + i += 3 + + // At that point we found 3 apostrophe, 
and i is the + // index of the byte after the third one. The scanner + // needs to be eager, because there can be an extra 2 + // apostrophe that can be accepted at the end of the + // string. + + if i >= len(b) || b[i] != '"' { + return b[:i], escaped, b[i:], nil + } + i++ + + if i >= len(b) || b[i] != '"' { + return b[:i], escaped, b[i:], nil + } + i++ + + if i < len(b) && b[i] == '"' { + return nil, escaped, nil, newDecodeError(b[i-3:i+1], `""" not allowed in multiline basic string`) + } + + return b[:i], escaped, b[i:], nil + } + case '\\': + if len(b) < i+2 { + return nil, escaped, nil, newDecodeError(b[len(b):], "need a character after \\") + } + escaped = true + i++ // skip the next character + case '\r': + if len(b) < i+2 { + return nil, escaped, nil, newDecodeError(b[len(b):], `need a \n after \r`) + } + if b[i+1] != '\n' { + return nil, escaped, nil, newDecodeError(b[i:i+2], `need a \n after \r`) + } + i++ // skip the \n + } + } + + return nil, escaped, nil, newDecodeError(b[len(b):], `multiline basic string not terminated by """`) +} diff --git a/vendor/github.com/pelletier/go-toml/v2/strict.go b/vendor/github.com/pelletier/go-toml/v2/strict.go new file mode 100644 index 0000000000..b7830d139c --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/strict.go @@ -0,0 +1,107 @@ +package toml + +import ( + "github.com/pelletier/go-toml/v2/internal/ast" + "github.com/pelletier/go-toml/v2/internal/danger" + "github.com/pelletier/go-toml/v2/internal/tracker" +) + +type strict struct { + Enabled bool + + // Tracks the current key being processed. + key tracker.KeyTracker + + missing []decodeError +} + +func (s *strict) EnterTable(node *ast.Node) { + if !s.Enabled { + return + } + + s.key.UpdateTable(node) +} + +func (s *strict) EnterArrayTable(node *ast.Node) { + if !s.Enabled { + return + } + + s.key.UpdateArrayTable(node) +} + +func (s *strict) EnterKeyValue(node *ast.Node) { + if !s.Enabled { + return + } + + s.key.Push(node) +} + +func (s *strict) ExitKeyValue(node *ast.Node) { + if !s.Enabled { + return + } + + s.key.Pop(node) +} + +func (s *strict) MissingTable(node *ast.Node) { + if !s.Enabled { + return + } + + s.missing = append(s.missing, decodeError{ + highlight: keyLocation(node), + message: "missing table", + key: s.key.Key(), + }) +} + +func (s *strict) MissingField(node *ast.Node) { + if !s.Enabled { + return + } + + s.missing = append(s.missing, decodeError{ + highlight: keyLocation(node), + message: "missing field", + key: s.key.Key(), + }) +} + +func (s *strict) Error(doc []byte) error { + if !s.Enabled || len(s.missing) == 0 { + return nil + } + + err := &StrictMissingError{ + Errors: make([]DecodeError, 0, len(s.missing)), + } + + for _, derr := range s.missing { + derr := derr + err.Errors = append(err.Errors, *wrapDecodeError(doc, &derr)) + } + + return err +} + +func keyLocation(node *ast.Node) []byte { + k := node.Key() + + hasOne := k.Next() + if !hasOne { + panic("should not be called with empty key") + } + + start := k.Node().Data + end := k.Node().Data + + for k.Next() { + end = k.Node().Data + } + + return danger.BytesRange(start, end) +} diff --git a/vendor/github.com/pelletier/go-toml/v2/toml.abnf b/vendor/github.com/pelletier/go-toml/v2/toml.abnf new file mode 100644 index 0000000000..473f3749e8 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/toml.abnf @@ -0,0 +1,243 @@ +;; This document describes TOML's syntax, using the ABNF format (defined in +;; RFC 5234 -- https://www.ietf.org/rfc/rfc5234.txt). 
+;; +;; All valid TOML documents will match this description, however certain +;; invalid documents would need to be rejected as per the semantics described +;; in the supporting text description. + +;; It is possible to try this grammar interactively, using instaparse. +;; http://instaparse.mojombo.com/ +;; +;; To do so, in the lower right, click on Options and change `:input-format` to +;; ':abnf'. Then paste this entire ABNF document into the grammar entry box +;; (above the options). Then you can type or paste a sample TOML document into +;; the beige box on the left. Tada! + +;; Overall Structure + +toml = expression *( newline expression ) + +expression = ws [ comment ] +expression =/ ws keyval ws [ comment ] +expression =/ ws table ws [ comment ] + +;; Whitespace + +ws = *wschar +wschar = %x20 ; Space +wschar =/ %x09 ; Horizontal tab + +;; Newline + +newline = %x0A ; LF +newline =/ %x0D.0A ; CRLF + +;; Comment + +comment-start-symbol = %x23 ; # +non-ascii = %x80-D7FF / %xE000-10FFFF +non-eol = %x09 / %x20-7F / non-ascii + +comment = comment-start-symbol *non-eol + +;; Key-Value pairs + +keyval = key keyval-sep val + +key = simple-key / dotted-key +simple-key = quoted-key / unquoted-key + +unquoted-key = 1*( ALPHA / DIGIT / %x2D / %x5F ) ; A-Z / a-z / 0-9 / - / _ +quoted-key = basic-string / literal-string +dotted-key = simple-key 1*( dot-sep simple-key ) + +dot-sep = ws %x2E ws ; . Period +keyval-sep = ws %x3D ws ; = + +val = string / boolean / array / inline-table / date-time / float / integer + +;; String + +string = ml-basic-string / basic-string / ml-literal-string / literal-string + +;; Basic String + +basic-string = quotation-mark *basic-char quotation-mark + +quotation-mark = %x22 ; " + +basic-char = basic-unescaped / escaped +basic-unescaped = wschar / %x21 / %x23-5B / %x5D-7E / non-ascii +escaped = escape escape-seq-char + +escape = %x5C ; \ +escape-seq-char = %x22 ; " quotation mark U+0022 +escape-seq-char =/ %x5C ; \ reverse solidus U+005C +escape-seq-char =/ %x62 ; b backspace U+0008 +escape-seq-char =/ %x66 ; f form feed U+000C +escape-seq-char =/ %x6E ; n line feed U+000A +escape-seq-char =/ %x72 ; r carriage return U+000D +escape-seq-char =/ %x74 ; t tab U+0009 +escape-seq-char =/ %x75 4HEXDIG ; uXXXX U+XXXX +escape-seq-char =/ %x55 8HEXDIG ; UXXXXXXXX U+XXXXXXXX + +;; Multiline Basic String + +ml-basic-string = ml-basic-string-delim [ newline ] ml-basic-body + ml-basic-string-delim +ml-basic-string-delim = 3quotation-mark +ml-basic-body = *mlb-content *( mlb-quotes 1*mlb-content ) [ mlb-quotes ] + +mlb-content = mlb-char / newline / mlb-escaped-nl +mlb-char = mlb-unescaped / escaped +mlb-quotes = 1*2quotation-mark +mlb-unescaped = wschar / %x21 / %x23-5B / %x5D-7E / non-ascii +mlb-escaped-nl = escape ws newline *( wschar / newline ) + +;; Literal String + +literal-string = apostrophe *literal-char apostrophe + +apostrophe = %x27 ; ' apostrophe + +literal-char = %x09 / %x20-26 / %x28-7E / non-ascii + +;; Multiline Literal String + +ml-literal-string = ml-literal-string-delim [ newline ] ml-literal-body + ml-literal-string-delim +ml-literal-string-delim = 3apostrophe +ml-literal-body = *mll-content *( mll-quotes 1*mll-content ) [ mll-quotes ] + +mll-content = mll-char / newline +mll-char = %x09 / %x20-26 / %x28-7E / non-ascii +mll-quotes = 1*2apostrophe + +;; Integer + +integer = dec-int / hex-int / oct-int / bin-int + +minus = %x2D ; - +plus = %x2B ; + +underscore = %x5F ; _ +digit1-9 = %x31-39 ; 1-9 +digit0-7 = %x30-37 ; 0-7 +digit0-1 = %x30-31 ; 0-1 + +hex-prefix = 
%x30.78 ; 0x +oct-prefix = %x30.6F ; 0o +bin-prefix = %x30.62 ; 0b + +dec-int = [ minus / plus ] unsigned-dec-int +unsigned-dec-int = DIGIT / digit1-9 1*( DIGIT / underscore DIGIT ) + +hex-int = hex-prefix HEXDIG *( HEXDIG / underscore HEXDIG ) +oct-int = oct-prefix digit0-7 *( digit0-7 / underscore digit0-7 ) +bin-int = bin-prefix digit0-1 *( digit0-1 / underscore digit0-1 ) + +;; Float + +float = float-int-part ( exp / frac [ exp ] ) +float =/ special-float + +float-int-part = dec-int +frac = decimal-point zero-prefixable-int +decimal-point = %x2E ; . +zero-prefixable-int = DIGIT *( DIGIT / underscore DIGIT ) + +exp = "e" float-exp-part +float-exp-part = [ minus / plus ] zero-prefixable-int + +special-float = [ minus / plus ] ( inf / nan ) +inf = %x69.6e.66 ; inf +nan = %x6e.61.6e ; nan + +;; Boolean + +boolean = true / false + +true = %x74.72.75.65 ; true +false = %x66.61.6C.73.65 ; false + +;; Date and Time (as defined in RFC 3339) + +date-time = offset-date-time / local-date-time / local-date / local-time + +date-fullyear = 4DIGIT +date-month = 2DIGIT ; 01-12 +date-mday = 2DIGIT ; 01-28, 01-29, 01-30, 01-31 based on month/year +time-delim = "T" / %x20 ; T, t, or space +time-hour = 2DIGIT ; 00-23 +time-minute = 2DIGIT ; 00-59 +time-second = 2DIGIT ; 00-58, 00-59, 00-60 based on leap second rules +time-secfrac = "." 1*DIGIT +time-numoffset = ( "+" / "-" ) time-hour ":" time-minute +time-offset = "Z" / time-numoffset + +partial-time = time-hour ":" time-minute ":" time-second [ time-secfrac ] +full-date = date-fullyear "-" date-month "-" date-mday +full-time = partial-time time-offset + +;; Offset Date-Time + +offset-date-time = full-date time-delim full-time + +;; Local Date-Time + +local-date-time = full-date time-delim partial-time + +;; Local Date + +local-date = full-date + +;; Local Time + +local-time = partial-time + +;; Array + +array = array-open [ array-values ] ws-comment-newline array-close + +array-open = %x5B ; [ +array-close = %x5D ; ] + +array-values = ws-comment-newline val ws-comment-newline array-sep array-values +array-values =/ ws-comment-newline val ws-comment-newline [ array-sep ] + +array-sep = %x2C ; , Comma + +ws-comment-newline = *( wschar / [ comment ] newline ) + +;; Table + +table = std-table / array-table + +;; Standard Table + +std-table = std-table-open key std-table-close + +std-table-open = %x5B ws ; [ Left square bracket +std-table-close = ws %x5D ; ] Right square bracket + +;; Inline Table + +inline-table = inline-table-open [ inline-table-keyvals ] inline-table-close + +inline-table-open = %x7B ws ; { +inline-table-close = ws %x7D ; } +inline-table-sep = ws %x2C ws ; , Comma + +inline-table-keyvals = keyval [ inline-table-sep inline-table-keyvals ] + +;; Array Table + +array-table = array-table-open key array-table-close + +array-table-open = %x5B.5B ws ; [[ Double left square bracket +array-table-close = ws %x5D.5D ; ]] Double right square bracket + +;; Built-in ABNF terms, reproduced here for clarity + +ALPHA = %x41-5A / %x61-7A ; A-Z / a-z +DIGIT = %x30-39 ; 0-9 +HEXDIG = DIGIT / "A" / "B" / "C" / "D" / "E" / "F" diff --git a/vendor/github.com/pelletier/go-toml/v2/types.go b/vendor/github.com/pelletier/go-toml/v2/types.go new file mode 100644 index 0000000000..630a454663 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/types.go @@ -0,0 +1,14 @@ +package toml + +import ( + "encoding" + "reflect" + "time" +) + +var timeType = reflect.TypeOf(time.Time{}) +var textMarshalerType = reflect.TypeOf(new(encoding.TextMarshaler)).Elem() +var 
textUnmarshalerType = reflect.TypeOf(new(encoding.TextUnmarshaler)).Elem() +var mapStringInterfaceType = reflect.TypeOf(map[string]interface{}{}) +var sliceInterfaceType = reflect.TypeOf([]interface{}{}) +var stringType = reflect.TypeOf("") diff --git a/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go b/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go new file mode 100644 index 0000000000..d0d7a72d08 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go @@ -0,0 +1,1227 @@ +package toml + +import ( + "encoding" + "errors" + "fmt" + "io" + "io/ioutil" + "math" + "reflect" + "strings" + "sync/atomic" + "time" + + "github.com/pelletier/go-toml/v2/internal/ast" + "github.com/pelletier/go-toml/v2/internal/danger" + "github.com/pelletier/go-toml/v2/internal/tracker" +) + +// Unmarshal deserializes a TOML document into a Go value. +// +// It is a shortcut for Decoder.Decode() with the default options. +func Unmarshal(data []byte, v interface{}) error { + p := parser{} + p.Reset(data) + d := decoder{p: &p} + + return d.FromParser(v) +} + +// Decoder reads and decode a TOML document from an input stream. +type Decoder struct { + // input + r io.Reader + + // global settings + strict bool +} + +// NewDecoder creates a new Decoder that will read from r. +func NewDecoder(r io.Reader) *Decoder { + return &Decoder{r: r} +} + +// DisallowUnknownFields causes the Decoder to return an error when the +// destination is a struct and the input contains a key that does not match a +// non-ignored field. +// +// In that case, the Decoder returns a StrictMissingError that can be used to +// retrieve the individual errors as well as generate a human readable +// description of the missing fields. +func (d *Decoder) DisallowUnknownFields() *Decoder { + d.strict = true + return d +} + +// Decode the whole content of r into v. +// +// By default, values in the document that don't exist in the target Go value +// are ignored. See Decoder.DisallowUnknownFields() to change this behavior. +// +// When a TOML local date, time, or date-time is decoded into a time.Time, its +// value is represented in time.Local timezone. Otherwise the approriate Local* +// structure is used. For time values, precision up to the nanosecond is +// supported by truncating extra digits. +// +// Empty tables decoded in an interface{} create an empty initialized +// map[string]interface{}. +// +// Types implementing the encoding.TextUnmarshaler interface are decoded from a +// TOML string. +// +// When decoding a number, go-toml will return an error if the number is out of +// bounds for the target type (which includes negative numbers when decoding +// into an unsigned int). +// +// If an error occurs while decoding the content of the document, this function +// returns a toml.DecodeError, providing context about the issue. When using +// strict mode and a field is missing, a `toml.StrictMissingError` is +// returned. In any other case, this function returns a standard Go error. 
+// +// # Type mapping +// +// List of supported TOML types and their associated accepted Go types: +// +// String -> string +// Integer -> uint*, int*, depending on size +// Float -> float*, depending on size +// Boolean -> bool +// Offset Date-Time -> time.Time +// Local Date-time -> LocalDateTime, time.Time +// Local Date -> LocalDate, time.Time +// Local Time -> LocalTime, time.Time +// Array -> slice and array, depending on elements types +// Table -> map and struct +// Inline Table -> same as Table +// Array of Tables -> same as Array and Table +func (d *Decoder) Decode(v interface{}) error { + b, err := ioutil.ReadAll(d.r) + if err != nil { + return fmt.Errorf("toml: %w", err) + } + + p := parser{} + p.Reset(b) + dec := decoder{ + p: &p, + strict: strict{ + Enabled: d.strict, + }, + } + + return dec.FromParser(v) +} + +type decoder struct { + // Which parser instance in use for this decoding session. + p *parser + + // Flag indicating that the current expression is stashed. + // If set to true, calling nextExpr will not actually pull a new expression + // but turn off the flag instead. + stashedExpr bool + + // Skip expressions until a table is found. This is set to true when a + // table could not be created (missing field in map), so all KV expressions + // need to be skipped. + skipUntilTable bool + + // Tracks position in Go arrays. + // This is used when decoding [[array tables]] into Go arrays. Given array + // tables are separate TOML expression, we need to keep track of where we + // are at in the Go array, as we can't just introspect its size. + arrayIndexes map[reflect.Value]int + + // Tracks keys that have been seen, with which type. + seen tracker.SeenTracker + + // Strict mode + strict strict + + // Current context for the error. 
+ errorContext *errorContext +} + +type errorContext struct { + Struct reflect.Type + Field []int +} + +func (d *decoder) typeMismatchError(toml string, target reflect.Type) error { + if d.errorContext != nil && d.errorContext.Struct != nil { + ctx := d.errorContext + f := ctx.Struct.FieldByIndex(ctx.Field) + return fmt.Errorf("toml: cannot decode TOML %s into struct field %s.%s of type %s", toml, ctx.Struct, f.Name, f.Type) + } + return fmt.Errorf("toml: cannot decode TOML %s into a Go value of type %s", toml, target) +} + +func (d *decoder) expr() *ast.Node { + return d.p.Expression() +} + +func (d *decoder) nextExpr() bool { + if d.stashedExpr { + d.stashedExpr = false + return true + } + return d.p.NextExpression() +} + +func (d *decoder) stashExpr() { + d.stashedExpr = true +} + +func (d *decoder) arrayIndex(shouldAppend bool, v reflect.Value) int { + if d.arrayIndexes == nil { + d.arrayIndexes = make(map[reflect.Value]int, 1) + } + + idx, ok := d.arrayIndexes[v] + + if !ok { + d.arrayIndexes[v] = 0 + } else if shouldAppend { + idx++ + d.arrayIndexes[v] = idx + } + + return idx +} + +func (d *decoder) FromParser(v interface{}) error { + r := reflect.ValueOf(v) + if r.Kind() != reflect.Ptr { + return fmt.Errorf("toml: decoding can only be performed into a pointer, not %s", r.Kind()) + } + + if r.IsNil() { + return fmt.Errorf("toml: decoding pointer target cannot be nil") + } + + r = r.Elem() + if r.Kind() == reflect.Interface && r.IsNil() { + newMap := map[string]interface{}{} + r.Set(reflect.ValueOf(newMap)) + } + + err := d.fromParser(r) + if err == nil { + return d.strict.Error(d.p.data) + } + + var e *decodeError + if errors.As(err, &e) { + return wrapDecodeError(d.p.data, e) + } + + return err +} + +func (d *decoder) fromParser(root reflect.Value) error { + for d.nextExpr() { + err := d.handleRootExpression(d.expr(), root) + if err != nil { + return err + } + } + + return d.p.Error() +} + +/* +Rules for the unmarshal code: + +- The stack is used to keep track of which values need to be set where. +- handle* functions <=> switch on a given ast.Kind. +- unmarshalX* functions need to unmarshal a node of kind X. +- An "object" is either a struct or a map. 
+*/ + +func (d *decoder) handleRootExpression(expr *ast.Node, v reflect.Value) error { + var x reflect.Value + var err error + + if !(d.skipUntilTable && expr.Kind == ast.KeyValue) { + err = d.seen.CheckExpression(expr) + if err != nil { + return err + } + } + + switch expr.Kind { + case ast.KeyValue: + if d.skipUntilTable { + return nil + } + x, err = d.handleKeyValue(expr, v) + case ast.Table: + d.skipUntilTable = false + d.strict.EnterTable(expr) + x, err = d.handleTable(expr.Key(), v) + case ast.ArrayTable: + d.skipUntilTable = false + d.strict.EnterArrayTable(expr) + x, err = d.handleArrayTable(expr.Key(), v) + default: + panic(fmt.Errorf("parser should not permit expression of kind %s at document root", expr.Kind)) + } + + if d.skipUntilTable { + if expr.Kind == ast.Table || expr.Kind == ast.ArrayTable { + d.strict.MissingTable(expr) + } + } else if err == nil && x.IsValid() { + v.Set(x) + } + + return err +} + +func (d *decoder) handleArrayTable(key ast.Iterator, v reflect.Value) (reflect.Value, error) { + if key.Next() { + return d.handleArrayTablePart(key, v) + } + return d.handleKeyValues(v) +} + +func (d *decoder) handleArrayTableCollectionLast(key ast.Iterator, v reflect.Value) (reflect.Value, error) { + switch v.Kind() { + case reflect.Interface: + elem := v.Elem() + if !elem.IsValid() { + elem = reflect.New(sliceInterfaceType).Elem() + elem.Set(reflect.MakeSlice(sliceInterfaceType, 0, 16)) + } else if elem.Kind() == reflect.Slice { + if elem.Type() != sliceInterfaceType { + elem = reflect.New(sliceInterfaceType).Elem() + elem.Set(reflect.MakeSlice(sliceInterfaceType, 0, 16)) + } else if !elem.CanSet() { + nelem := reflect.New(sliceInterfaceType).Elem() + nelem.Set(reflect.MakeSlice(sliceInterfaceType, elem.Len(), elem.Cap())) + reflect.Copy(nelem, elem) + elem = nelem + } + } + return d.handleArrayTableCollectionLast(key, elem) + case reflect.Ptr: + elem := v.Elem() + if !elem.IsValid() { + ptr := reflect.New(v.Type().Elem()) + v.Set(ptr) + elem = ptr.Elem() + } + + elem, err := d.handleArrayTableCollectionLast(key, elem) + if err != nil { + return reflect.Value{}, err + } + v.Elem().Set(elem) + + return v, nil + case reflect.Slice: + elemType := v.Type().Elem() + var elem reflect.Value + if elemType.Kind() == reflect.Interface { + elem = makeMapStringInterface() + } else { + elem = reflect.New(elemType).Elem() + } + elem2, err := d.handleArrayTable(key, elem) + if err != nil { + return reflect.Value{}, err + } + if elem2.IsValid() { + elem = elem2 + } + return reflect.Append(v, elem), nil + case reflect.Array: + idx := d.arrayIndex(true, v) + if idx >= v.Len() { + return v, fmt.Errorf("toml: cannot decode array table into %s at position %d", v.Type(), idx) + } + elem := v.Index(idx) + _, err := d.handleArrayTable(key, elem) + return v, err + default: + return reflect.Value{}, fmt.Errorf("toml: cannot decode array table into a %s", v.Type()) + } +} + +// When parsing an array table expression, each part of the key needs to be +// evaluated like a normal key, but if it returns a collection, it also needs to +// point to the last element of the collection. Unless it is the last part of +// the key, then it needs to create a new element at the end. 
+func (d *decoder) handleArrayTableCollection(key ast.Iterator, v reflect.Value) (reflect.Value, error) { + if key.IsLast() { + return d.handleArrayTableCollectionLast(key, v) + } + + switch v.Kind() { + case reflect.Ptr: + elem := v.Elem() + if !elem.IsValid() { + ptr := reflect.New(v.Type().Elem()) + v.Set(ptr) + elem = ptr.Elem() + } + + elem, err := d.handleArrayTableCollection(key, elem) + if err != nil { + return reflect.Value{}, err + } + if elem.IsValid() { + v.Elem().Set(elem) + } + + return v, nil + case reflect.Slice: + elem := v.Index(v.Len() - 1) + x, err := d.handleArrayTable(key, elem) + if err != nil || d.skipUntilTable { + return reflect.Value{}, err + } + if x.IsValid() { + elem.Set(x) + } + + return v, err + case reflect.Array: + idx := d.arrayIndex(false, v) + if idx >= v.Len() { + return v, fmt.Errorf("toml: cannot decode array table into %s at position %d", v.Type(), idx) + } + elem := v.Index(idx) + _, err := d.handleArrayTable(key, elem) + return v, err + } + + return d.handleArrayTable(key, v) +} + +func (d *decoder) handleKeyPart(key ast.Iterator, v reflect.Value, nextFn handlerFn, makeFn valueMakerFn) (reflect.Value, error) { + var rv reflect.Value + + // First, dispatch over v to make sure it is a valid object. + // There is no guarantee over what it could be. + switch v.Kind() { + case reflect.Ptr: + elem := v.Elem() + if !elem.IsValid() { + v.Set(reflect.New(v.Type().Elem())) + } + elem = v.Elem() + return d.handleKeyPart(key, elem, nextFn, makeFn) + case reflect.Map: + vt := v.Type() + + // Create the key for the map element. Convert to key type. + mk := reflect.ValueOf(string(key.Node().Data)).Convert(vt.Key()) + + // If the map does not exist, create it. + if v.IsNil() { + vt := v.Type() + v = reflect.MakeMap(vt) + rv = v + } + + mv := v.MapIndex(mk) + set := false + if !mv.IsValid() { + // If there is no value in the map, create a new one according to + // the map type. If the element type is interface, create either a + // map[string]interface{} or a []interface{} depending on whether + // this is the last part of the array table key. 
+ + t := vt.Elem() + if t.Kind() == reflect.Interface { + mv = makeFn() + } else { + mv = reflect.New(t).Elem() + } + set = true + } else if mv.Kind() == reflect.Interface { + mv = mv.Elem() + if !mv.IsValid() { + mv = makeFn() + } + set = true + } else if !mv.CanAddr() { + vt := v.Type() + t := vt.Elem() + oldmv := mv + mv = reflect.New(t).Elem() + mv.Set(oldmv) + set = true + } + + x, err := nextFn(key, mv) + if err != nil { + return reflect.Value{}, err + } + + if x.IsValid() { + mv = x + set = true + } + + if set { + v.SetMapIndex(mk, mv) + } + case reflect.Struct: + path, found := structFieldPath(v, string(key.Node().Data)) + if !found { + d.skipUntilTable = true + return reflect.Value{}, nil + } + + if d.errorContext == nil { + d.errorContext = new(errorContext) + } + t := v.Type() + d.errorContext.Struct = t + d.errorContext.Field = path + + f := fieldByIndex(v, path) + x, err := nextFn(key, f) + if err != nil || d.skipUntilTable { + return reflect.Value{}, err + } + if x.IsValid() { + f.Set(x) + } + d.errorContext.Field = nil + d.errorContext.Struct = nil + case reflect.Interface: + if v.Elem().IsValid() { + v = v.Elem() + } else { + v = makeMapStringInterface() + } + + x, err := d.handleKeyPart(key, v, nextFn, makeFn) + if err != nil { + return reflect.Value{}, err + } + if x.IsValid() { + v = x + } + rv = v + default: + panic(fmt.Errorf("unhandled part: %s", v.Kind())) + } + + return rv, nil +} + +// HandleArrayTablePart navigates the Go structure v using the key v. It is +// only used for the prefix (non-last) parts of an array-table. When +// encountering a collection, it should go to the last element. +func (d *decoder) handleArrayTablePart(key ast.Iterator, v reflect.Value) (reflect.Value, error) { + var makeFn valueMakerFn + if key.IsLast() { + makeFn = makeSliceInterface + } else { + makeFn = makeMapStringInterface + } + return d.handleKeyPart(key, v, d.handleArrayTableCollection, makeFn) +} + +// HandleTable returns a reference when it has checked the next expression but +// cannot handle it. +func (d *decoder) handleTable(key ast.Iterator, v reflect.Value) (reflect.Value, error) { + if v.Kind() == reflect.Slice { + if v.Len() == 0 { + return reflect.Value{}, newDecodeError(key.Node().Data, "cannot store a table in a slice") + } + elem := v.Index(v.Len() - 1) + x, err := d.handleTable(key, elem) + if err != nil { + return reflect.Value{}, err + } + if x.IsValid() { + elem.Set(x) + } + return reflect.Value{}, nil + } + if key.Next() { + // Still scoping the key + return d.handleTablePart(key, v) + } + // Done scoping the key. + // Now handle all the key-value expressions in this table. + return d.handleKeyValues(v) +} + +// Handle root expressions until the end of the document or the next +// non-key-value. +func (d *decoder) handleKeyValues(v reflect.Value) (reflect.Value, error) { + var rv reflect.Value + for d.nextExpr() { + expr := d.expr() + if expr.Kind != ast.KeyValue { + // Stash the expression so that fromParser can just loop and use + // the right handler. + // We could just recurse ourselves here, but at least this gives a + // chance to pop the stack a bit. 
+ d.stashExpr() + break + } + + err := d.seen.CheckExpression(expr) + if err != nil { + return reflect.Value{}, err + } + + x, err := d.handleKeyValue(expr, v) + if err != nil { + return reflect.Value{}, err + } + if x.IsValid() { + v = x + rv = x + } + } + return rv, nil +} + +type ( + handlerFn func(key ast.Iterator, v reflect.Value) (reflect.Value, error) + valueMakerFn func() reflect.Value +) + +func makeMapStringInterface() reflect.Value { + return reflect.MakeMap(mapStringInterfaceType) +} + +func makeSliceInterface() reflect.Value { + return reflect.MakeSlice(sliceInterfaceType, 0, 16) +} + +func (d *decoder) handleTablePart(key ast.Iterator, v reflect.Value) (reflect.Value, error) { + return d.handleKeyPart(key, v, d.handleTable, makeMapStringInterface) +} + +func (d *decoder) tryTextUnmarshaler(node *ast.Node, v reflect.Value) (bool, error) { + // Special case for time, because we allow to unmarshal to it from + // different kind of AST nodes. + if v.Type() == timeType { + return false, nil + } + + if v.CanAddr() && v.Addr().Type().Implements(textUnmarshalerType) { + err := v.Addr().Interface().(encoding.TextUnmarshaler).UnmarshalText(node.Data) + if err != nil { + return false, newDecodeError(d.p.Raw(node.Raw), "%w", err) + } + + return true, nil + } + + return false, nil +} + +func (d *decoder) handleValue(value *ast.Node, v reflect.Value) error { + for v.Kind() == reflect.Ptr { + v = initAndDereferencePointer(v) + } + + ok, err := d.tryTextUnmarshaler(value, v) + if ok || err != nil { + return err + } + + switch value.Kind { + case ast.String: + return d.unmarshalString(value, v) + case ast.Integer: + return d.unmarshalInteger(value, v) + case ast.Float: + return d.unmarshalFloat(value, v) + case ast.Bool: + return d.unmarshalBool(value, v) + case ast.DateTime: + return d.unmarshalDateTime(value, v) + case ast.LocalDate: + return d.unmarshalLocalDate(value, v) + case ast.LocalTime: + return d.unmarshalLocalTime(value, v) + case ast.LocalDateTime: + return d.unmarshalLocalDateTime(value, v) + case ast.InlineTable: + return d.unmarshalInlineTable(value, v) + case ast.Array: + return d.unmarshalArray(value, v) + default: + panic(fmt.Errorf("handleValue not implemented for %s", value.Kind)) + } +} + +func (d *decoder) unmarshalArray(array *ast.Node, v reflect.Value) error { + switch v.Kind() { + case reflect.Slice: + if v.IsNil() { + v.Set(reflect.MakeSlice(v.Type(), 0, 16)) + } else { + v.SetLen(0) + } + case reflect.Array: + // arrays are always initialized + case reflect.Interface: + elem := v.Elem() + if !elem.IsValid() { + elem = reflect.New(sliceInterfaceType).Elem() + elem.Set(reflect.MakeSlice(sliceInterfaceType, 0, 16)) + } else if elem.Kind() == reflect.Slice { + if elem.Type() != sliceInterfaceType { + elem = reflect.New(sliceInterfaceType).Elem() + elem.Set(reflect.MakeSlice(sliceInterfaceType, 0, 16)) + } else if !elem.CanSet() { + nelem := reflect.New(sliceInterfaceType).Elem() + nelem.Set(reflect.MakeSlice(sliceInterfaceType, elem.Len(), elem.Cap())) + reflect.Copy(nelem, elem) + elem = nelem + } + } + err := d.unmarshalArray(array, elem) + if err != nil { + return err + } + v.Set(elem) + return nil + default: + // TODO: use newDecodeError, but first the parser needs to fill + // array.Data. 
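For context, handleValue above dispatches purely on the AST node kind, while tryTextUnmarshaler gives any type implementing encoding.TextUnmarshaler priority over the per-kind unmarshal helpers (time.Time being the special exception). The following is a minimal sketch of that behavior via the public Unmarshal API, under the assumption that the LogLevel and Config types and the sample document are hypothetical.

```go
package main

import (
	"fmt"
	"strings"

	"github.com/pelletier/go-toml/v2"
)

// LogLevel is a hypothetical type implementing encoding.TextUnmarshaler,
// so the decoder should route it through tryTextUnmarshaler instead of the
// per-kind unmarshal* helpers.
type LogLevel int

func (l *LogLevel) UnmarshalText(text []byte) error {
	switch strings.ToLower(string(text)) {
	case "debug":
		*l = 0
	case "info":
		*l = 1
	default:
		return fmt.Errorf("unknown level %q", text)
	}
	return nil
}

type Config struct {
	Name  string   `toml:"name"`
	Level LogLevel `toml:"level"`
	Tags  []string `toml:"tags"`
}

func main() {
	doc := []byte(`
name = "api"
level = "info"
tags = ["a", "b"]
`)
	var cfg Config
	if err := toml.Unmarshal(doc, &cfg); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", cfg) // {Name:api Level:1 Tags:[a b]}
}
```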
+ return d.typeMismatchError("array", v.Type()) + } + + elemType := v.Type().Elem() + + it := array.Children() + idx := 0 + for it.Next() { + n := it.Node() + + // TODO: optimize + if v.Kind() == reflect.Slice { + elem := reflect.New(elemType).Elem() + + err := d.handleValue(n, elem) + if err != nil { + return err + } + + v.Set(reflect.Append(v, elem)) + } else { // array + if idx >= v.Len() { + return nil + } + elem := v.Index(idx) + err := d.handleValue(n, elem) + if err != nil { + return err + } + idx++ + } + } + + return nil +} + +func (d *decoder) unmarshalInlineTable(itable *ast.Node, v reflect.Value) error { + // Make sure v is an initialized object. + switch v.Kind() { + case reflect.Map: + if v.IsNil() { + v.Set(reflect.MakeMap(v.Type())) + } + case reflect.Struct: + // structs are always initialized. + case reflect.Interface: + elem := v.Elem() + if !elem.IsValid() { + elem = makeMapStringInterface() + v.Set(elem) + } + return d.unmarshalInlineTable(itable, elem) + default: + return newDecodeError(itable.Data, "cannot store inline table in Go type %s", v.Kind()) + } + + it := itable.Children() + for it.Next() { + n := it.Node() + + x, err := d.handleKeyValue(n, v) + if err != nil { + return err + } + if x.IsValid() { + v = x + } + } + + return nil +} + +func (d *decoder) unmarshalDateTime(value *ast.Node, v reflect.Value) error { + dt, err := parseDateTime(value.Data) + if err != nil { + return err + } + + v.Set(reflect.ValueOf(dt)) + return nil +} + +func (d *decoder) unmarshalLocalDate(value *ast.Node, v reflect.Value) error { + ld, err := parseLocalDate(value.Data) + if err != nil { + return err + } + + if v.Type() == timeType { + cast := ld.AsTime(time.Local) + v.Set(reflect.ValueOf(cast)) + return nil + } + + v.Set(reflect.ValueOf(ld)) + + return nil +} + +func (d *decoder) unmarshalLocalTime(value *ast.Node, v reflect.Value) error { + lt, rest, err := parseLocalTime(value.Data) + if err != nil { + return err + } + + if len(rest) > 0 { + return newDecodeError(rest, "extra characters at the end of a local time") + } + + v.Set(reflect.ValueOf(lt)) + return nil +} + +func (d *decoder) unmarshalLocalDateTime(value *ast.Node, v reflect.Value) error { + ldt, rest, err := parseLocalDateTime(value.Data) + if err != nil { + return err + } + + if len(rest) > 0 { + return newDecodeError(rest, "extra characters at the end of a local date time") + } + + if v.Type() == timeType { + cast := ldt.AsTime(time.Local) + + v.Set(reflect.ValueOf(cast)) + return nil + } + + v.Set(reflect.ValueOf(ldt)) + + return nil +} + +func (d *decoder) unmarshalBool(value *ast.Node, v reflect.Value) error { + b := value.Data[0] == 't' + + switch v.Kind() { + case reflect.Bool: + v.SetBool(b) + case reflect.Interface: + v.Set(reflect.ValueOf(b)) + default: + return newDecodeError(value.Data, "cannot assign boolean to a %t", b) + } + + return nil +} + +func (d *decoder) unmarshalFloat(value *ast.Node, v reflect.Value) error { + f, err := parseFloat(value.Data) + if err != nil { + return err + } + + switch v.Kind() { + case reflect.Float64: + v.SetFloat(f) + case reflect.Float32: + if f > math.MaxFloat32 { + return newDecodeError(value.Data, "number %f does not fit in a float32", f) + } + v.SetFloat(f) + case reflect.Interface: + v.Set(reflect.ValueOf(f)) + default: + return newDecodeError(value.Data, "float cannot be assigned to %s", v.Kind()) + } + + return nil +} + +const ( + maxInt = int64(^uint(0) >> 1) + minInt = -maxInt - 1 +) + +// Maximum value of uint for decoding. 
Currently the decoder parses the integer +// into an int64. As a result, on architectures where uint is 64 bits, the +// effective maximum uint we can decode is the maximum of int64. On +// architectures where uint is 32 bits, the maximum value we can decode is +// lower: the maximum of uint32. I didn't find a way to figure out this value at +// compile time, so it is computed during initialization. +var maxUint int64 = math.MaxInt64 + +func init() { + m := uint64(^uint(0)) + if m < uint64(maxUint) { + maxUint = int64(m) + } +} + +func (d *decoder) unmarshalInteger(value *ast.Node, v reflect.Value) error { + i, err := parseInteger(value.Data) + if err != nil { + return err + } + + var r reflect.Value + + switch v.Kind() { + case reflect.Int64: + v.SetInt(i) + return nil + case reflect.Int32: + if i < math.MinInt32 || i > math.MaxInt32 { + return fmt.Errorf("toml: number %d does not fit in an int32", i) + } + + r = reflect.ValueOf(int32(i)) + case reflect.Int16: + if i < math.MinInt16 || i > math.MaxInt16 { + return fmt.Errorf("toml: number %d does not fit in an int16", i) + } + + r = reflect.ValueOf(int16(i)) + case reflect.Int8: + if i < math.MinInt8 || i > math.MaxInt8 { + return fmt.Errorf("toml: number %d does not fit in an int8", i) + } + + r = reflect.ValueOf(int8(i)) + case reflect.Int: + if i < minInt || i > maxInt { + return fmt.Errorf("toml: number %d does not fit in an int", i) + } + + r = reflect.ValueOf(int(i)) + case reflect.Uint64: + if i < 0 { + return fmt.Errorf("toml: negative number %d does not fit in an uint64", i) + } + + r = reflect.ValueOf(uint64(i)) + case reflect.Uint32: + if i < 0 || i > math.MaxUint32 { + return fmt.Errorf("toml: negative number %d does not fit in an uint32", i) + } + + r = reflect.ValueOf(uint32(i)) + case reflect.Uint16: + if i < 0 || i > math.MaxUint16 { + return fmt.Errorf("toml: negative number %d does not fit in an uint16", i) + } + + r = reflect.ValueOf(uint16(i)) + case reflect.Uint8: + if i < 0 || i > math.MaxUint8 { + return fmt.Errorf("toml: negative number %d does not fit in an uint8", i) + } + + r = reflect.ValueOf(uint8(i)) + case reflect.Uint: + if i < 0 || i > maxUint { + return fmt.Errorf("toml: negative number %d does not fit in an uint", i) + } + + r = reflect.ValueOf(uint(i)) + case reflect.Interface: + r = reflect.ValueOf(i) + default: + return d.typeMismatchError("integer", v.Type()) + } + + if !r.Type().AssignableTo(v.Type()) { + r = r.Convert(v.Type()) + } + + v.Set(r) + + return nil +} + +func (d *decoder) unmarshalString(value *ast.Node, v reflect.Value) error { + switch v.Kind() { + case reflect.String: + v.SetString(string(value.Data)) + case reflect.Interface: + v.Set(reflect.ValueOf(string(value.Data))) + default: + return newDecodeError(d.p.Raw(value.Raw), "cannot store TOML string into a Go %s", v.Kind()) + } + + return nil +} + +func (d *decoder) handleKeyValue(expr *ast.Node, v reflect.Value) (reflect.Value, error) { + d.strict.EnterKeyValue(expr) + + v, err := d.handleKeyValueInner(expr.Key(), expr.Value(), v) + if d.skipUntilTable { + d.strict.MissingField(expr) + d.skipUntilTable = false + } + + d.strict.ExitKeyValue(expr) + + return v, err +} + +func (d *decoder) handleKeyValueInner(key ast.Iterator, value *ast.Node, v reflect.Value) (reflect.Value, error) { + if key.Next() { + // Still scoping the key + return d.handleKeyValuePart(key, value, v) + } + // Done scoping the key. + // v is whatever Go value we need to fill. 
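The unmarshalInteger cases above parse every TOML integer as an int64 and then range-check it against the destination kind, so an out-of-range value is reported as an error rather than silently truncated. A small hypothetical sketch of the observable behavior (the Limits type and field names are invented):

```go
package main

import (
	"fmt"

	"github.com/pelletier/go-toml/v2"
)

type Limits struct {
	Retries int8   `toml:"retries"` // small integer kinds are range-checked
	Port    uint16 `toml:"port"`
}

func main() {
	// 300 does not fit in an int8, so decoding fails with an error
	// reporting the overflow instead of truncating the value.
	var l Limits
	err := toml.Unmarshal([]byte("retries = 300\nport = 8080\n"), &l)
	fmt.Println(err)
}
```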
+ return reflect.Value{}, d.handleValue(value, v) +} + +func (d *decoder) handleKeyValuePart(key ast.Iterator, value *ast.Node, v reflect.Value) (reflect.Value, error) { + // contains the replacement for v + var rv reflect.Value + + // First, dispatch over v to make sure it is a valid object. + // There is no guarantee over what it could be. + switch v.Kind() { + case reflect.Map: + vt := v.Type() + + mk := reflect.ValueOf(string(key.Node().Data)) + mkt := stringType + + keyType := vt.Key() + if !mkt.AssignableTo(keyType) { + if !mkt.ConvertibleTo(keyType) { + return reflect.Value{}, fmt.Errorf("toml: cannot convert map key of type %s to expected type %s", mkt, keyType) + } + + mk = mk.Convert(keyType) + } + + // If the map does not exist, create it. + if v.IsNil() { + v = reflect.MakeMap(vt) + rv = v + } + + mv := v.MapIndex(mk) + set := false + if !mv.IsValid() { + set = true + mv = reflect.New(v.Type().Elem()).Elem() + } else { + if key.IsLast() { + var x interface{} + mv = reflect.ValueOf(&x).Elem() + set = true + } + } + + nv, err := d.handleKeyValueInner(key, value, mv) + if err != nil { + return reflect.Value{}, err + } + if nv.IsValid() { + mv = nv + set = true + } + + if set { + v.SetMapIndex(mk, mv) + } + case reflect.Struct: + path, found := structFieldPath(v, string(key.Node().Data)) + if !found { + d.skipUntilTable = true + break + } + + if d.errorContext == nil { + d.errorContext = new(errorContext) + } + t := v.Type() + d.errorContext.Struct = t + d.errorContext.Field = path + + f := fieldByIndex(v, path) + x, err := d.handleKeyValueInner(key, value, f) + if err != nil { + return reflect.Value{}, err + } + + if x.IsValid() { + f.Set(x) + } + d.errorContext.Struct = nil + d.errorContext.Field = nil + case reflect.Interface: + v = v.Elem() + + // Following encoding/json: decoding an object into an + // interface{}, it needs to always hold a + // map[string]interface{}. This is for the types to be + // consistent whether a previous value was set or not. + if !v.IsValid() || v.Type() != mapStringInterfaceType { + v = makeMapStringInterface() + } + + x, err := d.handleKeyValuePart(key, value, v) + if err != nil { + return reflect.Value{}, err + } + if x.IsValid() { + v = x + } + rv = v + case reflect.Ptr: + elem := v.Elem() + if !elem.IsValid() { + ptr := reflect.New(v.Type().Elem()) + v.Set(ptr) + rv = v + elem = ptr.Elem() + } + + elem2, err := d.handleKeyValuePart(key, value, elem) + if err != nil { + return reflect.Value{}, err + } + if elem2.IsValid() { + elem = elem2 + } + v.Elem().Set(elem) + default: + return reflect.Value{}, fmt.Errorf("unhandled kv part: %s", v.Kind()) + } + + return rv, nil +} + +func initAndDereferencePointer(v reflect.Value) reflect.Value { + var elem reflect.Value + if v.IsNil() { + ptr := reflect.New(v.Type().Elem()) + v.Set(ptr) + } + elem = v.Elem() + return elem +} + +// Same as reflect.Value.FieldByIndex, but creates pointers if needed. 
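initAndDereferencePointer above, together with the reflect.Ptr cases in the key handlers, allocates nil pointers on demand while descending into tables, so pointer-typed struct fields do not need to be pre-initialized by the caller. A minimal hypothetical sketch (the TLS, Server, and Config types are invented):

```go
package main

import (
	"fmt"

	"github.com/pelletier/go-toml/v2"
)

type TLS struct {
	Cert string `toml:"cert"`
}

type Server struct {
	Addr string `toml:"addr"`
	// A nil pointer field: the decoder allocates it on demand while
	// descending into the [server.tls] table.
	TLS *TLS `toml:"tls"`
}

type Config struct {
	Server Server `toml:"server"`
}

func main() {
	doc := []byte(`
[server]
addr = ":8080"

[server.tls]
cert = "/etc/ssl/cert.pem"
`)
	var cfg Config
	if err := toml.Unmarshal(doc, &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.Server.TLS.Cert) // /etc/ssl/cert.pem
}
```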
+func fieldByIndex(v reflect.Value, path []int) reflect.Value { + for i, x := range path { + v = v.Field(x) + + if i < len(path)-1 && v.Kind() == reflect.Ptr { + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + v = v.Elem() + } + } + return v +} + +type fieldPathsMap = map[string][]int + +var globalFieldPathsCache atomic.Value // map[danger.TypeID]fieldPathsMap + +func structFieldPath(v reflect.Value, name string) ([]int, bool) { + t := v.Type() + + cache, _ := globalFieldPathsCache.Load().(map[danger.TypeID]fieldPathsMap) + fieldPaths, ok := cache[danger.MakeTypeID(t)] + + if !ok { + fieldPaths = map[string][]int{} + + forEachField(t, nil, func(name string, path []int) { + fieldPaths[name] = path + // extra copy for the case-insensitive match + fieldPaths[strings.ToLower(name)] = path + }) + + newCache := make(map[danger.TypeID]fieldPathsMap, len(cache)+1) + newCache[danger.MakeTypeID(t)] = fieldPaths + for k, v := range cache { + newCache[k] = v + } + globalFieldPathsCache.Store(newCache) + } + + path, ok := fieldPaths[name] + if !ok { + path, ok = fieldPaths[strings.ToLower(name)] + } + return path, ok +} + +func forEachField(t reflect.Type, path []int, do func(name string, path []int)) { + n := t.NumField() + for i := 0; i < n; i++ { + f := t.Field(i) + + if !f.Anonymous && f.PkgPath != "" { + // only consider exported fields. + continue + } + + fieldPath := append(path, i) + fieldPath = fieldPath[:len(fieldPath):len(fieldPath)] + + name := f.Tag.Get("toml") + if name == "-" { + continue + } + + if i := strings.IndexByte(name, ','); i >= 0 { + name = name[:i] + } + + if f.Anonymous && name == "" { + t2 := f.Type + if t2.Kind() == reflect.Ptr { + t2 = t2.Elem() + } + + if t2.Kind() == reflect.Struct { + forEachField(t2, fieldPath, do) + } + continue + } + + if name == "" { + name = f.Name + } + + do(name, fieldPath) + } +} diff --git a/vendor/github.com/pelletier/go-toml/v2/utf8.go b/vendor/github.com/pelletier/go-toml/v2/utf8.go new file mode 100644 index 0000000000..d47a4f20c5 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/utf8.go @@ -0,0 +1,240 @@ +package toml + +import ( + "unicode/utf8" +) + +type utf8Err struct { + Index int + Size int +} + +func (u utf8Err) Zero() bool { + return u.Size == 0 +} + +// Verified that a given string is only made of valid UTF-8 characters allowed +// by the TOML spec: +// +// Any Unicode character may be used except those that must be escaped: +// quotation mark, backslash, and the control characters other than tab (U+0000 +// to U+0008, U+000A to U+001F, U+007F). +// +// It is a copy of the Go 1.17 utf8.Valid implementation, tweaked to exit early +// when a character is not allowed. +// +// The returned utf8Err is Zero() if the string is valid, or contains the byte +// index and size of the invalid character. +// +// quotation mark => already checked +// backslash => already checked +// 0-0x8 => invalid +// 0x9 => tab, ok +// 0xA - 0x1F => invalid +// 0x7F => invalid +func utf8TomlValidAlreadyEscaped(p []byte) (err utf8Err) { + // Fast path. Check for and skip 8 bytes of ASCII characters per iteration. + offset := 0 + for len(p) >= 8 { + // Combining two 32 bit loads allows the same code to be used + // for 32 and 64 bit platforms. + // The compiler can generate a 32bit load for first32 and second32 + // on many platforms. See test/codegen/memcombine.go. 
+ first32 := uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 | uint32(p[3])<<24 + second32 := uint32(p[4]) | uint32(p[5])<<8 | uint32(p[6])<<16 | uint32(p[7])<<24 + if (first32|second32)&0x80808080 != 0 { + // Found a non ASCII byte (>= RuneSelf). + break + } + + for i, b := range p[:8] { + if invalidAscii(b) { + err.Index = offset + i + err.Size = 1 + return + } + } + + p = p[8:] + offset += 8 + } + n := len(p) + for i := 0; i < n; { + pi := p[i] + if pi < utf8.RuneSelf { + if invalidAscii(pi) { + err.Index = offset + i + err.Size = 1 + return + } + i++ + continue + } + x := first[pi] + if x == xx { + // Illegal starter byte. + err.Index = offset + i + err.Size = 1 + return + } + size := int(x & 7) + if i+size > n { + // Short or invalid. + err.Index = offset + i + err.Size = n - i + return + } + accept := acceptRanges[x>>4] + if c := p[i+1]; c < accept.lo || accept.hi < c { + err.Index = offset + i + err.Size = 2 + return + } else if size == 2 { + } else if c := p[i+2]; c < locb || hicb < c { + err.Index = offset + i + err.Size = 3 + return + } else if size == 3 { + } else if c := p[i+3]; c < locb || hicb < c { + err.Index = offset + i + err.Size = 4 + return + } + i += size + } + return +} + +// Return the size of the next rune if valid, 0 otherwise. +func utf8ValidNext(p []byte) int { + c := p[0] + + if c < utf8.RuneSelf { + if invalidAscii(c) { + return 0 + } + return 1 + } + + x := first[c] + if x == xx { + // Illegal starter byte. + return 0 + } + size := int(x & 7) + if size > len(p) { + // Short or invalid. + return 0 + } + accept := acceptRanges[x>>4] + if c := p[1]; c < accept.lo || accept.hi < c { + return 0 + } else if size == 2 { + } else if c := p[2]; c < locb || hicb < c { + return 0 + } else if size == 3 { + } else if c := p[3]; c < locb || hicb < c { + return 0 + } + + return size +} + +var invalidAsciiTable = [256]bool{ + 0x00: true, + 0x01: true, + 0x02: true, + 0x03: true, + 0x04: true, + 0x05: true, + 0x06: true, + 0x07: true, + 0x08: true, + // 0x09 TAB + // 0x0A LF + 0x0B: true, + 0x0C: true, + // 0x0D CR + 0x0E: true, + 0x0F: true, + 0x10: true, + 0x11: true, + 0x12: true, + 0x13: true, + 0x14: true, + 0x15: true, + 0x16: true, + 0x17: true, + 0x18: true, + 0x19: true, + 0x1A: true, + 0x1B: true, + 0x1C: true, + 0x1D: true, + 0x1E: true, + 0x1F: true, + // 0x20 - 0x7E Printable ASCII characters + 0x7F: true, +} + +func invalidAscii(b byte) bool { + return invalidAsciiTable[b] +} + +// acceptRange gives the range of valid values for the second byte in a UTF-8 +// sequence. +type acceptRange struct { + lo uint8 // lowest value for second byte. + hi uint8 // highest value for second byte. +} + +// acceptRanges has size 16 to avoid bounds checks in the code that uses it. +var acceptRanges = [16]acceptRange{ + 0: {locb, hicb}, + 1: {0xA0, hicb}, + 2: {locb, 0x9F}, + 3: {0x90, hicb}, + 4: {locb, 0x8F}, +} + +// first is information about the first byte in a UTF-8 sequence. 
+var first = [256]uint8{ + // 1 2 3 4 5 6 7 8 9 A B C D E F + as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x00-0x0F + as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x10-0x1F + as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x20-0x2F + as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x30-0x3F + as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x40-0x4F + as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x50-0x5F + as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x60-0x6F + as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x70-0x7F + // 1 2 3 4 5 6 7 8 9 A B C D E F + xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0x80-0x8F + xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0x90-0x9F + xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xA0-0xAF + xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xB0-0xBF + xx, xx, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, // 0xC0-0xCF + s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, // 0xD0-0xDF + s2, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s4, s3, s3, // 0xE0-0xEF + s5, s6, s6, s6, s7, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xF0-0xFF +} + +const ( + // The default lowest and highest continuation byte. + locb = 0b10000000 + hicb = 0b10111111 + + // These names of these constants are chosen to give nice alignment in the + // table below. The first nibble is an index into acceptRanges or F for + // special one-byte cases. The second nibble is the Rune length or the + // Status for the special one-byte case. + xx = 0xF1 // invalid: size 1 + as = 0xF0 // ASCII: size 1 + s1 = 0x02 // accept 0, size 2 + s2 = 0x13 // accept 1, size 3 + s3 = 0x03 // accept 0, size 3 + s4 = 0x23 // accept 2, size 3 + s5 = 0x34 // accept 3, size 4 + s6 = 0x04 // accept 0, size 4 + s7 = 0x44 // accept 4, size 4 +) diff --git a/vendor/github.com/phayes/checkstyle/.scrutinizer.yml b/vendor/github.com/phayes/checkstyle/.scrutinizer.yml deleted file mode 100644 index d9284b6b4b..0000000000 --- a/vendor/github.com/phayes/checkstyle/.scrutinizer.yml +++ /dev/null @@ -1,15 +0,0 @@ -build: - dependencies: - before: - - 'source <(curl -fsSL https://raw.githubusercontent.com/phayes/go-scrutinize/master/install-golang)' - - tests: - override: - - - command: 'cd $PROJECTPATH && go-scrutinize' - coverage: - file: 'coverage.xml' - format: 'clover' - analysis: - file: 'checkstyle_report.xml' - format: 'general-checkstyle' \ No newline at end of file diff --git a/vendor/github.com/phayes/checkstyle/LICENSE b/vendor/github.com/phayes/checkstyle/LICENSE deleted file mode 100644 index 6dc912f39e..0000000000 --- a/vendor/github.com/phayes/checkstyle/LICENSE +++ /dev/null @@ -1,29 +0,0 @@ -BSD 3-Clause License - -Copyright (c) 2017, Patrick D Hayes -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. 
- -* Neither the name of the copyright holder nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/phayes/checkstyle/README.md b/vendor/github.com/phayes/checkstyle/README.md deleted file mode 100644 index 358cf6752c..0000000000 --- a/vendor/github.com/phayes/checkstyle/README.md +++ /dev/null @@ -1,44 +0,0 @@ -# checkstyle -[![GoDoc](https://godoc.org/github.com/phayes/checkstyle?status.svg)](https://godoc.org/github.com/phayes/checkstyle) -[![Go Report Card](https://goreportcard.com/badge/github.com/phayes/checkstyle)](https://goreportcard.com/report/github.com/phayes/checkstyle) -[![Build Status](https://scrutinizer-ci.com/g/phayes/checkstyle/badges/build.png?b=master)](https://scrutinizer-ci.com/g/phayes/checkstyle/build-status/master) - -Read and write checksyle_report.xml files with golang - -Checkstyle XML files are a standard file format for reporting errors in source code, and is often generated by static analysis tools. - -Example usage: - -```go - -import "github.com/phayes/checkstyle" - -// Print XML into human readable format -checkSyle, err := checkstyle.ReadFile("checkstyle_report.xml") -if err != nil { - log.Fatal(err) -} -for _, file := range checkStyle.File { - fmt.Println(File.Name) - for _, codingError := range file.Error { - fmt.Println("\t", codingError.Line, codingError.Message) - } -} - -// Create a new XML file from scratch -check := checkstyle.New() - -// Ensure that a file has been added -file := check.EnsureFile("/path/to/file") - -// Create an error on line 10 -codingError := checkstyle.NewError(10, "format", "line must end with a full stop") - -// Add the error to the file -file.AddError(codingError) - -// Output XML -fmt.Print(check) -``` - -For more information on checkstyle XML see: http://checkstyle.sourceforge.net/checks.html diff --git a/vendor/github.com/phayes/checkstyle/checkstyle.go b/vendor/github.com/phayes/checkstyle/checkstyle.go deleted file mode 100644 index cabbd4b40e..0000000000 --- a/vendor/github.com/phayes/checkstyle/checkstyle.go +++ /dev/null @@ -1,112 +0,0 @@ -package checkstyle - -import "encoding/xml" -import "io/ioutil" - -// DefaultCheckStyleVersion defines the default "version" attribute on "" lememnt -var DefaultCheckStyleVersion = "1.0.0" - -// Severity defines a checkstyle severity code -type Severity string - -var ( - SeverityError Severity = "error" - SeverityInfo Severity = "info" - SeverityWarning Severity = "warning" - SeverityIgnore Severity = "ignore" - SeverityNone Severity -) - -// CheckStyle represents a xml element found in a checkstyle_report.xml file. 
-type CheckStyle struct { - XMLName xml.Name `xml:"checkstyle"` - Version string `xml:"version,attr"` - File []*File `xml:"file"` -} - -// AddFile adds a checkstyle.File with the given filename. -func (cs *CheckStyle) AddFile(csf *File) { - cs.File = append(cs.File, csf) -} - -// GetFile gets a CheckStyleFile with the given filename. -func (cs *CheckStyle) GetFile(filename string) (csf *File, ok bool) { - for _, file := range cs.File { - if file.Name == filename { - csf = file - ok = true - return - } - } - return -} - -// EnsureFile ensures that a CheckStyleFile with the given name exists -// Returns either an exiting CheckStyleFile (if a file with that name exists) -// or a new CheckStyleFile (if a file with that name does not exists) -func (cs *CheckStyle) EnsureFile(filename string) (csf *File) { - csf, ok := cs.GetFile(filename) - if !ok { - csf = NewFile(filename) - cs.AddFile(csf) - } - return csf -} - -// String implements Stringer. Returns as xml. -func (cs *CheckStyle) String() string { - checkStyleXML, err := xml.Marshal(cs) - if err != nil { - panic(err) - } - return string(checkStyleXML) -} - -// New returns a new CheckStyle -func New() *CheckStyle { - return &CheckStyle{Version: DefaultCheckStyleVersion, File: []*File{}} -} - -// File represents a xml element. -type File struct { - XMLName xml.Name `xml:"file"` - Name string `xml:"name,attr"` - Error []*Error `xml:"error"` -} - -// AddError adds a checkstyle.Error to the file. -func (csf *File) AddError(cse *Error) { - csf.Error = append(csf.Error, cse) -} - -// NewFile creates a new checkstyle.File -func NewFile(filename string) *File { - return &File{Name: filename, Error: []*Error{}} -} - -// Error represents a xml element -type Error struct { - XMLName xml.Name `xml:"error"` - Line int `xml:"line,attr"` - Column int `xml:"column,attr,omitempty"` - Severity Severity `xml:"severity,attr,omitempty"` - Message string `xml:"message,attr"` - Source string `xml:"source,attr"` -} - -// NewError creates a new checkstyle.Error -// Note that line starts at 0, and column starts at 1 -func NewError(line int, column int, severity Severity, message string, source string) *Error { - return &Error{Line: line, Column: column, Severity: severity, Message: message, Source: source} -} - -// ReadFile reads a checkfile.xml file and returns a CheckStyle object. -func ReadFile(filename string) (*CheckStyle, error) { - checkStyleXML, err := ioutil.ReadFile(filename) - if err != nil { - return nil, err - } - checkStyle := New() - err = xml.Unmarshal(checkStyleXML, checkStyle) - return checkStyle, err -} diff --git a/vendor/github.com/phayes/checkstyle/godoc.go b/vendor/github.com/phayes/checkstyle/godoc.go deleted file mode 100644 index c9662fe9ed..0000000000 --- a/vendor/github.com/phayes/checkstyle/godoc.go +++ /dev/null @@ -1,36 +0,0 @@ -/* -Package checkstyle allows the parsing of generation of checkstyle XML files. - -Checkstyle XML files are a standard file format for reporting errors in source code, and is often generated by static analysis tools. 
- -Example usage: - // Print XML into human readable format - checkSyle, err := checkstyle.ReadFile("checkstyle_report.xml") - if err != nil { - log.Fatal(err) - } - for _, file := range checkStyle.File { - fmt.Println(File.Name) - for _, codingError := range file.Error { - fmt.Println("\t", codingError.Line, codingError.Message) - } - } - - // Create a new XML file from scratch - check := checkstyle.New() - - // Ensure that a file has been added - file := check.EnsureFile("/path/to/file") - - // Create an error on line 10, column 5 - codingError := checkstyle.NewError(10, 5, checkstyle.SeverityWarning, "format", "line must end with a full stop") - - // Add the error to the file - file.AddError(codingError) - - // Output XML - fmt.Print(check) - -For more information on checkstyle XML see: http://checkstyle.sourceforge.net/checks.html -*/ -package checkstyle diff --git a/vendor/github.com/pkg/errors/.travis.yml b/vendor/github.com/pkg/errors/.travis.yml deleted file mode 100644 index 9159de03e0..0000000000 --- a/vendor/github.com/pkg/errors/.travis.yml +++ /dev/null @@ -1,10 +0,0 @@ -language: go -go_import_path: github.com/pkg/errors -go: - - 1.11.x - - 1.12.x - - 1.13.x - - tip - -script: - - make check diff --git a/vendor/github.com/pkg/errors/LICENSE b/vendor/github.com/pkg/errors/LICENSE deleted file mode 100644 index 835ba3e755..0000000000 --- a/vendor/github.com/pkg/errors/LICENSE +++ /dev/null @@ -1,23 +0,0 @@ -Copyright (c) 2015, Dave Cheney -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/github.com/pkg/errors/Makefile b/vendor/github.com/pkg/errors/Makefile deleted file mode 100644 index ce9d7cded6..0000000000 --- a/vendor/github.com/pkg/errors/Makefile +++ /dev/null @@ -1,44 +0,0 @@ -PKGS := github.com/pkg/errors -SRCDIRS := $(shell go list -f '{{.Dir}}' $(PKGS)) -GO := go - -check: test vet gofmt misspell unconvert staticcheck ineffassign unparam - -test: - $(GO) test $(PKGS) - -vet: | test - $(GO) vet $(PKGS) - -staticcheck: - $(GO) get honnef.co/go/tools/cmd/staticcheck - staticcheck -checks all $(PKGS) - -misspell: - $(GO) get github.com/client9/misspell/cmd/misspell - misspell \ - -locale GB \ - -error \ - *.md *.go - -unconvert: - $(GO) get github.com/mdempsky/unconvert - unconvert -v $(PKGS) - -ineffassign: - $(GO) get github.com/gordonklaus/ineffassign - find $(SRCDIRS) -name '*.go' | xargs ineffassign - -pedantic: check errcheck - -unparam: - $(GO) get mvdan.cc/unparam - unparam ./... - -errcheck: - $(GO) get github.com/kisielk/errcheck - errcheck $(PKGS) - -gofmt: - @echo Checking code is gofmted - @test -z "$(shell gofmt -s -l -d -e $(SRCDIRS) | tee /dev/stderr)" diff --git a/vendor/github.com/pkg/errors/README.md b/vendor/github.com/pkg/errors/README.md deleted file mode 100644 index 54dfdcb12e..0000000000 --- a/vendor/github.com/pkg/errors/README.md +++ /dev/null @@ -1,59 +0,0 @@ -# errors [![Travis-CI](https://travis-ci.org/pkg/errors.svg)](https://travis-ci.org/pkg/errors) [![AppVeyor](https://ci.appveyor.com/api/projects/status/b98mptawhudj53ep/branch/master?svg=true)](https://ci.appveyor.com/project/davecheney/errors/branch/master) [![GoDoc](https://godoc.org/github.com/pkg/errors?status.svg)](http://godoc.org/github.com/pkg/errors) [![Report card](https://goreportcard.com/badge/github.com/pkg/errors)](https://goreportcard.com/report/github.com/pkg/errors) [![Sourcegraph](https://sourcegraph.com/github.com/pkg/errors/-/badge.svg)](https://sourcegraph.com/github.com/pkg/errors?badge) - -Package errors provides simple error handling primitives. - -`go get github.com/pkg/errors` - -The traditional error handling idiom in Go is roughly akin to -```go -if err != nil { - return err -} -``` -which applied recursively up the call stack results in error reports without context or debugging information. The errors package allows programmers to add context to the failure path in their code in a way that does not destroy the original value of the error. - -## Adding context to an error - -The errors.Wrap function returns a new error that adds context to the original error. For example -```go -_, err := ioutil.ReadAll(r) -if err != nil { - return errors.Wrap(err, "read failed") -} -``` -## Retrieving the cause of an error - -Using `errors.Wrap` constructs a stack of errors, adding context to the preceding error. Depending on the nature of the error it may be necessary to reverse the operation of errors.Wrap to retrieve the original error for inspection. Any error value which implements this interface can be inspected by `errors.Cause`. -```go -type causer interface { - Cause() error -} -``` -`errors.Cause` will recursively retrieve the topmost error which does not implement `causer`, which is assumed to be the original cause. For example: -```go -switch err := errors.Cause(err).(type) { -case *MyError: - // handle specifically -default: - // unknown error -} -``` - -[Read the package documentation for more information](https://godoc.org/github.com/pkg/errors). 
- -## Roadmap - -With the upcoming [Go2 error proposals](https://go.googlesource.com/proposal/+/master/design/go2draft.md) this package is moving into maintenance mode. The roadmap for a 1.0 release is as follows: - -- 0.9. Remove pre Go 1.9 and Go 1.10 support, address outstanding pull requests (if possible) -- 1.0. Final release. - -## Contributing - -Because of the Go2 errors changes, this package is not accepting proposals for new functionality. With that said, we welcome pull requests, bug fixes and issue reports. - -Before sending a PR, please discuss your change by raising an issue. - -## License - -BSD-2-Clause diff --git a/vendor/github.com/pkg/errors/appveyor.yml b/vendor/github.com/pkg/errors/appveyor.yml deleted file mode 100644 index a932eade02..0000000000 --- a/vendor/github.com/pkg/errors/appveyor.yml +++ /dev/null @@ -1,32 +0,0 @@ -version: build-{build}.{branch} - -clone_folder: C:\gopath\src\github.com\pkg\errors -shallow_clone: true # for startup speed - -environment: - GOPATH: C:\gopath - -platform: - - x64 - -# http://www.appveyor.com/docs/installed-software -install: - # some helpful output for debugging builds - - go version - - go env - # pre-installed MinGW at C:\MinGW is 32bit only - # but MSYS2 at C:\msys64 has mingw64 - - set PATH=C:\msys64\mingw64\bin;%PATH% - - gcc --version - - g++ --version - -build_script: - - go install -v ./... - -test_script: - - set PATH=C:\gopath\bin;%PATH% - - go test -v ./... - -#artifacts: -# - path: '%GOPATH%\bin\*.exe' -deploy: off diff --git a/vendor/github.com/pkg/errors/errors.go b/vendor/github.com/pkg/errors/errors.go deleted file mode 100644 index 161aea2582..0000000000 --- a/vendor/github.com/pkg/errors/errors.go +++ /dev/null @@ -1,288 +0,0 @@ -// Package errors provides simple error handling primitives. -// -// The traditional error handling idiom in Go is roughly akin to -// -// if err != nil { -// return err -// } -// -// which when applied recursively up the call stack results in error reports -// without context or debugging information. The errors package allows -// programmers to add context to the failure path in their code in a way -// that does not destroy the original value of the error. -// -// Adding context to an error -// -// The errors.Wrap function returns a new error that adds context to the -// original error by recording a stack trace at the point Wrap is called, -// together with the supplied message. For example -// -// _, err := ioutil.ReadAll(r) -// if err != nil { -// return errors.Wrap(err, "read failed") -// } -// -// If additional control is required, the errors.WithStack and -// errors.WithMessage functions destructure errors.Wrap into its component -// operations: annotating an error with a stack trace and with a message, -// respectively. -// -// Retrieving the cause of an error -// -// Using errors.Wrap constructs a stack of errors, adding context to the -// preceding error. Depending on the nature of the error it may be necessary -// to reverse the operation of errors.Wrap to retrieve the original error -// for inspection. Any error value which implements this interface -// -// type causer interface { -// Cause() error -// } -// -// can be inspected by errors.Cause. errors.Cause will recursively retrieve -// the topmost error that does not implement causer, which is assumed to be -// the original cause. 
For example: -// -// switch err := errors.Cause(err).(type) { -// case *MyError: -// // handle specifically -// default: -// // unknown error -// } -// -// Although the causer interface is not exported by this package, it is -// considered a part of its stable public interface. -// -// Formatted printing of errors -// -// All error values returned from this package implement fmt.Formatter and can -// be formatted by the fmt package. The following verbs are supported: -// -// %s print the error. If the error has a Cause it will be -// printed recursively. -// %v see %s -// %+v extended format. Each Frame of the error's StackTrace will -// be printed in detail. -// -// Retrieving the stack trace of an error or wrapper -// -// New, Errorf, Wrap, and Wrapf record a stack trace at the point they are -// invoked. This information can be retrieved with the following interface: -// -// type stackTracer interface { -// StackTrace() errors.StackTrace -// } -// -// The returned errors.StackTrace type is defined as -// -// type StackTrace []Frame -// -// The Frame type represents a call site in the stack trace. Frame supports -// the fmt.Formatter interface that can be used for printing information about -// the stack trace of this error. For example: -// -// if err, ok := err.(stackTracer); ok { -// for _, f := range err.StackTrace() { -// fmt.Printf("%+s:%d\n", f, f) -// } -// } -// -// Although the stackTracer interface is not exported by this package, it is -// considered a part of its stable public interface. -// -// See the documentation for Frame.Format for more details. -package errors - -import ( - "fmt" - "io" -) - -// New returns an error with the supplied message. -// New also records the stack trace at the point it was called. -func New(message string) error { - return &fundamental{ - msg: message, - stack: callers(), - } -} - -// Errorf formats according to a format specifier and returns the string -// as a value that satisfies error. -// Errorf also records the stack trace at the point it was called. -func Errorf(format string, args ...interface{}) error { - return &fundamental{ - msg: fmt.Sprintf(format, args...), - stack: callers(), - } -} - -// fundamental is an error that has a message and a stack, but no caller. -type fundamental struct { - msg string - *stack -} - -func (f *fundamental) Error() string { return f.msg } - -func (f *fundamental) Format(s fmt.State, verb rune) { - switch verb { - case 'v': - if s.Flag('+') { - io.WriteString(s, f.msg) - f.stack.Format(s, verb) - return - } - fallthrough - case 's': - io.WriteString(s, f.msg) - case 'q': - fmt.Fprintf(s, "%q", f.msg) - } -} - -// WithStack annotates err with a stack trace at the point WithStack was called. -// If err is nil, WithStack returns nil. -func WithStack(err error) error { - if err == nil { - return nil - } - return &withStack{ - err, - callers(), - } -} - -type withStack struct { - error - *stack -} - -func (w *withStack) Cause() error { return w.error } - -// Unwrap provides compatibility for Go 1.13 error chains. -func (w *withStack) Unwrap() error { return w.error } - -func (w *withStack) Format(s fmt.State, verb rune) { - switch verb { - case 'v': - if s.Flag('+') { - fmt.Fprintf(s, "%+v", w.Cause()) - w.stack.Format(s, verb) - return - } - fallthrough - case 's': - io.WriteString(s, w.Error()) - case 'q': - fmt.Fprintf(s, "%q", w.Error()) - } -} - -// Wrap returns an error annotating err with a stack trace -// at the point Wrap is called, and the supplied message. -// If err is nil, Wrap returns nil. 
-func Wrap(err error, message string) error { - if err == nil { - return nil - } - err = &withMessage{ - cause: err, - msg: message, - } - return &withStack{ - err, - callers(), - } -} - -// Wrapf returns an error annotating err with a stack trace -// at the point Wrapf is called, and the format specifier. -// If err is nil, Wrapf returns nil. -func Wrapf(err error, format string, args ...interface{}) error { - if err == nil { - return nil - } - err = &withMessage{ - cause: err, - msg: fmt.Sprintf(format, args...), - } - return &withStack{ - err, - callers(), - } -} - -// WithMessage annotates err with a new message. -// If err is nil, WithMessage returns nil. -func WithMessage(err error, message string) error { - if err == nil { - return nil - } - return &withMessage{ - cause: err, - msg: message, - } -} - -// WithMessagef annotates err with the format specifier. -// If err is nil, WithMessagef returns nil. -func WithMessagef(err error, format string, args ...interface{}) error { - if err == nil { - return nil - } - return &withMessage{ - cause: err, - msg: fmt.Sprintf(format, args...), - } -} - -type withMessage struct { - cause error - msg string -} - -func (w *withMessage) Error() string { return w.msg + ": " + w.cause.Error() } -func (w *withMessage) Cause() error { return w.cause } - -// Unwrap provides compatibility for Go 1.13 error chains. -func (w *withMessage) Unwrap() error { return w.cause } - -func (w *withMessage) Format(s fmt.State, verb rune) { - switch verb { - case 'v': - if s.Flag('+') { - fmt.Fprintf(s, "%+v\n", w.Cause()) - io.WriteString(s, w.msg) - return - } - fallthrough - case 's', 'q': - io.WriteString(s, w.Error()) - } -} - -// Cause returns the underlying cause of the error, if possible. -// An error value has a cause if it implements the following -// interface: -// -// type causer interface { -// Cause() error -// } -// -// If the error does not implement Cause, the original error will -// be returned. If the error is nil, nil will be returned without further -// investigation. -func Cause(err error) error { - type causer interface { - Cause() error - } - - for err != nil { - cause, ok := err.(causer) - if !ok { - break - } - err = cause.Cause() - } - return err -} diff --git a/vendor/github.com/pkg/errors/go113.go b/vendor/github.com/pkg/errors/go113.go deleted file mode 100644 index be0d10d0c7..0000000000 --- a/vendor/github.com/pkg/errors/go113.go +++ /dev/null @@ -1,38 +0,0 @@ -// +build go1.13 - -package errors - -import ( - stderrors "errors" -) - -// Is reports whether any error in err's chain matches target. -// -// The chain consists of err itself followed by the sequence of errors obtained by -// repeatedly calling Unwrap. -// -// An error is considered to match a target if it is equal to that target or if -// it implements a method Is(error) bool such that Is(target) returns true. -func Is(err, target error) bool { return stderrors.Is(err, target) } - -// As finds the first error in err's chain that matches target, and if so, sets -// target to that error value and returns true. -// -// The chain consists of err itself followed by the sequence of errors obtained by -// repeatedly calling Unwrap. -// -// An error matches target if the error's concrete value is assignable to the value -// pointed to by target, or if the error has a method As(interface{}) bool such that -// As(target) returns true. In the latter case, the As method is responsible for -// setting target. 
-// -// As will panic if target is not a non-nil pointer to either a type that implements -// error, or to any interface type. As returns false if err is nil. -func As(err error, target interface{}) bool { return stderrors.As(err, target) } - -// Unwrap returns the result of calling the Unwrap method on err, if err's -// type contains an Unwrap method returning error. -// Otherwise, Unwrap returns nil. -func Unwrap(err error) error { - return stderrors.Unwrap(err) -} diff --git a/vendor/github.com/pkg/errors/stack.go b/vendor/github.com/pkg/errors/stack.go deleted file mode 100644 index 779a8348fb..0000000000 --- a/vendor/github.com/pkg/errors/stack.go +++ /dev/null @@ -1,177 +0,0 @@ -package errors - -import ( - "fmt" - "io" - "path" - "runtime" - "strconv" - "strings" -) - -// Frame represents a program counter inside a stack frame. -// For historical reasons if Frame is interpreted as a uintptr -// its value represents the program counter + 1. -type Frame uintptr - -// pc returns the program counter for this frame; -// multiple frames may have the same PC value. -func (f Frame) pc() uintptr { return uintptr(f) - 1 } - -// file returns the full path to the file that contains the -// function for this Frame's pc. -func (f Frame) file() string { - fn := runtime.FuncForPC(f.pc()) - if fn == nil { - return "unknown" - } - file, _ := fn.FileLine(f.pc()) - return file -} - -// line returns the line number of source code of the -// function for this Frame's pc. -func (f Frame) line() int { - fn := runtime.FuncForPC(f.pc()) - if fn == nil { - return 0 - } - _, line := fn.FileLine(f.pc()) - return line -} - -// name returns the name of this function, if known. -func (f Frame) name() string { - fn := runtime.FuncForPC(f.pc()) - if fn == nil { - return "unknown" - } - return fn.Name() -} - -// Format formats the frame according to the fmt.Formatter interface. -// -// %s source file -// %d source line -// %n function name -// %v equivalent to %s:%d -// -// Format accepts flags that alter the printing of some verbs, as follows: -// -// %+s function name and path of source file relative to the compile time -// GOPATH separated by \n\t (\n\t) -// %+v equivalent to %+s:%d -func (f Frame) Format(s fmt.State, verb rune) { - switch verb { - case 's': - switch { - case s.Flag('+'): - io.WriteString(s, f.name()) - io.WriteString(s, "\n\t") - io.WriteString(s, f.file()) - default: - io.WriteString(s, path.Base(f.file())) - } - case 'd': - io.WriteString(s, strconv.Itoa(f.line())) - case 'n': - io.WriteString(s, funcname(f.name())) - case 'v': - f.Format(s, 's') - io.WriteString(s, ":") - f.Format(s, 'd') - } -} - -// MarshalText formats a stacktrace Frame as a text string. The output is the -// same as that of fmt.Sprintf("%+v", f), but without newlines or tabs. -func (f Frame) MarshalText() ([]byte, error) { - name := f.name() - if name == "unknown" { - return []byte(name), nil - } - return []byte(fmt.Sprintf("%s %s:%d", name, f.file(), f.line())), nil -} - -// StackTrace is stack of Frames from innermost (newest) to outermost (oldest). -type StackTrace []Frame - -// Format formats the stack of Frames according to the fmt.Formatter interface. -// -// %s lists source files for each Frame in the stack -// %v lists the source file and line number for each Frame in the stack -// -// Format accepts flags that alter the printing of some verbs, as follows: -// -// %+v Prints filename, function, and line number for each Frame in the stack. 
-func (st StackTrace) Format(s fmt.State, verb rune) { - switch verb { - case 'v': - switch { - case s.Flag('+'): - for _, f := range st { - io.WriteString(s, "\n") - f.Format(s, verb) - } - case s.Flag('#'): - fmt.Fprintf(s, "%#v", []Frame(st)) - default: - st.formatSlice(s, verb) - } - case 's': - st.formatSlice(s, verb) - } -} - -// formatSlice will format this StackTrace into the given buffer as a slice of -// Frame, only valid when called with '%s' or '%v'. -func (st StackTrace) formatSlice(s fmt.State, verb rune) { - io.WriteString(s, "[") - for i, f := range st { - if i > 0 { - io.WriteString(s, " ") - } - f.Format(s, verb) - } - io.WriteString(s, "]") -} - -// stack represents a stack of program counters. -type stack []uintptr - -func (s *stack) Format(st fmt.State, verb rune) { - switch verb { - case 'v': - switch { - case st.Flag('+'): - for _, pc := range *s { - f := Frame(pc) - fmt.Fprintf(st, "\n%+v", f) - } - } - } -} - -func (s *stack) StackTrace() StackTrace { - f := make([]Frame, len(*s)) - for i := 0; i < len(f); i++ { - f[i] = Frame((*s)[i]) - } - return f -} - -func callers() *stack { - const depth = 32 - var pcs [depth]uintptr - n := runtime.Callers(3, pcs[:]) - var st stack = pcs[0:n] - return &st -} - -// funcname removes the path prefix component of a function's name reported by func.Name(). -func funcname(name string) string { - i := strings.LastIndex(name, "/") - name = name[i+1:] - i = strings.Index(name, ".") - return name[i+1:] -} diff --git a/vendor/github.com/polyfloyd/go-errorlint/errorlint/allowed.go b/vendor/github.com/polyfloyd/go-errorlint/errorlint/allowed.go index 7fe4c38cc2..8bfb4c9b2a 100644 --- a/vendor/github.com/polyfloyd/go-errorlint/errorlint/allowed.go +++ b/vendor/github.com/polyfloyd/go-errorlint/errorlint/allowed.go @@ -3,6 +3,8 @@ package errorlint import ( "fmt" "go/ast" + "go/types" + "strings" ) var allowedErrors = []struct { @@ -10,8 +12,8 @@ var allowedErrors = []struct { fun string }{ // pkg/archive/tar - {err: "io.EOF", fun: "(*tar.Reader).Next"}, - {err: "io.EOF", fun: "(*tar.Reader).Read"}, + {err: "io.EOF", fun: "(*archive/tar.Reader).Next"}, + {err: "io.EOF", fun: "(*archive/tar.Reader).Read"}, // pkg/bufio {err: "io.EOF", fun: "(*bufio.Reader).Discard"}, {err: "io.EOF", fun: "(*bufio.Reader).Peek"}, @@ -34,22 +36,31 @@ var allowedErrors = []struct { {err: "io.EOF", fun: "(*bytes.Reader).ReadRune"}, {err: "io.EOF", fun: "(*bytes.Reader).ReadString"}, // pkg/database/sql - {err: "sql.ErrNoRows", fun: "(*database/sql.Row).Scan"}, + {err: "database/sql.ErrNoRows", fun: "(*database/sql.Row).Scan"}, + // pkg/debug/elf + {err: "io.EOF", fun: "debug/elf.Open"}, + {err: "io.EOF", fun: "debug/elf.NewFile"}, // pkg/io + {err: "io.EOF", fun: "(io.ReadCloser).Read"}, {err: "io.EOF", fun: "(io.Reader).Read"}, + {err: "io.EOF", fun: "(io.ReaderAt).ReadAt"}, + {err: "io.EOF", fun: "(*io.LimitedReader).Read"}, + {err: "io.EOF", fun: "(*io.SectionReader).Read"}, + {err: "io.EOF", fun: "(*io.SectionReader).ReadAt"}, {err: "io.ErrClosedPipe", fun: "(*io.PipeWriter).Write"}, {err: "io.ErrShortBuffer", fun: "io.ReadAtLeast"}, {err: "io.ErrUnexpectedEOF", fun: "io.ReadAtLeast"}, + {err: "io.EOF", fun: "io.ReadFull"}, {err: "io.ErrUnexpectedEOF", fun: "io.ReadFull"}, // pkg/net/http - {err: "http.ErrServerClosed", fun: "(*net/http.Server).ListenAndServe"}, - {err: "http.ErrServerClosed", fun: "(*net/http.Server).ListenAndServeTLS"}, - {err: "http.ErrServerClosed", fun: "(*net/http.Server).Serve"}, - {err: "http.ErrServerClosed", fun: 
"(*net/http.Server).ServeTLS"}, - {err: "http.ErrServerClosed", fun: "http.ListenAndServe"}, - {err: "http.ErrServerClosed", fun: "http.ListenAndServeTLS"}, - {err: "http.ErrServerClosed", fun: "http.Serve"}, - {err: "http.ErrServerClosed", fun: "http.ServeTLS"}, + {err: "net/http.ErrServerClosed", fun: "(*net/http.Server).ListenAndServe"}, + {err: "net/http.ErrServerClosed", fun: "(*net/http.Server).ListenAndServeTLS"}, + {err: "net/http.ErrServerClosed", fun: "(*net/http.Server).Serve"}, + {err: "net/http.ErrServerClosed", fun: "(*net/http.Server).ServeTLS"}, + {err: "net/http.ErrServerClosed", fun: "net/http.ListenAndServe"}, + {err: "net/http.ErrServerClosed", fun: "net/http.ListenAndServeTLS"}, + {err: "net/http.ErrServerClosed", fun: "net/http.Serve"}, + {err: "net/http.ErrServerClosed", fun: "net/http.ServeTLS"}, // pkg/os {err: "io.EOF", fun: "(*os.File).Read"}, {err: "io.EOF", fun: "(*os.File).ReadAt"}, @@ -61,9 +72,26 @@ var allowedErrors = []struct { {err: "io.EOF", fun: "(*strings.Reader).ReadAt"}, {err: "io.EOF", fun: "(*strings.Reader).ReadByte"}, {err: "io.EOF", fun: "(*strings.Reader).ReadRune"}, + // pkg/context + {err: "context.DeadlineExceeded", fun: "(context.Context).Err"}, + {err: "context.Canceled", fun: "(context.Context).Err"}, +} + +var allowedErrorWildcards = []struct { + err string + fun string +}{ + // golang.org/x/sys/unix + {err: "golang.org/x/sys/unix.E", fun: "golang.org/x/sys/unix."}, } func isAllowedErrAndFunc(err, fun string) bool { + for _, allow := range allowedErrorWildcards { + if strings.HasPrefix(fun, allow.fun) && strings.HasPrefix(err, allow.err) { + return true + } + } + for _, allow := range allowedErrors { if allow.fun == fun && allow.err == err { return true @@ -72,7 +100,7 @@ func isAllowedErrAndFunc(err, fun string) bool { return false } -func isAllowedErrorComparison(info *TypesInfoExt, binExpr *ast.BinaryExpr) bool { +func isAllowedErrorComparison(pass *TypesInfoExt, binExpr *ast.BinaryExpr) bool { var errName string // `.`, e.g. `io.EOF` var callExprs []*ast.CallExpr @@ -83,11 +111,11 @@ func isAllowedErrorComparison(info *TypesInfoExt, binExpr *ast.BinaryExpr) bool case *ast.SelectorExpr: // A selector which we assume refers to a staticaly declared error // in a package. - errName = selectorToString(t) + errName = selectorToString(pass, t) case *ast.Ident: // Identifier, most likely to be the `err` variable or whatever // produces it. - callExprs = assigningCallExprs(info, t) + callExprs = assigningCallExprs(pass, t, map[types.Object]bool{}) case *ast.CallExpr: callExprs = append(callExprs, t) } @@ -107,11 +135,11 @@ func isAllowedErrorComparison(info *TypesInfoExt, binExpr *ast.BinaryExpr) bool // allowed. return false } - if sel, ok := info.Selections[functionSelector]; ok { + if sel, ok := pass.TypesInfo.Selections[functionSelector]; ok { functionNames[i] = fmt.Sprintf("(%s).%s", sel.Recv(), sel.Obj().Name()) } else { // If there is no selection, assume it is a package. - functionNames[i] = selectorToString(callExpr.Fun.(*ast.SelectorExpr)) + functionNames[i] = selectorToString(pass, callExpr.Fun.(*ast.SelectorExpr)) } } @@ -126,17 +154,23 @@ func isAllowedErrorComparison(info *TypesInfoExt, binExpr *ast.BinaryExpr) bool // assigningCallExprs finds all *ast.CallExpr nodes that are part of an // *ast.AssignStmt that assign to the subject identifier. 
-func assigningCallExprs(info *TypesInfoExt, subject *ast.Ident) []*ast.CallExpr { +func assigningCallExprs(pass *TypesInfoExt, subject *ast.Ident, visitedObjects map[types.Object]bool) []*ast.CallExpr { if subject.Obj == nil { return nil } - // Find other identifiers that reference this same object. Make sure to - // exclude the subject identifier as it will cause an infinite recursion - // and is being used in a read operation anyway. - sobj := info.ObjectOf(subject) + // Find other identifiers that reference this same object. + sobj := pass.TypesInfo.ObjectOf(subject) + + if visitedObjects[sobj] { + return nil + } + visitedObjects[sobj] = true + + // Make sure to exclude the subject identifier as it will cause an infinite recursion and is + // being used in a read operation anyway. identifiers := []*ast.Ident{} - for _, ident := range info.IdentifiersForObject[sobj] { + for _, ident := range pass.IdentifiersForObject[sobj] { if subject.Pos() != ident.Pos() { identifiers = append(identifiers, ident) } @@ -145,7 +179,7 @@ func assigningCallExprs(info *TypesInfoExt, subject *ast.Ident) []*ast.CallExpr // Find out whether the identifiers are part of an assignment statement. var callExprs []*ast.CallExpr for _, ident := range identifiers { - parent := info.NodeParent[ident] + parent := pass.NodeParent[ident] switch declT := parent.(type) { case *ast.AssignStmt: // The identifier is LHS of an assignment. @@ -153,10 +187,10 @@ func assigningCallExprs(info *TypesInfoExt, subject *ast.Ident) []*ast.CallExpr assigningExpr := assignment.Rhs[0] // If the assignment is comprised of multiple expressions, find out - // which LHS expression we should use by finding its index in the LHS. - if len(assignment.Rhs) > 1 { + // which RHS expression we should use by finding its index in the LHS. + if len(assignment.Lhs) == len(assignment.Rhs) { for i, lhs := range assignment.Lhs { - if subject.Name == lhs.(*ast.Ident).Name { + if ident, ok := lhs.(*ast.Ident); ok && subject.Name == ident.Name { assigningExpr = assignment.Rhs[i] break } @@ -173,7 +207,7 @@ func assigningCallExprs(info *TypesInfoExt, subject *ast.Ident) []*ast.CallExpr continue } // The subject was the result of assigning from another identifier. - callExprs = append(callExprs, assigningCallExprs(info, assignT)...) + callExprs = append(callExprs, assigningCallExprs(pass, assignT, visitedObjects)...) default: // TODO: inconclusive? } @@ -182,9 +216,7 @@ func assigningCallExprs(info *TypesInfoExt, subject *ast.Ident) []*ast.CallExpr return callExprs } -func selectorToString(selExpr *ast.SelectorExpr) string { - if ident, ok := selExpr.X.(*ast.Ident); ok { - return ident.Name + "." 
+ selExpr.Sel.Name - } - return "" +func selectorToString(pass *TypesInfoExt, selExpr *ast.SelectorExpr) string { + o := pass.TypesInfo.Uses[selExpr.Sel] + return fmt.Sprintf("%s.%s", o.Pkg().Path(), o.Name()) } diff --git a/vendor/github.com/polyfloyd/go-errorlint/errorlint/analysis.go b/vendor/github.com/polyfloyd/go-errorlint/errorlint/analysis.go index ab02136f47..f034913ea3 100644 --- a/vendor/github.com/polyfloyd/go-errorlint/errorlint/analysis.go +++ b/vendor/github.com/polyfloyd/go-errorlint/errorlint/analysis.go @@ -19,43 +19,45 @@ func NewAnalyzer() *analysis.Analyzer { } var ( - flagSet flag.FlagSet - checkComparison bool - checkAsserts bool - checkErrorf bool + flagSet flag.FlagSet + checkComparison bool + checkAsserts bool + checkErrorf bool + checkErrorfMulti bool ) func init() { flagSet.BoolVar(&checkComparison, "comparison", true, "Check for plain error comparisons") flagSet.BoolVar(&checkAsserts, "asserts", true, "Check for plain type assertions and type switches") flagSet.BoolVar(&checkErrorf, "errorf", false, "Check whether fmt.Errorf uses the %w verb for formatting errors. See the readme for caveats") + flagSet.BoolVar(&checkErrorfMulti, "errorf-multi", true, "Permit more than 1 %w verb, valid per Go 1.20 (Requires -errorf=true)") } func run(pass *analysis.Pass) (interface{}, error) { - lints := []Lint{} - extInfo := newTypesInfoExt(pass.TypesInfo) + lints := []analysis.Diagnostic{} + extInfo := newTypesInfoExt(pass) if checkComparison { - l := LintErrorComparisons(pass.Fset, extInfo) + l := LintErrorComparisons(extInfo) lints = append(lints, l...) } if checkAsserts { - l := LintErrorTypeAssertions(pass.Fset, *pass.TypesInfo) + l := LintErrorTypeAssertions(pass.Fset, extInfo) lints = append(lints, l...) } if checkErrorf { - l := LintFmtErrorfCalls(pass.Fset, *pass.TypesInfo) + l := LintFmtErrorfCalls(pass.Fset, *pass.TypesInfo, checkErrorfMulti) lints = append(lints, l...) } sort.Sort(ByPosition(lints)) for _, l := range lints { - pass.Report(analysis.Diagnostic{Pos: l.Pos, Message: l.Message}) + pass.Report(l) } return nil, nil } type TypesInfoExt struct { - types.Info + *analysis.Pass // Maps AST nodes back to the node they are contained within. 
NodeParent map[ast.Node]ast.Node @@ -64,9 +66,9 @@ type TypesInfoExt struct { IdentifiersForObject map[types.Object][]*ast.Ident } -func newTypesInfoExt(info *types.Info) *TypesInfoExt { +func newTypesInfoExt(pass *analysis.Pass) *TypesInfoExt { nodeParent := map[ast.Node]ast.Node{} - for node := range info.Scopes { + for node := range pass.TypesInfo.Scopes { file, ok := node.(*ast.File) if !ok { continue @@ -84,15 +86,15 @@ func newTypesInfoExt(info *types.Info) *TypesInfoExt { } identifiersForObject := map[types.Object][]*ast.Ident{} - for node, obj := range info.Defs { + for node, obj := range pass.TypesInfo.Defs { identifiersForObject[obj] = append(identifiersForObject[obj], node) } - for node, obj := range info.Uses { + for node, obj := range pass.TypesInfo.Uses { identifiersForObject[obj] = append(identifiersForObject[obj], node) } return &TypesInfoExt{ - Info: *info, + Pass: pass, NodeParent: nodeParent, IdentifiersForObject: identifiersForObject, } diff --git a/vendor/github.com/polyfloyd/go-errorlint/errorlint/lint.go b/vendor/github.com/polyfloyd/go-errorlint/errorlint/lint.go index fb065ced13..572a3816d8 100644 --- a/vendor/github.com/polyfloyd/go-errorlint/errorlint/lint.go +++ b/vendor/github.com/polyfloyd/go-errorlint/errorlint/lint.go @@ -6,14 +6,11 @@ import ( "go/constant" "go/token" "go/types" -) -type Lint struct { - Message string - Pos token.Pos -} + "golang.org/x/tools/go/analysis" +) -type ByPosition []Lint +type ByPosition []analysis.Diagnostic func (l ByPosition) Len() int { return len(l) } func (l ByPosition) Swap(i, j int) { l[i], l[j] = l[j], l[i] } @@ -22,8 +19,8 @@ func (l ByPosition) Less(i, j int) bool { return l[i].Pos < l[j].Pos } -func LintFmtErrorfCalls(fset *token.FileSet, info types.Info) []Lint { - lints := []Lint{} +func LintFmtErrorfCalls(fset *token.FileSet, info types.Info, multipleWraps bool) []analysis.Diagnostic { + lints := []analysis.Diagnostic{} for expr, t := range info.Types { // Search for error expressions that are the result of fmt.Errorf // invocations. @@ -41,53 +38,88 @@ func LintFmtErrorfCalls(fset *token.FileSet, info types.Info) []Lint { continue } - // For any arguments that are errors, check whether the wrapping verb - // is used. Only one %w verb may be used in a single format string at a - // time, so we stop after finding a correct %w. - var lintArg ast.Expr + // For any arguments that are errors, check whether the wrapping verb is used. %w may occur + // for multiple errors in one Errorf invocation, unless multipleWraps is true. We raise an + // issue if at least one error does not have a corresponding wrapping verb. args := call.Args[1:] - for i := 0; i < len(args) && i < len(formatVerbs); i++ { - if !implementsError(info.Types[args[i]].Type) && !isErrorStringCall(info, args[i]) { - continue - } + if !multipleWraps { + wrapCount := 0 + for i := 0; i < len(args) && i < len(formatVerbs); i++ { + arg := args[i] + if !implementsError(info.Types[arg].Type) { + continue + } + verb := formatVerbs[i] + + if verb.format == "w" { + wrapCount++ + if wrapCount > 1 { + lints = append(lints, analysis.Diagnostic{ + Message: "only one %w verb is permitted per format string", + Pos: arg.Pos(), + }) + break + } + } - if formatVerbs[i] == "w" { - lintArg = nil - break + if wrapCount == 0 { + lints = append(lints, analysis.Diagnostic{ + Message: "non-wrapping format verb for fmt.Errorf. 
Use `%w` to format errors", + Pos: args[i].Pos(), + }) + break + } } - if lintArg == nil { - lintArg = args[i] - } - } - if lintArg != nil { - lints = append(lints, Lint{ - Message: "non-wrapping format verb for fmt.Errorf. Use `%w` to format errors", - Pos: lintArg.Pos(), - }) - } - } - return lints -} + } else { + var lint *analysis.Diagnostic + argIndex := 0 + for _, verb := range formatVerbs { + if verb.index != -1 { + argIndex = verb.index + } else { + argIndex++ + } -// isErrorStringCall tests whether the expression is a string expression that -// is the result of an `(error).Error()` method call. -func isErrorStringCall(info types.Info, expr ast.Expr) bool { - if info.Types[expr].Type.String() == "string" { - if call, ok := expr.(*ast.CallExpr); ok { - if callSel, ok := call.Fun.(*ast.SelectorExpr); ok { - fun := info.Uses[callSel.Sel].(*types.Func) - return fun.Type().String() == "func() string" && fun.Name() == "Error" + if verb.format == "w" { + continue + } + if argIndex-1 >= len(args) { + continue + } + arg := args[argIndex-1] + if !implementsError(info.Types[arg].Type) { + continue + } + + strStart := call.Args[0].Pos() + if lint == nil { + lint = &analysis.Diagnostic{ + Message: "non-wrapping format verb for fmt.Errorf. Use `%w` to format errors", + Pos: arg.Pos(), + } + } + lint.SuggestedFixes = append(lint.SuggestedFixes, analysis.SuggestedFix{ + Message: "Use `%w` to format errors", + TextEdits: []analysis.TextEdit{{ + Pos: strStart + token.Pos(verb.formatOffset) + 1, + End: strStart + token.Pos(verb.formatOffset) + 2, + NewText: []byte("w"), + }}, + }) + } + if lint != nil { + lints = append(lints, *lint) } } } - return false + return lints } // printfFormatStringVerbs returns a normalized list of all the verbs that are used per argument to -// the printf function. The index of each returned element corresponds to index of the respective -// argument. -func printfFormatStringVerbs(info types.Info, call *ast.CallExpr) ([]string, bool) { +// the printf function. The index of each returned element corresponds to the index of the +// respective argument. +func printfFormatStringVerbs(info types.Info, call *ast.CallExpr) ([]verb, bool) { if len(call.Args) <= 1 { return nil, false } @@ -103,18 +135,8 @@ func printfFormatStringVerbs(info types.Info, call *ast.CallExpr) ([]string, boo if err != nil { return nil, false } - orderedVerbs := verbOrder(verbs, len(call.Args)-1) - - resolvedVerbs := make([]string, len(orderedVerbs)) - for i, vv := range orderedVerbs { - for _, v := range vv { - resolvedVerbs[i] = v.format - if v.format == "w" { - break - } - } - } - return resolvedVerbs, true + + return verbs, true } func isFmtErrorfCallExpr(info types.Info, expr ast.Expr) (*ast.CallExpr, bool) { @@ -136,10 +158,10 @@ func isFmtErrorfCallExpr(info types.Info, expr ast.Expr) (*ast.CallExpr, bool) { return nil, false } -func LintErrorComparisons(fset *token.FileSet, info *TypesInfoExt) []Lint { - lints := []Lint{} +func LintErrorComparisons(info *TypesInfoExt) []analysis.Diagnostic { + lints := []analysis.Diagnostic{} - for expr := range info.Types { + for expr := range info.TypesInfo.Types { // Find == and != operations. binExpr, ok := expr.(*ast.BinaryExpr) if !ok { @@ -153,7 +175,7 @@ func LintErrorComparisons(fset *token.FileSet, info *TypesInfoExt) []Lint { continue } // Find comparisons of which one side is a of type error. 
- if !isErrorComparison(info.Info, binExpr) { + if !isErrorComparison(info.TypesInfo, binExpr) { continue } // Some errors that are returned from some functions are exempt. @@ -165,13 +187,13 @@ func LintErrorComparisons(fset *token.FileSet, info *TypesInfoExt) []Lint { continue } - lints = append(lints, Lint{ + lints = append(lints, analysis.Diagnostic{ Message: fmt.Sprintf("comparing with %s will fail on wrapped errors. Use errors.Is to check for a specific error", binExpr.Op), Pos: binExpr.Pos(), }) } - for scope := range info.Scopes { + for scope := range info.TypesInfo.Scopes { // Find value switch blocks. switchStmt, ok := scope.(*ast.SwitchStmt) if !ok { @@ -181,7 +203,7 @@ func LintErrorComparisons(fset *token.FileSet, info *TypesInfoExt) []Lint { if switchStmt.Tag == nil { continue } - tagType := info.Types[switchStmt.Tag] + tagType := info.TypesInfo.Types[switchStmt.Tag] if tagType.Type.String() != "error" { continue } @@ -190,7 +212,7 @@ func LintErrorComparisons(fset *token.FileSet, info *TypesInfoExt) []Lint { } if switchComparesNonNil(switchStmt) { - lints = append(lints, Lint{ + lints = append(lints, analysis.Diagnostic{ Message: "switch on an error will fail on wrapped errors. Use errors.Is to check for specific errors", Pos: switchStmt.Pos(), }) @@ -211,7 +233,7 @@ func isNilComparison(binExpr *ast.BinaryExpr) bool { return false } -func isErrorComparison(info types.Info, binExpr *ast.BinaryExpr) bool { +func isErrorComparison(info *types.Info, binExpr *ast.BinaryExpr) bool { tx := info.Types[binExpr.X] ty := info.Types[binExpr.Y] return tx.Type.String() == "error" || ty.Type.String() == "error" @@ -230,11 +252,11 @@ func isNodeInErrorIsFunc(info *TypesInfoExt, node ast.Node) bool { return false } // There should be 1 argument of type error. - if ii := funcDecl.Type.Params.List; len(ii) != 1 || info.Types[ii[0].Type].Type.String() != "error" { + if ii := funcDecl.Type.Params.List; len(ii) != 1 || info.TypesInfo.Types[ii[0].Type].Type.String() != "error" { return false } // The return type should be bool. - if ii := funcDecl.Type.Results.List; len(ii) != 1 || info.Types[ii[0].Type].Type.String() != "bool" { + if ii := funcDecl.Type.Results.List; len(ii) != 1 || info.TypesInfo.Types[ii[0].Type].Type.String() != "bool" { return false } @@ -266,10 +288,10 @@ func switchComparesNonNil(switchStmt *ast.SwitchStmt) bool { return false } -func LintErrorTypeAssertions(fset *token.FileSet, info types.Info) []Lint { - lints := []Lint{} +func LintErrorTypeAssertions(fset *token.FileSet, info *TypesInfoExt) []analysis.Diagnostic { + lints := []analysis.Diagnostic{} - for expr := range info.Types { + for expr := range info.TypesInfo.Types { // Find type assertions. typeAssert, ok := expr.(*ast.TypeAssertExpr) if !ok { @@ -277,17 +299,26 @@ func LintErrorTypeAssertions(fset *token.FileSet, info types.Info) []Lint { } // Find type assertions that operate on values of type error. - if !isErrorTypeAssertion(info, typeAssert) { + if !isErrorTypeAssertion(*info.TypesInfo, typeAssert) { + continue + } + + if isNodeInErrorIsFunc(info, typeAssert) { + continue + } + + // If the asserted type is not an error, allow the expression. + if !implementsError(info.TypesInfo.Types[typeAssert.Type].Type) { continue } - lints = append(lints, Lint{ + lints = append(lints, analysis.Diagnostic{ Message: "type assertion on error will fail on wrapped errors. 
Use errors.As to check for specific errors", Pos: typeAssert.Pos(), }) } - for scope := range info.Scopes { + for scope := range info.TypesInfo.Scopes { // Find type switches. typeSwitch, ok := scope.(*ast.TypeSwitchStmt) if !ok { @@ -304,11 +335,15 @@ func LintErrorTypeAssertions(fset *token.FileSet, info types.Info) []Lint { } // Check whether the type switch is on a value of type error. - if !isErrorTypeAssertion(info, typeAssert) { + if !isErrorTypeAssertion(*info.TypesInfo, typeAssert) { + continue + } + + if isNodeInErrorIsFunc(info, typeSwitch) { continue } - lints = append(lints, Lint{ + lints = append(lints, analysis.Diagnostic{ Message: "type switch on error will fail on wrapped errors. Use errors.As to check for specific errors", Pos: typeAssert.Pos(), }) diff --git a/vendor/github.com/polyfloyd/go-errorlint/errorlint/printf.go b/vendor/github.com/polyfloyd/go-errorlint/errorlint/printf.go index d9a935ff2b..4c0e12525d 100644 --- a/vendor/github.com/polyfloyd/go-errorlint/errorlint/printf.go +++ b/vendor/github.com/polyfloyd/go-errorlint/errorlint/printf.go @@ -7,30 +7,15 @@ import ( "strings" ) -func verbOrder(verbs []verb, numArgs int) [][]verb { - orderedVerbs := make([][]verb, numArgs) - i := 0 - for _, v := range verbs { - if v.index != -1 { - i = v.index - 1 - } - if i >= len(orderedVerbs) { - continue - } - orderedVerbs[i] = append(orderedVerbs[i], v) - verbs = verbs[1:] - i++ - } - return orderedVerbs -} - type verb struct { - format string - index int + format string + formatOffset int + index int } type printfParser struct { str string + at int } func (pp *printfParser) ParseAllVerbs() ([]verb, error) { @@ -80,7 +65,7 @@ func (pp *printfParser) parseVerb() (*verb, error) { format := pp.next() - return &verb{format: string(format), index: index}, nil + return &verb{format: string(format), formatOffset: pp.at - 1, index: index}, nil } func (pp *printfParser) parseIndex() (int, error) { @@ -96,6 +81,7 @@ func (pp *printfParser) parseIndex() (int, error) { return -1, err } pp.str = pp.str[end+1:] + pp.at += end + 1 return index, nil } @@ -114,6 +100,7 @@ func (pp *printfParser) skipToPercent() error { return io.EOF } pp.str = pp.str[i:] + pp.at += i return nil } @@ -130,5 +117,6 @@ func (pp *printfParser) next() rune { } r := rune(pp.str[0]) pp.str = pp.str[1:] + pp.at++ return r } diff --git a/vendor/github.com/quasilyte/go-ruleguard/internal/goenv/goenv.go b/vendor/github.com/quasilyte/go-ruleguard/internal/goenv/goenv.go index 2f207aa07a..52d0f5204f 100644 --- a/vendor/github.com/quasilyte/go-ruleguard/internal/goenv/goenv.go +++ b/vendor/github.com/quasilyte/go-ruleguard/internal/goenv/goenv.go @@ -3,46 +3,26 @@ package goenv import ( "errors" "os/exec" - "runtime" - "strconv" "strings" ) func Read() (map[string]string, error) { - out, err := exec.Command("go", "env").CombinedOutput() + // pass in a fixed set of var names to avoid needing to unescape output + // pass in literals here instead of a variable list to avoid security linter warnings about command injection + out, err := exec.Command("go", "env", "GOROOT", "GOPATH", "GOARCH", "GOOS", "CGO_ENABLED").CombinedOutput() if err != nil { return nil, err } - return parseGoEnv(out, runtime.GOOS) + return parseGoEnv([]string{"GOROOT", "GOPATH", "GOARCH", "GOOS", "CGO_ENABLED"}, out) } -func parseGoEnv(data []byte, goos string) (map[string]string, error) { +func parseGoEnv(varNames []string, data []byte) (map[string]string, error) { vars := make(map[string]string) lines := strings.Split(strings.ReplaceAll(string(data), 
"\r\n", "\n"), "\n") - - if goos == "windows" { - // Line format is: `set $name=$value` - for _, l := range lines { - l = strings.TrimPrefix(l, "set ") - parts := strings.Split(l, "=") - if len(parts) != 2 { - continue - } - vars[parts[0]] = parts[1] - } - } else { - // Line format is: `$name="$value"` - for _, l := range lines { - parts := strings.Split(strings.TrimSpace(l), "=") - if len(parts) != 2 { - continue - } - val, err := strconv.Unquote(parts[1]) - if err != nil { - continue - } - vars[parts[0]] = val + for i, varName := range varNames { + if i < len(lines) && len(lines[i]) > 0 { + vars[varName] = lines[i] } } diff --git a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/engine.go b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/engine.go index a5e6ca4d6b..e4cf954ffd 100644 --- a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/engine.go +++ b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/engine.go @@ -8,7 +8,6 @@ import ( "go/token" "go/types" "io" - "io/ioutil" "os" "sort" "strings" @@ -48,7 +47,7 @@ func (e *engine) LoadedGroups() []GoRuleGroup { } func (e *engine) Load(ctx *LoadContext, buildContext *build.Context, filename string, r io.Reader) error { - data, err := ioutil.ReadAll(r) + data, err := io.ReadAll(r) if err != nil { return err } @@ -131,6 +130,7 @@ func (e *engine) Run(ctx *RunContext, buildContext *build.Context, f *ast.File) } // engineState is a shared state inside the engine. +// Its access is synchronized, unlike the RunnerState which should be thread-local. type engineState struct { env *quasigo.Env diff --git a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/filters.go b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/filters.go index 604ae4a189..7320ab7fb7 100644 --- a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/filters.go +++ b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/filters.go @@ -23,9 +23,16 @@ func filterFailure(reason string) matchFilterResult { return matchFilterResult(reason) } -func exprListFilterApply(src string, list gogrep.ExprSlice, fn func(ast.Expr) bool) matchFilterResult { - for i := 0; i < list.Len(); i++ { - if !fn(list.At(i).(ast.Expr)) { +func asExprSlice(x ast.Node) *gogrep.NodeSlice { + if x, ok := x.(*gogrep.NodeSlice); ok && x.Kind == gogrep.ExprNodeSlice { + return x + } + return nil +} + +func exprListFilterApply(src string, list []ast.Expr, fn func(ast.Expr) bool) matchFilterResult { + for _, e := range list { + if !fn(e) { return filterFailure(src) } } @@ -99,12 +106,11 @@ func makeFileNameMatchesFilter(src string, re textmatch.Pattern) filterFunc { func makePureFilter(src, varname string) filterFunc { return func(params *filterParams) matchFilterResult { - if list, ok := params.subNode(varname).(gogrep.ExprSlice); ok { - return exprListFilterApply(src, list, func(x ast.Expr) bool { + if list := asExprSlice(params.subNode(varname)); list != nil { + return exprListFilterApply(src, list.GetExprSlice(), func(x ast.Expr) bool { return isPure(params.ctx.Types, x) }) } - n := params.subExpr(varname) if isPure(params.ctx.Types, n) { return filterSuccess @@ -115,8 +121,8 @@ func makePureFilter(src, varname string) filterFunc { func makeConstFilter(src, varname string) filterFunc { return func(params *filterParams) matchFilterResult { - if list, ok := params.subNode(varname).(gogrep.ExprSlice); ok { - return exprListFilterApply(src, list, func(x ast.Expr) bool { + if list := asExprSlice(params.subNode(varname)); list != nil { + return exprListFilterApply(src, list.GetExprSlice(), func(x ast.Expr) 
bool { return isConstant(params.ctx.Types, x) }) } @@ -131,8 +137,8 @@ func makeConstFilter(src, varname string) filterFunc { func makeConstSliceFilter(src, varname string) filterFunc { return func(params *filterParams) matchFilterResult { - if list, ok := params.subNode(varname).(gogrep.ExprSlice); ok { - return exprListFilterApply(src, list, func(x ast.Expr) bool { + if list := asExprSlice(params.subNode(varname)); list != nil { + return exprListFilterApply(src, list.GetExprSlice(), func(x ast.Expr) bool { return isConstantSlice(params.ctx.Types, x) }) } @@ -147,8 +153,8 @@ func makeConstSliceFilter(src, varname string) filterFunc { func makeAddressableFilter(src, varname string) filterFunc { return func(params *filterParams) matchFilterResult { - if list, ok := params.subNode(varname).(gogrep.ExprSlice); ok { - return exprListFilterApply(src, list, func(x ast.Expr) bool { + if list := asExprSlice(params.subNode(varname)); list != nil { + return exprListFilterApply(src, list.GetExprSlice(), func(x ast.Expr) bool { return isAddressable(params.ctx.Types, x) }) } @@ -163,8 +169,8 @@ func makeAddressableFilter(src, varname string) filterFunc { func makeComparableFilter(src, varname string) filterFunc { return func(params *filterParams) matchFilterResult { - if list, ok := params.subNode(varname).(gogrep.ExprSlice); ok { - return exprListFilterApply(src, list, func(x ast.Expr) bool { + if list := asExprSlice(params.subNode(varname)); list != nil { + return exprListFilterApply(src, list.GetExprSlice(), func(x ast.Expr) bool { return types.Comparable(params.typeofNode(x)) }) } @@ -212,8 +218,8 @@ func makeCustomVarFilter(src, varname string, fn *quasigo.Func) filterFunc { func makeTypeImplementsFilter(src, varname string, iface *types.Interface) filterFunc { return func(params *filterParams) matchFilterResult { - if list, ok := params.subNode(varname).(gogrep.ExprSlice); ok { - return exprListFilterApply(src, list, func(x ast.Expr) bool { + if list := asExprSlice(params.subNode(varname)); list != nil { + return exprListFilterApply(src, list.GetExprSlice(), func(x ast.Expr) bool { return xtypes.Implements(params.typeofNode(x), iface) }) } @@ -322,8 +328,8 @@ func makeRootSinkTypeIsFilter(src string, pat *typematch.Pattern) filterFunc { func makeTypeIsFilter(src, varname string, underlying bool, pat *typematch.Pattern) filterFunc { if underlying { return func(params *filterParams) matchFilterResult { - if list, ok := params.subNode(varname).(gogrep.ExprSlice); ok { - return exprListFilterApply(src, list, func(x ast.Expr) bool { + if list := asExprSlice(params.subNode(varname)); list != nil { + return exprListFilterApply(src, list.GetExprSlice(), func(x ast.Expr) bool { return pat.MatchIdentical(params.typematchState, params.typeofNode(x).Underlying()) }) } @@ -336,8 +342,8 @@ func makeTypeIsFilter(src, varname string, underlying bool, pat *typematch.Patte } return func(params *filterParams) matchFilterResult { - if list, ok := params.subNode(varname).(gogrep.ExprSlice); ok { - return exprListFilterApply(src, list, func(x ast.Expr) bool { + if list := asExprSlice(params.subNode(varname)); list != nil { + return exprListFilterApply(src, list.GetExprSlice(), func(x ast.Expr) bool { return pat.MatchIdentical(params.typematchState, params.typeofNode(x)) }) } @@ -351,8 +357,8 @@ func makeTypeIsFilter(src, varname string, underlying bool, pat *typematch.Patte func makeTypeConvertibleToFilter(src, varname string, dstType types.Type) filterFunc { return func(params *filterParams) matchFilterResult { - if 
list, ok := params.subNode(varname).(gogrep.ExprSlice); ok { - return exprListFilterApply(src, list, func(x ast.Expr) bool { + if list := asExprSlice(params.subNode(varname)); list != nil { + return exprListFilterApply(src, list.GetExprSlice(), func(x ast.Expr) bool { return types.ConvertibleTo(params.typeofNode(x), dstType) }) } @@ -367,8 +373,8 @@ func makeTypeConvertibleToFilter(src, varname string, dstType types.Type) filter func makeTypeAssignableToFilter(src, varname string, dstType types.Type) filterFunc { return func(params *filterParams) matchFilterResult { - if list, ok := params.subNode(varname).(gogrep.ExprSlice); ok { - return exprListFilterApply(src, list, func(x ast.Expr) bool { + if list := asExprSlice(params.subNode(varname)); list != nil { + return exprListFilterApply(src, list.GetExprSlice(), func(x ast.Expr) bool { return types.AssignableTo(params.typeofNode(x), dstType) }) } @@ -395,6 +401,28 @@ func makeLineFilter(src, varname string, op token.Token, rhsVarname string) filt } } +func makeObjectIsVariadicParamFilter(src, varname string) filterFunc { + return func(params *filterParams) matchFilterResult { + if params.currentFunc == nil { + return filterFailure(src) + } + funcObj, ok := params.ctx.Types.ObjectOf(params.currentFunc.Name).(*types.Func) + if !ok { + return filterFailure(src) + } + funcSig := funcObj.Type().(*types.Signature) + if !funcSig.Variadic() { + return filterFailure(src) + } + paramObj := funcSig.Params().At(funcSig.Params().Len() - 1) + obj := params.ctx.Types.ObjectOf(identOf(params.subExpr(varname))) + if paramObj != obj { + return filterFailure(src) + } + return filterSuccess + } +} + func makeObjectIsGlobalFilter(src, varname string) filterFunc { return func(params *filterParams) matchFilterResult { obj := params.ctx.Types.ObjectOf(identOf(params.subExpr(varname))) @@ -433,15 +461,21 @@ func makeLineConstFilter(src, varname string, op token.Token, rhsValue constant. 
func makeTypeSizeConstFilter(src, varname string, op token.Token, rhsValue constant.Value) filterFunc { return func(params *filterParams) matchFilterResult { - if list, ok := params.subNode(varname).(gogrep.ExprSlice); ok { - return exprListFilterApply(src, list, func(x ast.Expr) bool { + if list := asExprSlice(params.subNode(varname)); list != nil { + return exprListFilterApply(src, list.GetExprSlice(), func(x ast.Expr) bool { typ := params.typeofNode(x) + if isTypeParam(typ) { + return false + } lhsValue := constant.MakeInt64(params.ctx.Sizes.Sizeof(typ)) return constant.Compare(lhsValue, op, rhsValue) }) } typ := params.typeofNode(params.subExpr(varname)) + if isTypeParam(typ) { + return filterFailure(src) + } lhsValue := constant.MakeInt64(params.ctx.Sizes.Sizeof(typ)) if constant.Compare(lhsValue, op, rhsValue) { return filterSuccess @@ -453,8 +487,11 @@ func makeTypeSizeConstFilter(src, varname string, op token.Token, rhsValue const func makeTypeSizeFilter(src, varname string, op token.Token, rhsVarname string) filterFunc { return func(params *filterParams) matchFilterResult { lhsTyp := params.typeofNode(params.subExpr(varname)) - lhsValue := constant.MakeInt64(params.ctx.Sizes.Sizeof(lhsTyp)) rhsTyp := params.typeofNode(params.subExpr(rhsVarname)) + if isTypeParam(lhsTyp) || isTypeParam(rhsTyp) { + return filterFailure(src) + } + lhsValue := constant.MakeInt64(params.ctx.Sizes.Sizeof(lhsTyp)) rhsValue := constant.MakeInt64(params.ctx.Sizes.Sizeof(rhsTyp)) if constant.Compare(lhsValue, op, rhsValue) { return filterSuccess @@ -465,8 +502,8 @@ func makeTypeSizeFilter(src, varname string, op token.Token, rhsVarname string) func makeValueIntConstFilter(src, varname string, op token.Token, rhsValue constant.Value) filterFunc { return func(params *filterParams) matchFilterResult { - if list, ok := params.subNode(varname).(gogrep.ExprSlice); ok { - return exprListFilterApply(src, list, func(x ast.Expr) bool { + if list := asExprSlice(params.subNode(varname)); list != nil { + return exprListFilterApply(src, list.GetExprSlice(), func(x ast.Expr) bool { lhsValue := intValueOf(params.ctx.Types, x) return lhsValue != nil && constant.Compare(lhsValue, op, rhsValue) }) @@ -606,8 +643,8 @@ func makeObjectIsFilter(src, varname, objectName string) filterFunc { } return func(params *filterParams) matchFilterResult { - if list, ok := params.subNode(varname).(gogrep.ExprSlice); ok { - return exprListFilterApply(src, list, func(x ast.Expr) bool { + if list := asExprSlice(params.subNode(varname)); list != nil { + return exprListFilterApply(src, list.GetExprSlice(), func(x ast.Expr) bool { ident := identOf(x) return ident != nil && predicate(params.ctx.Types.ObjectOf(ident)) }) diff --git a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/ir/filter_op.gen.go b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/ir/filter_op.gen.go index c9401c0208..bc2a5ee5b9 100644 --- a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/ir/filter_op.gen.go +++ b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/ir/filter_op.gen.go @@ -88,189 +88,195 @@ const ( // $Value type: string FilterVarObjectIsGlobalOp FilterOp = 23 + // m[$Value].Object.IsVariadicParam() + // $Value type: string + FilterVarObjectIsVariadicParamOp FilterOp = 24 + // m[$Value].Type.Is($Args[0]) // $Value type: string - FilterVarTypeIsOp FilterOp = 24 + FilterVarTypeIsOp FilterOp = 25 // m[$Value].Type.IdenticalTo($Args[0]) // $Value type: string - FilterVarTypeIdenticalToOp FilterOp = 25 + FilterVarTypeIdenticalToOp FilterOp = 26 // 
m[$Value].Type.Underlying().Is($Args[0]) // $Value type: string - FilterVarTypeUnderlyingIsOp FilterOp = 26 + FilterVarTypeUnderlyingIsOp FilterOp = 27 // m[$Value].Type.OfKind($Args[0]) // $Value type: string - FilterVarTypeOfKindOp FilterOp = 27 + FilterVarTypeOfKindOp FilterOp = 28 // m[$Value].Type.Underlying().OfKind($Args[0]) // $Value type: string - FilterVarTypeUnderlyingOfKindOp FilterOp = 28 + FilterVarTypeUnderlyingOfKindOp FilterOp = 29 // m[$Value].Type.ConvertibleTo($Args[0]) // $Value type: string - FilterVarTypeConvertibleToOp FilterOp = 29 + FilterVarTypeConvertibleToOp FilterOp = 30 // m[$Value].Type.AssignableTo($Args[0]) // $Value type: string - FilterVarTypeAssignableToOp FilterOp = 30 + FilterVarTypeAssignableToOp FilterOp = 31 // m[$Value].Type.Implements($Args[0]) // $Value type: string - FilterVarTypeImplementsOp FilterOp = 31 + FilterVarTypeImplementsOp FilterOp = 32 // m[$Value].Type.HasMethod($Args[0]) // $Value type: string - FilterVarTypeHasMethodOp FilterOp = 32 + FilterVarTypeHasMethodOp FilterOp = 33 // m[$Value].Text.Matches($Args[0]) // $Value type: string - FilterVarTextMatchesOp FilterOp = 33 + FilterVarTextMatchesOp FilterOp = 34 // m[$Value].Contains($Args[0]) // $Value type: string - FilterVarContainsOp FilterOp = 34 + FilterVarContainsOp FilterOp = 35 // m.Deadcode() - FilterDeadcodeOp FilterOp = 35 + FilterDeadcodeOp FilterOp = 36 // m.GoVersion().Eq($Value) // $Value type: string - FilterGoVersionEqOp FilterOp = 36 + FilterGoVersionEqOp FilterOp = 37 // m.GoVersion().LessThan($Value) // $Value type: string - FilterGoVersionLessThanOp FilterOp = 37 + FilterGoVersionLessThanOp FilterOp = 38 // m.GoVersion().GreaterThan($Value) // $Value type: string - FilterGoVersionGreaterThanOp FilterOp = 38 + FilterGoVersionGreaterThanOp FilterOp = 39 // m.GoVersion().LessEqThan($Value) // $Value type: string - FilterGoVersionLessEqThanOp FilterOp = 39 + FilterGoVersionLessEqThanOp FilterOp = 40 // m.GoVersion().GreaterEqThan($Value) // $Value type: string - FilterGoVersionGreaterEqThanOp FilterOp = 40 + FilterGoVersionGreaterEqThanOp FilterOp = 41 // m.File.Imports($Value) // $Value type: string - FilterFileImportsOp FilterOp = 41 + FilterFileImportsOp FilterOp = 42 // m.File.PkgPath.Matches($Value) // $Value type: string - FilterFilePkgPathMatchesOp FilterOp = 42 + FilterFilePkgPathMatchesOp FilterOp = 43 // m.File.Name.Matches($Value) // $Value type: string - FilterFileNameMatchesOp FilterOp = 43 + FilterFileNameMatchesOp FilterOp = 44 // $Value holds a function name // $Value type: string - FilterFilterFuncRefOp FilterOp = 44 + FilterFilterFuncRefOp FilterOp = 45 // $Value holds a string constant // $Value type: string - FilterStringOp FilterOp = 45 + FilterStringOp FilterOp = 46 // $Value holds an int64 constant // $Value type: int64 - FilterIntOp FilterOp = 46 + FilterIntOp FilterOp = 47 // m[`$$`].Node.Parent().Is($Args[0]) - FilterRootNodeParentIsOp FilterOp = 47 + FilterRootNodeParentIsOp FilterOp = 48 // m[`$$`].SinkType.Is($Args[0]) - FilterRootSinkTypeIsOp FilterOp = 48 + FilterRootSinkTypeIsOp FilterOp = 49 ) var filterOpNames = map[FilterOp]string{ - FilterInvalidOp: `Invalid`, - FilterNotOp: `Not`, - FilterAndOp: `And`, - FilterOrOp: `Or`, - FilterEqOp: `Eq`, - FilterNeqOp: `Neq`, - FilterGtOp: `Gt`, - FilterLtOp: `Lt`, - FilterGtEqOp: `GtEq`, - FilterLtEqOp: `LtEq`, - FilterVarAddressableOp: `VarAddressable`, - FilterVarComparableOp: `VarComparable`, - FilterVarPureOp: `VarPure`, - FilterVarConstOp: `VarConst`, - FilterVarConstSliceOp: 
`VarConstSlice`, - FilterVarTextOp: `VarText`, - FilterVarLineOp: `VarLine`, - FilterVarValueIntOp: `VarValueInt`, - FilterVarTypeSizeOp: `VarTypeSize`, - FilterVarTypeHasPointersOp: `VarTypeHasPointers`, - FilterVarFilterOp: `VarFilter`, - FilterVarNodeIsOp: `VarNodeIs`, - FilterVarObjectIsOp: `VarObjectIs`, - FilterVarObjectIsGlobalOp: `VarObjectIsGlobal`, - FilterVarTypeIsOp: `VarTypeIs`, - FilterVarTypeIdenticalToOp: `VarTypeIdenticalTo`, - FilterVarTypeUnderlyingIsOp: `VarTypeUnderlyingIs`, - FilterVarTypeOfKindOp: `VarTypeOfKind`, - FilterVarTypeUnderlyingOfKindOp: `VarTypeUnderlyingOfKind`, - FilterVarTypeConvertibleToOp: `VarTypeConvertibleTo`, - FilterVarTypeAssignableToOp: `VarTypeAssignableTo`, - FilterVarTypeImplementsOp: `VarTypeImplements`, - FilterVarTypeHasMethodOp: `VarTypeHasMethod`, - FilterVarTextMatchesOp: `VarTextMatches`, - FilterVarContainsOp: `VarContains`, - FilterDeadcodeOp: `Deadcode`, - FilterGoVersionEqOp: `GoVersionEq`, - FilterGoVersionLessThanOp: `GoVersionLessThan`, - FilterGoVersionGreaterThanOp: `GoVersionGreaterThan`, - FilterGoVersionLessEqThanOp: `GoVersionLessEqThan`, - FilterGoVersionGreaterEqThanOp: `GoVersionGreaterEqThan`, - FilterFileImportsOp: `FileImports`, - FilterFilePkgPathMatchesOp: `FilePkgPathMatches`, - FilterFileNameMatchesOp: `FileNameMatches`, - FilterFilterFuncRefOp: `FilterFuncRef`, - FilterStringOp: `String`, - FilterIntOp: `Int`, - FilterRootNodeParentIsOp: `RootNodeParentIs`, - FilterRootSinkTypeIsOp: `RootSinkTypeIs`, + FilterInvalidOp: `Invalid`, + FilterNotOp: `Not`, + FilterAndOp: `And`, + FilterOrOp: `Or`, + FilterEqOp: `Eq`, + FilterNeqOp: `Neq`, + FilterGtOp: `Gt`, + FilterLtOp: `Lt`, + FilterGtEqOp: `GtEq`, + FilterLtEqOp: `LtEq`, + FilterVarAddressableOp: `VarAddressable`, + FilterVarComparableOp: `VarComparable`, + FilterVarPureOp: `VarPure`, + FilterVarConstOp: `VarConst`, + FilterVarConstSliceOp: `VarConstSlice`, + FilterVarTextOp: `VarText`, + FilterVarLineOp: `VarLine`, + FilterVarValueIntOp: `VarValueInt`, + FilterVarTypeSizeOp: `VarTypeSize`, + FilterVarTypeHasPointersOp: `VarTypeHasPointers`, + FilterVarFilterOp: `VarFilter`, + FilterVarNodeIsOp: `VarNodeIs`, + FilterVarObjectIsOp: `VarObjectIs`, + FilterVarObjectIsGlobalOp: `VarObjectIsGlobal`, + FilterVarObjectIsVariadicParamOp: `VarObjectIsVariadicParam`, + FilterVarTypeIsOp: `VarTypeIs`, + FilterVarTypeIdenticalToOp: `VarTypeIdenticalTo`, + FilterVarTypeUnderlyingIsOp: `VarTypeUnderlyingIs`, + FilterVarTypeOfKindOp: `VarTypeOfKind`, + FilterVarTypeUnderlyingOfKindOp: `VarTypeUnderlyingOfKind`, + FilterVarTypeConvertibleToOp: `VarTypeConvertibleTo`, + FilterVarTypeAssignableToOp: `VarTypeAssignableTo`, + FilterVarTypeImplementsOp: `VarTypeImplements`, + FilterVarTypeHasMethodOp: `VarTypeHasMethod`, + FilterVarTextMatchesOp: `VarTextMatches`, + FilterVarContainsOp: `VarContains`, + FilterDeadcodeOp: `Deadcode`, + FilterGoVersionEqOp: `GoVersionEq`, + FilterGoVersionLessThanOp: `GoVersionLessThan`, + FilterGoVersionGreaterThanOp: `GoVersionGreaterThan`, + FilterGoVersionLessEqThanOp: `GoVersionLessEqThan`, + FilterGoVersionGreaterEqThanOp: `GoVersionGreaterEqThan`, + FilterFileImportsOp: `FileImports`, + FilterFilePkgPathMatchesOp: `FilePkgPathMatches`, + FilterFileNameMatchesOp: `FileNameMatches`, + FilterFilterFuncRefOp: `FilterFuncRef`, + FilterStringOp: `String`, + FilterIntOp: `Int`, + FilterRootNodeParentIsOp: `RootNodeParentIs`, + FilterRootSinkTypeIsOp: `RootSinkTypeIs`, } var filterOpFlags = map[FilterOp]uint64{ - FilterAndOp: flagIsBinaryExpr, - 
FilterOrOp: flagIsBinaryExpr, - FilterEqOp: flagIsBinaryExpr, - FilterNeqOp: flagIsBinaryExpr, - FilterGtOp: flagIsBinaryExpr, - FilterLtOp: flagIsBinaryExpr, - FilterGtEqOp: flagIsBinaryExpr, - FilterLtEqOp: flagIsBinaryExpr, - FilterVarAddressableOp: flagHasVar, - FilterVarComparableOp: flagHasVar, - FilterVarPureOp: flagHasVar, - FilterVarConstOp: flagHasVar, - FilterVarConstSliceOp: flagHasVar, - FilterVarTextOp: flagHasVar, - FilterVarLineOp: flagHasVar, - FilterVarValueIntOp: flagHasVar, - FilterVarTypeSizeOp: flagHasVar, - FilterVarTypeHasPointersOp: flagHasVar, - FilterVarFilterOp: flagHasVar, - FilterVarNodeIsOp: flagHasVar, - FilterVarObjectIsOp: flagHasVar, - FilterVarObjectIsGlobalOp: flagHasVar, - FilterVarTypeIsOp: flagHasVar, - FilterVarTypeIdenticalToOp: flagHasVar, - FilterVarTypeUnderlyingIsOp: flagHasVar, - FilterVarTypeOfKindOp: flagHasVar, - FilterVarTypeUnderlyingOfKindOp: flagHasVar, - FilterVarTypeConvertibleToOp: flagHasVar, - FilterVarTypeAssignableToOp: flagHasVar, - FilterVarTypeImplementsOp: flagHasVar, - FilterVarTypeHasMethodOp: flagHasVar, - FilterVarTextMatchesOp: flagHasVar, - FilterVarContainsOp: flagHasVar, - FilterStringOp: flagIsBasicLit, - FilterIntOp: flagIsBasicLit, + FilterAndOp: flagIsBinaryExpr, + FilterOrOp: flagIsBinaryExpr, + FilterEqOp: flagIsBinaryExpr, + FilterNeqOp: flagIsBinaryExpr, + FilterGtOp: flagIsBinaryExpr, + FilterLtOp: flagIsBinaryExpr, + FilterGtEqOp: flagIsBinaryExpr, + FilterLtEqOp: flagIsBinaryExpr, + FilterVarAddressableOp: flagHasVar, + FilterVarComparableOp: flagHasVar, + FilterVarPureOp: flagHasVar, + FilterVarConstOp: flagHasVar, + FilterVarConstSliceOp: flagHasVar, + FilterVarTextOp: flagHasVar, + FilterVarLineOp: flagHasVar, + FilterVarValueIntOp: flagHasVar, + FilterVarTypeSizeOp: flagHasVar, + FilterVarTypeHasPointersOp: flagHasVar, + FilterVarFilterOp: flagHasVar, + FilterVarNodeIsOp: flagHasVar, + FilterVarObjectIsOp: flagHasVar, + FilterVarObjectIsGlobalOp: flagHasVar, + FilterVarObjectIsVariadicParamOp: flagHasVar, + FilterVarTypeIsOp: flagHasVar, + FilterVarTypeIdenticalToOp: flagHasVar, + FilterVarTypeUnderlyingIsOp: flagHasVar, + FilterVarTypeOfKindOp: flagHasVar, + FilterVarTypeUnderlyingOfKindOp: flagHasVar, + FilterVarTypeConvertibleToOp: flagHasVar, + FilterVarTypeAssignableToOp: flagHasVar, + FilterVarTypeImplementsOp: flagHasVar, + FilterVarTypeHasMethodOp: flagHasVar, + FilterVarTextMatchesOp: flagHasVar, + FilterVarContainsOp: flagHasVar, + FilterStringOp: flagIsBasicLit, + FilterIntOp: flagIsBasicLit, } diff --git a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/ir/gen_filter_op.go b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/ir/gen_filter_op.go index d3b7409053..b1c8194926 100644 --- a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/ir/gen_filter_op.go +++ b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/ir/gen_filter_op.go @@ -7,7 +7,7 @@ import ( "bytes" "fmt" "go/format" - "io/ioutil" + "os" "strings" ) @@ -55,6 +55,7 @@ func main() { {name: "VarNodeIs", comment: "m[$Value].Node.Is($Args[0])", valueType: "string", flags: flagHasVar}, {name: "VarObjectIs", comment: "m[$Value].Object.Is($Args[0])", valueType: "string", flags: flagHasVar}, {name: "VarObjectIsGlobal", comment: "m[$Value].Object.IsGlobal()", valueType: "string", flags: flagHasVar}, + {name: "VarObjectIsVariadicParam", comment: "m[$Value].Object.IsVariadicParam()", valueType: "string", flags: flagHasVar}, {name: "VarTypeIs", comment: "m[$Value].Type.Is($Args[0])", valueType: "string", flags: flagHasVar}, {name: 
"VarTypeIdenticalTo", comment: "m[$Value].Type.IdenticalTo($Args[0])", valueType: "string", flags: flagHasVar}, {name: "VarTypeUnderlyingIs", comment: "m[$Value].Type.Underlying().Is($Args[0])", valueType: "string", flags: flagHasVar}, @@ -141,7 +142,7 @@ func main() { panic(err) } - if err := ioutil.WriteFile("filter_op.gen.go", pretty, 0644); err != nil { + if err := os.WriteFile("filter_op.gen.go", pretty, 0644); err != nil { panic(err) } } diff --git a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/ir_loader.go b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/ir_loader.go index c07a19f54f..dcb2fd2afa 100644 --- a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/ir_loader.go +++ b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/ir_loader.go @@ -8,7 +8,7 @@ import ( "go/parser" "go/token" "go/types" - "io/ioutil" + "os" "regexp" "github.com/quasilyte/gogrep" @@ -144,7 +144,7 @@ func (l *irLoader) loadBundle(bundle ir.BundleImport) error { } func (l *irLoader) loadExternFile(prefix, pkgPath, filename string) (*goRuleSet, error) { - src, err := ioutil.ReadFile(filename) + src, err := os.ReadFile(filename) if err != nil { return nil, err } @@ -699,6 +699,8 @@ func (l *irLoader) newFilter(filter ir.FilterExpr, info *filterInfo) (matchFilte result.fn = makeConstFilter(result.src, filter.Value.(string)) case ir.FilterVarObjectIsGlobalOp: result.fn = makeObjectIsGlobalFilter(result.src, filter.Value.(string)) + case ir.FilterVarObjectIsVariadicParamOp: + result.fn = makeObjectIsVariadicParamFilter(result.src, filter.Value.(string)) case ir.FilterVarConstSliceOp: result.fn = makeConstSliceFilter(result.src, filter.Value.(string)) case ir.FilterVarAddressableOp: diff --git a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/irconv/irconv.go b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/irconv/irconv.go index 646091fedc..4eb90d51b2 100644 --- a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/irconv/irconv.go +++ b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/irconv/irconv.go @@ -746,6 +746,8 @@ func (conv *converter) convertFilterExprImpl(e ast.Expr) ir.FilterExpr { return ir.FilterExpr{Op: ir.FilterVarObjectIsOp, Value: op.varName, Args: args} case "Object.IsGlobal": return ir.FilterExpr{Op: ir.FilterVarObjectIsGlobalOp, Value: op.varName} + case "Object.IsVariadicParam": + return ir.FilterExpr{Op: ir.FilterVarObjectIsVariadicParamOp, Value: op.varName} case "SinkType.Is": if op.varName != "$$" { // TODO: remove this restriction. diff --git a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/match_data.go b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/match_data.go index 3bf3bf5a82..b0909f75fd 100644 --- a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/match_data.go +++ b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/match_data.go @@ -6,41 +6,14 @@ import ( "github.com/quasilyte/gogrep" ) -// matchData is used to handle both regexp and AST match sets in the same way. -type matchData interface { - // TODO: don't use gogrep.CapturedNode type here. 
- - Node() ast.Node - CaptureList() []gogrep.CapturedNode - CapturedByName(name string) (ast.Node, bool) -} - -type commentMatchData struct { - node ast.Node - capture []gogrep.CapturedNode -} - -func (m commentMatchData) Node() ast.Node { return m.node } - -func (m commentMatchData) CaptureList() []gogrep.CapturedNode { return m.capture } - -func (m commentMatchData) CapturedByName(name string) (ast.Node, bool) { - for _, c := range m.capture { - if c.Name == name { - return c.Node, true - } - } - return nil, false -} - -type astMatchData struct { +type matchData struct { match gogrep.MatchData } -func (m astMatchData) Node() ast.Node { return m.match.Node } +func (m matchData) Node() ast.Node { return m.match.Node } -func (m astMatchData) CaptureList() []gogrep.CapturedNode { return m.match.Capture } +func (m matchData) CaptureList() []gogrep.CapturedNode { return m.match.Capture } -func (m astMatchData) CapturedByName(name string) (ast.Node, bool) { +func (m matchData) CapturedByName(name string) (ast.Node, bool) { return m.match.CapturedByName(name) } diff --git a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/nodepath.go b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/nodepath.go index b0f02f0aa2..4ba741ee2f 100644 --- a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/nodepath.go +++ b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/nodepath.go @@ -10,8 +10,8 @@ type nodePath struct { stack []ast.Node } -func newNodePath() nodePath { - return nodePath{stack: make([]ast.Node, 0, 32)} +func newNodePath() *nodePath { + return &nodePath{stack: make([]ast.Node, 0, 32)} } func (p nodePath) String() string { @@ -22,15 +22,15 @@ func (p nodePath) String() string { return strings.Join(parts, "/") } -func (p nodePath) Parent() ast.Node { +func (p *nodePath) Parent() ast.Node { return p.NthParent(1) } -func (p nodePath) Current() ast.Node { +func (p *nodePath) Current() ast.Node { return p.NthParent(0) } -func (p nodePath) NthParent(n int) ast.Node { +func (p *nodePath) NthParent(n int) ast.Node { index := uint(len(p.stack) - n - 1) if index < uint(len(p.stack)) { return p.stack[index] diff --git a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/quasigo/gen_opcodes.go b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/quasigo/gen_opcodes.go index c8d512038c..80386df28a 100644 --- a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/quasigo/gen_opcodes.go +++ b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/quasigo/gen_opcodes.go @@ -7,8 +7,8 @@ import ( "bytes" "fmt" "go/format" - "io/ioutil" "log" + "os" "strings" "text/template" ) @@ -186,7 +186,7 @@ func writeFile(filename string, data []byte) { if err != nil { log.Panicf("gofmt: %v", err) } - if err := ioutil.WriteFile(filename, pretty, 0666); err != nil { + if err := os.WriteFile(filename, pretty, 0666); err != nil { log.Panicf("write %s: %v", filename, err) } } diff --git a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/ruleguard.go b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/ruleguard.go index 1a2e2f05f9..41fbc8995d 100644 --- a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/ruleguard.go +++ b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/ruleguard.go @@ -8,6 +8,9 @@ import ( "io" "github.com/quasilyte/go-ruleguard/ruleguard/ir" + "github.com/quasilyte/go-ruleguard/ruleguard/quasigo" + "github.com/quasilyte/go-ruleguard/ruleguard/typematch" + "github.com/quasilyte/gogrep" ) // Engine is the main ruleguard package API object. 
@@ -88,6 +91,21 @@ type LoadContext struct { Fset *token.FileSet } +type RunnerState struct { + gogrepState gogrep.MatcherState + gogrepSubState gogrep.MatcherState + nodePath *nodePath + evalEnv *quasigo.EvalEnv + typematchState *typematch.MatcherState + + object *rulesRunner +} + +// NewRunnerState creates a state object that can be used with RunContext. +func NewRunnerState(e *Engine) *RunnerState { + return newRunnerState(e.impl.state) +} + type RunContext struct { Debug string DebugImports bool @@ -115,6 +133,20 @@ type RunContext struct { // Note that this value is ignored for Suggest templates. // Ruleguard doesn't truncate suggested replacement candidates. TruncateLen int + + // State is an object that contains reusable resources needed for the rules to be executed. + // + // If nil, a new state will be allocated. + // + // The State object access is not synchronized. + // State should not be shared between multiple goroutines. + // There are 3 patterns that are safe: + // 1. For single-threaded programs, you can use a single state. + // 2. For controlled concurrency with workers, you can use a per-worker state. + // 3. For uncontrolled concurrency you can use a sync.Pool of states. + // + // Reusing the state properly can increase the performance significantly. + State *RunnerState } type ReportData struct { diff --git a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/runner.go b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/runner.go index 92f6cc34b7..fdc95ab5e7 100644 --- a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/runner.go +++ b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/runner.go @@ -8,7 +8,7 @@ import ( "go/build" "go/printer" "go/token" - "io/ioutil" + "os" "path/filepath" "reflect" "sort" @@ -56,47 +56,77 @@ type rulesRunner struct { // For named submatches we can't use it as the node can be located // deeper into the tree than the current node. // In those cases we need a more complicated algorithm. 
- nodePath nodePath + nodePath *nodePath filterParams filterParams } +func newRunnerState(es *engineState) *RunnerState { + gogrepState := gogrep.NewMatcherState() + gogrepSubState := gogrep.NewMatcherState() + state := &RunnerState{ + gogrepState: gogrepState, + gogrepSubState: gogrepSubState, + nodePath: newNodePath(), + evalEnv: es.env.GetEvalEnv(), + typematchState: typematch.NewMatcherState(), + object: &rulesRunner{}, + } + return state +} + +func (state *RunnerState) Reset() { + state.nodePath.stack = state.nodePath.stack[:0] + state.evalEnv.Stack.Reset() +} + func newRulesRunner(ctx *RunContext, buildContext *build.Context, state *engineState, rules *goRuleSet) *rulesRunner { + runnerState := ctx.State + if runnerState == nil { + runnerState = newRunnerState(state) + } else { + runnerState.Reset() + } + importer := newGoImporter(state, goImporterConfig{ fset: ctx.Fset, debugImports: ctx.DebugImports, debugPrint: ctx.DebugPrint, buildContext: buildContext, }) - gogrepState := gogrep.NewMatcherState() + gogrepState := runnerState.gogrepState gogrepState.Types = ctx.Types - gogrepSubState := gogrep.NewMatcherState() + gogrepSubState := runnerState.gogrepSubState gogrepSubState.Types = ctx.Types - evalEnv := state.env.GetEvalEnv() - rr := &rulesRunner{ + evalEnv := runnerState.evalEnv + + rr := runnerState.object + *rr = rulesRunner{ bgContext: context.Background(), ctx: ctx, importer: importer, rules: rules, gogrepState: gogrepState, gogrepSubState: gogrepSubState, - nodePath: newNodePath(), + nodePath: runnerState.nodePath, truncateLen: ctx.TruncateLen, filterParams: filterParams{ - typematchState: typematch.NewMatcherState(), + typematchState: runnerState.typematchState, env: evalEnv, importer: importer, ctx: ctx, }, } + evalEnv.Stack.Push(&rr.filterParams) if ctx.TruncateLen == 0 { rr.truncateLen = 60 } rr.filterParams.nodeText = rr.nodeText rr.filterParams.nodeString = rr.nodeString - rr.filterParams.nodePath = &rr.nodePath + rr.filterParams.nodePath = rr.nodePath rr.filterParams.gogrepSubState = &rr.gogrepSubState + return rr } @@ -136,7 +166,7 @@ func (rr *rulesRunner) fileBytes() []byte { } // TODO(quasilyte): re-use src slice? - src, err := ioutil.ReadFile(rr.filename) + src, err := os.ReadFile(rr.filename) if err != nil || src == nil { // Assign a zero-length slice so rr.src // is never nil during the second fileBytes call. @@ -160,7 +190,7 @@ func (rr *rulesRunner) run(f *ast.File) error { if rr.rules.universal.categorizedNum != 0 { var inspector astWalker - inspector.nodePath = &rr.nodePath + inspector.nodePath = rr.nodePath inspector.filterParams = &rr.filterParams inspector.Walk(f, func(n ast.Node, tag nodetag.Value) { rr.runRules(n, tag) @@ -183,7 +213,7 @@ func (rr *rulesRunner) runCommentRules(comment *ast.Comment) { file := rr.ctx.Fset.File(comment.Pos()) for _, rule := range rr.rules.universal.commentRules { - var m commentMatchData + var m matchData if rule.captureGroups { result := rule.pat.FindStringSubmatchIndex(comment.Text) if result == nil { @@ -200,13 +230,13 @@ func (rr *rulesRunner) runCommentRules(comment *ast.Comment) { // Consider this pattern: `(?Pfoo)|(bar)`. // If we have `bar` input string, will remain empty. 
if beginPos < 0 || endPos < 0 { - m.capture = append(m.capture, gogrep.CapturedNode{ + m.match.Capture = append(m.match.Capture, gogrep.CapturedNode{ Name: name, Node: &ast.Comment{Slash: comment.Pos()}, }) continue } - m.capture = append(m.capture, gogrep.CapturedNode{ + m.match.Capture = append(m.match.Capture, gogrep.CapturedNode{ Name: name, Node: &ast.Comment{ Slash: file.Pos(beginPos + file.Offset(comment.Pos())), @@ -214,7 +244,7 @@ func (rr *rulesRunner) runCommentRules(comment *ast.Comment) { }, }) } - m.node = &ast.Comment{ + m.match.Node = &ast.Comment{ Slash: file.Pos(result[0] + file.Offset(comment.Pos())), Text: comment.Text[result[0]:result[1]], } @@ -224,7 +254,7 @@ func (rr *rulesRunner) runCommentRules(comment *ast.Comment) { if result == nil { continue } - m.node = &ast.Comment{ + m.match.Node = &ast.Comment{ Slash: file.Pos(result[0] + file.Offset(comment.Pos())), Text: comment.Text[result[0]:result[1]], } @@ -307,7 +337,7 @@ func (rr *rulesRunner) reject(rule goRule, reason string, m matchData) { } } -func (rr *rulesRunner) handleCommentMatch(rule goCommentRule, m commentMatchData) bool { +func (rr *rulesRunner) handleCommentMatch(rule goCommentRule, m matchData) bool { if rule.base.filter.fn != nil { rr.filterParams.match = m filterResult := rule.base.filter.fn(&rr.filterParams) @@ -345,13 +375,13 @@ func (rr *rulesRunner) handleCommentMatch(rule goCommentRule, m commentMatchData func (rr *rulesRunner) handleMatch(rule goRule, m gogrep.MatchData) bool { if rule.filter.fn != nil || rule.do != nil { - rr.filterParams.match = astMatchData{match: m} + rr.filterParams.match = matchData{match: m} } if rule.filter.fn != nil { filterResult := rule.filter.fn(&rr.filterParams) if !filterResult.Matched() { - rr.reject(rule, filterResult.RejectReason(), astMatchData{match: m}) + rr.reject(rule, filterResult.RejectReason(), matchData{match: m}) return false } } @@ -379,9 +409,9 @@ func (rr *rulesRunner) handleMatch(rule goRule, m gogrep.MatchData) bool { suggestText = rr.filterParams.suggestString } } else { - messageText = rr.renderMessage(rule.msg, astMatchData{match: m}, true) + messageText = rr.renderMessage(rule.msg, matchData{match: m}, true) if rule.suggestion != "" { - suggestText = rr.renderMessage(rule.suggestion, astMatchData{match: m}, false) + suggestText = rr.renderMessage(rule.suggestion, matchData{match: m}, false) } } diff --git a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/typematch/typematch.go b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/typematch/typematch.go index b74740378f..4b740b207c 100644 --- a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/typematch/typematch.go +++ b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/typematch/typematch.go @@ -507,9 +507,14 @@ func (p *Pattern) matchIdentical(state *MatcherState, sub *pattern, typ types.Ty } pkgPath := sub.value.([2]string)[0] typeName := sub.value.([2]string)[1] - // obj.Pkg().Path() may be in a vendor directory. 
- path := strings.SplitAfter(obj.Pkg().Path(), "/vendor/") - return path[len(path)-1] == pkgPath && typeName == obj.Name() + if typeName != obj.Name() { + return false + } + objPath := obj.Pkg().Path() + if vendorPos := strings.Index(objPath, "/vendor/"); vendorPos != -1 { + objPath = objPath[vendorPos+len("/vendor/"):] + } + return objPath == pkgPath case opFuncNoSeq: typ, ok := typ.(*types.Signature) diff --git a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/utils.go b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/utils.go index 962e9da2a9..d3226db22b 100644 --- a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/utils.go +++ b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/utils.go @@ -9,6 +9,8 @@ import ( "regexp/syntax" "strconv" "strings" + + "golang.org/x/exp/typeparams" ) var invalidType = types.Typ[types.Invalid] @@ -295,3 +297,8 @@ func identOf(e ast.Expr) *ast.Ident { return nil } } + +func isTypeParam(typ types.Type) bool { + _, ok := typ.(*typeparams.TypeParam) + return ok +} diff --git a/vendor/github.com/quasilyte/gogrep/README.md b/vendor/github.com/quasilyte/gogrep/README.md index b6c2c47c16..ecf0dc4c7e 100644 --- a/vendor/github.com/quasilyte/gogrep/README.md +++ b/vendor/github.com/quasilyte/gogrep/README.md @@ -24,7 +24,7 @@ $ go get github.com/quasilyte/gogrep To get a gogrep command-line tool, install the `cmd/gogrep` Go submodule. ```bash -$ go install github.com/quasilyte/cmd/gogrep +$ go install github.com/quasilyte/gogrep/cmd/gogrep@latest ``` See [docs/gogrep_cli.md](_docs/gogrep_cli.md) to learn how to use it. diff --git a/vendor/github.com/quasilyte/gogrep/compile.go b/vendor/github.com/quasilyte/gogrep/compile.go index a00a39cc83..31b60dfa5b 100644 --- a/vendor/github.com/quasilyte/gogrep/compile.go +++ b/vendor/github.com/quasilyte/gogrep/compile.go @@ -122,16 +122,19 @@ func (c *compiler) compileNode(n ast.Node) { c.compileStmt(n) case *ast.ValueSpec: c.compileValueSpec(n) - case stmtSlice: - c.compileStmtSlice(n) - case declSlice: - c.compileDeclSlice(n) - case ExprSlice: - c.compileExprSlice(n) case *rangeClause: c.compileRangeClause(n) case *rangeHeader: c.compileRangeHeader(n) + case *NodeSlice: + switch n.Kind { + case StmtNodeSlice: + c.compileStmtSlice(n.stmtSlice) + case DeclNodeSlice: + c.compileDeclSlice(n.declSlice) + case ExprNodeSlice: + c.compileExprSlice(n.exprSlice) + } default: panic(c.errorf(n, "compileNode: unexpected %T", n)) } @@ -1191,7 +1194,7 @@ func (c *compiler) compileSendStmt(n *ast.SendStmt) { c.compileExpr(n.Value) } -func (c *compiler) compileDeclSlice(decls declSlice) { +func (c *compiler) compileDeclSlice(decls []ast.Decl) { c.emitInstOp(opMultiDecl) for _, n := range decls { c.compileDecl(n) @@ -1199,7 +1202,7 @@ func (c *compiler) compileDeclSlice(decls declSlice) { c.emitInstOp(opEnd) } -func (c *compiler) compileStmtSlice(stmts stmtSlice) { +func (c *compiler) compileStmtSlice(stmts []ast.Stmt) { c.emitInstOp(opMultiStmt) insideStmtList := c.insideStmtList c.insideStmtList = true @@ -1210,7 +1213,7 @@ func (c *compiler) compileStmtSlice(stmts stmtSlice) { c.emitInstOp(opEnd) } -func (c *compiler) compileExprSlice(exprs ExprSlice) { +func (c *compiler) compileExprSlice(exprs []ast.Expr) { c.emitInstOp(opMultiExpr) for _, n := range exprs { c.compileExpr(n) diff --git a/vendor/github.com/quasilyte/gogrep/gogrep.go b/vendor/github.com/quasilyte/gogrep/gogrep.go index 313a9a2515..47a03f9b43 100644 --- a/vendor/github.com/quasilyte/gogrep/gogrep.go +++ b/vendor/github.com/quasilyte/gogrep/gogrep.go @@ -11,7 
+11,7 @@ import ( ) func IsEmptyNodeSlice(n ast.Node) bool { - if list, ok := n.(NodeSlice); ok { + if list, ok := n.(*NodeSlice); ok { return list.Len() == 0 } return false @@ -62,6 +62,9 @@ type MatcherState struct { // actual matching phase) capture []CapturedNode + nodeSlices []NodeSlice + nodeSlicesUsed int + pc int partial PartialNode @@ -69,7 +72,8 @@ type MatcherState struct { func NewMatcherState() MatcherState { return MatcherState{ - capture: make([]CapturedNode, 0, 8), + capture: make([]CapturedNode, 0, 8), + nodeSlices: make([]NodeSlice, 16), } } @@ -143,34 +147,37 @@ func Compile(config CompileConfig) (*Pattern, PatternInfo, error) { } func Walk(root ast.Node, fn func(n ast.Node) bool) { - switch root := root.(type) { - case ExprSlice: - for _, e := range root { - ast.Inspect(e, fn) - } - case stmtSlice: - for _, e := range root { - ast.Inspect(e, fn) - } - case fieldSlice: - for _, e := range root { - ast.Inspect(e, fn) - } - case identSlice: - for _, e := range root { - ast.Inspect(e, fn) + if root, ok := root.(*NodeSlice); ok { + switch root.Kind { + case ExprNodeSlice: + for _, e := range root.exprSlice { + ast.Inspect(e, fn) + } + case StmtNodeSlice: + for _, e := range root.stmtSlice { + ast.Inspect(e, fn) + } + case FieldNodeSlice: + for _, e := range root.fieldSlice { + ast.Inspect(e, fn) + } + case IdentNodeSlice: + for _, e := range root.identSlice { + ast.Inspect(e, fn) + } + case SpecNodeSlice: + for _, e := range root.specSlice { + ast.Inspect(e, fn) + } + default: + for _, e := range root.declSlice { + ast.Inspect(e, fn) + } } - case specSlice: - for _, e := range root { - ast.Inspect(e, fn) - } - case declSlice: - for _, e := range root { - ast.Inspect(e, fn) - } - default: - ast.Inspect(root, fn) + return } + + ast.Inspect(root, fn) } func newPatternInfo() PatternInfo { diff --git a/vendor/github.com/quasilyte/gogrep/match.go b/vendor/github.com/quasilyte/gogrep/match.go index d4e3243aad..d4b317b96f 100644 --- a/vendor/github.com/quasilyte/gogrep/match.go +++ b/vendor/github.com/quasilyte/gogrep/match.go @@ -45,8 +45,36 @@ func (m *matcher) resetCapture(state *MatcherState) { } } +func (m *matcher) toStmtSlice(state *MatcherState, nodes ...ast.Node) *NodeSlice { + slice := m.allocNodeSlice(state) + var stmts []ast.Stmt + for _, node := range nodes { + switch x := node.(type) { + case nil: + case ast.Stmt: + stmts = append(stmts, x) + case ast.Expr: + stmts = append(stmts, &ast.ExprStmt{X: x}) + default: + panic(fmt.Sprintf("unexpected node type: %T", x)) + } + } + slice.assignStmtSlice(stmts) + return slice +} + +func (m *matcher) allocNodeSlice(state *MatcherState) *NodeSlice { + if state.nodeSlicesUsed < len(state.nodeSlices) { + i := state.nodeSlicesUsed + state.nodeSlicesUsed++ + return &state.nodeSlices[i] + } + return &NodeSlice{} +} + func (m *matcher) MatchNode(state *MatcherState, n ast.Node, accept func(MatchData)) { state.pc = 0 + state.nodeSlicesUsed = 0 inst := m.nextInst(state) switch inst.op { case opMultiStmt: @@ -91,24 +119,32 @@ func (m *matcher) MatchNode(state *MatcherState, n ast.Node, accept func(MatchDa } func (m *matcher) walkDeclSlice(state *MatcherState, decls []ast.Decl, accept func(MatchData)) { - m.walkNodeSlice(state, declSlice(decls), accept) + slice := m.allocNodeSlice(state) + slice.assignDeclSlice(decls) + m.walkNodeSlice(state, slice, accept) } func (m *matcher) walkExprSlice(state *MatcherState, exprs []ast.Expr, accept func(MatchData)) { - m.walkNodeSlice(state, ExprSlice(exprs), accept) + slice := m.allocNodeSlice(state) + 
slice.assignExprSlice(exprs) + m.walkNodeSlice(state, slice, accept) } func (m *matcher) walkStmtSlice(state *MatcherState, stmts []ast.Stmt, accept func(MatchData)) { - m.walkNodeSlice(state, stmtSlice(stmts), accept) + slice := m.allocNodeSlice(state) + slice.assignStmtSlice(stmts) + m.walkNodeSlice(state, slice, accept) } -func (m *matcher) walkNodeSlice(state *MatcherState, nodes NodeSlice, accept func(MatchData)) { +func (m *matcher) walkNodeSlice(state *MatcherState, nodes *NodeSlice, accept func(MatchData)) { sliceLen := nodes.Len() from := 0 + tmpSlice := m.allocNodeSlice(state) for { state.pc = 1 // FIXME: this is a kludge m.resetCapture(state) - matched, offset := m.matchNodeList(state, nodes.slice(from, sliceLen), true) + nodes.SliceInto(tmpSlice, from, sliceLen) + matched, offset := m.matchNodeList(state, tmpSlice, true) if matched == nil { break } @@ -422,11 +458,11 @@ func (m *matcher) matchNodeWithInst(state *MatcherState, inst instruction, n ast case opIfNamedOptStmt: n, ok := n.(*ast.IfStmt) return ok && n.Else == nil && m.matchNode(state, n.Body) && - m.matchNamed(state, m.stringValue(inst), toStmtSlice(n.Cond, n.Init)) + m.matchNamed(state, m.stringValue(inst), m.toStmtSlice(state, n.Cond, n.Init)) case opIfNamedOptElseStmt: n, ok := n.(*ast.IfStmt) return ok && n.Else != nil && m.matchNode(state, n.Body) && m.matchNode(state, n.Else) && - m.matchNamed(state, m.stringValue(inst), toStmtSlice(n.Cond, n.Init)) + m.matchNamed(state, m.stringValue(inst), m.toStmtSlice(state, n.Cond, n.Init)) case opCaseClause: n, ok := n.(*ast.CaseClause) @@ -641,33 +677,43 @@ func (m *matcher) matchArgList(state *MatcherState, exprs []ast.Expr) bool { } func (m *matcher) matchStmtSlice(state *MatcherState, stmts []ast.Stmt) bool { - matched, _ := m.matchNodeList(state, stmtSlice(stmts), false) + slice := m.allocNodeSlice(state) + slice.assignStmtSlice(stmts) + matched, _ := m.matchNodeList(state, slice, false) return matched != nil } func (m *matcher) matchExprSlice(state *MatcherState, exprs []ast.Expr) bool { - matched, _ := m.matchNodeList(state, ExprSlice(exprs), false) + slice := m.allocNodeSlice(state) + slice.assignExprSlice(exprs) + matched, _ := m.matchNodeList(state, slice, false) return matched != nil } func (m *matcher) matchFieldSlice(state *MatcherState, fields []*ast.Field) bool { - matched, _ := m.matchNodeList(state, fieldSlice(fields), false) + slice := m.allocNodeSlice(state) + slice.assignFieldSlice(fields) + matched, _ := m.matchNodeList(state, slice, false) return matched != nil } func (m *matcher) matchIdentSlice(state *MatcherState, idents []*ast.Ident) bool { - matched, _ := m.matchNodeList(state, identSlice(idents), false) + slice := m.allocNodeSlice(state) + slice.assignIdentSlice(idents) + matched, _ := m.matchNodeList(state, slice, false) return matched != nil } func (m *matcher) matchSpecSlice(state *MatcherState, specs []ast.Spec) bool { - matched, _ := m.matchNodeList(state, specSlice(specs), false) + slice := m.allocNodeSlice(state) + slice.assignSpecSlice(specs) + matched, _ := m.matchNodeList(state, slice, false) return matched != nil } // matchNodeList matches two lists of nodes. It uses a common algorithm to match // wildcard patterns with any number of nodes without recursion. 
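The hunks above replace gogrep's old habit of wrapping ast slices in throwaway values (stmtSlice(...), ExprSlice(...)) with *NodeSlice values taken from a small pool kept on MatcherState (nodeSlices plus a nodeSlicesUsed cursor that MatchNode resets to zero), so list matching no longer allocates a wrapper per call. The standalone Go sketch below illustrates only that pooling pattern; the nodeSlice and matcherState types here are simplified stand-ins, not the vendored API.

```go
package main

import "fmt"

// nodeSlice and matcherState are simplified stand-ins for gogrep's
// NodeSlice and MatcherState; only the pooling mechanics are shown.
type nodeSlice struct {
	elems []string
}

type matcherState struct {
	nodeSlices     []nodeSlice // fixed backing storage, reused across matches
	nodeSlicesUsed int         // cursor into nodeSlices, reset per match
}

// alloc hands out the next pooled value and falls back to a fresh
// allocation once the pool is exhausted, like allocNodeSlice above.
func (st *matcherState) alloc() *nodeSlice {
	if st.nodeSlicesUsed < len(st.nodeSlices) {
		i := st.nodeSlicesUsed
		st.nodeSlicesUsed++
		return &st.nodeSlices[i]
	}
	return &nodeSlice{}
}

func main() {
	st := &matcherState{nodeSlices: make([]nodeSlice, 2)}
	st.nodeSlicesUsed = 0 // what MatchNode does at the start of each call
	for i, in := range [][]string{{"a"}, {"b", "c"}, {"d"}} {
		s := st.alloc() // the third call overflows the pool and heap-allocates
		s.elems = in
		fmt.Println(i, len(s.elems))
	}
}
```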
-func (m *matcher) matchNodeList(state *MatcherState, nodes NodeSlice, partial bool) (matched ast.Node, offset int) { +func (m *matcher) matchNodeList(state *MatcherState, nodes *NodeSlice, partial bool) (matched ast.Node, offset int) { sliceLen := nodes.Len() inst := m.nextInst(state) if inst.op == opEnd { @@ -727,7 +773,9 @@ func (m *matcher) matchNodeList(state *MatcherState, nodes NodeSlice, partial bo case "", "_": return true } - return m.matchNamed(state, wildName, nodes.slice(wildStart, j)) + slice := m.allocNodeSlice(state) + nodes.SliceInto(slice, wildStart, j) + return m.matchNamed(state, wildName, slice) } for ; inst.op != opEnd || j < sliceLen; inst = m.nextInst(state) { if inst.op != opEnd { @@ -776,7 +824,9 @@ func (m *matcher) matchNodeList(state *MatcherState, nodes NodeSlice, partial bo if !wouldMatch() { return nil, -1 } - return nodes.slice(partialStart, partialEnd), partialEnd + 1 + slice := m.allocNodeSlice(state) + nodes.SliceInto(slice, partialStart, partialEnd) + return slice, partialEnd + 1 } func (m *matcher) matchRangeClause(state *MatcherState, n ast.Node, accept func(MatchData)) { @@ -919,58 +969,56 @@ func equalNodes(x, y ast.Node) bool { if x == nil || y == nil { return x == y } - switch x := x.(type) { - case stmtSlice: - y, ok := y.(stmtSlice) - if !ok || len(x) != len(y) { + if x, ok := x.(*NodeSlice); ok { + y, ok := y.(*NodeSlice) + if !ok || x.Kind != y.Kind || x.Len() != y.Len() { return false } - for i := range x { - if !astequal.Stmt(x[i], y[i]) { - return false + switch x.Kind { + case ExprNodeSlice: + for i, n1 := range x.exprSlice { + n2 := y.exprSlice[i] + if !astequal.Expr(n1, n2) { + return false + } } - } - return true - case ExprSlice: - y, ok := y.(ExprSlice) - if !ok || len(x) != len(y) { - return false - } - for i := range x { - if !astequal.Expr(x[i], y[i]) { - return false + case StmtNodeSlice: + for i, n1 := range x.stmtSlice { + n2 := y.stmtSlice[i] + if !astequal.Stmt(n1, n2) { + return false + } } - } - return true - case declSlice: - y, ok := y.(declSlice) - if !ok || len(x) != len(y) { - return false - } - for i := range x { - if !astequal.Decl(x[i], y[i]) { - return false + case FieldNodeSlice: + for i, n1 := range x.fieldSlice { + n2 := y.fieldSlice[i] + if !astequal.Node(n1, n2) { + return false + } + } + case IdentNodeSlice: + for i, n1 := range x.identSlice { + n2 := y.identSlice[i] + if n1.Name != n2.Name { + return false + } + } + case SpecNodeSlice: + for i, n1 := range x.specSlice { + n2 := y.specSlice[i] + if !astequal.Node(n1, n2) { + return false + } + } + case DeclNodeSlice: + for i, n1 := range x.declSlice { + n2 := y.declSlice[i] + if !astequal.Decl(n1, n2) { + return false + } } } return true - - default: - return astequal.Node(x, y) - } -} - -func toStmtSlice(nodes ...ast.Node) stmtSlice { - var stmts []ast.Stmt - for _, node := range nodes { - switch x := node.(type) { - case nil: - case ast.Stmt: - stmts = append(stmts, x) - case ast.Expr: - stmts = append(stmts, &ast.ExprStmt{X: x}) - default: - panic(fmt.Sprintf("unexpected node type: %T", x)) - } } - return stmtSlice(stmts) + return astequal.Node(x, y) } diff --git a/vendor/github.com/quasilyte/gogrep/parse.go b/vendor/github.com/quasilyte/gogrep/parse.go index aa5ffbf924..3c6854bda0 100644 --- a/vendor/github.com/quasilyte/gogrep/parse.go +++ b/vendor/github.com/quasilyte/gogrep/parse.go @@ -174,7 +174,9 @@ func parseDetectingNode(fset *token.FileSet, src string) (ast.Node, error) { if len(cl.Elts) == 1 { return cl.Elts[0], nil } - return ExprSlice(cl.Elts), 
nil + slice := &NodeSlice{} + slice.assignExprSlice(cl.Elts) + return slice, nil } // then try as statements @@ -185,7 +187,9 @@ func parseDetectingNode(fset *token.FileSet, src string) (ast.Node, error) { if len(bl.List) == 1 { return bl.List[0], nil } - return stmtSlice(bl.List), nil + slice := &NodeSlice{} + slice.assignStmtSlice(bl.List) + return slice, nil } // Statements is what covers most cases, so it will give // the best overall error message. Show positions @@ -199,7 +203,9 @@ func parseDetectingNode(fset *token.FileSet, src string) (ast.Node, error) { if len(f.Decls) == 1 { return f.Decls[0], nil } - return declSlice(f.Decls), nil + slice := &NodeSlice{} + slice.assignDeclSlice(f.Decls) + return slice, nil } // try as a whole file diff --git a/vendor/github.com/quasilyte/gogrep/slices.go b/vendor/github.com/quasilyte/gogrep/slices.go index 13775a818f..fb969b51bd 100644 --- a/vendor/github.com/quasilyte/gogrep/slices.go +++ b/vendor/github.com/quasilyte/gogrep/slices.go @@ -5,54 +5,146 @@ import ( "go/token" ) -type NodeSlice interface { - At(i int) ast.Node - Len() int - slice(from, to int) NodeSlice - ast.Node -} +type NodeSliceKind uint32 + +const ( + ExprNodeSlice NodeSliceKind = iota + StmtNodeSlice + FieldNodeSlice + IdentNodeSlice + SpecNodeSlice + DeclNodeSlice +) + +type NodeSlice struct { + Kind NodeSliceKind -type ( - ExprSlice []ast.Expr + exprSlice []ast.Expr stmtSlice []ast.Stmt fieldSlice []*ast.Field identSlice []*ast.Ident specSlice []ast.Spec declSlice []ast.Decl -) +} + +func (s *NodeSlice) GetExprSlice() []ast.Expr { return s.exprSlice } +func (s *NodeSlice) GetStmtSlice() []ast.Stmt { return s.stmtSlice } +func (s *NodeSlice) GetFieldSlice() []*ast.Field { return s.fieldSlice } +func (s *NodeSlice) GetIdentSlice() []*ast.Ident { return s.identSlice } +func (s *NodeSlice) GetSpecSlice() []ast.Spec { return s.specSlice } +func (s *NodeSlice) GetDeclSlice() []ast.Decl { return s.declSlice } + +func (s *NodeSlice) assignExprSlice(xs []ast.Expr) { + s.Kind = ExprNodeSlice + s.exprSlice = xs +} + +func (s *NodeSlice) assignStmtSlice(xs []ast.Stmt) { + s.Kind = StmtNodeSlice + s.stmtSlice = xs +} + +func (s *NodeSlice) assignFieldSlice(xs []*ast.Field) { + s.Kind = FieldNodeSlice + s.fieldSlice = xs +} + +func (s *NodeSlice) assignIdentSlice(xs []*ast.Ident) { + s.Kind = IdentNodeSlice + s.identSlice = xs +} + +func (s *NodeSlice) assignSpecSlice(xs []ast.Spec) { + s.Kind = SpecNodeSlice + s.specSlice = xs +} + +func (s *NodeSlice) assignDeclSlice(xs []ast.Decl) { + s.Kind = DeclNodeSlice + s.declSlice = xs +} + +func (s *NodeSlice) Len() int { + switch s.Kind { + case ExprNodeSlice: + return len(s.exprSlice) + case StmtNodeSlice: + return len(s.stmtSlice) + case FieldNodeSlice: + return len(s.fieldSlice) + case IdentNodeSlice: + return len(s.identSlice) + case SpecNodeSlice: + return len(s.specSlice) + default: + return len(s.declSlice) + } +} -func (l ExprSlice) Len() int { return len(l) } -func (l ExprSlice) At(i int) ast.Node { return l[i] } -func (l ExprSlice) slice(i, j int) NodeSlice { return l[i:j] } -func (l ExprSlice) Pos() token.Pos { return l[0].Pos() } -func (l ExprSlice) End() token.Pos { return l[len(l)-1].End() } - -func (l stmtSlice) Len() int { return len(l) } -func (l stmtSlice) At(i int) ast.Node { return l[i] } -func (l stmtSlice) slice(i, j int) NodeSlice { return l[i:j] } -func (l stmtSlice) Pos() token.Pos { return l[0].Pos() } -func (l stmtSlice) End() token.Pos { return l[len(l)-1].End() } - -func (l fieldSlice) Len() int { return len(l) } 
-func (l fieldSlice) At(i int) ast.Node { return l[i] } -func (l fieldSlice) slice(i, j int) NodeSlice { return l[i:j] } -func (l fieldSlice) Pos() token.Pos { return l[0].Pos() } -func (l fieldSlice) End() token.Pos { return l[len(l)-1].End() } - -func (l identSlice) Len() int { return len(l) } -func (l identSlice) At(i int) ast.Node { return l[i] } -func (l identSlice) slice(i, j int) NodeSlice { return l[i:j] } -func (l identSlice) Pos() token.Pos { return l[0].Pos() } -func (l identSlice) End() token.Pos { return l[len(l)-1].End() } - -func (l specSlice) Len() int { return len(l) } -func (l specSlice) At(i int) ast.Node { return l[i] } -func (l specSlice) slice(i, j int) NodeSlice { return l[i:j] } -func (l specSlice) Pos() token.Pos { return l[0].Pos() } -func (l specSlice) End() token.Pos { return l[len(l)-1].End() } - -func (l declSlice) Len() int { return len(l) } -func (l declSlice) At(i int) ast.Node { return l[i] } -func (l declSlice) slice(i, j int) NodeSlice { return l[i:j] } -func (l declSlice) Pos() token.Pos { return l[0].Pos() } -func (l declSlice) End() token.Pos { return l[len(l)-1].End() } +func (s *NodeSlice) At(i int) ast.Node { + switch s.Kind { + case ExprNodeSlice: + return s.exprSlice[i] + case StmtNodeSlice: + return s.stmtSlice[i] + case FieldNodeSlice: + return s.fieldSlice[i] + case IdentNodeSlice: + return s.identSlice[i] + case SpecNodeSlice: + return s.specSlice[i] + default: + return s.declSlice[i] + } +} + +func (s *NodeSlice) SliceInto(dst *NodeSlice, i, j int) { + switch s.Kind { + case ExprNodeSlice: + dst.assignExprSlice(s.exprSlice[i:j]) + case StmtNodeSlice: + dst.assignStmtSlice(s.stmtSlice[i:j]) + case FieldNodeSlice: + dst.assignFieldSlice(s.fieldSlice[i:j]) + case IdentNodeSlice: + dst.assignIdentSlice(s.identSlice[i:j]) + case SpecNodeSlice: + dst.assignSpecSlice(s.specSlice[i:j]) + default: + dst.assignDeclSlice(s.declSlice[i:j]) + } +} + +func (s *NodeSlice) Pos() token.Pos { + switch s.Kind { + case ExprNodeSlice: + return s.exprSlice[0].Pos() + case StmtNodeSlice: + return s.stmtSlice[0].Pos() + case FieldNodeSlice: + return s.fieldSlice[0].Pos() + case IdentNodeSlice: + return s.identSlice[0].Pos() + case SpecNodeSlice: + return s.specSlice[0].Pos() + default: + return s.declSlice[0].Pos() + } +} + +func (s *NodeSlice) End() token.Pos { + switch s.Kind { + case ExprNodeSlice: + return s.exprSlice[len(s.exprSlice)-1].End() + case StmtNodeSlice: + return s.stmtSlice[len(s.stmtSlice)-1].End() + case FieldNodeSlice: + return s.fieldSlice[len(s.fieldSlice)-1].End() + case IdentNodeSlice: + return s.identSlice[len(s.identSlice)-1].End() + case SpecNodeSlice: + return s.specSlice[len(s.specSlice)-1].End() + default: + return s.declSlice[len(s.declSlice)-1].End() + } +} diff --git a/vendor/github.com/quasilyte/regex/syntax/README.md b/vendor/github.com/quasilyte/regex/syntax/README.md index 13064ec39a..b70e25ad96 100644 --- a/vendor/github.com/quasilyte/regex/syntax/README.md +++ b/vendor/github.com/quasilyte/regex/syntax/README.md @@ -4,19 +4,17 @@ Package `syntax` provides regular expressions parser as well as AST definitions. ## Rationale -There are several problems with the stdlib [regexp/syntax](https://golang.org/pkg/regexp/syntax/) package: +The advantages of this package over stdlib [regexp/syntax](https://golang.org/pkg/regexp/syntax/): -1. It does several transformations during the parsing that make it - hard to do any kind of syntax analysis afterward. +1. Does not transformations/optimizations during the parsing. 
+ The produced parse tree is loseless. -2. The AST used there is optimized for the compilation and - execution inside the [regexp](https://golang.org/pkg/regexp) package. - It's somewhat complicated, especially in a way character ranges are encoded. +2. Simpler AST representation. -3. It only supports [re2](https://github.com/google/re2/wiki/Syntax) syntax. - This parser recognizes most PCRE operations. +3. Can parse most PCRE operations in addition to [re2](https://github.com/google/re2/wiki/Syntax) syntax. + It can also handle PHP/Perl style patterns with delimiters. -4. It's easier to extend this package than something from the standard library. +4. This package is easier to extend than something from the standard library. This package does almost no assumptions about how generated AST is going to be used so it preserves as much syntax information as possible. @@ -24,3 +22,8 @@ so it preserves as much syntax information as possible. It's easy to write another intermediate representation on top of it. The main function of this package is to convert a textual regexp pattern into a more structured form that can be processed more easily. + +## Users + +* [go-critic](https://github.com/go-critic/go-critic) - Go static analyzer +* [NoVerify](https://github.com/VKCOM/noverify) - PHP static analyzer diff --git a/vendor/github.com/quasilyte/regex/syntax/ast.go b/vendor/github.com/quasilyte/regex/syntax/ast.go index 44b7b61bb3..4d21a9432b 100644 --- a/vendor/github.com/quasilyte/regex/syntax/ast.go +++ b/vendor/github.com/quasilyte/regex/syntax/ast.go @@ -1,7 +1,6 @@ package syntax import ( - "fmt" "strings" ) @@ -63,85 +62,3 @@ func (e Expr) LastArg() Expr { type Operation byte type Form byte - -func FormatSyntax(re *Regexp) string { - return formatExprSyntax(re, re.Expr) -} - -func formatExprSyntax(re *Regexp, e Expr) string { - switch e.Op { - case OpChar, OpLiteral: - switch e.Value { - case "{": - return "'{'" - case "}": - return "'}'" - default: - return e.Value - } - case OpString, OpEscapeChar, OpEscapeMeta, OpEscapeOctal, OpEscapeUni, OpEscapeHex, OpPosixClass: - return e.Value - case OpRepeat: - return fmt.Sprintf("(repeat %s %s)", formatExprSyntax(re, e.Args[0]), e.Args[1].Value) - case OpCaret: - return "^" - case OpDollar: - return "$" - case OpDot: - return "." - case OpQuote: - return fmt.Sprintf("(q %s)", e.Value) - case OpCharRange: - return fmt.Sprintf("%s-%s", formatExprSyntax(re, e.Args[0]), formatExprSyntax(re, e.Args[1])) - case OpCharClass: - return fmt.Sprintf("[%s]", formatArgsSyntax(re, e.Args)) - case OpNegCharClass: - return fmt.Sprintf("[^%s]", formatArgsSyntax(re, e.Args)) - case OpConcat: - return fmt.Sprintf("{%s}", formatArgsSyntax(re, e.Args)) - case OpAlt: - return fmt.Sprintf("(or %s)", formatArgsSyntax(re, e.Args)) - case OpCapture: - return fmt.Sprintf("(capture %s)", formatExprSyntax(re, e.Args[0])) - case OpNamedCapture: - return fmt.Sprintf("(capture %s %s)", formatExprSyntax(re, e.Args[0]), e.Args[1].Value) - case OpGroup: - return fmt.Sprintf("(group %s)", formatExprSyntax(re, e.Args[0])) - case OpAtomicGroup: - return fmt.Sprintf("(atomic %s)", formatExprSyntax(re, e.Args[0])) - case OpGroupWithFlags: - return fmt.Sprintf("(group %s ?%s)", formatExprSyntax(re, e.Args[0]), e.Args[1].Value) - case OpFlagOnlyGroup: - return fmt.Sprintf("(flags ?%s)", formatExprSyntax(re, e.Args[0])) - case OpPositiveLookahead: - return fmt.Sprintf("(?= %s)", formatExprSyntax(re, e.Args[0])) - case OpNegativeLookahead: - return fmt.Sprintf("(?! 
%s)", formatExprSyntax(re, e.Args[0])) - case OpPositiveLookbehind: - return fmt.Sprintf("(?<= %s)", formatExprSyntax(re, e.Args[0])) - case OpNegativeLookbehind: - return fmt.Sprintf("(?", e.Op) - } -} - -func formatArgsSyntax(re *Regexp, args []Expr) string { - parts := make([]string, len(args)) - for i, e := range args { - parts[i] = formatExprSyntax(re, e) - } - return strings.Join(parts, " ") -} diff --git a/vendor/github.com/quasilyte/regex/syntax/errors.go b/vendor/github.com/quasilyte/regex/syntax/errors.go index cfafc1d0e8..beefba5f9c 100644 --- a/vendor/github.com/quasilyte/regex/syntax/errors.go +++ b/vendor/github.com/quasilyte/regex/syntax/errors.go @@ -1,9 +1,5 @@ package syntax -import ( - "fmt" -) - type ParseError struct { Pos Position Message string @@ -11,17 +7,21 @@ type ParseError struct { func (e ParseError) Error() string { return e.Message } -func throwfPos(pos Position, format string, args ...interface{}) { - panic(ParseError{ - Pos: pos, - Message: fmt.Sprintf(format, args...), - }) +func throw(pos Position, message string) { + panic(ParseError{Pos: pos, Message: message}) +} + +func throwExpectedFound(pos Position, expected, found string) { + throw(pos, "expected '"+expected+"', found '"+found+"'") +} + +func throwUnexpectedToken(pos Position, token string) { + throw(pos, "unexpected token: "+token) } -func throwErrorf(posBegin, posEnd int, format string, args ...interface{}) { - pos := Position{ - Begin: uint16(posBegin), - End: uint16(posEnd), +func newPos(begin, end int) Position { + return Position{ + Begin: uint16(begin), + End: uint16(end), } - throwfPos(pos, format, args...) } diff --git a/vendor/github.com/quasilyte/regex/syntax/lexer.go b/vendor/github.com/quasilyte/regex/syntax/lexer.go index e92b038c20..aae146c2e6 100644 --- a/vendor/github.com/quasilyte/regex/syntax/lexer.go +++ b/vendor/github.com/quasilyte/regex/syntax/lexer.go @@ -2,7 +2,6 @@ package syntax import ( "strings" - "unicode" "unicode/utf8" ) @@ -111,7 +110,7 @@ func (l *lexer) Peek() token { func (l *lexer) scan() { for l.pos < len(l.input) { ch := l.input[l.pos] - if ch > unicode.MaxASCII { + if ch >= utf8.RuneSelf { _, size := utf8.DecodeRuneInString(l.input[l.pos:]) l.pushTok(tokChar, size) l.maybeInsertConcat() @@ -161,7 +160,7 @@ func (l *lexer) scan() { } else if l.tryScanGroupName(l.pos + 2) { } else if l.tryScanGroupFlags(l.pos + 2) { } else { - throwErrorf(l.pos, l.pos+1, "group token is incomplete") + throw(newPos(l.pos, l.pos+1), "group token is incomplete") } } } else { @@ -190,7 +189,7 @@ func (l *lexer) scanCharClass() { for l.pos < len(l.input) { ch := l.input[l.pos] - if ch > unicode.MaxASCII { + if ch >= utf8.RuneSelf { _, size := utf8.DecodeRuneInString(l.input[l.pos:]) l.pushTok(tokChar, size) continue @@ -224,17 +223,17 @@ func (l *lexer) scanCharClass() { func (l *lexer) scanEscape(insideCharClass bool) { s := l.input if l.pos+1 >= len(s) { - throwErrorf(l.pos, l.pos+1, `unexpected end of pattern: trailing '\'`) + throw(newPos(l.pos, l.pos+1), `unexpected end of pattern: trailing '\'`) } switch { case s[l.pos+1] == 'p' || s[l.pos+1] == 'P': if l.pos+2 >= len(s) { - throwErrorf(l.pos, l.pos+2, "unexpected end of pattern: expected uni-class-short or '{'") + throw(newPos(l.pos, l.pos+2), "unexpected end of pattern: expected uni-class-short or '{'") } if s[l.pos+2] == '{' { j := strings.IndexByte(s[l.pos+2:], '}') if j < 0 { - throwErrorf(l.pos, l.pos+2, "can't find closing '}'") + throw(newPos(l.pos, l.pos+2), "can't find closing '}'") } l.pushTok(tokEscapeUniFull, 
len(`\p{`)+j) } else { @@ -242,12 +241,12 @@ func (l *lexer) scanEscape(insideCharClass bool) { } case s[l.pos+1] == 'x': if l.pos+2 >= len(s) { - throwErrorf(l.pos, l.pos+2, "unexpected end of pattern: expected hex-digit or '{'") + throw(newPos(l.pos, l.pos+2), "unexpected end of pattern: expected hex-digit or '{'") } if s[l.pos+2] == '{' { j := strings.IndexByte(s[l.pos+2:], '}') if j < 0 { - throwErrorf(l.pos, l.pos+2, "can't find closing '}'") + throw(newPos(l.pos, l.pos+2), "can't find closing '}'") } l.pushTok(tokEscapeHexFull, len(`\x{`)+j) } else { @@ -277,7 +276,7 @@ func (l *lexer) scanEscape(insideCharClass bool) { default: ch := l.byteAt(l.pos + 1) - if ch > unicode.MaxASCII { + if ch >= utf8.RuneSelf { _, size := utf8.DecodeRuneInString(l.input[l.pos+1:]) l.pushTok(tokEscapeChar, len(`\`)+size) return diff --git a/vendor/github.com/quasilyte/regex/syntax/operation.go b/vendor/github.com/quasilyte/regex/syntax/operation.go index 284e5dc5b4..0fc8fc521a 100644 --- a/vendor/github.com/quasilyte/regex/syntax/operation.go +++ b/vendor/github.com/quasilyte/regex/syntax/operation.go @@ -65,46 +65,51 @@ const ( // OpQuote is a \Q...\E enclosed literal. // Examples: `\Q.?\E` `\Q?q[]=1` - // - // Note that closing \E is not mandatory. + // FormQuoteUnclosed: `\Qabc` + // Args[0] - literal value (OpString) OpQuote // OpEscapeChar is a single char escape. // Examples: `\d` `\a` `\n` + // Args[0] - escaped value (OpString) OpEscapeChar // OpEscapeMeta is an escaped meta char. // Examples: `\(` `\[` `\+` + // Args[0] - escaped value (OpString) OpEscapeMeta // OpEscapeOctal is an octal char code escape (up to 3 digits). // Examples: `\123` `\12` + // Args[0] - escaped value (OpString) OpEscapeOctal // OpEscapeHex is a hex char code escape. // Examples: `\x7F` `\xF7` // FormEscapeHexFull examples: `\x{10FFFF}` `\x{F}`. + // Args[0] - escaped value (OpString) OpEscapeHex // OpEscapeUni is a Unicode char class escape. // Examples: `\pS` `\pL` `\PL` // FormEscapeUniFull examples: `\p{Greek}` `\p{Symbol}` `\p{^L}` + // Args[0] - escaped value (OpString) OpEscapeUni // OpCharClass is a char class enclosed in []. // Examples: `[abc]` `[a-z0-9\]]` - // Args - char class elements (can include OpCharRange and OpPosixClass). + // Args - char class elements (can include OpCharRange and OpPosixClass) OpCharClass // OpNegCharClass is a negated char class enclosed in []. // Examples: `[^abc]` `[^a-z0-9\]]` - // Args - char class elements (can include OpCharRange and OpPosixClass). + // Args - char class elements (can include OpCharRange and OpPosixClass) OpNegCharClass // OpCharRange is an inclusive char range inside a char class. // Examples: `0-9` `A-Z` - // Args[0] - range lower bound (OpChar or OpEscape). - // Args[1] - range upper bound (OpChar or OpEscape). + // Args[0] - range lower bound + // Args[1] - range upper bound OpCharRange // OpPosixClass is a named ASCII char set inside a char class. 
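In the lexer hunks above, ch > unicode.MaxASCII becomes ch >= utf8.RuneSelf. For a byte the two predicates are identical (MaxASCII is 0x7F, RuneSelf is 0x80), but the RuneSelf form is the usual standard-library idiom for "this byte opens a multi-byte UTF-8 sequence" and lets the file drop its unicode import. A small runnable check of the equivalence:

```go
package main

import (
	"fmt"
	"unicode"
	"unicode/utf8"
)

func main() {
	fmt.Println(unicode.MaxASCII, utf8.RuneSelf) // 127 128

	for _, ch := range []byte{'a', 0x7f, 0x80, 0xc3} {
		// For byte values, ch > 0x7f and ch >= 0x80 always agree.
		fmt.Printf("0x%02x old=%v new=%v\n", ch, ch > unicode.MaxASCII, ch >= utf8.RuneSelf)
	}
}
```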
@@ -186,4 +191,5 @@ const ( FormEscapeUniFull FormNamedCaptureAngle FormNamedCaptureQuote + FormQuoteUnclosed ) diff --git a/vendor/github.com/quasilyte/regex/syntax/parser.go b/vendor/github.com/quasilyte/regex/syntax/parser.go index faf0f8b212..f1c154f315 100644 --- a/vendor/github.com/quasilyte/regex/syntax/parser.go +++ b/vendor/github.com/quasilyte/regex/syntax/parser.go @@ -2,7 +2,6 @@ package syntax import ( "errors" - "fmt" "strings" ) @@ -94,13 +93,39 @@ func newParser(opts *ParserOptions) *Parser { } } + p.prefixParselets[tokQ] = func(tok token) *Expr { + litPos := tok.pos + litPos.Begin += uint16(len(`\Q`)) + form := FormQuoteUnclosed + if strings.HasSuffix(p.tokenValue(tok), `\E`) { + litPos.End -= uint16(len(`\E`)) + form = FormDefault + } + lit := p.newExpr(OpString, litPos) + return p.newExprForm(OpQuote, form, tok.pos, lit) + } + p.prefixParselets[tokEscapeHexFull] = func(tok token) *Expr { - return p.newExprForm(OpEscapeHex, FormEscapeHexFull, tok.pos) + litPos := tok.pos + litPos.Begin += uint16(len(`\x{`)) + litPos.End -= uint16(len(`}`)) + lit := p.newExpr(OpString, litPos) + return p.newExprForm(OpEscapeHex, FormEscapeHexFull, tok.pos, lit) } p.prefixParselets[tokEscapeUniFull] = func(tok token) *Expr { - return p.newExprForm(OpEscapeUni, FormEscapeUniFull, tok.pos) + litPos := tok.pos + litPos.Begin += uint16(len(`\p{`)) + litPos.End -= uint16(len(`}`)) + lit := p.newExpr(OpString, litPos) + return p.newExprForm(OpEscapeUni, FormEscapeUniFull, tok.pos, lit) } + p.prefixParselets[tokEscapeHex] = func(tok token) *Expr { return p.parseEscape(OpEscapeHex, `\x`, tok) } + p.prefixParselets[tokEscapeOctal] = func(tok token) *Expr { return p.parseEscape(OpEscapeOctal, `\`, tok) } + p.prefixParselets[tokEscapeChar] = func(tok token) *Expr { return p.parseEscape(OpEscapeChar, `\`, tok) } + p.prefixParselets[tokEscapeMeta] = func(tok token) *Expr { return p.parseEscape(OpEscapeMeta, `\`, tok) } + p.prefixParselets[tokEscapeUni] = func(tok token) *Expr { return p.parseEscape(OpEscapeUni, `\p`, tok) } + p.prefixParselets[tokLparen] = func(tok token) *Expr { return p.parseGroup(OpCapture, tok) } p.prefixParselets[tokLparenAtomic] = func(tok token) *Expr { return p.parseGroup(OpAtomicGroup, tok) } p.prefixParselets[tokLparenPositiveLookahead] = func(tok token) *Expr { return p.parseGroup(OpPositiveLookahead, tok) } @@ -163,6 +188,10 @@ func (p *Parser) setValues(e *Expr) { e.Value = p.exprValue(e) } +func (p *Parser) tokenValue(tok token) string { + return p.out.Pattern[tok.pos.Begin:tok.pos.End] +} + func (p *Parser) exprValue(e *Expr) string { return p.out.Pattern[e.Begin():e.End()] } @@ -239,7 +268,7 @@ func (p *Parser) allocExpr() *Expr { func (p *Parser) expect(kind tokenKind) Position { tok := p.lexer.NextToken() if tok.kind != kind { - throwErrorf(int(tok.pos.Begin), int(tok.pos.End), "expected '%s', found '%s'", kind, tok.kind) + throwExpectedFound(tok.pos, kind.String(), tok.kind.String()) } return tok.pos } @@ -248,7 +277,7 @@ func (p *Parser) parseExpr(precedence int) *Expr { tok := p.lexer.NextToken() prefix := p.prefixParselets[tok.kind] if prefix == nil { - throwfPos(tok.pos, "unexpected token: %v", tok) + throwUnexpectedToken(tok.pos, tok.String()) } left := prefix(tok) @@ -277,7 +306,7 @@ func (p *Parser) parseCharClass(op Operation, tok token) *Expr { break } if next.kind == tokNone { - throwfPos(tok.pos, "unterminated '['") + throw(tok.pos, "unterminated '['") } } @@ -400,6 +429,13 @@ func (p *Parser) parseGroupWithFlags(tok token) *Expr { return result } +func (p 
*Parser) parseEscape(op Operation, prefix string, tok token) *Expr { + litPos := tok.pos + litPos.Begin += uint16(len(prefix)) + lit := p.newExpr(OpString, litPos) + return p.newExpr(op, tok.pos, lit) +} + func (p *Parser) precedenceOf(tok token) int { switch tok.kind { case tokPipe: @@ -436,36 +472,32 @@ func (p *Parser) newPCRE(source string) (*RegexpPCRE, error) { return nil, errors.New("whitespace is not a valid delimiter") } if isAlphanumeric(delim) { - return nil, fmt.Errorf("'%c' is not a valid delimiter", delim) + return nil, errors.New("'" + string(delim) + "' is not a valid delimiter") } } - j := strings.LastIndexByte(source, endDelim) + const delimLen = 1 + j := strings.LastIndexByte(source[delimLen:], endDelim) if j == -1 { - return nil, fmt.Errorf("can't find '%c' ending delimiter", endDelim) + return nil, errors.New("can't find '" + string(endDelim) + "' ending delimiter") } + j += delimLen pcre := &RegexpPCRE{ - Pattern: source[1:j], + Pattern: source[delimLen:j], Source: source, Delim: [2]byte{delim, endDelim}, - Modifiers: source[j+1:], + Modifiers: source[j+delimLen:], } return pcre, nil } var tok2op = [256]Operation{ - tokDollar: OpDollar, - tokCaret: OpCaret, - tokDot: OpDot, - tokChar: OpChar, - tokMinus: OpChar, - tokEscapeChar: OpEscapeChar, - tokEscapeMeta: OpEscapeMeta, - tokEscapeHex: OpEscapeHex, - tokEscapeOctal: OpEscapeOctal, - tokEscapeUni: OpEscapeUni, - tokPosixClass: OpPosixClass, - tokQ: OpQuote, - tokComment: OpComment, + tokDollar: OpDollar, + tokCaret: OpCaret, + tokDot: OpDot, + tokChar: OpChar, + tokMinus: OpChar, + tokPosixClass: OpPosixClass, + tokComment: OpComment, } diff --git a/vendor/github.com/quasilyte/regex/syntax/utils.go b/vendor/github.com/quasilyte/regex/syntax/utils.go index 934680c8ba..e5b6548254 100644 --- a/vendor/github.com/quasilyte/regex/syntax/utils.go +++ b/vendor/github.com/quasilyte/regex/syntax/utils.go @@ -2,7 +2,7 @@ package syntax func isSpace(ch byte) bool { switch ch { - case '\r', '\n', '\t', '\f', '\v': + case '\r', '\n', '\t', '\f', '\v', ' ': return true default: return false diff --git a/vendor/github.com/ryancurrah/gomodguard/.golangci.yml b/vendor/github.com/ryancurrah/gomodguard/.golangci.yml index 0fbf6c04ac..a0e6fd55ea 100644 --- a/vendor/github.com/ryancurrah/gomodguard/.golangci.yml +++ b/vendor/github.com/ryancurrah/gomodguard/.golangci.yml @@ -60,7 +60,6 @@ linters: enable: - asciicheck - bodyclose - - deadcode - dogsled - dupl - durationcheck @@ -100,7 +99,6 @@ linters: - rowserrcheck - sqlclosecheck - staticcheck - - structcheck - stylecheck - testpackage - thelper @@ -109,6 +107,5 @@ linters: - unconvert - unparam - unused - - varcheck - whitespace - wsl diff --git a/vendor/github.com/ryancurrah/gomodguard/.goreleaser.yml b/vendor/github.com/ryancurrah/gomodguard/.goreleaser.yml index 3daecfd798..f3675a9c27 100644 --- a/vendor/github.com/ryancurrah/gomodguard/.goreleaser.yml +++ b/vendor/github.com/ryancurrah/gomodguard/.goreleaser.yml @@ -3,12 +3,12 @@ builds: env: - CGO_ENABLED=0 archives: -- replacements: - darwin: Darwin - linux: Linux - windows: Windows - 386: i386 - amd64: x86_64 +- name_template: >- + {{ .ProjectName }}_ + {{- title .Os }}_ + {{- if eq .Arch "amd64" }}x86_64 + {{- else if eq .Arch "386" }}i386 + {{- else }}{{ .Arch }}{{ end }} checksum: name_template: 'checksums.txt' dockers: @@ -21,7 +21,6 @@ dockers: dockerfile: Dockerfile.goreleaser build_flag_templates: - "--pull" - - "--build-arg=gomodguard_VERSION={{.Version}}" - "--label=org.opencontainers.image.created={{.Date}}" - 
"--label=org.opencontainers.image.name={{.ProjectName}}" - "--label=org.opencontainers.image.revision={{.FullCommit}}" diff --git a/vendor/github.com/ryancurrah/gomodguard/Dockerfile b/vendor/github.com/ryancurrah/gomodguard/Dockerfile index 719a0ebdb6..2f1d3340c1 100644 --- a/vendor/github.com/ryancurrah/gomodguard/Dockerfile +++ b/vendor/github.com/ryancurrah/gomodguard/Dockerfile @@ -1,16 +1,12 @@ -ARG GO_VERSION=1.14.2 -ARG ALPINE_VERSION=3.11 -ARG gomodguard_VERSION= - # ---- Build container -FROM golang:${GO_VERSION}-alpine${ALPINE_VERSION} AS builder +FROM golang:alpine AS builder WORKDIR /gomodguard COPY . . RUN apk add --no-cache git RUN go build -o gomodguard cmd/gomodguard/main.go # ---- App container -FROM golang:${GO_VERSION}-alpine${ALPINE_VERSION} +FROM golang:alpine WORKDIR / RUN apk --no-cache add ca-certificates COPY --from=builder gomodguard/gomodguard / diff --git a/vendor/github.com/ryancurrah/gomodguard/Dockerfile.goreleaser b/vendor/github.com/ryancurrah/gomodguard/Dockerfile.goreleaser index 57a042a67c..ccaaa8959f 100644 --- a/vendor/github.com/ryancurrah/gomodguard/Dockerfile.goreleaser +++ b/vendor/github.com/ryancurrah/gomodguard/Dockerfile.goreleaser @@ -1,9 +1,5 @@ -ARG GO_VERSION=1.14.2 -ARG ALPINE_VERSION=3.11 -ARG gomodguard_VERSION= - # ---- App container -FROM golang:${GO_VERSION}-alpine${ALPINE_VERSION} +FROM golang:alpine WORKDIR / RUN apk --no-cache add ca-certificates COPY gomodguard /gomodguard diff --git a/vendor/github.com/ryancurrah/gomodguard/Makefile b/vendor/github.com/ryancurrah/gomodguard/Makefile index 766675799d..5235d5aade 100644 --- a/vendor/github.com/ryancurrah/gomodguard/Makefile +++ b/vendor/github.com/ryancurrah/gomodguard/Makefile @@ -24,6 +24,10 @@ cover: dockerrun: dockerbuild docker run -v "${current_dir}/.gomodguard.yaml:/.gomodguard.yaml" ryancurrah/gomodguard:latest +.PHONY: snapshot +snapshot: + goreleaser --rm-dist --snapshot + .PHONY: release release: goreleaser --rm-dist @@ -39,4 +43,4 @@ install-tools-mac: .PHONY: install-go-tools install-go-tools: - go get github.com/t-yuki/gocover-cobertura + go install -v github.com/t-yuki/gocover-cobertura diff --git a/vendor/github.com/ryancurrah/gomodguard/README.md b/vendor/github.com/ryancurrah/gomodguard/README.md index 8e2e416888..4945f01012 100644 --- a/vendor/github.com/ryancurrah/gomodguard/README.md +++ b/vendor/github.com/ryancurrah/gomodguard/README.md @@ -115,7 +115,7 @@ Resulting checkstyle file ## Install ``` -go get -u github.com/ryancurrah/gomodguard/cmd/gomodguard +go install github.com/ryancurrah/gomodguard/cmd/gomodguard ``` ## Develop diff --git a/vendor/github.com/ryancurrah/gomodguard/allowed.go b/vendor/github.com/ryancurrah/gomodguard/allowed.go new file mode 100644 index 0000000000..5b0d26f835 --- /dev/null +++ b/vendor/github.com/ryancurrah/gomodguard/allowed.go @@ -0,0 +1,39 @@ +package gomodguard + +import "strings" + +// Allowed is a list of modules and module +// domains that are allowed to be used. +type Allowed struct { + Modules []string `yaml:"modules"` + Domains []string `yaml:"domains"` +} + +// IsAllowedModule returns true if the given module +// name is in the allowed modules list. +func (a *Allowed) IsAllowedModule(moduleName string) bool { + allowedModules := a.Modules + + for i := range allowedModules { + if strings.TrimSpace(moduleName) == strings.TrimSpace(allowedModules[i]) { + return true + } + } + + return false +} + +// IsAllowedModuleDomain returns true if the given modules domain is +// in the allowed module domains list. 
+func (a *Allowed) IsAllowedModuleDomain(moduleName string) bool { + allowedDomains := a.Domains + + for i := range allowedDomains { + if strings.HasPrefix(strings.TrimSpace(strings.ToLower(moduleName)), + strings.TrimSpace(strings.ToLower(allowedDomains[i]))) { + return true + } + } + + return false +} diff --git a/vendor/github.com/ryancurrah/gomodguard/blocked.go b/vendor/github.com/ryancurrah/gomodguard/blocked.go new file mode 100644 index 0000000000..2a6e5c2159 --- /dev/null +++ b/vendor/github.com/ryancurrah/gomodguard/blocked.go @@ -0,0 +1,189 @@ +package gomodguard + +import ( + "fmt" + "strings" + + "github.com/Masterminds/semver" +) + +// Blocked is a list of modules that are +// blocked and not to be used. +type Blocked struct { + Modules BlockedModules `yaml:"modules"` + Versions BlockedVersions `yaml:"versions"` + LocalReplaceDirectives bool `yaml:"local_replace_directives"` +} + +// BlockedVersion has a version constraint a reason why the the module version is blocked. +type BlockedVersion struct { + Version string `yaml:"version"` + Reason string `yaml:"reason"` +} + +// IsLintedModuleVersionBlocked returns true if a version constraint is specified and the +// linted module version matches the constraint. +func (r *BlockedVersion) IsLintedModuleVersionBlocked(lintedModuleVersion string) bool { + if r.Version == "" { + return false + } + + constraint, err := semver.NewConstraint(r.Version) + if err != nil { + return false + } + + version, err := semver.NewVersion(lintedModuleVersion) + if err != nil { + return false + } + + meet := constraint.Check(version) + + return meet +} + +// Message returns the reason why the module version is blocked. +func (r *BlockedVersion) Message(lintedModuleVersion string) string { + var sb strings.Builder + + // Add version contraint to message. + _, _ = fmt.Fprintf(&sb, "version `%s` is blocked because it does not meet the version constraint `%s`.", + lintedModuleVersion, r.Version) + + if r.Reason == "" { + return sb.String() + } + + // Add reason to message. + _, _ = fmt.Fprintf(&sb, " %s.", strings.TrimRight(r.Reason, ".")) + + return sb.String() +} + +// BlockedModule has alternative modules to use and a reason why the module is blocked. +type BlockedModule struct { + Recommendations []string `yaml:"recommendations"` + Reason string `yaml:"reason"` +} + +// IsCurrentModuleARecommendation returns true if the current module is in the Recommendations list. +// +// If the current go.mod file being linted is a recommended module of a +// blocked module and it imports that blocked module, do not set as blocked. +// This could mean that the linted module is a wrapper for that blocked module. +func (r *BlockedModule) IsCurrentModuleARecommendation(currentModuleName string) bool { + if r == nil { + return false + } + + for n := range r.Recommendations { + if strings.TrimSpace(currentModuleName) == strings.TrimSpace(r.Recommendations[n]) { + return true + } + } + + return false +} + +// Message returns the reason why the module is blocked and a list of recommended modules if provided. 
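IsLintedModuleVersionBlocked above leans on github.com/Masterminds/semver, which this file already imports: the configured version string becomes a constraint, the linted module version becomes a version, and Check decides whether the module is blocked, with any parse failure treated as "not blocked". A minimal sketch of that check follows; the module rule and version strings are made up for illustration.

```go
package main

import (
	"fmt"

	"github.com/Masterminds/semver"
)

func main() {
	// Hypothetical rule: block a module at versions below 1.4.0.
	constraint, err := semver.NewConstraint("< 1.4.0")
	if err != nil {
		panic(err)
	}

	for _, v := range []string{"1.3.2", "1.5.0"} {
		version, err := semver.NewVersion(v)
		if err != nil {
			continue // unparsable versions are treated as not blocked
		}
		fmt.Println(v, "blocked:", constraint.Check(version))
	}
}
```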
+func (r *BlockedModule) Message() string { + var sb strings.Builder + + // Add recommendations to message + for i := range r.Recommendations { + switch { + case len(r.Recommendations) == 1: + _, _ = fmt.Fprintf(&sb, "`%s` is a recommended module.", r.Recommendations[i]) + case (i+1) != len(r.Recommendations) && (i+1) == (len(r.Recommendations)-1): + _, _ = fmt.Fprintf(&sb, "`%s` ", r.Recommendations[i]) + case (i + 1) != len(r.Recommendations): + _, _ = fmt.Fprintf(&sb, "`%s`, ", r.Recommendations[i]) + default: + _, _ = fmt.Fprintf(&sb, "and `%s` are recommended modules.", r.Recommendations[i]) + } + } + + if r.Reason == "" { + return sb.String() + } + + // Add reason to message + if sb.Len() == 0 { + _, _ = fmt.Fprintf(&sb, "%s.", strings.TrimRight(r.Reason, ".")) + } else { + _, _ = fmt.Fprintf(&sb, " %s.", strings.TrimRight(r.Reason, ".")) + } + + return sb.String() +} + +// HasRecommendations returns true if the blocked package has +// recommended modules. +func (r *BlockedModule) HasRecommendations() bool { + if r == nil { + return false + } + + return len(r.Recommendations) > 0 +} + +// BlockedVersions a list of blocked modules by a version constraint. +type BlockedVersions []map[string]BlockedVersion + +// Get returns the module names that are blocked. +func (b BlockedVersions) Get() []string { + modules := make([]string, len(b)) + + for n := range b { + for module := range b[n] { + modules[n] = module + break + } + } + + return modules +} + +// GetBlockReason returns a block version if one is set for the provided linted module name. +func (b BlockedVersions) GetBlockReason(lintedModuleName string) *BlockedVersion { + for _, blockedModule := range b { + for blockedModuleName, blockedVersion := range blockedModule { + if strings.TrimSpace(lintedModuleName) == strings.TrimSpace(blockedModuleName) { + return &blockedVersion + } + } + } + + return nil +} + +// BlockedModules a list of blocked modules. +type BlockedModules []map[string]BlockedModule + +// Get returns the module names that are blocked. +func (b BlockedModules) Get() []string { + modules := make([]string, len(b)) + + for n := range b { + for module := range b[n] { + modules[n] = module + break + } + } + + return modules +} + +// GetBlockReason returns a block module if one is set for the provided linted module name. +func (b BlockedModules) GetBlockReason(lintedModuleName string) *BlockedModule { + for _, blockedModule := range b { + for blockedModuleName, blockedModule := range blockedModule { + if strings.TrimSpace(lintedModuleName) == strings.TrimSpace(blockedModuleName) { + return &blockedModule + } + } + } + + return nil +} diff --git a/vendor/github.com/ryancurrah/gomodguard/cmd.go b/vendor/github.com/ryancurrah/gomodguard/cmd.go deleted file mode 100644 index a26fac8900..0000000000 --- a/vendor/github.com/ryancurrah/gomodguard/cmd.go +++ /dev/null @@ -1,247 +0,0 @@ -package gomodguard - -import ( - "flag" - "fmt" - "io/ioutil" - "log" - "os" - "path/filepath" - "strings" - - "github.com/go-xmlfmt/xmlfmt" - "github.com/mitchellh/go-homedir" - "github.com/phayes/checkstyle" - "gopkg.in/yaml.v2" -) - -const ( - errFindingHomedir = "unable to find home directory, %w" - errReadingConfigFile = "could not read config file: %w" - errParsingConfigFile = "could not parse config file: %w" -) - -var ( - configFile = ".gomodguard.yaml" - logger = log.New(os.Stderr, "", 0) - errFindingConfigFile = fmt.Errorf("could not find config file") -) - -// Run the gomodguard linter. Returns the exit code to use. 
-//nolint:funlen -func Run() int { - var ( - args []string - help bool - noTest bool - report string - reportFile string - issuesExitCode int - cwd, _ = os.Getwd() - ) - - flag.BoolVar(&help, "h", false, "Show this help text") - flag.BoolVar(&help, "help", false, "") - flag.BoolVar(&noTest, "n", false, "Don't lint test files") - flag.BoolVar(&noTest, "no-test", false, "") - flag.StringVar(&report, "r", "", "Report results to one of the following formats: checkstyle. "+ - "A report file destination must also be specified") - flag.StringVar(&report, "report", "", "") - flag.StringVar(&reportFile, "f", "", "Report results to the specified file. A report type must also be specified") - flag.StringVar(&reportFile, "file", "", "") - flag.IntVar(&issuesExitCode, "i", 2, "Exit code when issues were found") - flag.IntVar(&issuesExitCode, "issues-exit-code", 2, "") - flag.Parse() - - report = strings.TrimSpace(strings.ToLower(report)) - - if help { - showHelp() - return 0 - } - - if report != "" && report != "checkstyle" { - logger.Fatalf("error: invalid report type '%s'", report) - } - - if report != "" && reportFile == "" { - logger.Fatalf("error: a report file must be specified when a report is enabled") - } - - if report == "" && reportFile != "" { - logger.Fatalf("error: a report type must be specified when a report file is enabled") - } - - args = flag.Args() - if len(args) == 0 { - args = []string{"./..."} - } - - config, err := GetConfig(configFile) - if err != nil { - logger.Fatalf("error: %s", err) - } - - filteredFiles := GetFilteredFiles(cwd, noTest, args) - - processor, err := NewProcessor(config) - if err != nil { - logger.Fatalf("error: %s", err) - } - - logger.Printf("info: allowed modules, %+v", config.Allowed.Modules) - logger.Printf("info: allowed module domains, %+v", config.Allowed.Domains) - logger.Printf("info: blocked modules, %+v", config.Blocked.Modules.Get()) - logger.Printf("info: blocked modules with version constraints, %+v", config.Blocked.Versions.Get()) - - results := processor.ProcessFiles(filteredFiles) - - if report == "checkstyle" { - err := WriteCheckstyle(reportFile, results) - if err != nil { - logger.Fatalf("error: %s", err) - } - } - - for _, r := range results { - fmt.Println(r.String()) - } - - if len(results) > 0 { - return issuesExitCode - } - - return 0 -} - -// GetConfig from YAML file. -func GetConfig(configFile string) (*Configuration, error) { - config := Configuration{} - - home, err := homedir.Dir() - if err != nil { - return nil, fmt.Errorf(errFindingHomedir, err) - } - - cfgFile := "" - homeDirCfgFile := filepath.Join(home, configFile) - - switch { - case fileExists(configFile): - cfgFile = configFile - case fileExists(homeDirCfgFile): - cfgFile = homeDirCfgFile - default: - return nil, fmt.Errorf("%w: %s %s", errFindingConfigFile, configFile, homeDirCfgFile) - } - - data, err := ioutil.ReadFile(cfgFile) - if err != nil { - return nil, fmt.Errorf(errReadingConfigFile, err) - } - - err = yaml.Unmarshal(data, &config) - if err != nil { - return nil, fmt.Errorf(errParsingConfigFile, err) - } - - return &config, nil -} - -// GetFilteredFiles returns files based on search string arguments and filters. -func GetFilteredFiles(cwd string, skipTests bool, args []string) []string { - var ( - foundFiles = []string{} - filteredFiles = []string{} - ) - - for _, f := range args { - if strings.HasSuffix(f, "/...") { - dir, _ := filepath.Split(f) - - foundFiles = append(foundFiles, expandGoWildcard(dir)...) 
- - continue - } - - if _, err := os.Stat(f); err == nil { - foundFiles = append(foundFiles, f) - } - } - - // Use relative path to print shorter names, sort out test foundFiles if chosen. - for _, f := range foundFiles { - if skipTests { - if strings.HasSuffix(f, "_test.go") { - continue - } - } - - if relativePath, err := filepath.Rel(cwd, f); err == nil { - filteredFiles = append(filteredFiles, relativePath) - - continue - } - - filteredFiles = append(filteredFiles, f) - } - - return filteredFiles -} - -// showHelp text for command line. -func showHelp() { - helpText := `Usage: gomodguard [files...] -Also supports package syntax but will use it in relative path, i.e. ./pkg/... -Flags:` - fmt.Println(helpText) - flag.PrintDefaults() -} - -// WriteCheckstyle takes the results and writes them to a checkstyle formated file. -func WriteCheckstyle(checkstyleFilePath string, results []Issue) error { - check := checkstyle.New() - - for i := range results { - file := check.EnsureFile(results[i].FileName) - file.AddError(checkstyle.NewError(results[i].LineNumber, 1, checkstyle.SeverityError, results[i].Reason, - "gomodguard")) - } - - checkstyleXML := fmt.Sprintf("\n%s", check.String()) - - err := ioutil.WriteFile(checkstyleFilePath, []byte(xmlfmt.FormatXML(checkstyleXML, "", " ")), 0644) // nolint:gosec - if err != nil { - return err - } - - return nil -} - -// fileExists returns true if the file path provided exists. -func fileExists(filename string) bool { - info, err := os.Stat(filename) - if os.IsNotExist(err) { - return false - } - - return !info.IsDir() -} - -// expandGoWildcard path provided. -func expandGoWildcard(root string) []string { - foundFiles := []string{} - - _ = filepath.Walk(root, func(path string, info os.FileInfo, err error) error { - // Only append go foundFiles. - if !strings.HasSuffix(info.Name(), ".go") { - return nil - } - - foundFiles = append(foundFiles, path) - - return nil - }) - - return foundFiles -} diff --git a/vendor/github.com/ryancurrah/gomodguard/issue.go b/vendor/github.com/ryancurrah/gomodguard/issue.go new file mode 100644 index 0000000000..d60fc3a868 --- /dev/null +++ b/vendor/github.com/ryancurrah/gomodguard/issue.go @@ -0,0 +1,20 @@ +package gomodguard + +import ( + "fmt" + "go/token" +) + +// Issue represents the result of one error. +type Issue struct { + FileName string + LineNumber int + Position token.Position + Reason string +} + +// String returns the filename, line +// number and reason of a Issue. +func (r *Issue) String() string { + return fmt.Sprintf("%s:%d:1 %s", r.FileName, r.LineNumber, r.Reason) +} diff --git a/vendor/github.com/ryancurrah/gomodguard/gomodguard.go b/vendor/github.com/ryancurrah/gomodguard/processor.go similarity index 51% rename from vendor/github.com/ryancurrah/gomodguard/gomodguard.go rename to vendor/github.com/ryancurrah/gomodguard/processor.go index efd0d17ef1..8457e3b070 100644 --- a/vendor/github.com/ryancurrah/gomodguard/gomodguard.go +++ b/vendor/github.com/ryancurrah/gomodguard/processor.go @@ -7,14 +7,11 @@ import ( "fmt" "go/parser" "go/token" - "io/ioutil" "os" "os/exec" "regexp" "strings" - "github.com/Masterminds/semver" - "golang.org/x/mod/modfile" ) @@ -33,248 +30,17 @@ var ( "local replace directive." // startsWithVersion is used to test when a string begins with the version identifier of a module, - // after having stripped the prefix base module name. IE "github.com/foo/bar/v2/baz" => "/v2/baz" + // after having stripped the prefix base module name. 
IE "github.com/foo/bar/v2/baz" => "v2/baz" // probably indicates that the module is actually github.com/foo/bar/v2, not github.com/foo/bar. - startsWithVersion = regexp.MustCompile(`^\/v[0-9]+`) + startsWithVersion = regexp.MustCompile(`^v[0-9]+`) ) -// BlockedVersion has a version constraint a reason why the the module version is blocked. -type BlockedVersion struct { - Version string `yaml:"version"` - Reason string `yaml:"reason"` -} - -// IsLintedModuleVersionBlocked returns true if a version constraint is specified and the -// linted module version matches the constraint. -func (r *BlockedVersion) IsLintedModuleVersionBlocked(lintedModuleVersion string) bool { - if r.Version == "" { - return false - } - - constraint, err := semver.NewConstraint(r.Version) - if err != nil { - return false - } - - version, err := semver.NewVersion(lintedModuleVersion) - if err != nil { - return false - } - - meet := constraint.Check(version) - - return meet -} - -// Message returns the reason why the module version is blocked. -func (r *BlockedVersion) Message(lintedModuleVersion string) string { - var sb strings.Builder - - // Add version contraint to message. - _, _ = fmt.Fprintf(&sb, "version `%s` is blocked because it does not meet the version constraint `%s`.", - lintedModuleVersion, r.Version) - - if r.Reason == "" { - return sb.String() - } - - // Add reason to message. - _, _ = fmt.Fprintf(&sb, " %s.", strings.TrimRight(r.Reason, ".")) - - return sb.String() -} - -// BlockedModule has alternative modules to use and a reason why the module is blocked. -type BlockedModule struct { - Recommendations []string `yaml:"recommendations"` - Reason string `yaml:"reason"` -} - -// IsCurrentModuleARecommendation returns true if the current module is in the Recommendations list. -// -// If the current go.mod file being linted is a recommended module of a -// blocked module and it imports that blocked module, do not set as blocked. -// This could mean that the linted module is a wrapper for that blocked module. -func (r *BlockedModule) IsCurrentModuleARecommendation(currentModuleName string) bool { - if r == nil { - return false - } - - for n := range r.Recommendations { - if strings.TrimSpace(currentModuleName) == strings.TrimSpace(r.Recommendations[n]) { - return true - } - } - - return false -} - -// Message returns the reason why the module is blocked and a list of recommended modules if provided. -func (r *BlockedModule) Message() string { - var sb strings.Builder - - // Add recommendations to message - for i := range r.Recommendations { - switch { - case len(r.Recommendations) == 1: - _, _ = fmt.Fprintf(&sb, "`%s` is a recommended module.", r.Recommendations[i]) - case (i+1) != len(r.Recommendations) && (i+1) == (len(r.Recommendations)-1): - _, _ = fmt.Fprintf(&sb, "`%s` ", r.Recommendations[i]) - case (i + 1) != len(r.Recommendations): - _, _ = fmt.Fprintf(&sb, "`%s`, ", r.Recommendations[i]) - default: - _, _ = fmt.Fprintf(&sb, "and `%s` are recommended modules.", r.Recommendations[i]) - } - } - - if r.Reason == "" { - return sb.String() - } - - // Add reason to message - if sb.Len() == 0 { - _, _ = fmt.Fprintf(&sb, "%s.", strings.TrimRight(r.Reason, ".")) - } else { - _, _ = fmt.Fprintf(&sb, " %s.", strings.TrimRight(r.Reason, ".")) - } - - return sb.String() -} - -// HasRecommendations returns true if the blocked package has -// recommended modules. 
-func (r *BlockedModule) HasRecommendations() bool { - if r == nil { - return false - } - - return len(r.Recommendations) > 0 -} - -// BlockedVersions a list of blocked modules by a version constraint. -type BlockedVersions []map[string]BlockedVersion - -// Get returns the module names that are blocked. -func (b BlockedVersions) Get() []string { - modules := make([]string, len(b)) - - for n := range b { - for module := range b[n] { - modules[n] = module - break - } - } - - return modules -} - -// GetBlockReason returns a block version if one is set for the provided linted module name. -func (b BlockedVersions) GetBlockReason(lintedModuleName string) *BlockedVersion { - for _, blockedModule := range b { - for blockedModuleName, blockedVersion := range blockedModule { - if strings.TrimSpace(lintedModuleName) == strings.TrimSpace(blockedModuleName) { - return &blockedVersion - } - } - } - - return nil -} - -// BlockedModules a list of blocked modules. -type BlockedModules []map[string]BlockedModule - -// Get returns the module names that are blocked. -func (b BlockedModules) Get() []string { - modules := make([]string, len(b)) - - for n := range b { - for module := range b[n] { - modules[n] = module - break - } - } - - return modules -} - -// GetBlockReason returns a block module if one is set for the provided linted module name. -func (b BlockedModules) GetBlockReason(lintedModuleName string) *BlockedModule { - for _, blockedModule := range b { - for blockedModuleName, blockedModule := range blockedModule { - if strings.TrimSpace(lintedModuleName) == strings.TrimSpace(blockedModuleName) { - return &blockedModule - } - } - } - - return nil -} - -// Allowed is a list of modules and module -// domains that are allowed to be used. -type Allowed struct { - Modules []string `yaml:"modules"` - Domains []string `yaml:"domains"` -} - -// IsAllowedModule returns true if the given module -// name is in the allowed modules list. -func (a *Allowed) IsAllowedModule(moduleName string) bool { - allowedModules := a.Modules - - for i := range allowedModules { - if strings.TrimSpace(moduleName) == strings.TrimSpace(allowedModules[i]) { - return true - } - } - - return false -} - -// IsAllowedModuleDomain returns true if the given modules domain is -// in the allowed module domains list. -func (a *Allowed) IsAllowedModuleDomain(moduleName string) bool { - allowedDomains := a.Domains - - for i := range allowedDomains { - if strings.HasPrefix(strings.TrimSpace(strings.ToLower(moduleName)), - strings.TrimSpace(strings.ToLower(allowedDomains[i]))) { - return true - } - } - - return false -} - -// Blocked is a list of modules that are -// blocked and not to be used. -type Blocked struct { - Modules BlockedModules `yaml:"modules"` - Versions BlockedVersions `yaml:"versions"` - LocalReplaceDirectives bool `yaml:"local_replace_directives"` -} - // Configuration of gomodguard allow and block lists. type Configuration struct { Allowed Allowed `yaml:"allowed"` Blocked Blocked `yaml:"blocked"` } -// Issue represents the result of one error. -type Issue struct { - FileName string - LineNumber int - Position token.Position - Reason string -} - -// String returns the filename, line -// number and reason of a Issue. -func (r *Issue) String() string { - return fmt.Sprintf("%s:%d:1 %s", r.FileName, r.LineNumber, r.Reason) -} - // Processor processes Go files. type Processor struct { Config *Configuration @@ -308,7 +74,7 @@ func NewProcessor(config *Configuration) (*Processor, error) { // and lints them. 
func (p *Processor) ProcessFiles(filenames []string) (issues []Issue) { for _, filename := range filenames { - data, err := ioutil.ReadFile(filename) + data, err := os.ReadFile(filename) if err != nil { issues = append(issues, Issue{ FileName: filename, @@ -443,14 +209,7 @@ func (p *Processor) SetBlockedModules() { //nolint:gocognit,funlen // isBlockedPackageFromModFile returns the block reason if the package is blocked. func (p *Processor) isBlockedPackageFromModFile(packageName string) []string { for blockedModuleName, blockReasons := range p.blockedModulesFromModFile { - if strings.HasPrefix(strings.TrimSpace(packageName), strings.TrimSpace(blockedModuleName)) { - // Test if a versioned module matched its base version - // ie github.com/foo/bar/v2 matched github.com/foo/bar, even though the former may be allowed. - suffix := strings.TrimPrefix(strings.TrimSpace(packageName), strings.TrimSpace(blockedModuleName)) - if startsWithVersion.MatchString(suffix) { - continue - } - + if isPackageInModule(packageName, blockedModuleName) { formattedReasons := make([]string, 0, len(blockReasons)) for _, blockReason := range blockReasons { @@ -470,7 +229,7 @@ func loadGoModFile() ([]byte, error) { _ = cmd.Start() if stdout == nil { - return ioutil.ReadFile(goModFilename) + return os.ReadFile(goModFilename) } buf := new(bytes.Buffer) @@ -480,20 +239,53 @@ func loadGoModFile() ([]byte, error) { err := json.Unmarshal(buf.Bytes(), &goEnv) if err != nil { - return ioutil.ReadFile(goModFilename) + return os.ReadFile(goModFilename) } if _, ok := goEnv["GOMOD"]; !ok { - return ioutil.ReadFile(goModFilename) + return os.ReadFile(goModFilename) } if _, err = os.Stat(goEnv["GOMOD"]); os.IsNotExist(err) { - return ioutil.ReadFile(goModFilename) + return os.ReadFile(goModFilename) } if goEnv["GOMOD"] == "/dev/null" { return nil, errors.New("current working directory must have a go.mod file") } - return ioutil.ReadFile(goEnv["GOMOD"]) + return os.ReadFile(goEnv["GOMOD"]) +} + +// isPackageInModule determines if a package is apart of the specified go module. 
+func isPackageInModule(pkg, mod string) bool { + // Split pkg and mod paths into parts + pkgPart := strings.Split(pkg, "/") + modPart := strings.Split(mod, "/") + + pkgPartMatches := 0 + + // Count number of times pkg path matches the mod path + for i, m := range modPart { + if len(pkgPart) > i && pkgPart[i] == m { + pkgPartMatches++ + } + } + + // If pkgPartMatches are not the same length as modPart + // than the package is not in this module + if pkgPartMatches != len(modPart) { + return false + } + + if len(pkgPart) > len(modPart) { + // If pkgPart path starts with a major version + // than the package is not in this module as + // major versions are completely different modules + if startsWithVersion.MatchString(pkgPart[len(modPart)]) { + return false + } + } + + return true } diff --git a/vendor/github.com/ryancurrah/gomodguard/tools.go b/vendor/github.com/ryancurrah/gomodguard/tools.go new file mode 100644 index 0000000000..d56bcc7470 --- /dev/null +++ b/vendor/github.com/ryancurrah/gomodguard/tools.go @@ -0,0 +1,5 @@ +//go:build tools + +package gomodguard + +import _ "github.com/t-yuki/gocover-cobertura" diff --git a/vendor/github.com/ryanrolds/sqlclosecheck/pkg/analyzer/analyzer.go b/vendor/github.com/ryanrolds/sqlclosecheck/pkg/analyzer/analyzer.go index bc42dfb3a0..55e931a898 100644 --- a/vendor/github.com/ryanrolds/sqlclosecheck/pkg/analyzer/analyzer.go +++ b/vendor/github.com/ryanrolds/sqlclosecheck/pkg/analyzer/analyzer.go @@ -9,22 +9,38 @@ import ( ) const ( - rowsName = "Rows" - stmtName = "Stmt" - closeMethod = "Close" + rowsName = "Rows" + stmtName = "Stmt" + namedStmtName = "NamedStmt" + closeMethod = "Close" +) + +type action uint8 + +const ( + actionUnhandled action = iota + actionHandled + actionReturned + actionPassed + actionClosed + actionUnvaluedCall + actionUnvaluedDefer + actionNoOp ) var ( sqlPackages = []string{ "database/sql", "github.com/jmoiron/sqlx", + "github.com/jackc/pgx/v5", + "github.com/jackc/pgx/v5/pgxpool", } ) func NewAnalyzer() *analysis.Analyzer { return &analysis.Analyzer{ Name: "sqlclosecheck", - Doc: "Checks that sql.Rows and sql.Stmt are closed.", + Doc: "Checks that sql.Rows, sql.Stmt, sqlx.NamedStmt, pgx.Query are closed.", Run: run, Requires: []*analysis.Analyzer{ buildssa.Analyzer, @@ -33,7 +49,10 @@ func NewAnalyzer() *analysis.Analyzer { } func run(pass *analysis.Pass) (interface{}, error) { - pssa := pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA) + pssa, ok := pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA) + if !ok { + return nil, nil + } // Build list of types we are looking for targetTypes := getTargetTypes(pssa, sqlPackages) @@ -47,20 +66,18 @@ func run(pass *analysis.Pass) (interface{}, error) { for _, f := range funcs { for _, b := range f.Blocks { for i := range b.Instrs { - // Check if instruction is call that returns a target type + // Check if instruction is call that returns a target pointer type targetValues := getTargetTypesValues(b, i, targetTypes) if len(targetValues) == 0 { continue } - // log.Printf("%s", f.Name()) - // For each found target check if they are closed and deferred for _, targetValue := range targetValues { refs := (*targetValue.value).Referrers() isClosed := checkClosed(refs, targetTypes) if !isClosed { - pass.Reportf((targetValue.instr).Pos(), "Rows/Stmt was not closed") + pass.Reportf((targetValue.instr).Pos(), "Rows/Stmt/NamedStmt was not closed") } checkDeferred(pass, refs, targetTypes, false) @@ -72,17 +89,22 @@ func run(pass *analysis.Pass) (interface{}, error) { return nil, nil } -func 
getTargetTypes(pssa *buildssa.SSA, targetPackages []string) []*types.Pointer { - targets := []*types.Pointer{} +func getTargetTypes(pssa *buildssa.SSA, targetPackages []string) []any { + targets := []any{} for _, sqlPkg := range targetPackages { pkg := pssa.Pkg.Prog.ImportedPackage(sqlPkg) if pkg == nil { // the SQL package being checked isn't imported - return targets + continue } - rowsType := getTypePointerFromName(pkg, rowsName) + rowsPtrType := getTypePointerFromName(pkg, rowsName) + if rowsPtrType != nil { + targets = append(targets, rowsPtrType) + } + + rowsType := getTypeFromName(pkg, rowsName) if rowsType != nil { targets = append(targets, rowsType) } @@ -91,6 +113,11 @@ func getTargetTypes(pssa *buildssa.SSA, targetPackages []string) []*types.Pointe if stmtType != nil { targets = append(targets, stmtType) } + + namedStmtType := getTypePointerFromName(pkg, namedStmtName) + if namedStmtType != nil { + targets = append(targets, namedStmtType) + } } return targets @@ -99,7 +126,7 @@ func getTargetTypes(pssa *buildssa.SSA, targetPackages []string) []*types.Pointe func getTypePointerFromName(pkg *ssa.Package, name string) *types.Pointer { pkgType := pkg.Type(name) if pkgType == nil { - // this package does not use Rows/Stmt + // this package does not use Rows/Stmt/NamedStmt return nil } @@ -112,12 +139,28 @@ func getTypePointerFromName(pkg *ssa.Package, name string) *types.Pointer { return types.NewPointer(named) } +func getTypeFromName(pkg *ssa.Package, name string) *types.Named { + pkgType := pkg.Type(name) + if pkgType == nil { + // this package does not use Rows/Stmt + return nil + } + + obj := pkgType.Object() + named, ok := obj.Type().(*types.Named) + if !ok { + return nil + } + + return named +} + type targetValue struct { value *ssa.Value instr ssa.Instruction } -func getTargetTypesValues(b *ssa.BasicBlock, i int, targetTypes []*types.Pointer) []targetValue { +func getTargetTypesValues(b *ssa.BasicBlock, i int, targetTypes []any) []targetValue { targetValues := []targetValue{} instr := b.Instrs[i] @@ -133,21 +176,32 @@ func getTargetTypesValues(b *ssa.BasicBlock, i int, targetTypes []*types.Pointer varType := v.Type() for _, targetType := range targetTypes { - if !types.Identical(varType, targetType) { + var tt types.Type + + switch t := targetType.(type) { + case *types.Pointer: + tt = t + case *types.Named: + tt = t + default: + continue + } + + if !types.Identical(varType, tt) { continue } for _, cRef := range *call.Referrers() { switch instr := cRef.(type) { case *ssa.Call: - if len(instr.Call.Args) >= 1 && types.Identical(instr.Call.Args[0].Type(), targetType) { + if len(instr.Call.Args) >= 1 && types.Identical(instr.Call.Args[0].Type(), tt) { targetValues = append(targetValues, targetValue{ value: &instr.Call.Args[0], instr: call, }) } case ssa.Value: - if types.Identical(instr.Type(), targetType) { + if types.Identical(instr.Type(), tt) { targetValues = append(targetValues, targetValue{ value: &instr, instr: call, @@ -161,77 +215,86 @@ func getTargetTypesValues(b *ssa.BasicBlock, i int, targetTypes []*types.Pointer return targetValues } -func checkClosed(refs *[]ssa.Instruction, targetTypes []*types.Pointer) bool { +func checkClosed(refs *[]ssa.Instruction, targetTypes []any) bool { numInstrs := len(*refs) for idx, ref := range *refs { - // log.Printf("%T - %s", ref, ref) - action := getAction(ref, targetTypes) switch action { - case "closed": + case actionClosed, actionReturned, actionHandled: return true - case "passed": + case actionPassed: // Passed and not used 
after if numInstrs == idx+1 { return true } - case "returned": - return true - case "handled": - return true - default: - // log.Printf(action) } } return false } -func getAction(instr ssa.Instruction, targetTypes []*types.Pointer) string { +func getAction(instr ssa.Instruction, targetTypes []any) action { switch instr := instr.(type) { case *ssa.Defer: - if instr.Call.Value == nil { - return "unvalued defer" + if instr.Call.Value != nil { + name := instr.Call.Value.Name() + if name == closeMethod { + return actionClosed + } } - name := instr.Call.Value.Name() - if name == closeMethod { - return "closed" + if instr.Call.Method != nil { + name := instr.Call.Method.Name() + if name == closeMethod { + return actionClosed + } } + + return actionUnvaluedDefer case *ssa.Call: if instr.Call.Value == nil { - return "unvalued call" + return actionUnvaluedCall } isTarget := false - receiver := instr.Call.StaticCallee().Signature.Recv() - if receiver != nil { - isTarget = isTargetType(receiver.Type(), targetTypes) + staticCallee := instr.Call.StaticCallee() + if staticCallee != nil { + receiver := instr.Call.StaticCallee().Signature.Recv() + if receiver != nil { + isTarget = isTargetType(receiver.Type(), targetTypes) + } } name := instr.Call.Value.Name() if isTarget && name == closeMethod { - return "closed" + return actionClosed } if !isTarget { - return "passed" + return actionPassed } case *ssa.Phi: - return "passed" + return actionPassed case *ssa.MakeInterface: - return "passed" + return actionPassed case *ssa.Store: + // A Row/Stmt is stored in a struct, which may be closed later + // by a different flow. + if _, ok := instr.Addr.(*ssa.FieldAddr); ok { + return actionReturned + } + if len(*instr.Addr.Referrers()) == 0 { - return "noop" + return actionNoOp } for _, aRef := range *instr.Addr.Referrers() { if c, ok := aRef.(*ssa.MakeClosure); ok { - f := c.Fn.(*ssa.Function) - for _, b := range f.Blocks { - if checkClosed(&b.Instrs, targetTypes) { - return "handled" + if f, ok := c.Fn.(*ssa.Function); ok { + for _, b := range f.Blocks { + if checkClosed(&b.Instrs, targetTypes) { + return actionHandled + } } } } @@ -239,32 +302,45 @@ func getAction(instr ssa.Instruction, targetTypes []*types.Pointer) string { case *ssa.UnOp: instrType := instr.Type() for _, targetType := range targetTypes { - if types.Identical(instrType, targetType) { + var tt types.Type + + switch t := targetType.(type) { + case *types.Pointer: + tt = t + case *types.Named: + tt = t + default: + continue + } + + if types.Identical(instrType, tt) { if checkClosed(instr.Referrers(), targetTypes) { - return "handled" + return actionHandled } } } case *ssa.FieldAddr: if checkClosed(instr.Referrers(), targetTypes) { - return "handled" + return actionHandled } case *ssa.Return: - return "returned" - default: - // log.Printf("%s", instr) + return actionReturned } - return "unhandled" + return actionUnhandled } -func checkDeferred(pass *analysis.Pass, instrs *[]ssa.Instruction, targetTypes []*types.Pointer, inDefer bool) { +func checkDeferred(pass *analysis.Pass, instrs *[]ssa.Instruction, targetTypes []any, inDefer bool) { for _, instr := range *instrs { switch instr := instr.(type) { case *ssa.Defer: if instr.Call.Value != nil && instr.Call.Value.Name() == closeMethod { return } + + if instr.Call.Method != nil && instr.Call.Method.Name() == closeMethod { + return + } case *ssa.Call: if instr.Call.Value != nil && instr.Call.Value.Name() == closeMethod { if !inDefer { @@ -280,17 +356,28 @@ func checkDeferred(pass *analysis.Pass, instrs 
*[]ssa.Instruction, targetTypes [ for _, aRef := range *instr.Addr.Referrers() { if c, ok := aRef.(*ssa.MakeClosure); ok { - f := c.Fn.(*ssa.Function) - - for _, b := range f.Blocks { - checkDeferred(pass, &b.Instrs, targetTypes, true) + if f, ok := c.Fn.(*ssa.Function); ok { + for _, b := range f.Blocks { + checkDeferred(pass, &b.Instrs, targetTypes, true) + } } } } case *ssa.UnOp: instrType := instr.Type() for _, targetType := range targetTypes { - if types.Identical(instrType, targetType) { + var tt types.Type + + switch t := targetType.(type) { + case *types.Pointer: + tt = t + case *types.Named: + tt = t + default: + continue + } + + if types.Identical(instrType, tt) { checkDeferred(pass, instr.Referrers(), targetTypes, inDefer) } } @@ -300,10 +387,17 @@ func checkDeferred(pass *analysis.Pass, instrs *[]ssa.Instruction, targetTypes [ } } -func isTargetType(t types.Type, targetTypes []*types.Pointer) bool { +func isTargetType(t types.Type, targetTypes []any) bool { for _, targetType := range targetTypes { - if types.Identical(t, targetType) { - return true + switch tt := targetType.(type) { + case *types.Pointer: + if types.Identical(t, tt) { + return true + } + case *types.Named: + if types.Identical(t, tt) { + return true + } } } diff --git a/vendor/github.com/sanposhiho/wastedassign/v2/README.md b/vendor/github.com/sanposhiho/wastedassign/v2/README.md index cd2deedad5..6b736f7f1d 100644 --- a/vendor/github.com/sanposhiho/wastedassign/v2/README.md +++ b/vendor/github.com/sanposhiho/wastedassign/v2/README.md @@ -39,10 +39,19 @@ $ go vet -vettool=`which wastedassign` sample.go ## Installation + +### Go version < 1.16 + ``` go get -u github.com/sanposhiho/wastedassign/v2/cmd/wastedassign ``` +### Go version 1.16+ + +``` +go install github.com/sanposhiho/wastedassign/v2/cmd/wastedassign@latest +``` + ## Usage ``` diff --git a/vendor/github.com/sashamelentyev/interfacebloat/LICENSE b/vendor/github.com/sashamelentyev/interfacebloat/LICENSE new file mode 100644 index 0000000000..a52602b3da --- /dev/null +++ b/vendor/github.com/sashamelentyev/interfacebloat/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2022 Sasha Melentyev + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
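For context on what the updated sqlclosecheck analyzer is looking for, here is a minimal sketch of the pattern it enforces: a `*sql.Rows` (and now also `sqlx.NamedStmt` and pgx results) obtained from a query should be closed, ideally via `defer`, on every path. This snippet is illustrative only and is not part of the vendored code; the table and query are invented for the example.

```go
// Illustrative only: the resource-closing pattern sqlclosecheck enforces.
package example

import "database/sql"

// listNames reads one column from a made-up table. The deferred Close is
// exactly what the analyzer checks for; without it, it would report
// "Rows/Stmt/NamedStmt was not closed" at the db.Query call site.
func listNames(db *sql.DB) ([]string, error) {
	rows, err := db.Query("SELECT name FROM users")
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	var names []string
	for rows.Next() {
		var n string
		if err := rows.Scan(&n); err != nil {
			return nil, err
		}
		names = append(names, n)
	}
	return names, rows.Err()
}
```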
diff --git a/vendor/github.com/sashamelentyev/interfacebloat/pkg/analyzer/analyzer.go b/vendor/github.com/sashamelentyev/interfacebloat/pkg/analyzer/analyzer.go new file mode 100644 index 0000000000..4a6afdf8cb --- /dev/null +++ b/vendor/github.com/sashamelentyev/interfacebloat/pkg/analyzer/analyzer.go @@ -0,0 +1,57 @@ +package analyzer + +import ( + "flag" + "go/ast" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" +) + +const InterfaceMaxMethodsFlag = "max" + +const defaultMaxMethods = 10 + +// New returns new interfacebloat analyzer. +func New() *analysis.Analyzer { + return &analysis.Analyzer{ + Name: "interfacebloat", + Doc: "A linter that checks the number of methods inside an interface.", + Run: run, + Flags: flags(), + Requires: []*analysis.Analyzer{inspect.Analyzer}, + } +} + +func flags() flag.FlagSet { + flags := flag.NewFlagSet("", flag.ExitOnError) + flags.Int(InterfaceMaxMethodsFlag, 10, "maximum number of methods") + return *flags +} + +func run(pass *analysis.Pass) (interface{}, error) { + maxMethods, ok := pass.Analyzer.Flags.Lookup(InterfaceMaxMethodsFlag).Value.(flag.Getter).Get().(int) + if !ok { + maxMethods = defaultMaxMethods + } + + insp := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + + filter := []ast.Node{ + (*ast.InterfaceType)(nil), + } + + insp.Preorder(filter, func(node ast.Node) { + i, ok := node.(*ast.InterfaceType) + if !ok { + return + } + + if len(i.Methods.List) > maxMethods { + pass.Reportf(node.Pos(), `the interface has more than %d methods: %d`, maxMethods, len(i.Methods.List)) + } + }) + + return nil, nil +} diff --git a/vendor/github.com/sashamelentyev/usestdlibvars/LICENSE b/vendor/github.com/sashamelentyev/usestdlibvars/LICENSE new file mode 100644 index 0000000000..a52602b3da --- /dev/null +++ b/vendor/github.com/sashamelentyev/usestdlibvars/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2022 Sasha Melentyev + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
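To make the new interfacebloat check concrete: with the default `max` of 10, an interface like the one below (11 methods) would be flagged. The interface is invented purely for illustration and does not come from the vendored code.

```go
package store

import "context"

// Repository is a deliberately bloated interface: it declares 11 methods,
// one over interfacebloat's default limit of 10, so the analyzer reports
// "the interface has more than 10 methods: 11" at its position.
type Repository interface {
	Get(ctx context.Context, id string) ([]byte, error)
	List(ctx context.Context) ([][]byte, error)
	Create(ctx context.Context, v []byte) (string, error)
	Update(ctx context.Context, id string, v []byte) error
	Delete(ctx context.Context, id string) error
	Exists(ctx context.Context, id string) (bool, error)
	Count(ctx context.Context) (int, error)
	Watch(ctx context.Context, id string) (<-chan []byte, error)
	Lock(ctx context.Context, id string) error
	Unlock(ctx context.Context, id string) error
	Close() error
}
```

When run via golangci-lint, the method limit is presumably adjustable through the linter's settings (the `max` flag defined above); consult the current golangci-lint documentation for the exact configuration key.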
diff --git a/vendor/github.com/sashamelentyev/usestdlibvars/pkg/analyzer/analyzer.go b/vendor/github.com/sashamelentyev/usestdlibvars/pkg/analyzer/analyzer.go new file mode 100644 index 0000000000..4d6ab3ccab --- /dev/null +++ b/vendor/github.com/sashamelentyev/usestdlibvars/pkg/analyzer/analyzer.go @@ -0,0 +1,569 @@ +package analyzer + +import ( + "flag" + "go/ast" + "go/token" + "strings" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" + + "github.com/sashamelentyev/usestdlibvars/pkg/analyzer/internal/mapping" +) + +const ( + TimeWeekdayFlag = "time-weekday" + TimeMonthFlag = "time-month" + TimeLayoutFlag = "time-layout" + CryptoHashFlag = "crypto-hash" + HTTPMethodFlag = "http-method" + HTTPStatusCodeFlag = "http-status-code" + RPCDefaultPathFlag = "rpc-default-path" + OSDevNullFlag = "os-dev-null" + SQLIsolationLevelFlag = "sql-isolation-level" + TLSSignatureSchemeFlag = "tls-signature-scheme" + ConstantKindFlag = "constant-kind" + SyslogPriorityFlag = "syslog-priority" +) + +// New returns new usestdlibvars analyzer. +func New() *analysis.Analyzer { + return &analysis.Analyzer{ + Name: "usestdlibvars", + Doc: "A linter that detect the possibility to use variables/constants from the Go standard library.", + Run: run, + Flags: flags(), + Requires: []*analysis.Analyzer{inspect.Analyzer}, + } +} + +func flags() flag.FlagSet { + flags := flag.NewFlagSet("", flag.ExitOnError) + flags.Bool(HTTPMethodFlag, true, "suggest the use of http.MethodXX") + flags.Bool(HTTPStatusCodeFlag, true, "suggest the use of http.StatusXX") + flags.Bool(TimeWeekdayFlag, false, "suggest the use of time.Weekday.String()") + flags.Bool(TimeMonthFlag, false, "suggest the use of time.Month.String()") + flags.Bool(TimeLayoutFlag, false, "suggest the use of time.Layout") + flags.Bool(CryptoHashFlag, false, "suggest the use of crypto.Hash.String()") + flags.Bool(RPCDefaultPathFlag, false, "suggest the use of rpc.DefaultXXPath") + flags.Bool(OSDevNullFlag, false, "[DEPRECATED] suggest the use of os.DevNull") + flags.Bool(SQLIsolationLevelFlag, false, "suggest the use of sql.LevelXX.String()") + flags.Bool(TLSSignatureSchemeFlag, false, "suggest the use of tls.SignatureScheme.String()") + flags.Bool(ConstantKindFlag, false, "suggest the use of constant.Kind.String()") + flags.Bool(SyslogPriorityFlag, false, "[DEPRECATED] suggest the use of syslog.Priority") + return *flags +} + +func run(pass *analysis.Pass) (interface{}, error) { + insp := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + + types := []ast.Node{ + (*ast.CallExpr)(nil), + (*ast.BasicLit)(nil), + (*ast.CompositeLit)(nil), + (*ast.IfStmt)(nil), + (*ast.SwitchStmt)(nil), + (*ast.ForStmt)(nil), + } + + insp.Preorder(types, func(node ast.Node) { + switch n := node.(type) { + case *ast.CallExpr: + fun, ok := n.Fun.(*ast.SelectorExpr) + if !ok { + return + } + + x, ok := fun.X.(*ast.Ident) + if !ok { + return + } + + funArgs(pass, x, fun, n.Args) + + case *ast.BasicLit: + for _, c := range []struct { + flag string + checkFunc func(pass *analysis.Pass, basicLit *ast.BasicLit) + }{ + {flag: TimeWeekdayFlag, checkFunc: checkTimeWeekday}, + {flag: TimeMonthFlag, checkFunc: checkTimeMonth}, + {flag: TimeLayoutFlag, checkFunc: checkTimeLayout}, + {flag: CryptoHashFlag, checkFunc: checkCryptoHash}, + {flag: RPCDefaultPathFlag, checkFunc: checkRPCDefaultPath}, + {flag: OSDevNullFlag, checkFunc: checkOSDevNull}, + {flag: SQLIsolationLevelFlag, checkFunc: checkSQLIsolationLevel}, + {flag: 
TLSSignatureSchemeFlag, checkFunc: checkTLSSignatureScheme}, + {flag: ConstantKindFlag, checkFunc: checkConstantKind}, + } { + if lookupFlag(pass, c.flag) { + c.checkFunc(pass, n) + } + } + + case *ast.CompositeLit: + typ, ok := n.Type.(*ast.SelectorExpr) + if !ok { + return + } + + x, ok := typ.X.(*ast.Ident) + if !ok { + return + } + + typeElts(pass, x, typ, n.Elts) + + case *ast.IfStmt: + cond, ok := n.Cond.(*ast.BinaryExpr) + if !ok { + return + } + + switch cond.Op { + case token.LSS, token.GTR, token.LEQ, token.GEQ: + return + } + + x, ok := cond.X.(*ast.SelectorExpr) + if !ok { + return + } + + y, ok := cond.Y.(*ast.BasicLit) + if !ok { + return + } + + ifElseStmt(pass, x, y) + + case *ast.SwitchStmt: + x, ok := n.Tag.(*ast.SelectorExpr) + if ok { + switchStmt(pass, x, n.Body.List) + } else { + switchStmtAsIfElseStmt(pass, n.Body.List) + } + + case *ast.ForStmt: + cond, ok := n.Cond.(*ast.BinaryExpr) + if !ok { + return + } + + x, ok := cond.X.(*ast.SelectorExpr) + if !ok { + return + } + + y, ok := cond.Y.(*ast.BasicLit) + if !ok { + return + } + + ifElseStmt(pass, x, y) + } + }) + + return nil, nil +} + +// funArgs checks arguments of function or method. +func funArgs(pass *analysis.Pass, x *ast.Ident, fun *ast.SelectorExpr, args []ast.Expr) { + switch x.Name { + case "http": + switch fun.Sel.Name { + // http.NewRequest(http.MethodGet, "localhost", http.NoBody) + case "NewRequest": + if !lookupFlag(pass, HTTPMethodFlag) { + return + } + + if basicLit := getBasicLitFromArgs(args, 3, 0, token.STRING); basicLit != nil { + checkHTTPMethod(pass, basicLit) + } + + // http.NewRequestWithContext(context.Background(), http.MethodGet, "localhost", http.NoBody) + case "NewRequestWithContext": + if !lookupFlag(pass, HTTPMethodFlag) { + return + } + + if basicLit := getBasicLitFromArgs(args, 4, 1, token.STRING); basicLit != nil { + checkHTTPMethod(pass, basicLit) + } + + // http.Error(w, err, http.StatusInternalServerError) + case "Error": + if !lookupFlag(pass, HTTPStatusCodeFlag) { + return + } + + if basicLit := getBasicLitFromArgs(args, 3, 2, token.INT); basicLit != nil { + checkHTTPStatusCode(pass, basicLit) + } + + // http.StatusText(http.StatusOK) + case "StatusText": + if !lookupFlag(pass, HTTPStatusCodeFlag) { + return + } + + if basicLit := getBasicLitFromArgs(args, 1, 0, token.INT); basicLit != nil { + checkHTTPStatusCode(pass, basicLit) + } + + // http.Redirect(w, r, "localhost", http.StatusMovedPermanently) + case "Redirect": + if !lookupFlag(pass, HTTPStatusCodeFlag) { + return + } + + if basicLit := getBasicLitFromArgs(args, 4, 3, token.INT); basicLit != nil { + checkHTTPStatusCode(pass, basicLit) + } + + // http.RedirectHandler("localhost", http.StatusMovedPermanently) + case "RedirectHandler": + if !lookupFlag(pass, HTTPStatusCodeFlag) { + return + } + + if basicLit := getBasicLitFromArgs(args, 2, 1, token.INT); basicLit != nil { + checkHTTPStatusCode(pass, basicLit) + } + } + case "httptest": + if fun.Sel.Name == "NewRequest" { + if !lookupFlag(pass, HTTPMethodFlag) { + return + } + + if basicLit := getBasicLitFromArgs(args, 3, 0, token.STRING); basicLit != nil { + checkHTTPMethod(pass, basicLit) + } + } + case "syslog": + if !lookupFlag(pass, SyslogPriorityFlag) { + return + } + + switch fun.Sel.Name { + case "New": + if basicLit := getBasicLitFromArgs(args, 2, 0, token.INT); basicLit != nil { + checkSyslogPriority(pass, basicLit) + } + + case "Dial": + if basicLit := getBasicLitFromArgs(args, 4, 2, token.INT); basicLit != nil { + checkSyslogPriority(pass, basicLit) + } + + 
case "NewLogger": + if basicLit := getBasicLitFromArgs(args, 2, 0, token.INT); basicLit != nil { + checkSyslogPriority(pass, basicLit) + } + } + default: + // w.WriteHeader(http.StatusOk) + if fun.Sel.Name == "WriteHeader" { + if !lookupFlag(pass, HTTPStatusCodeFlag) { + return + } + + if basicLit := getBasicLitFromArgs(args, 1, 0, token.INT); basicLit != nil { + checkHTTPStatusCode(pass, basicLit) + } + } + } +} + +// typeElts checks elements of type. +func typeElts(pass *analysis.Pass, x *ast.Ident, typ *ast.SelectorExpr, elts []ast.Expr) { + switch x.Name { + case "http": + switch typ.Sel.Name { + // http.Request{Method: http.MethodGet} + case "Request": + if !lookupFlag(pass, HTTPMethodFlag) { + return + } + + if basicLit := getBasicLitFromElts(elts, "Method"); basicLit != nil { + checkHTTPMethod(pass, basicLit) + } + + // http.Response{StatusCode: http.StatusOK} + case "Response": + if !lookupFlag(pass, HTTPStatusCodeFlag) { + return + } + + if basicLit := getBasicLitFromElts(elts, "StatusCode"); basicLit != nil { + checkHTTPStatusCode(pass, basicLit) + } + } + case "httptest": + if typ.Sel.Name == "ResponseRecorder" { + if !lookupFlag(pass, HTTPStatusCodeFlag) { + return + } + + if basicLit := getBasicLitFromElts(elts, "Code"); basicLit != nil { + checkHTTPStatusCode(pass, basicLit) + } + } + } +} + +// ifElseStmt checks X and Y in if-else-statement. +func ifElseStmt(pass *analysis.Pass, x *ast.SelectorExpr, y *ast.BasicLit) { + switch x.Sel.Name { + case "StatusCode": + if !lookupFlag(pass, HTTPStatusCodeFlag) { + return + } + + checkHTTPStatusCode(pass, y) + + case "Method": + if !lookupFlag(pass, HTTPMethodFlag) { + return + } + + checkHTTPMethod(pass, y) + } +} + +func switchStmt(pass *analysis.Pass, x *ast.SelectorExpr, cases []ast.Stmt) { + var checkFunc func(pass *analysis.Pass, basicLit *ast.BasicLit) + + switch x.Sel.Name { + case "StatusCode": + if !lookupFlag(pass, HTTPStatusCodeFlag) { + return + } + + checkFunc = checkHTTPStatusCode + + case "Method": + if !lookupFlag(pass, HTTPMethodFlag) { + return + } + + checkFunc = checkHTTPMethod + + default: + return + } + + for _, c := range cases { + caseClause, ok := c.(*ast.CaseClause) + if !ok { + continue + } + + for _, expr := range caseClause.List { + basicLit, ok := expr.(*ast.BasicLit) + if !ok { + continue + } + + checkFunc(pass, basicLit) + } + } +} + +func switchStmtAsIfElseStmt(pass *analysis.Pass, cases []ast.Stmt) { + for _, c := range cases { + caseClause, ok := c.(*ast.CaseClause) + if !ok { + continue + } + + for _, expr := range caseClause.List { + binaryExpr, ok := expr.(*ast.BinaryExpr) + if !ok { + continue + } + + x, ok := binaryExpr.X.(*ast.SelectorExpr) + if !ok { + continue + } + + y, ok := binaryExpr.Y.(*ast.BasicLit) + if !ok { + continue + } + + ifElseStmt(pass, x, y) + } + } +} + +func lookupFlag(pass *analysis.Pass, name string) bool { + return pass.Analyzer.Flags.Lookup(name).Value.(flag.Getter).Get().(bool) +} + +func checkHTTPMethod(pass *analysis.Pass, basicLit *ast.BasicLit) { + currentVal := getBasicLitValue(basicLit) + + key := strings.ToUpper(currentVal) + + if newVal, ok := mapping.HTTPMethod[key]; ok { + report(pass, basicLit.Pos(), currentVal, newVal) + } +} + +func checkHTTPStatusCode(pass *analysis.Pass, basicLit *ast.BasicLit) { + currentVal := getBasicLitValue(basicLit) + + if newVal, ok := mapping.HTTPStatusCode[currentVal]; ok { + report(pass, basicLit.Pos(), currentVal, newVal) + } +} + +func checkTimeWeekday(pass *analysis.Pass, basicLit *ast.BasicLit) { + currentVal := 
getBasicLitValue(basicLit) + + if newVal, ok := mapping.TimeWeekday[currentVal]; ok { + report(pass, basicLit.Pos(), currentVal, newVal) + } +} + +func checkTimeMonth(pass *analysis.Pass, basicLit *ast.BasicLit) { + currentVal := getBasicLitValue(basicLit) + + if newVal, ok := mapping.TimeMonth[currentVal]; ok { + report(pass, basicLit.Pos(), currentVal, newVal) + } +} + +func checkTimeLayout(pass *analysis.Pass, basicLit *ast.BasicLit) { + currentVal := getBasicLitValue(basicLit) + + if newVal, ok := mapping.TimeLayout[currentVal]; ok { + report(pass, basicLit.Pos(), currentVal, newVal) + } +} + +func checkCryptoHash(pass *analysis.Pass, basicLit *ast.BasicLit) { + currentVal := getBasicLitValue(basicLit) + + if newVal, ok := mapping.CryptoHash[currentVal]; ok { + report(pass, basicLit.Pos(), currentVal, newVal) + } +} + +func checkRPCDefaultPath(pass *analysis.Pass, basicLit *ast.BasicLit) { + currentVal := getBasicLitValue(basicLit) + + if newVal, ok := mapping.RPCDefaultPath[currentVal]; ok { + report(pass, basicLit.Pos(), currentVal, newVal) + } +} + +func checkOSDevNull(pass *analysis.Pass, basicLit *ast.BasicLit) {} + +func checkSQLIsolationLevel(pass *analysis.Pass, basicLit *ast.BasicLit) { + currentVal := getBasicLitValue(basicLit) + + if newVal, ok := mapping.SQLIsolationLevel[currentVal]; ok { + report(pass, basicLit.Pos(), currentVal, newVal) + } +} + +func checkTLSSignatureScheme(pass *analysis.Pass, basicLit *ast.BasicLit) { + currentVal := getBasicLitValue(basicLit) + + if newVal, ok := mapping.TLSSignatureScheme[currentVal]; ok { + report(pass, basicLit.Pos(), currentVal, newVal) + } +} + +func checkConstantKind(pass *analysis.Pass, basicLit *ast.BasicLit) { + currentVal := getBasicLitValue(basicLit) + + if newVal, ok := mapping.ConstantKind[currentVal]; ok { + report(pass, basicLit.Pos(), currentVal, newVal) + } +} + +func checkSyslogPriority(pass *analysis.Pass, basicLit *ast.BasicLit) {} + +// getBasicLitFromArgs gets the *ast.BasicLit of a function argument. +// +// Arguments: +// - args - slice of function arguments +// - count - expected number of argument in function +// - idx - index of the argument to get the *ast.BasicLit +// - typ - argument type +func getBasicLitFromArgs(args []ast.Expr, count, idx int, typ token.Token) *ast.BasicLit { + if len(args) != count { + return nil + } + + if idx > count-1 { + return nil + } + + basicLit, ok := args[idx].(*ast.BasicLit) + if !ok { + return nil + } + + if basicLit.Kind != typ { + return nil + } + + return basicLit +} + +// getBasicLitFromElts gets the *ast.BasicLit of a struct elements. +// +// Arguments: +// - elts - slice of a struct elements +// - key - name of key in struct +func getBasicLitFromElts(elts []ast.Expr, key string) *ast.BasicLit { + for _, e := range elts { + keyValueExpr, ok := e.(*ast.KeyValueExpr) + if !ok { + continue + } + + ident, ok := keyValueExpr.Key.(*ast.Ident) + if !ok { + continue + } + + if ident.Name != key { + continue + } + + if basicLit, ok := keyValueExpr.Value.(*ast.BasicLit); ok { + return basicLit + } + } + + return nil +} + +// getBasicLitValue returns BasicLit value as string without quotes. 
+func getBasicLitValue(basicLit *ast.BasicLit) string { + var val strings.Builder + for _, r := range basicLit.Value { + if r == '"' { + continue + } else { + val.WriteRune(r) + } + } + return val.String() +} + +func report(pass *analysis.Pass, pos token.Pos, currentVal, newVal string) { + pass.Reportf(pos, "%q can be replaced by %s", currentVal, newVal) +} diff --git a/vendor/github.com/sashamelentyev/usestdlibvars/pkg/analyzer/internal/mapping/mapping.go b/vendor/github.com/sashamelentyev/usestdlibvars/pkg/analyzer/internal/mapping/mapping.go new file mode 100644 index 0000000000..5bad23d288 --- /dev/null +++ b/vendor/github.com/sashamelentyev/usestdlibvars/pkg/analyzer/internal/mapping/mapping.go @@ -0,0 +1,202 @@ +package mapping + +import ( + "crypto" + "crypto/tls" + "database/sql" + "go/constant" + "net/http" + "net/rpc" + "strconv" + "time" +) + +var CryptoHash = map[string]string{ + crypto.MD4.String(): "crypto.MD4.String()", + crypto.MD5.String(): "crypto.MD5.String()", + crypto.SHA1.String(): "crypto.SHA1.String()", + crypto.SHA224.String(): "crypto.SHA224.String()", + crypto.SHA256.String(): "crypto.SHA256.String()", + crypto.SHA384.String(): "crypto.SHA384.String()", + crypto.SHA512.String(): "crypto.SHA512.String()", + crypto.MD5SHA1.String(): "crypto.MD5SHA1.String()", + crypto.RIPEMD160.String(): "crypto.RIPEMD160.String()", + crypto.SHA3_224.String(): "crypto.SHA3_224.String()", + crypto.SHA3_256.String(): "crypto.SHA3_256.String()", + crypto.SHA3_384.String(): "crypto.SHA3_384.String()", + crypto.SHA3_512.String(): "crypto.SHA3_512.String()", + crypto.SHA512_224.String(): "crypto.SHA512_224.String()", + crypto.SHA512_256.String(): "crypto.SHA512_256.String()", + crypto.BLAKE2s_256.String(): "crypto.BLAKE2s_256.String()", + crypto.BLAKE2b_256.String(): "crypto.BLAKE2b_256.String()", + crypto.BLAKE2b_384.String(): "crypto.BLAKE2b_384.String()", + crypto.BLAKE2b_512.String(): "crypto.BLAKE2b_512.String()", +} + +var HTTPMethod = map[string]string{ + http.MethodGet: "http.MethodGet", + http.MethodHead: "http.MethodHead", + http.MethodPost: "http.MethodPost", + http.MethodPut: "http.MethodPut", + http.MethodPatch: "http.MethodPatch", + http.MethodDelete: "http.MethodDelete", + http.MethodConnect: "http.MethodConnect", + http.MethodOptions: "http.MethodOptions", + http.MethodTrace: "http.MethodTrace", +} + +var HTTPStatusCode = map[string]string{ + strconv.Itoa(http.StatusContinue): "http.StatusContinue", + strconv.Itoa(http.StatusSwitchingProtocols): "http.StatusSwitchingProtocols", + strconv.Itoa(http.StatusProcessing): "http.StatusProcessing", + strconv.Itoa(http.StatusEarlyHints): "http.StatusEarlyHints", + + strconv.Itoa(http.StatusOK): "http.StatusOK", + strconv.Itoa(http.StatusCreated): "http.StatusCreated", + strconv.Itoa(http.StatusAccepted): "http.StatusAccepted", + strconv.Itoa(http.StatusNonAuthoritativeInfo): "http.StatusNonAuthoritativeInfo", + strconv.Itoa(http.StatusNoContent): "http.StatusNoContent", + strconv.Itoa(http.StatusResetContent): "http.StatusResetContent", + strconv.Itoa(http.StatusPartialContent): "http.StatusPartialContent", + strconv.Itoa(http.StatusMultiStatus): "http.StatusMultiStatus", + strconv.Itoa(http.StatusAlreadyReported): "http.StatusAlreadyReported", + strconv.Itoa(http.StatusIMUsed): "http.StatusIMUsed", + + strconv.Itoa(http.StatusMultipleChoices): "http.StatusMultipleChoices", + strconv.Itoa(http.StatusMovedPermanently): "http.StatusMovedPermanently", + strconv.Itoa(http.StatusFound): "http.StatusFound", + 
strconv.Itoa(http.StatusSeeOther): "http.StatusSeeOther", + strconv.Itoa(http.StatusNotModified): "http.StatusNotModified", + strconv.Itoa(http.StatusUseProxy): "http.StatusUseProxy", + strconv.Itoa(http.StatusTemporaryRedirect): "http.StatusTemporaryRedirect", + strconv.Itoa(http.StatusPermanentRedirect): "http.StatusPermanentRedirect", + + strconv.Itoa(http.StatusBadRequest): "http.StatusBadRequest", + strconv.Itoa(http.StatusUnauthorized): "http.StatusUnauthorized", + strconv.Itoa(http.StatusPaymentRequired): "http.StatusPaymentRequired", + strconv.Itoa(http.StatusForbidden): "http.StatusForbidden", + strconv.Itoa(http.StatusNotFound): "http.StatusNotFound", + strconv.Itoa(http.StatusMethodNotAllowed): "http.StatusMethodNotAllowed", + strconv.Itoa(http.StatusNotAcceptable): "http.StatusNotAcceptable", + strconv.Itoa(http.StatusProxyAuthRequired): "http.StatusProxyAuthRequired", + strconv.Itoa(http.StatusRequestTimeout): "http.StatusRequestTimeout", + strconv.Itoa(http.StatusConflict): "http.StatusConflict", + strconv.Itoa(http.StatusGone): "http.StatusGone", + strconv.Itoa(http.StatusLengthRequired): "http.StatusLengthRequired", + strconv.Itoa(http.StatusPreconditionFailed): "http.StatusPreconditionFailed", + strconv.Itoa(http.StatusRequestEntityTooLarge): "http.StatusRequestEntityTooLarge", + strconv.Itoa(http.StatusRequestURITooLong): "http.StatusRequestURITooLong", + strconv.Itoa(http.StatusUnsupportedMediaType): "http.StatusUnsupportedMediaType", + strconv.Itoa(http.StatusRequestedRangeNotSatisfiable): "http.StatusRequestedRangeNotSatisfiable", + strconv.Itoa(http.StatusExpectationFailed): "http.StatusExpectationFailed", + strconv.Itoa(http.StatusTeapot): "http.StatusTeapot", + strconv.Itoa(http.StatusMisdirectedRequest): "http.StatusMisdirectedRequest", + strconv.Itoa(http.StatusUnprocessableEntity): "http.StatusUnprocessableEntity", + strconv.Itoa(http.StatusLocked): "http.StatusLocked", + strconv.Itoa(http.StatusFailedDependency): "http.StatusFailedDependency", + strconv.Itoa(http.StatusTooEarly): "http.StatusTooEarly", + strconv.Itoa(http.StatusUpgradeRequired): "http.StatusUpgradeRequired", + strconv.Itoa(http.StatusPreconditionRequired): "http.StatusPreconditionRequired", + strconv.Itoa(http.StatusTooManyRequests): "http.StatusTooManyRequests", + strconv.Itoa(http.StatusRequestHeaderFieldsTooLarge): "http.StatusRequestHeaderFieldsTooLarge", + strconv.Itoa(http.StatusUnavailableForLegalReasons): "http.StatusUnavailableForLegalReasons", + + strconv.Itoa(http.StatusInternalServerError): "http.StatusInternalServerError", + strconv.Itoa(http.StatusNotImplemented): "http.StatusNotImplemented", + strconv.Itoa(http.StatusBadGateway): "http.StatusBadGateway", + strconv.Itoa(http.StatusServiceUnavailable): "http.StatusServiceUnavailable", + strconv.Itoa(http.StatusGatewayTimeout): "http.StatusGatewayTimeout", + strconv.Itoa(http.StatusHTTPVersionNotSupported): "http.StatusHTTPVersionNotSupported", + strconv.Itoa(http.StatusVariantAlsoNegotiates): "http.StatusVariantAlsoNegotiates", + strconv.Itoa(http.StatusInsufficientStorage): "http.StatusInsufficientStorage", + strconv.Itoa(http.StatusLoopDetected): "http.StatusLoopDetected", + strconv.Itoa(http.StatusNotExtended): "http.StatusNotExtended", + strconv.Itoa(http.StatusNetworkAuthenticationRequired): "http.StatusNetworkAuthenticationRequired", +} + +var RPCDefaultPath = map[string]string{ + rpc.DefaultRPCPath: "rpc.DefaultRPCPath", + rpc.DefaultDebugPath: "rpc.DefaultDebugPath", +} + +var TimeWeekday = map[string]string{ + 
time.Sunday.String(): "time.Sunday.String()", + time.Monday.String(): "time.Monday.String()", + time.Tuesday.String(): "time.Tuesday.String()", + time.Wednesday.String(): "time.Wednesday.String()", + time.Thursday.String(): "time.Thursday.String()", + time.Friday.String(): "time.Friday.String()", + time.Saturday.String(): "time.Saturday.String()", +} + +var TimeMonth = map[string]string{ + time.January.String(): "time.January.String()", + time.February.String(): "time.February.String()", + time.March.String(): "time.March.String()", + time.April.String(): "time.April.String()", + time.May.String(): "time.May.String()", + time.June.String(): "time.June.String()", + time.July.String(): "time.July.String()", + time.August.String(): "time.August.String()", + time.September.String(): "time.September.String()", + time.October.String(): "time.October.String()", + time.November.String(): "time.November.String()", + time.December.String(): "time.December.String()", +} + +var TimeLayout = map[string]string{ + time.Layout: "time.Layout", + time.ANSIC: "time.ANSIC", + time.UnixDate: "time.UnixDate", + time.RubyDate: "time.RubyDate", + time.RFC822: "time.RFC822", + time.RFC822Z: "time.RFC822Z", + time.RFC850: "time.RFC850", + time.RFC1123: "time.RFC1123", + time.RFC1123Z: "time.RFC1123Z", + time.RFC3339: "time.RFC3339", + time.RFC3339Nano: "time.RFC3339Nano", + time.Kitchen: "time.Kitchen", + time.Stamp: "time.Stamp", + time.StampMilli: "time.StampMilli", + time.StampMicro: "time.StampMicro", + time.StampNano: "time.StampNano", + time.DateTime: "time.DateTime", + time.DateOnly: "time.DateOnly", + time.TimeOnly: "time.TimeOnly", +} + +var SQLIsolationLevel = map[string]string{ + // sql.LevelDefault.String(): "sql.LevelDefault.String()", + sql.LevelReadUncommitted.String(): "sql.LevelReadUncommitted.String()", + sql.LevelReadCommitted.String(): "sql.LevelReadCommitted.String()", + sql.LevelWriteCommitted.String(): "sql.LevelWriteCommitted.String()", + sql.LevelRepeatableRead.String(): "sql.LevelRepeatableRead.String()", + // sql.LevelSnapshot.String(): "sql.LevelSnapshot.String()", + // sql.LevelSerializable.String(): "sql.LevelSerializable.String()", + // sql.LevelLinearizable.String(): "sql.LevelLinearizable.String()", +} + +var TLSSignatureScheme = map[string]string{ + tls.PSSWithSHA256.String(): "tls.PSSWithSHA256.String()", + tls.ECDSAWithP256AndSHA256.String(): "tls.ECDSAWithP256AndSHA256.String()", + tls.Ed25519.String(): "tls.Ed25519.String()", + tls.PSSWithSHA384.String(): "tls.PSSWithSHA384.String()", + tls.PSSWithSHA512.String(): "tls.PSSWithSHA512.String()", + tls.PKCS1WithSHA256.String(): "tls.PKCS1WithSHA256.String()", + tls.PKCS1WithSHA384.String(): "tls.PKCS1WithSHA384.String()", + tls.PKCS1WithSHA512.String(): "tls.PKCS1WithSHA512.String()", + tls.ECDSAWithP384AndSHA384.String(): "tls.ECDSAWithP384AndSHA384.String()", + tls.ECDSAWithP521AndSHA512.String(): "tls.ECDSAWithP521AndSHA512.String()", + tls.PKCS1WithSHA1.String(): "tls.PKCS1WithSHA1.String()", + tls.ECDSAWithSHA1.String(): "tls.ECDSAWithSHA1.String()", +} + +var ConstantKind = map[string]string{ + // constant.Unknown.String(): "constant.Unknown.String()", + constant.Bool.String(): "constant.Bool.String()", + constant.String.String(): "constant.String.String()", + constant.Int.String(): "constant.Int.String()", + constant.Float.String(): "constant.Float.String()", + constant.Complex.String(): "constant.Complex.String()", +} diff --git a/vendor/github.com/securego/gosec/v2/.golangci.yml 
b/vendor/github.com/securego/gosec/v2/.golangci.yml index 64e4e4515c..d591dc2493 100644 --- a/vendor/github.com/securego/gosec/v2/.golangci.yml +++ b/vendor/github.com/securego/gosec/v2/.golangci.yml @@ -1,33 +1,45 @@ linters: enable: - - asciicheck - - bodyclose - - deadcode - - depguard - - dogsled - - durationcheck - - errcheck - - errorlint - - exportloopref - - gci - - gofmt - - gofumpt - - goimports - - gosec - - gosimple - - govet - - importas - - ineffassign - - megacheck - - misspell - - nakedret - - nolintlint - - revive - - staticcheck - - structcheck - - typecheck - - unconvert - - unparam - - unused - - varcheck - - wastedassign + - asciicheck + - bodyclose + - dogsled + - durationcheck + - errcheck + - errorlint + - exportloopref + - gci + - ginkgolinter + - gochecknoinits + - gofmt + - gofumpt + - goimports + - gosec + - gosimple + - govet + - importas + - ineffassign + - megacheck + - misspell + - nakedret + - nolintlint + - revive + - staticcheck + - typecheck + - unconvert + - unparam + - unused + - wastedassign + +linters-settings: + gci: + sections: + - standard + - default + - prefix(github.com/securego) + revive: + rules: + - name: dot-imports + disabled: true + +run: + timeout: 5m diff --git a/vendor/github.com/securego/gosec/v2/.goreleaser.yml b/vendor/github.com/securego/gosec/v2/.goreleaser.yml index 539be5659e..e3c903e7a7 100644 --- a/vendor/github.com/securego/gosec/v2/.goreleaser.yml +++ b/vendor/github.com/securego/gosec/v2/.goreleaser.yml @@ -18,6 +18,7 @@ builds: goarch: - amd64 - arm64 + - s390x ldflags: -X main.Version={{.Version}} -X main.GitTag={{.Tag}} -X main.BuildDate={{.Date}} env: - CGO_ENABLED=0 @@ -25,6 +26,11 @@ builds: signs: - cmd: cosign stdin: '{{ .Env.COSIGN_PASSWORD}}' - args: ["sign-blob", "--key=/tmp/cosign.key", "--output=${signature}", "${artifact}"] + args: + - "sign-blob" + - "--key=/tmp/cosign.key" + - "--output=${signature}" + - "${artifact}" + - "--yes" artifacts: all diff --git a/vendor/github.com/securego/gosec/v2/Dockerfile b/vendor/github.com/securego/gosec/v2/Dockerfile index b57c981fb3..1bf94da7d7 100644 --- a/vendor/github.com/securego/gosec/v2/Dockerfile +++ b/vendor/github.com/securego/gosec/v2/Dockerfile @@ -1,11 +1,11 @@ ARG GO_VERSION FROM golang:${GO_VERSION}-alpine AS builder -RUN apk add --no-cache ca-certificates make git curl gcc libc-dev -RUN mkdir -p /build +RUN apk add --no-cache ca-certificates make git curl gcc libc-dev \ + && mkdir -p /build WORKDIR /build COPY . /build/ -RUN go mod download -RUN make build-linux +RUN go mod download \ + && make build-linux FROM golang:${GO_VERSION}-alpine RUN apk add --no-cache ca-certificates bash git gcc libc-dev openssh diff --git a/vendor/github.com/securego/gosec/v2/Makefile b/vendor/github.com/securego/gosec/v2/Makefile index 5dbfd7764b..dcfb4b2ed9 100644 --- a/vendor/github.com/securego/gosec/v2/Makefile +++ b/vendor/github.com/securego/gosec/v2/Makefile @@ -2,27 +2,37 @@ GIT_TAG?= $(shell git describe --always --tags) BIN = gosec FMT_CMD = $(gofmt -s -l -w $(find . 
-type f -name '*.go' -not -path './vendor/*') | tee /dev/stderr) IMAGE_REPO = securego -BUILD_DATE ?= $(shell date +%Y-%m-%d) +DATE_FMT=+%Y-%m-%d +ifdef SOURCE_DATE_EPOCH + BUILD_DATE ?= $(shell date -u -d "@$(SOURCE_DATE_EPOCH)" "$(DATE_FMT)" 2>/dev/null || date -u -r "$(SOURCE_DATE_EPOCH)" "$(DATE_FMT)" 2>/dev/null || date -u "$(DATE_FMT)") +else + BUILD_DATE ?= $(shell date "$(DATE_FMT)") +endif BUILDFLAGS := "-w -s -X 'main.Version=$(GIT_TAG)' -X 'main.GitTag=$(GIT_TAG)' -X 'main.BuildDate=$(BUILD_DATE)'" CGO_ENABLED = 0 GO := GO111MODULE=on go -GO_NOMOD :=GO111MODULE=off go GOPATH ?= $(shell $(GO) env GOPATH) GOBIN ?= $(GOPATH)/bin -GOLINT ?= $(GOBIN)/golint GOSEC ?= $(GOBIN)/gosec GINKGO ?= $(GOBIN)/ginkgo -GO_VERSION = 1.18 +GO_MINOR_VERSION = $(shell $(GO) version | cut -c 14- | cut -d' ' -f1 | cut -d'.' -f2) +GOVULN_MIN_VERSION = 17 +GO_VERSION = 1.20 default: $(MAKE) build install-test-deps: go install github.com/onsi/ginkgo/v2/ginkgo@latest - $(GO_NOMOD) get -u golang.org/x/crypto/ssh - $(GO_NOMOD) get -u github.com/lib/pq + go install golang.org/x/crypto/...@latest + go install github.com/lib/pq/...@latest -test: install-test-deps build fmt lint sec +install-govulncheck: + @if [ $(GO_MINOR_VERSION) -gt $(GOVULN_MIN_VERSION) ]; then \ + go install golang.org/x/vuln/cmd/govulncheck@latest; \ + fi + +test: install-test-deps build-race fmt vet sec govulncheck $(GINKGO) -v --fail-fast fmt: @@ -30,10 +40,7 @@ fmt: @FORMATTED=`$(GO) fmt ./...` @([ ! -z "$(FORMATTED)" ] && printf "Fixed unformatted files:\n$(FORMATTED)") || true -lint: - @echo "LINTING: golint" - $(GO_NOMOD) get -u golang.org/x/lint/golint - $(GOLINT) -set_exit_status ./... +vet: @echo "VETTING" $(GO) vet ./... @@ -45,12 +52,21 @@ sec: @echo "SECURITY SCANNING" ./$(BIN) ./... +govulncheck: install-govulncheck + @echo "CHECKING VULNERABILITIES" + @if [ $(GO_MINOR_VERSION) -gt $(GOVULN_MIN_VERSION) ]; then \ + govulncheck ./...; \ + fi + test-coverage: install-test-deps go test -race -v -count=1 -coverprofile=coverage.out ./... build: go build -o $(BIN) ./cmd/gosec/ +build-race: + go build -race -o $(BIN) ./cmd/gosec/ + clean: rm -rf build vendor dist coverage.txt rm -f release image $(BIN) @@ -73,4 +89,7 @@ image-push: image docker push $(IMAGE_REPO)/$(BIN):$(GIT_TAG) docker push $(IMAGE_REPO)/$(BIN):latest -.PHONY: test build clean release image image-push +tlsconfig: + go generate ./... + +.PHONY: test build clean release image image-push tlsconfig diff --git a/vendor/github.com/securego/gosec/v2/README.md b/vendor/github.com/securego/gosec/v2/README.md index cc824393b8..d9a33f12a6 100644 --- a/vendor/github.com/securego/gosec/v2/README.md +++ b/vendor/github.com/securego/gosec/v2/README.md @@ -1,7 +1,7 @@ # gosec - Golang Security Checker -Inspects source code for security problems by scanning the Go AST. +Inspects source code for security problems by scanning the Go AST and SSA code representation. 
@@ -21,7 +21,7 @@ You may obtain a copy of the License [here](http://www.apache.org/licenses/LICEN [![Docs](https://readthedocs.org/projects/docs/badge/?version=latest)](https://securego.io/) [![Downloads](https://img.shields.io/github/downloads/securego/gosec/total.svg)](https://github.com/securego/gosec/releases) [![Docker Pulls](https://img.shields.io/docker/pulls/securego/gosec.svg)](https://hub.docker.com/r/securego/gosec/tags) -[![Slack](http://securego.herokuapp.com/badge.svg)](http://securego.herokuapp.com) +[![Slack](https://img.shields.io/badge/Slack-4A154B?style=for-the-badge&logo=slack&logoColor=white)](http://securego.slack.com) ## Install @@ -68,7 +68,7 @@ jobs: GO111MODULE: on steps: - name: Checkout Source - uses: actions/checkout@v2 + uses: actions/checkout@v3 - name: Run Gosec Security Scanner uses: securego/gosec@master with: @@ -98,7 +98,7 @@ jobs: GO111MODULE: on steps: - name: Checkout Source - uses: actions/checkout@v2 + uses: actions/checkout@v3 - name: Run Gosec Security Scanner uses: securego/gosec@master with: @@ -157,7 +157,7 @@ directory you can supply `./...` as the input argument. - G304: File path provided as taint input - G305: File traversal when extracting zip/tar archive - G306: Poor file permissions used when writing to a new file -- G307: Deferring a method which returns an error +- G307: Poor file permissions used when creating a file with os.Create - G401: Detect the usage of DES, RC4, MD5 or SHA1 - G402: Look for bad TLS connection settings - G403: Ensure minimum RSA key length of 2048 bits @@ -168,10 +168,12 @@ directory you can supply `./...` as the input argument. - G504: Import blocklist: net/http/cgi - G505: Import blocklist: crypto/sha1 - G601: Implicit memory aliasing of items from a range statement +- G602: Slice access out of bounds ### Retired rules - G105: Audit the use of math/big.Int.Exp - [CVE is fixed](https://github.com/golang/go/issues/15184) +- G307: Deferring a method which returns an error - causing more inconvenience than fixing a security issue, despite the details from this [blog post](https://www.joeshaw.org/dont-defer-close-on-writable-files/) ### Selecting rules @@ -188,7 +190,7 @@ $ gosec -exclude=G303 ./... ### CWE Mapping -Every issue detected by `gosec` is mapped to a [CWE (Common Weakness Enumeration)](http://cwe.mitre.org/data/index.html) which describes in more generic terms the vulnerability. The exact mapping can be found [here](https://github.com/securego/gosec/blob/master/issue.go#L50). +Every issue detected by `gosec` is mapped to a [CWE (Common Weakness Enumeration)](http://cwe.mitre.org/data/index.html) which describes in more generic terms the vulnerability. The exact mapping can be found [here](https://github.com/securego/gosec/blob/master/issue/issue.go#L50). ### Configuration @@ -272,31 +274,33 @@ gosec -exclude-generated ./... ### Annotating code -As with all automated detection tools, there will be cases of false positives. In cases where gosec reports a failure that has been manually verified as being safe, +As with all automated detection tools, there will be cases of false positives. +In cases where gosec reports a failure that has been manually verified as being safe, it is possible to annotate the code with a comment that starts with `#nosec`. + The `#nosec` comment should have the format `#nosec [RuleList] [-- Justification]`. -The annotation causes gosec to stop processing any further nodes within the -AST so can apply to a whole block or more granularly to a single expression. 
+The `#nosec` comment needs to be placed on the line where the warning is reported. ```go - -import "md5" //#nosec - - -func main(){ - - /* #nosec */ - if x > y { - h := md5.New() // this will also be ignored - } - +func main() { + tr := &http.Transport{ + TLSClientConfig: &tls.Config{ + InsecureSkipVerify: true, // #nosec G402 + }, + } + + client := &http.Client{Transport: tr} + _, err := client.Get("https://golang.org/") + if err != nil { + fmt.Println(err) + } } - ``` -When a specific false positive has been identified and verified as safe, you may wish to suppress only that single rule (or a specific set of rules) -within a section of code, while continuing to scan for other problems. To do this, you can list the rule(s) to be suppressed within +When a specific false positive has been identified and verified as safe, you may +wish to suppress only that single rule (or a specific set of rules) within a section of code, +while continuing to scan for other problems. To do this, you can list the rule(s) to be suppressed within the `#nosec` annotation, e.g: `/* #nosec G401 */` or `//#nosec G201 G202 G203` You could put the description or justification text for the annotation. The diff --git a/vendor/github.com/securego/gosec/v2/USERS.md b/vendor/github.com/securego/gosec/v2/USERS.md index ffc0560814..9b6e4eeee4 100644 --- a/vendor/github.com/securego/gosec/v2/USERS.md +++ b/vendor/github.com/securego/gosec/v2/USERS.md @@ -15,6 +15,7 @@ This is a list of gosec's users. Please send a pull request with your organisati 9. [PingCAP/tidb](https://github.com/pingcap/tidb) 10. [Checkmarx](https://www.checkmarx.com/) 11. [SeatGeek](https://www.seatgeek.com/) +12. [reMarkable](https://remarkable.com) ## Projects diff --git a/vendor/github.com/securego/gosec/v2/action.yml b/vendor/github.com/securego/gosec/v2/action.yml index aab6c8039d..aba47b60ce 100644 --- a/vendor/github.com/securego/gosec/v2/action.yml +++ b/vendor/github.com/securego/gosec/v2/action.yml @@ -10,7 +10,7 @@ inputs: runs: using: 'docker' - image: 'docker://securego/gosec' + image: 'docker://securego/gosec:2.18.1' args: - ${{ inputs.args }} diff --git a/vendor/github.com/securego/gosec/v2/analyzer.go b/vendor/github.com/securego/gosec/v2/analyzer.go index 0f9fef2d12..1fd1f5649a 100644 --- a/vendor/github.com/securego/gosec/v2/analyzer.go +++ b/vendor/github.com/securego/gosec/v2/analyzer.go @@ -31,6 +31,10 @@ import ( "strings" "sync" + "github.com/securego/gosec/v2/analyzers" + "github.com/securego/gosec/v2/issue" + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/buildssa" "golang.org/x/tools/go/packages" ) @@ -42,7 +46,10 @@ const LoadMode = packages.NeedName | packages.NeedTypes | packages.NeedTypesSizes | packages.NeedTypesInfo | - packages.NeedSyntax + packages.NeedSyntax | + packages.NeedModule | + packages.NeedEmbedFiles | + packages.NeedEmbedPatterns const externalSuppressionJustification = "Globally suppressed." 
@@ -50,9 +57,83 @@ const aliasOfAllRules = "*" var generatedCodePattern = regexp.MustCompile(`^// Code generated .* DO NOT EDIT\.$`) +type ignore struct { + start int + end int + suppressions map[string][]issue.SuppressionInfo +} + +type ignores map[string][]ignore + +func newIgnores() ignores { + return make(map[string][]ignore) +} + +func (i ignores) parseLine(line string) (int, int) { + parts := strings.Split(line, "-") + start, err := strconv.Atoi(parts[0]) + if err != nil { + start = 0 + } + end := start + if len(parts) > 1 { + if e, err := strconv.Atoi(parts[1]); err == nil { + end = e + } + } + return start, end +} + +func (i ignores) add(file string, line string, suppressions map[string]issue.SuppressionInfo) { + is := []ignore{} + if _, ok := i[file]; ok { + is = i[file] + } + found := false + start, end := i.parseLine(line) + for _, ig := range is { + if ig.start <= start && ig.end >= end { + found = true + for r, s := range suppressions { + ss, ok := ig.suppressions[r] + if !ok { + ss = []issue.SuppressionInfo{} + } + ss = append(ss, s) + ig.suppressions[r] = ss + } + break + } + } + if !found { + ig := ignore{ + start: start, + end: end, + suppressions: map[string][]issue.SuppressionInfo{}, + } + for r, s := range suppressions { + ig.suppressions[r] = []issue.SuppressionInfo{s} + } + is = append(is, ig) + } + i[file] = is +} + +func (i ignores) get(file string, line string) map[string][]issue.SuppressionInfo { + start, end := i.parseLine(line) + if is, ok := i[file]; ok { + for _, i := range is { + if i.start <= start && i.end >= end { + return i.suppressions + } + } + } + return map[string][]issue.SuppressionInfo{} +} + // The Context is populated with data parsed from the source code as it is scanned. // It is passed through to all rule functions as they are called. Rules may use -// this data in conjunction withe the encountered AST node. +// this data in conjunction with the encountered AST node. type Context struct { FileSet *token.FileSet Comments ast.CommentMap @@ -60,12 +141,24 @@ type Context struct { Pkg *types.Package PkgFiles []*ast.File Root *ast.File - Config Config Imports *ImportTracker - Ignores []map[string][]SuppressionInfo + Config Config + Ignores ignores PassedValues map[string]interface{} } +// GetFileAtNodePos returns the file at the node position in the file set available in the context. +func (ctx *Context) GetFileAtNodePos(node ast.Node) *token.File { + return ctx.FileSet.File(node.Pos()) +} + +// NewIssue creates a new issue +func (ctx *Context) NewIssue(node ast.Node, ruleID, desc string, + severity, confidence issue.Score, +) *issue.Issue { + return issue.New(ctx.GetFileAtNodePos(node), node, ruleID, desc, severity, confidence) +} + // Metrics used when reporting information about a scanning run. type Metrics struct { NumFiles int `json:"files"` @@ -82,7 +175,7 @@ type Analyzer struct { context *Context config Config logger *log.Logger - issues []*Issue + issues []*issue.Issue stats *Metrics errors map[string][]Error // keys are file paths; values are the golang errors in those files tests bool @@ -90,13 +183,8 @@ type Analyzer struct { showIgnored bool trackSuppressions bool concurrency int -} - -// SuppressionInfo object is to record the kind and the justification that used -// to suppress violations. -type SuppressionInfo struct { - Kind string `json:"kind"` - Justification string `json:"justification"` + analyzerList []*analysis.Analyzer + mu sync.Mutex } // NewAnalyzer builds a new analyzer. 
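As a side note on the new `ignores` bookkeeping above: suppressions are keyed per file by a line value that may be a single line (`"42"`) or a range (`"40-45"`), and a lookup succeeds when the queried range falls inside a recorded one. The standalone sketch below mirrors that parse-and-contain logic under invented names; it is not gosec's API.

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseLineRange mimics the "start" or "start-end" format: a bare number
// means a single line, "a-b" means an inclusive range.
func parseLineRange(line string) (start, end int) {
	parts := strings.Split(line, "-")
	start, err := strconv.Atoi(parts[0])
	if err != nil {
		start = 0
	}
	end = start
	if len(parts) > 1 {
		if e, err := strconv.Atoi(parts[1]); err == nil {
			end = e
		}
	}
	return start, end
}

// covers reports whether the recorded range fully contains the queried one,
// matching the containment test used when looking up suppressions.
func covers(recorded, queried string) bool {
	rs, re := parseLineRange(recorded)
	qs, qe := parseLineRange(queried)
	return rs <= qs && re >= qe
}

func main() {
	fmt.Println(covers("40-45", "42"))    // true: line 42 sits inside 40-45
	fmt.Println(covers("40-45", "44-46")) // false: line 46 falls outside the range
	fmt.Println(covers("42", "42"))       // true: single-line match
}
```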
@@ -119,13 +207,14 @@ func NewAnalyzer(conf Config, tests bool, excludeGenerated bool, trackSuppressio context: &Context{}, config: conf, logger: logger, - issues: make([]*Issue, 0, 16), + issues: make([]*issue.Issue, 0, 16), stats: &Metrics{}, errors: make(map[string][]Error), tests: tests, concurrency: concurrency, excludeGenerated: excludeGenerated, trackSuppressions: trackSuppressions, + analyzerList: analyzers.BuildDefaultAnalyzers(), } } @@ -172,9 +261,9 @@ func (gosec *Analyzer) Process(buildTags []string, packagePaths ...string) error for { select { case s := <-j: - packages, err := gosec.load(s, config) + pkgs, err := gosec.load(s, config) select { - case r <- result{pkgPath: s, pkgs: packages, err: err}: + case r <- result{pkgPath: s, pkgs: pkgs, err: err}: case <-quit: // we've been told to stop, probably an error while // processing a previous result. @@ -216,7 +305,8 @@ func (gosec *Analyzer) Process(buildTags []string, packagePaths ...string) error wg.Wait() // wait for the goroutines to stop return fmt.Errorf("parsing errors in pkg %q: %w", pkg.Name, err) } - gosec.Check(pkg) + gosec.CheckRules(pkg) + gosec.CheckAnalyzers(pkg) } } } @@ -235,7 +325,9 @@ func (gosec *Analyzer) load(pkgPath string, conf *packages.Config) ([]*packages. // step 1/3 create build context. buildD := build.Default // step 2/3: add build tags to get env dependent files into basePackage. + gosec.mu.Lock() buildD.BuildTags = conf.BuildFlags + gosec.mu.Unlock() basePackage, err := buildD.ImportDir(pkgPath, build.ImportComment) if err != nil { return []*packages.Package{}, fmt.Errorf("importing dir %q: %w", pkgPath, err) @@ -259,7 +351,9 @@ func (gosec *Analyzer) load(pkgPath string, conf *packages.Config) ([]*packages. } // step 3/3 remove build tags from conf to proceed build correctly. + gosec.mu.Lock() conf.BuildFlags = nil + defer gosec.mu.Unlock() pkgs, err := packages.Load(conf, packageFiles...) if err != nil { return []*packages.Package{}, fmt.Errorf("loading files from package %q: %w", pkgPath, err) @@ -267,8 +361,8 @@ func (gosec *Analyzer) load(pkgPath string, conf *packages.Config) ([]*packages. return pkgs, nil } -// Check runs analysis on the given package -func (gosec *Analyzer) Check(pkg *packages.Package) { +// CheckRules runs analysis on the given package. +func (gosec *Analyzer) CheckRules(pkg *packages.Package) { gosec.logger.Println("Checking package:", pkg.Name) for _, file := range pkg.Syntax { fp := pkg.Fset.File(file.Pos()) @@ -296,14 +390,93 @@ func (gosec *Analyzer) Check(pkg *packages.Package) { gosec.context.Pkg = pkg.Types gosec.context.PkgFiles = pkg.Syntax gosec.context.Imports = NewImportTracker() - gosec.context.Imports.TrackFile(file) gosec.context.PassedValues = make(map[string]interface{}) + gosec.context.Ignores = newIgnores() + gosec.updateIgnores() ast.Walk(gosec, file) gosec.stats.NumFiles++ gosec.stats.NumLines += pkg.Fset.File(file.Pos()).LineCount() } } +// CheckAnalyzers runs analyzers on a given package. 
+func (gosec *Analyzer) CheckAnalyzers(pkg *packages.Package) { + ssaResult, err := gosec.buildSSA(pkg) + if err != nil || ssaResult == nil { + gosec.logger.Printf("Error building the SSA representation of the package %q: %s", pkg.Name, err) + return + } + + resultMap := map[*analysis.Analyzer]interface{}{ + buildssa.Analyzer: &analyzers.SSAAnalyzerResult{ + Config: gosec.Config(), + Logger: gosec.logger, + SSA: ssaResult.(*buildssa.SSA), + }, + } + for _, analyzer := range gosec.analyzerList { + pass := &analysis.Pass{ + Analyzer: analyzer, + Fset: pkg.Fset, + Files: pkg.Syntax, + OtherFiles: pkg.OtherFiles, + IgnoredFiles: pkg.IgnoredFiles, + Pkg: pkg.Types, + TypesInfo: pkg.TypesInfo, + TypesSizes: pkg.TypesSizes, + ResultOf: resultMap, + Report: func(d analysis.Diagnostic) {}, + ImportObjectFact: nil, + ExportObjectFact: nil, + ImportPackageFact: nil, + ExportPackageFact: nil, + AllObjectFacts: nil, + AllPackageFacts: nil, + } + result, err := pass.Analyzer.Run(pass) + if err != nil { + gosec.logger.Printf("Error running analyzer %s: %s\n", analyzer.Name, err) + continue + } + if result != nil { + if passIssues, ok := result.([]*issue.Issue); ok { + for _, iss := range passIssues { + gosec.updateIssues(iss) + } + } + } + } +} + +// buildSSA runs the SSA pass which builds the SSA representation of the package. It handles gracefully any panic. +func (gosec *Analyzer) buildSSA(pkg *packages.Package) (interface{}, error) { + defer func() { + if r := recover(); r != nil { + gosec.logger.Printf("Panic when running SSA analyser on package: %s", pkg.Name) + } + }() + ssaPass := &analysis.Pass{ + Analyzer: buildssa.Analyzer, + Fset: pkg.Fset, + Files: pkg.Syntax, + OtherFiles: pkg.OtherFiles, + IgnoredFiles: pkg.IgnoredFiles, + Pkg: pkg.Types, + TypesInfo: pkg.TypesInfo, + TypesSizes: pkg.TypesSizes, + ResultOf: nil, + Report: nil, + ImportObjectFact: nil, + ExportObjectFact: nil, + ImportPackageFact: nil, + ExportPackageFact: nil, + AllObjectFacts: nil, + AllPackageFacts: nil, + } + + return ssaPass.Analyzer.Run(ssaPass) +} + func isGeneratedFile(file *ast.File) bool { for _, comment := range file.Comments { for _, row := range comment.List { @@ -365,14 +538,21 @@ func (gosec *Analyzer) AppendError(file string, err error) { } // ignore a node (and sub-tree) if it is tagged with a nosec tag comment -func (gosec *Analyzer) ignore(n ast.Node) map[string]SuppressionInfo { +func (gosec *Analyzer) ignore(n ast.Node) map[string]issue.SuppressionInfo { if groups, ok := gosec.context.Comments[n]; ok && !gosec.ignoreNosec { // Checks if an alternative for #nosec is set and, if not, uses the default. - noSecDefaultTag := "#nosec" + noSecDefaultTag, err := gosec.config.GetGlobal(Nosec) + if err != nil { + noSecDefaultTag = NoSecTag(string(Nosec)) + } else { + noSecDefaultTag = NoSecTag(noSecDefaultTag) + } noSecAlternativeTag, err := gosec.config.GetGlobal(NoSecAlternative) if err != nil { noSecAlternativeTag = noSecDefaultTag + } else { + noSecAlternativeTag = NoSecTag(noSecAlternativeTag) } for _, group := range groups { @@ -402,13 +582,13 @@ func (gosec *Analyzer) ignore(n ast.Node) map[string]SuppressionInfo { re := regexp.MustCompile(`(G\d{3})`) matches := re.FindAllStringSubmatch(directive, -1) - suppression := SuppressionInfo{ + suppression := issue.SuppressionInfo{ Kind: "inSource", Justification: justification, } // Find the rule IDs to ignore. 
- ignores := make(map[string]SuppressionInfo) + ignores := make(map[string]issue.SuppressionInfo) for _, v := range matches { ignores[v[1]] = suppression } @@ -427,86 +607,92 @@ func (gosec *Analyzer) ignore(n ast.Node) map[string]SuppressionInfo { // Visit runs the gosec visitor logic over an AST created by parsing go code. // Rule methods added with AddRule will be invoked as necessary. func (gosec *Analyzer) Visit(n ast.Node) ast.Visitor { - // If we've reached the end of this branch, pop off the ignores stack. - if n == nil { - if len(gosec.context.Ignores) > 0 { - gosec.context.Ignores = gosec.context.Ignores[1:] - } - return gosec + // Using ast.File instead of ast.ImportSpec, so that we can track all imports at once. + switch i := n.(type) { + case *ast.File: + gosec.context.Imports.TrackFile(i) } - // Get any new rule exclusions. - ignoredRules := gosec.ignore(n) - - // Now create the union of exclusions. - ignores := map[string][]SuppressionInfo{} - if len(gosec.context.Ignores) > 0 { - for k, v := range gosec.context.Ignores[0] { - ignores[k] = v + for _, rule := range gosec.ruleset.RegisteredFor(n) { + issue, err := rule.Match(n, gosec.context) + if err != nil { + file, line := GetLocation(n, gosec.context) + file = path.Base(file) + gosec.logger.Printf("Rule error: %v => %s (%s:%d)\n", reflect.TypeOf(rule), err, file, line) } + gosec.updateIssues(issue) } + return gosec +} - for ruleID, suppression := range ignoredRules { - ignores[ruleID] = append(ignores[ruleID], suppression) +func (gosec *Analyzer) updateIgnores() { + for n := range gosec.context.Comments { + gosec.updateIgnoredRulesForNode(n) } +} - // Push the new set onto the stack. - gosec.context.Ignores = append([]map[string][]SuppressionInfo{ignores}, gosec.context.Ignores...) +func (gosec *Analyzer) updateIgnoredRulesForNode(n ast.Node) { + ignoredRules := gosec.ignore(n) + if len(ignoredRules) > 0 { + if gosec.context.Ignores == nil { + gosec.context.Ignores = newIgnores() + } + line := issue.GetLine(gosec.context.FileSet.File(n.Pos()), n) + gosec.context.Ignores.add( + gosec.context.FileSet.File(n.Pos()).Name(), + line, + ignoredRules, + ) + } +} - // Track aliased and initialization imports - gosec.context.Imports.TrackImport(n) +func (gosec *Analyzer) getSuppressionsAtLineInFile(file string, line string, id string) ([]issue.SuppressionInfo, bool) { + ignoredRules := gosec.context.Ignores.get(file, line) + + // Check if the rule was specifically suppressed at this location. + generalSuppressions, generalIgnored := ignoredRules[aliasOfAllRules] + ruleSuppressions, ruleIgnored := ignoredRules[id] + ignored := generalIgnored || ruleIgnored + suppressions := append(generalSuppressions, ruleSuppressions...) + + // Track external suppressions of this rule. + if gosec.ruleset.IsRuleSuppressed(id) { + ignored = true + suppressions = append(suppressions, issue.SuppressionInfo{ + Kind: "external", + Justification: externalSuppressionJustification, + }) + } + return suppressions, ignored +} - for _, rule := range gosec.ruleset.RegisteredFor(n) { - // Check if all rules are ignored. - generalSuppressions, generalIgnored := ignores[aliasOfAllRules] - // Check if the specific rule is ignored - ruleSuppressions, ruleIgnored := ignores[rule.ID()] - - ignored := generalIgnored || ruleIgnored - suppressions := append(generalSuppressions, ruleSuppressions...) - - // Track external suppressions. 
- if gosec.ruleset.IsRuleSuppressed(rule.ID()) { - ignored = true - suppressions = append(suppressions, SuppressionInfo{ - Kind: "external", - Justification: externalSuppressionJustification, - }) +func (gosec *Analyzer) updateIssues(issue *issue.Issue) { + if issue != nil { + suppressions, ignored := gosec.getSuppressionsAtLineInFile(issue.File, issue.Line, issue.RuleID) + if gosec.showIgnored { + issue.NoSec = ignored } - - issue, err := rule.Match(n, gosec.context) - if err != nil { - file, line := GetLocation(n, gosec.context) - file = path.Base(file) - gosec.logger.Printf("Rule error: %v => %s (%s:%d)\n", reflect.TypeOf(rule), err, file, line) + if !ignored || !gosec.showIgnored { + gosec.stats.NumFound++ } - if issue != nil { - if gosec.showIgnored { - issue.NoSec = ignored - } - if !ignored || !gosec.showIgnored { - gosec.stats.NumFound++ - } - if ignored && gosec.trackSuppressions { - issue.WithSuppressions(suppressions) - gosec.issues = append(gosec.issues, issue) - } else if !ignored || gosec.showIgnored || gosec.ignoreNosec { - gosec.issues = append(gosec.issues, issue) - } + if ignored && gosec.trackSuppressions { + issue.WithSuppressions(suppressions) + gosec.issues = append(gosec.issues, issue) + } else if !ignored || gosec.showIgnored || gosec.ignoreNosec { + gosec.issues = append(gosec.issues, issue) } } - return gosec } // Report returns the current issues discovered and the metrics about the scan -func (gosec *Analyzer) Report() ([]*Issue, *Metrics, map[string][]Error) { +func (gosec *Analyzer) Report() ([]*issue.Issue, *Metrics, map[string][]Error) { return gosec.issues, gosec.stats, gosec.errors } // Reset clears state such as context, issues and metrics from the configured analyzer func (gosec *Analyzer) Reset() { gosec.context = &Context{} - gosec.issues = make([]*Issue, 0, 16) + gosec.issues = make([]*issue.Issue, 0, 16) gosec.stats = &Metrics{} gosec.ruleset = NewRuleSet() } diff --git a/vendor/github.com/securego/gosec/v2/analyzers/slice_bounds.go b/vendor/github.com/securego/gosec/v2/analyzers/slice_bounds.go new file mode 100644 index 0000000000..08a55eb429 --- /dev/null +++ b/vendor/github.com/securego/gosec/v2/analyzers/slice_bounds.go @@ -0,0 +1,386 @@ +// (c) Copyright gosec's authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package analyzers + +import ( + "errors" + "fmt" + "go/token" + "regexp" + "strconv" + "strings" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/buildssa" + "golang.org/x/tools/go/ssa" + + "github.com/securego/gosec/v2/issue" +) + +type bound int + +const ( + lowerUnbounded bound = iota + upperUnbounded + unbounded + upperBounded +) + +const maxDepth = 20 + +func newSliceBoundsAnalyzer(id string, description string) *analysis.Analyzer { + return &analysis.Analyzer{ + Name: id, + Doc: description, + Run: runSliceBounds, + Requires: []*analysis.Analyzer{buildssa.Analyzer}, + } +} + +func runSliceBounds(pass *analysis.Pass) (interface{}, error) { + ssaResult, err := getSSAResult(pass) + if err != nil { + return nil, err + } + + issues := map[ssa.Instruction]*issue.Issue{} + ifs := map[ssa.If]*ssa.BinOp{} + for _, mcall := range ssaResult.SSA.SrcFuncs { + for _, block := range mcall.DomPreorder() { + for _, instr := range block.Instrs { + switch instr := instr.(type) { + case *ssa.Alloc: + sliceCap, err := extractSliceCapFromAlloc(instr.String()) + if err != nil { + break + } + allocRefs := instr.Referrers() + if allocRefs == nil { + break + } + for _, instr := range *allocRefs { + if slice, ok := instr.(*ssa.Slice); ok { + if _, ok := slice.X.(*ssa.Alloc); ok { + if slice.Parent() != nil { + l, h := extractSliceBounds(slice) + newCap := computeSliceNewCap(l, h, sliceCap) + violations := []ssa.Instruction{} + trackSliceBounds(0, newCap, slice, &violations, ifs) + for _, s := range violations { + switch s := s.(type) { + case *ssa.Slice: + issue := newIssue( + pass.Analyzer.Name, + "slice bounds out of range", + pass.Fset, + s.Pos(), + issue.Low, + issue.High) + issues[s] = issue + case *ssa.IndexAddr: + issue := newIssue( + pass.Analyzer.Name, + "slice index out of range", + pass.Fset, + s.Pos(), + issue.Low, + issue.High) + issues[s] = issue + } + } + } + } + } + } + } + } + } + } + + for ifref, binop := range ifs { + bound, value, err := extractBinOpBound(binop) + if err != nil { + continue + } + for i, block := range ifref.Block().Succs { + if i == 1 { + bound = invBound(bound) + } + for _, instr := range block.Instrs { + if _, ok := issues[instr]; ok { + switch bound { + case lowerUnbounded: + break + case upperUnbounded, unbounded: + delete(issues, instr) + case upperBounded: + switch tinstr := instr.(type) { + case *ssa.Slice: + lower, upper := extractSliceBounds(tinstr) + if isSliceInsideBounds(0, value, lower, upper) { + delete(issues, instr) + } + case *ssa.IndexAddr: + indexValue, err := extractIntValue(tinstr.Index.String()) + if err != nil { + break + } + if isSliceIndexInsideBounds(0, value, indexValue) { + delete(issues, instr) + } + } + } + } + } + } + } + + foundIssues := []*issue.Issue{} + for _, issue := range issues { + foundIssues = append(foundIssues, issue) + } + if len(foundIssues) > 0 { + return foundIssues, nil + } + return nil, nil +} + +func trackSliceBounds(depth int, sliceCap int, slice ssa.Node, violations *[]ssa.Instruction, ifs map[ssa.If]*ssa.BinOp) { + if depth == maxDepth { + return + } + depth++ + if violations == nil { + violations = &[]ssa.Instruction{} + } + referrers := slice.Referrers() + if referrers != nil { + for _, refinstr := range *referrers { + switch refinstr := refinstr.(type) { + case *ssa.Slice: + checkAllSlicesBounds(depth, sliceCap, refinstr, violations, ifs) + switch refinstr.X.(type) { + case *ssa.Alloc, *ssa.Parameter: + l, h := extractSliceBounds(refinstr) + newCap := computeSliceNewCap(l, h, sliceCap) + 
trackSliceBounds(depth, newCap, refinstr, violations, ifs) + } + case *ssa.IndexAddr: + indexValue, err := extractIntValue(refinstr.Index.String()) + if err == nil && !isSliceIndexInsideBounds(0, sliceCap, indexValue) { + *violations = append(*violations, refinstr) + } + case *ssa.Call: + if ifref, cond := extractSliceIfLenCondition(refinstr); ifref != nil && cond != nil { + ifs[*ifref] = cond + } else { + parPos := -1 + for pos, arg := range refinstr.Call.Args { + if a, ok := arg.(*ssa.Slice); ok && a == slice { + parPos = pos + } + } + if fn, ok := refinstr.Call.Value.(*ssa.Function); ok { + if len(fn.Params) > parPos && parPos > -1 { + param := fn.Params[parPos] + trackSliceBounds(depth, sliceCap, param, violations, ifs) + } + } + } + } + } + } +} + +func checkAllSlicesBounds(depth int, sliceCap int, slice *ssa.Slice, violations *[]ssa.Instruction, ifs map[ssa.If]*ssa.BinOp) { + if depth == maxDepth { + return + } + depth++ + if violations == nil { + violations = &[]ssa.Instruction{} + } + sliceLow, sliceHigh := extractSliceBounds(slice) + if !isSliceInsideBounds(0, sliceCap, sliceLow, sliceHigh) { + *violations = append(*violations, slice) + } + switch slice.X.(type) { + case *ssa.Alloc, *ssa.Parameter, *ssa.Slice: + l, h := extractSliceBounds(slice) + newCap := computeSliceNewCap(l, h, sliceCap) + trackSliceBounds(depth, newCap, slice, violations, ifs) + } + + references := slice.Referrers() + if references == nil { + return + } + for _, ref := range *references { + switch s := ref.(type) { + case *ssa.Slice: + checkAllSlicesBounds(depth, sliceCap, s, violations, ifs) + switch s.X.(type) { + case *ssa.Alloc, *ssa.Parameter: + l, h := extractSliceBounds(s) + newCap := computeSliceNewCap(l, h, sliceCap) + trackSliceBounds(depth, newCap, s, violations, ifs) + } + } + } +} + +func extractSliceIfLenCondition(call *ssa.Call) (*ssa.If, *ssa.BinOp) { + if builtInLen, ok := call.Call.Value.(*ssa.Builtin); ok { + if builtInLen.Name() == "len" { + refs := call.Referrers() + if refs != nil { + for _, ref := range *refs { + if binop, ok := ref.(*ssa.BinOp); ok { + binoprefs := binop.Referrers() + for _, ref := range *binoprefs { + if ifref, ok := ref.(*ssa.If); ok { + return ifref, binop + } + } + } + } + } + } + } + return nil, nil +} + +func computeSliceNewCap(l, h, oldCap int) int { + if l == 0 && h == 0 { + return oldCap + } + if l > 0 && h == 0 { + return oldCap - l + } + if l == 0 && h > 0 { + return h + } + return h - l +} + +func invBound(bound bound) bound { + switch bound { + case lowerUnbounded: + return upperUnbounded + case upperUnbounded: + return lowerUnbounded + case upperBounded: + return unbounded + case unbounded: + return upperBounded + default: + return unbounded + } +} + +func extractBinOpBound(binop *ssa.BinOp) (bound, int, error) { + if binop.X != nil { + if x, ok := binop.X.(*ssa.Const); ok { + value, err := strconv.Atoi(x.Value.String()) + if err != nil { + return lowerUnbounded, value, err + } + switch binop.Op { + case token.LSS, token.LEQ: + return upperUnbounded, value, nil + case token.GTR, token.GEQ: + return lowerUnbounded, value, nil + case token.EQL: + return upperBounded, value, nil + case token.NEQ: + return unbounded, value, nil + } + } + } + if binop.Y != nil { + if y, ok := binop.Y.(*ssa.Const); ok { + value, err := strconv.Atoi(y.Value.String()) + if err != nil { + return lowerUnbounded, value, err + } + switch binop.Op { + case token.LSS, token.LEQ: + return lowerUnbounded, value, nil + case token.GTR, token.GEQ: + return upperUnbounded, value, nil + case 
token.EQL: + return upperBounded, value, nil + case token.NEQ: + return unbounded, value, nil + } + } + } + return lowerUnbounded, 0, fmt.Errorf("unable to extract constant from binop") +} + +func isSliceIndexInsideBounds(l, h int, index int) bool { + return (l <= index && index < h) +} + +func isSliceInsideBounds(l, h int, cl, ch int) bool { + return (l <= cl && h >= ch) && (l <= ch && h >= cl) +} + +func extractSliceBounds(slice *ssa.Slice) (int, int) { + var low int + if slice.Low != nil { + l, err := extractIntValue(slice.Low.String()) + if err == nil { + low = l + } + } + var high int + if slice.High != nil { + h, err := extractIntValue(slice.High.String()) + if err == nil { + high = h + } + } + return low, high +} + +func extractIntValue(value string) (int, error) { + parts := strings.Split(value, ":") + if len(parts) != 2 { + return 0, fmt.Errorf("invalid value: %s", value) + } + if parts[1] != "int" { + return 0, fmt.Errorf("invalid value: %s", value) + } + return strconv.Atoi(parts[0]) +} + +func extractSliceCapFromAlloc(instr string) (int, error) { + re := regexp.MustCompile(`new \[(\d+)\]*`) + var sliceCap int + matches := re.FindAllStringSubmatch(instr, -1) + if matches == nil { + return sliceCap, errors.New("no slice cap found") + } + + if len(matches) > 0 { + m := matches[0] + if len(m) > 1 { + return strconv.Atoi(m[1]) + } + } + + return 0, errors.New("no slice cap found") +} diff --git a/vendor/github.com/securego/gosec/v2/analyzers/util.go b/vendor/github.com/securego/gosec/v2/analyzers/util.go new file mode 100644 index 0000000000..5941184aa2 --- /dev/null +++ b/vendor/github.com/securego/gosec/v2/analyzers/util.go @@ -0,0 +1,98 @@ +// (c) Copyright gosec's authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package analyzers + +import ( + "fmt" + "go/token" + "log" + "os" + "strconv" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/buildssa" + + "github.com/securego/gosec/v2/issue" +) + +// SSAAnalyzerResult contains various information returned by the +// SSA analysis along with some configuration +type SSAAnalyzerResult struct { + Config map[string]interface{} + Logger *log.Logger + SSA *buildssa.SSA +} + +// BuildDefaultAnalyzers returns the default list of analyzers +func BuildDefaultAnalyzers() []*analysis.Analyzer { + return []*analysis.Analyzer{ + newSliceBoundsAnalyzer("G602", "Possible slice bounds out of range"), + } +} + +// getSSAResult retrieves the SSA result from analysis pass +func getSSAResult(pass *analysis.Pass) (*SSAAnalyzerResult, error) { + result, ok := pass.ResultOf[buildssa.Analyzer] + if !ok { + return nil, fmt.Errorf("no SSA result found in the analysis pass") + } + ssaResult, ok := result.(*SSAAnalyzerResult) + if !ok { + return nil, fmt.Errorf("the analysis pass result is not of type SSA") + } + return ssaResult, nil +} + +// newIssue creates a new gosec issue +func newIssue(analyzerID string, desc string, fileSet *token.FileSet, + pos token.Pos, severity, confidence issue.Score, +) *issue.Issue { + file := fileSet.File(pos) + line := file.Line(pos) + col := file.Position(pos).Column + + return &issue.Issue{ + RuleID: analyzerID, + File: file.Name(), + Line: strconv.Itoa(line), + Col: strconv.Itoa(col), + Severity: severity, + Confidence: confidence, + What: desc, + Cwe: issue.GetCweByRule(analyzerID), + Code: issueCodeSnippet(fileSet, pos), + } +} + +func issueCodeSnippet(fileSet *token.FileSet, pos token.Pos) string { + file := fileSet.File(pos) + + start := (int64)(file.Line(pos)) + if start-issue.SnippetOffset > 0 { + start = start - issue.SnippetOffset + } + end := (int64)(file.Line(pos)) + end = end + issue.SnippetOffset + + var code string + if file, err := os.Open(file.Name()); err == nil { + defer file.Close() // #nosec + code, err = issue.CodeSnippet(file, start, end) + if err != nil { + return err.Error() + } + } + return code +} diff --git a/vendor/github.com/securego/gosec/v2/config.go b/vendor/github.com/securego/gosec/v2/config.go index 443d45f78b..9cbb7a7134 100644 --- a/vendor/github.com/securego/gosec/v2/config.go +++ b/vendor/github.com/securego/gosec/v2/config.go @@ -29,8 +29,15 @@ const ( ExcludeRules GlobalOption = "exclude" // IncludeRules global option for should be load IncludeRules GlobalOption = "include" + // SSA global option to enable go analysis framework with SSA support + SSA GlobalOption = "ssa" ) +// NoSecTag returns the tag used to disable gosec for a line of code. +func NoSecTag(tag string) string { + return fmt.Sprintf("%s%s", "#", tag) +} + // Config is used to provide configuration and customization to each of the rules. 
type Config map[string]interface{} diff --git a/vendor/github.com/securego/gosec/v2/cwe/data.go b/vendor/github.com/securego/gosec/v2/cwe/data.go index 0e377b96a6..79a6b9d231 100644 --- a/vendor/github.com/securego/gosec/v2/cwe/data.go +++ b/vendor/github.com/securego/gosec/v2/cwe/data.go @@ -1,7 +1,5 @@ package cwe -import "fmt" - const ( // Acronym is the acronym of CWE Acronym = "CWE" @@ -13,134 +11,128 @@ const ( Organization = "MITRE" // Description the description of CWE Description = "The MITRE Common Weakness Enumeration" -) - -var ( // InformationURI link to the published CWE PDF - InformationURI = fmt.Sprintf("https://cwe.mitre.org/data/published/cwe_v%s.pdf/", Version) + InformationURI = "https://cwe.mitre.org/data/published/cwe_v" + Version + ".pdf/" // DownloadURI link to the zipped XML of the CWE list - DownloadURI = fmt.Sprintf("https://cwe.mitre.org/data/xml/cwec_v%s.xml.zip", Version) - - data = map[string]*Weakness{} - - weaknesses = []*Weakness{ - { - ID: "118", - Description: "The software does not restrict or incorrectly restricts operations within the boundaries of a resource that is accessed using an index or pointer, such as memory or files.", - Name: "Incorrect Access of Indexable Resource ('Range Error')", - }, - { - ID: "190", - Description: "The software performs a calculation that can produce an integer overflow or wraparound, when the logic assumes that the resulting value will always be larger than the original value. This can introduce other weaknesses when the calculation is used for resource management or execution control.", - Name: "Integer Overflow or Wraparound", - }, - { - ID: "200", - Description: "The product exposes sensitive information to an actor that is not explicitly authorized to have access to that information.", - Name: "Exposure of Sensitive Information to an Unauthorized Actor", - }, - { - ID: "22", - Description: "The software uses external input to construct a pathname that is intended to identify a file or directory that is located underneath a restricted parent directory, but the software does not properly neutralize special elements within the pathname that can cause the pathname to resolve to a location that is outside of the restricted directory.", - Name: "Improper Limitation of a Pathname to a Restricted Directory ('Path Traversal')", - }, - { - ID: "242", - Description: "The program calls a function that can never be guaranteed to work safely.", - Name: "Use of Inherently Dangerous Function", - }, - { - ID: "276", - Description: "During installation, installed file permissions are set to allow anyone to modify those files.", - Name: "Incorrect Default Permissions", - }, - { - ID: "295", - Description: "The software does not validate, or incorrectly validates, a certificate.", - Name: "Improper Certificate Validation", - }, - { - ID: "310", - Description: "Weaknesses in this category are related to the design and implementation of data confidentiality and integrity. Frequently these deal with the use of encoding techniques, encryption libraries, and hashing algorithms. 
The weaknesses in this category could lead to a degradation of the quality data if they are not addressed.", - Name: "Cryptographic Issues", - }, - { - ID: "322", - Description: "The software performs a key exchange with an actor without verifying the identity of that actor.", - Name: "Key Exchange without Entity Authentication", - }, - { - ID: "326", - Description: "The software stores or transmits sensitive data using an encryption scheme that is theoretically sound, but is not strong enough for the level of protection required.", - Name: "Inadequate Encryption Strength", - }, - { - ID: "327", - Description: "The use of a broken or risky cryptographic algorithm is an unnecessary risk that may result in the exposure of sensitive information.", - Name: "Use of a Broken or Risky Cryptographic Algorithm", - }, - { - ID: "338", - Description: "The product uses a Pseudo-Random Number Generator (PRNG) in a security context, but the PRNG's algorithm is not cryptographically strong.", - Name: "Use of Cryptographically Weak Pseudo-Random Number Generator (PRNG)", - }, - { - ID: "377", - Description: "Creating and using insecure temporary files can leave application and system data vulnerable to attack.", - Name: "Insecure Temporary File", - }, - { - ID: "400", - Description: "The software does not properly control the allocation and maintenance of a limited resource, thereby enabling an actor to influence the amount of resources consumed, eventually leading to the exhaustion of available resources.", - Name: "Uncontrolled Resource Consumption", - }, - { - ID: "409", - Description: "The software does not handle or incorrectly handles a compressed input with a very high compression ratio that produces a large output.", - Name: "Improper Handling of Highly Compressed Data (Data Amplification)", - }, - { - ID: "703", - Description: "The software does not properly anticipate or handle exceptional conditions that rarely occur during normal operation of the software.", - Name: "Improper Check or Handling of Exceptional Conditions", - }, - { - ID: "78", - Description: "The software constructs all or part of an OS command using externally-influenced input from an upstream component, but it does not neutralize or incorrectly neutralizes special elements that could modify the intended OS command when it is sent to a downstream component.", - Name: "Improper Neutralization of Special Elements used in an OS Command ('OS Command Injection')", - }, - { - ID: "79", - Description: "The software does not neutralize or incorrectly neutralizes user-controllable input before it is placed in output that is used as a web page that is served to other users.", - Name: "Improper Neutralization of Input During Web Page Generation ('Cross-site Scripting')", - }, - { - ID: "798", - Description: "The software contains hard-coded credentials, such as a password or cryptographic key, which it uses for its own inbound authentication, outbound communication to external components, or encryption of internal data.", - Name: "Use of Hard-coded Credentials", - }, - { - ID: "88", - Description: "The software constructs a string for a command to executed by a separate component\nin another control sphere, but it does not properly delimit the\nintended arguments, options, or switches within that command string.", - Name: "Improper Neutralization of Argument Delimiters in a Command ('Argument Injection')", - }, - { - ID: "89", - Description: "The software constructs all or part of an SQL command using externally-influenced input from an 
upstream component, but it does not neutralize or incorrectly neutralizes special elements that could modify the intended SQL command when it is sent to a downstream component.", - Name: "Improper Neutralization of Special Elements used in an SQL Command ('SQL Injection')", - }, - } + DownloadURI = "https://cwe.mitre.org/data/xml/cwec_v" + Version + ".xml.zip" ) -func init() { - for _, weakness := range weaknesses { - data[weakness.ID] = weakness - } +var idWeaknesses = map[string]*Weakness{ + "118": { + ID: "118", + Description: "The software does not restrict or incorrectly restricts operations within the boundaries of a resource that is accessed using an index or pointer, such as memory or files.", + Name: "Incorrect Access of Indexable Resource ('Range Error')", + }, + "190": { + ID: "190", + Description: "The software performs a calculation that can produce an integer overflow or wraparound, when the logic assumes that the resulting value will always be larger than the original value. This can introduce other weaknesses when the calculation is used for resource management or execution control.", + Name: "Integer Overflow or Wraparound", + }, + "200": { + ID: "200", + Description: "The product exposes sensitive information to an actor that is not explicitly authorized to have access to that information.", + Name: "Exposure of Sensitive Information to an Unauthorized Actor", + }, + "22": { + ID: "22", + Description: "The software uses external input to construct a pathname that is intended to identify a file or directory that is located underneath a restricted parent directory, but the software does not properly neutralize special elements within the pathname that can cause the pathname to resolve to a location that is outside of the restricted directory.", + Name: "Improper Limitation of a Pathname to a Restricted Directory ('Path Traversal')", + }, + "242": { + ID: "242", + Description: "The program calls a function that can never be guaranteed to work safely.", + Name: "Use of Inherently Dangerous Function", + }, + "276": { + ID: "276", + Description: "During installation, installed file permissions are set to allow anyone to modify those files.", + Name: "Incorrect Default Permissions", + }, + "295": { + ID: "295", + Description: "The software does not validate, or incorrectly validates, a certificate.", + Name: "Improper Certificate Validation", + }, + "310": { + ID: "310", + Description: "Weaknesses in this category are related to the design and implementation of data confidentiality and integrity. Frequently these deal with the use of encoding techniques, encryption libraries, and hashing algorithms. 
The weaknesses in this category could lead to a degradation of the quality data if they are not addressed.", + Name: "Cryptographic Issues", + }, + "322": { + ID: "322", + Description: "The software performs a key exchange with an actor without verifying the identity of that actor.", + Name: "Key Exchange without Entity Authentication", + }, + "326": { + ID: "326", + Description: "The software stores or transmits sensitive data using an encryption scheme that is theoretically sound, but is not strong enough for the level of protection required.", + Name: "Inadequate Encryption Strength", + }, + "327": { + ID: "327", + Description: "The use of a broken or risky cryptographic algorithm is an unnecessary risk that may result in the exposure of sensitive information.", + Name: "Use of a Broken or Risky Cryptographic Algorithm", + }, + "338": { + ID: "338", + Description: "The product uses a Pseudo-Random Number Generator (PRNG) in a security context, but the PRNG's algorithm is not cryptographically strong.", + Name: "Use of Cryptographically Weak Pseudo-Random Number Generator (PRNG)", + }, + "377": { + ID: "377", + Description: "Creating and using insecure temporary files can leave application and system data vulnerable to attack.", + Name: "Insecure Temporary File", + }, + "400": { + ID: "400", + Description: "The software does not properly control the allocation and maintenance of a limited resource, thereby enabling an actor to influence the amount of resources consumed, eventually leading to the exhaustion of available resources.", + Name: "Uncontrolled Resource Consumption", + }, + "409": { + ID: "409", + Description: "The software does not handle or incorrectly handles a compressed input with a very high compression ratio that produces a large output.", + Name: "Improper Handling of Highly Compressed Data (Data Amplification)", + }, + "703": { + ID: "703", + Description: "The software does not properly anticipate or handle exceptional conditions that rarely occur during normal operation of the software.", + Name: "Improper Check or Handling of Exceptional Conditions", + }, + "78": { + ID: "78", + Description: "The software constructs all or part of an OS command using externally-influenced input from an upstream component, but it does not neutralize or incorrectly neutralizes special elements that could modify the intended OS command when it is sent to a downstream component.", + Name: "Improper Neutralization of Special Elements used in an OS Command ('OS Command Injection')", + }, + "79": { + ID: "79", + Description: "The software does not neutralize or incorrectly neutralizes user-controllable input before it is placed in output that is used as a web page that is served to other users.", + Name: "Improper Neutralization of Input During Web Page Generation ('Cross-site Scripting')", + }, + "798": { + ID: "798", + Description: "The software contains hard-coded credentials, such as a password or cryptographic key, which it uses for its own inbound authentication, outbound communication to external components, or encryption of internal data.", + Name: "Use of Hard-coded Credentials", + }, + "88": { + ID: "88", + Description: "The software constructs a string for a command to executed by a separate component\nin another control sphere, but it does not properly delimit the\nintended arguments, options, or switches within that command string.", + Name: "Improper Neutralization of Argument Delimiters in a Command ('Argument Injection')", + }, + "89": { + ID: "89", + Description: "The 
software constructs all or part of an SQL command using externally-influenced input from an upstream component, but it does not neutralize or incorrectly neutralizes special elements that could modify the intended SQL command when it is sent to a downstream component.", + Name: "Improper Neutralization of Special Elements used in an SQL Command ('SQL Injection')", + }, + "676": { + ID: "676", + Description: "The program invokes a potentially dangerous function that could introduce a vulnerability if it is used incorrectly, but the function can also be used safely.", + Name: "Use of Potentially Dangerous Function", + }, } // Get Retrieves a CWE weakness by it's id func Get(id string) *Weakness { - weakness, ok := data[id] + weakness, ok := idWeaknesses[id] if ok && weakness != nil { return weakness } diff --git a/vendor/github.com/securego/gosec/v2/entrypoint.sh b/vendor/github.com/securego/gosec/v2/entrypoint.sh index af2acd4b9c..bc6ad6a241 100644 --- a/vendor/github.com/securego/gosec/v2/entrypoint.sh +++ b/vendor/github.com/securego/gosec/v2/entrypoint.sh @@ -4,4 +4,8 @@ # provides all arguments concatenated as a single string. ARGS=("$@") +if [[ ! -z "${GITHUB_AUTHENTICATION_TOKEN}" ]]; then + git config --global --add url."https://x-access-token:${GITHUB_AUTHENTICATION_TOKEN}@github.com/".insteadOf "https://github.com/" +fi + /bin/gosec ${ARGS[*]} diff --git a/vendor/github.com/securego/gosec/v2/helpers.go b/vendor/github.com/securego/gosec/v2/helpers.go index 437d0324b5..15b2b5f3a3 100644 --- a/vendor/github.com/securego/gosec/v2/helpers.go +++ b/vendor/github.com/securego/gosec/v2/helpers.go @@ -37,12 +37,9 @@ import ( // // node, matched := MatchCallByPackage(n, ctx, "math/rand", "Read") func MatchCallByPackage(n ast.Node, c *Context, pkg string, names ...string) (*ast.CallExpr, bool) { - importedName, found := GetImportedName(pkg, c) + importedNames, found := GetImportedNames(pkg, c) if !found { - importedName, found = GetAliasedName(pkg, c) - if !found { - return nil, false - } + return nil, false } if callExpr, ok := n.(*ast.CallExpr); ok { @@ -50,7 +47,10 @@ func MatchCallByPackage(n ast.Node, c *Context, pkg string, names ...string) (*a if err != nil { return nil, false } - if packageName == importedName { + for _, in := range importedNames { + if packageName != in { + continue + } for _, name := range names { if callName == name { return callExpr, true @@ -96,11 +96,46 @@ func GetChar(n ast.Node) (byte, error) { return 0, fmt.Errorf("Unexpected AST node type: %T", n) } +// GetStringRecursive will recursively walk down a tree of *ast.BinaryExpr. It will then concat the results, and return. +// Unlike the other getters, it does _not_ raise an error for unknown ast.Node types. At the base, the recursion will hit a non-BinaryExpr type, +// either BasicLit or other, so it's not an error case. It will only error if `strconv.Unquote` errors. This matters, because there's +// currently functionality that relies on error values being returned by GetString if and when it hits a non-basiclit string node type, +// hence for cases where recursion is needed, we use this separate function, so that we can still be backwards compatible. +// +// This was added to handle a SQL injection concatenation case where the injected value is infixed between two strings, not at the start or end. See example below +// +// Do note that this will omit non-string values. 
So for example, if you were to use this node: +// ```go +// q := "SELECT * FROM foo WHERE name = '" + os.Args[0] + "' AND 1=1" // will result in "SELECT * FROM foo WHERE ” AND 1=1" + +func GetStringRecursive(n ast.Node) (string, error) { + if node, ok := n.(*ast.BasicLit); ok && node.Kind == token.STRING { + return strconv.Unquote(node.Value) + } + + if expr, ok := n.(*ast.BinaryExpr); ok { + x, err := GetStringRecursive(expr.X) + if err != nil { + return "", err + } + + y, err := GetStringRecursive(expr.Y) + if err != nil { + return "", err + } + + return x + y, nil + } + + return "", nil +} + // GetString will read and return a string value from an ast.BasicLit func GetString(n ast.Node) (string, error) { if node, ok := n.(*ast.BasicLit); ok && node.Kind == token.STRING { return strconv.Unquote(node.Value) } + return "", fmt.Errorf("Unexpected AST node type: %T", n) } @@ -148,7 +183,7 @@ func GetCallInfo(n ast.Node, ctx *Context) (string, string, error) { case *ast.CallExpr: switch call := expr.Fun.(type) { case *ast.Ident: - if call.Name == "new" { + if call.Name == "new" && len(expr.Args) > 0 { t := ctx.Info.TypeOf(expr.Args[0]) if t != nil { return t.String(), fn.Sel.Name, nil @@ -182,7 +217,7 @@ func GetCallInfo(n ast.Node, ctx *Context) (string, string, error) { } // GetCallStringArgsValues returns the values of strings arguments if they can be resolved -func GetCallStringArgsValues(n ast.Node, ctx *Context) []string { +func GetCallStringArgsValues(n ast.Node, _ *Context) []string { values := []string{} switch node := n.(type) { case *ast.CallExpr: @@ -201,22 +236,21 @@ func GetCallStringArgsValues(n ast.Node, ctx *Context) []string { return values } -// GetIdentStringValues return the string values of an Ident if they can be resolved -func GetIdentStringValues(ident *ast.Ident) []string { +func getIdentStringValues(ident *ast.Ident, stringFinder func(ast.Node) (string, error)) []string { values := []string{} obj := ident.Obj if obj != nil { switch decl := obj.Decl.(type) { case *ast.ValueSpec: for _, v := range decl.Values { - value, err := GetString(v) + value, err := stringFinder(v) if err == nil { values = append(values, value) } } case *ast.AssignStmt: for _, v := range decl.Rhs { - value, err := GetString(v) + value, err := stringFinder(v) if err == nil { values = append(values, value) } @@ -226,6 +260,18 @@ func GetIdentStringValues(ident *ast.Ident) []string { return values } +// getIdentStringRecursive returns the string of values of an Ident if they can be resolved +// The difference between this and GetIdentStringValues is that it will attempt to resolve the strings recursively, +// if it is passed a *ast.BinaryExpr. See GetStringRecursive for details +func GetIdentStringValuesRecursive(ident *ast.Ident) []string { + return getIdentStringValues(ident, GetStringRecursive) +} + +// GetIdentStringValues return the string values of an Ident if they can be resolved +func GetIdentStringValues(ident *ast.Ident) []string { + return getIdentStringValues(ident, GetString) +} + // GetBinaryExprOperands returns all operands of a binary expression by traversing // the expression tree func GetBinaryExprOperands(be *ast.BinaryExpr) []ast.Node { @@ -247,48 +293,23 @@ func GetBinaryExprOperands(be *ast.BinaryExpr) []ast.Node { return result } -// GetImportedName returns the name used for the package within the -// code. It will ignore initialization only imports. 
-func GetImportedName(path string, ctx *Context) (string, bool) { - importName, imported := ctx.Imports.Imported[path] - if !imported { - return "", false - } - - if _, initonly := ctx.Imports.InitOnly[path]; initonly { - return "", false - } - - return importName, true -} - -// GetAliasedName returns the aliased name used for the package within the -// code. It will ignore initialization only imports. -func GetAliasedName(path string, ctx *Context) (string, bool) { - importName, imported := ctx.Imports.Aliased[path] - if !imported { - return "", false - } - - if _, initonly := ctx.Imports.InitOnly[path]; initonly { - return "", false - } - - return importName, true +// GetImportedNames returns the name(s)/alias(es) used for the package within +// the code. It ignores initialization-only imports. +func GetImportedNames(path string, ctx *Context) (names []string, found bool) { + importNames, imported := ctx.Imports.Imported[path] + return importNames, imported } // GetImportPath resolves the full import path of an identifier based on // the imports in the current context(including aliases). func GetImportPath(name string, ctx *Context) (string, bool) { for path := range ctx.Imports.Imported { - if imported, ok := GetImportedName(path, ctx); ok && imported == name { - return path, true - } - } - - for path := range ctx.Imports.Aliased { - if imported, ok := GetAliasedName(path, ctx); ok && imported == name { - return path, true + if imported, ok := GetImportedNames(path, ctx); ok { + for _, n := range imported { + if n == name { + return path, true + } + } } } @@ -326,7 +347,7 @@ func Getenv(key, userDefault string) string { return userDefault } -// GetPkgRelativePath returns the Go relative relative path derived +// GetPkgRelativePath returns the Go relative path derived // form the given path func GetPkgRelativePath(path string) (string, error) { abspath, err := filepath.Abs(path) diff --git a/vendor/github.com/securego/gosec/v2/import_tracker.go b/vendor/github.com/securego/gosec/v2/import_tracker.go index cbb8c5518c..7984e99f42 100644 --- a/vendor/github.com/securego/gosec/v2/import_tracker.go +++ b/vendor/github.com/securego/gosec/v2/import_tracker.go @@ -22,54 +22,49 @@ import ( // by a source file. It is able to differentiate between plain imports, aliased // imports and init only imports. type ImportTracker struct { - Imported map[string]string - Aliased map[string]string - InitOnly map[string]bool + // Imported is a map of Imported with their associated names/aliases. 
+ Imported map[string][]string } // NewImportTracker creates an empty Import tracker instance func NewImportTracker() *ImportTracker { return &ImportTracker{ - make(map[string]string), - make(map[string]string), - make(map[string]bool), + Imported: make(map[string][]string), } } // TrackFile track all the imports used by the supplied file func (t *ImportTracker) TrackFile(file *ast.File) { for _, imp := range file.Imports { - path := strings.Trim(imp.Path.Value, `"`) - parts := strings.Split(path, "/") - if len(parts) > 0 { - name := parts[len(parts)-1] - t.Imported[path] = name - } + t.TrackImport(imp) } } // TrackPackages tracks all the imports used by the supplied packages func (t *ImportTracker) TrackPackages(pkgs ...*types.Package) { for _, pkg := range pkgs { - t.Imported[pkg.Path()] = pkg.Name() + t.Imported[pkg.Path()] = []string{pkg.Name()} } } -// TrackImport tracks imports and handles the 'unsafe' import -func (t *ImportTracker) TrackImport(n ast.Node) { - if imported, ok := n.(*ast.ImportSpec); ok { - path := strings.Trim(imported.Path.Value, `"`) - if imported.Name != nil { - if imported.Name.Name == "_" { - // Initialization only import - t.InitOnly[path] = true - } else { - // Aliased import - t.Aliased[path] = imported.Name.Name - } - } - if path == "unsafe" { - t.Imported[path] = path +// TrackImport tracks imports. +func (t *ImportTracker) TrackImport(imported *ast.ImportSpec) { + importPath := strings.Trim(imported.Path.Value, `"`) + if imported.Name != nil { + if imported.Name.Name != "_" { + // Aliased import + t.Imported[importPath] = append(t.Imported[importPath], imported.Name.String()) } + } else { + t.Imported[importPath] = append(t.Imported[importPath], importName(importPath)) + } +} + +func importName(importPath string) string { + parts := strings.Split(importPath, "/") + name := importPath + if len(parts) > 0 { + name = parts[len(parts)-1] } + return name } diff --git a/vendor/github.com/securego/gosec/v2/install.sh b/vendor/github.com/securego/gosec/v2/install.sh index 0da55d3791..2b6403cb25 100644 --- a/vendor/github.com/securego/gosec/v2/install.sh +++ b/vendor/github.com/securego/gosec/v2/install.sh @@ -280,11 +280,13 @@ http_copy() { github_release() { owner_repo=$1 version=$2 - test -z "$version" && version="latest" - giturl="https://github.com/${owner_repo}/releases/${version}" + giturl="https://api.github.com/repos/${owner_repo}/releases/tags/${version}" + if [ -z "${version}" ]; then + giturl="https://api.github.com/repos/${owner_repo}/releases/latest" + fi json=$(http_copy "$giturl" "Accept:application/json") test -z "$json" && return 1 - version=$(echo "$json" | tr -s '\n' ' ' | sed 's/.*"tag_name":"//' | sed 's/".*//') + version=$(echo "$json" | tr -s '\n' ' ' | sed 's/.*"tag_name": *"//' | sed 's/".*//') test -z "$version" && return 1 echo "$version" } diff --git a/vendor/github.com/securego/gosec/v2/issue.go b/vendor/github.com/securego/gosec/v2/issue/issue.go similarity index 85% rename from vendor/github.com/securego/gosec/v2/issue.go rename to vendor/github.com/securego/gosec/v2/issue/issue.go index 32b9bc0cc8..1000b20423 100644 --- a/vendor/github.com/securego/gosec/v2/issue.go +++ b/vendor/github.com/securego/gosec/v2/issue/issue.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package gosec +package issue import ( "bufio" @@ -66,6 +66,7 @@ var ruleToCWE = map[string]string{ "G111": "22", "G112": "400", "G113": "190", + "G114": "676", "G201": "89", "G202": "89", "G203": "79", @@ -76,7 +77,6 @@ var ruleToCWE = map[string]string{ "G304": "22", "G305": "22", "G306": "276", - "G307": "703", "G401": "326", "G402": "295", "G403": "310", @@ -87,6 +87,7 @@ var ruleToCWE = map[string]string{ "G504": "327", "G505": "327", "G601": "118", + "G602": "118", } // Issue is returned by a gosec rule if it discovers an issue with the scanned code. @@ -104,8 +105,15 @@ type Issue struct { Suppressions []SuppressionInfo `json:"suppressions"` // Suppression info of the issue } +// SuppressionInfo object is to record the kind and the justification that used +// to suppress violations. +type SuppressionInfo struct { + Kind string `json:"kind"` + Justification string `json:"justification"` +} + // FileLocation point out the file path and line number in file -func (i Issue) FileLocation() string { +func (i *Issue) FileLocation() string { return fmt.Sprintf("%s:%s", i.File, i.Line) } @@ -136,11 +144,8 @@ func (c Score) String() string { return "UNDEFINED" } -// codeSnippet extracts a code snippet based on the ast reference -func codeSnippet(file *os.File, start int64, end int64, n ast.Node) (string, error) { - if n == nil { - return "", fmt.Errorf("invalid AST node provided") - } +// CodeSnippet extracts a code snippet based on the ast reference +func CodeSnippet(file *os.File, start int64, end int64) (string, error) { var pos int64 var buf bytes.Buffer scanner := bufio.NewScanner(file) @@ -170,23 +175,21 @@ func codeSnippetEndLine(node ast.Node, fobj *token.File) int64 { return e + SnippetOffset } -// NewIssue creates a new Issue -func NewIssue(ctx *Context, node ast.Node, ruleID, desc string, severity Score, confidence Score) *Issue { - fobj := ctx.FileSet.File(node.Pos()) +// New creates a new Issue +func New(fobj *token.File, node ast.Node, ruleID, desc string, severity, confidence Score) *Issue { name := fobj.Name() - start, end := fobj.Line(node.Pos()), fobj.Line(node.End()) - line := strconv.Itoa(start) - if start != end { - line = fmt.Sprintf("%d-%d", start, end) - } + line := GetLine(fobj, node) col := strconv.Itoa(fobj.Position(node.Pos()).Column) var code string - if file, err := os.Open(fobj.Name()); err == nil { + if node == nil { + code = "invalid AST node provided" + } + if file, err := os.Open(fobj.Name()); err == nil && node != nil { defer file.Close() // #nosec s := codeSnippetStartLine(node, fobj) e := codeSnippetEndLine(node, fobj) - code, err = codeSnippet(file, s, e, node) + code, err = CodeSnippet(file, s, e) if err != nil { code = err.Error() } @@ -210,3 +213,13 @@ func (i *Issue) WithSuppressions(suppressions []SuppressionInfo) *Issue { i.Suppressions = suppressions return i } + +// GetLine returns the line number of a given ast.Node +func GetLine(fobj *token.File, node ast.Node) string { + start, end := fobj.Line(node.Pos()), fobj.Line(node.End()) + line := strconv.Itoa(start) + if start != end { + line = fmt.Sprintf("%d-%d", start, end) + } + return line +} diff --git a/vendor/github.com/securego/gosec/v2/report.go b/vendor/github.com/securego/gosec/v2/report.go index 96b1466d58..4fdeea5206 100644 --- a/vendor/github.com/securego/gosec/v2/report.go +++ b/vendor/github.com/securego/gosec/v2/report.go @@ -1,15 +1,19 @@ package gosec +import ( + "github.com/securego/gosec/v2/issue" +) + // ReportInfo this is report information type ReportInfo struct { Errors 
map[string][]Error `json:"Golang errors"` - Issues []*Issue + Issues []*issue.Issue Stats *Metrics GosecVersion string } // NewReportInfo instantiate a ReportInfo -func NewReportInfo(issues []*Issue, metrics *Metrics, errors map[string][]Error) *ReportInfo { +func NewReportInfo(issues []*issue.Issue, metrics *Metrics, errors map[string][]Error) *ReportInfo { return &ReportInfo{ Errors: errors, Issues: issues, diff --git a/vendor/github.com/securego/gosec/v2/resolve.go b/vendor/github.com/securego/gosec/v2/resolve.go index cdc287e8e5..a201b8d32b 100644 --- a/vendor/github.com/securego/gosec/v2/resolve.go +++ b/vendor/github.com/securego/gosec/v2/resolve.go @@ -66,7 +66,7 @@ func resolveBinExpr(n *ast.BinaryExpr, c *Context) bool { return (TryResolve(n.X, c) && TryResolve(n.Y, c)) } -func resolveCallExpr(n *ast.CallExpr, c *Context) bool { +func resolveCallExpr(_ *ast.CallExpr, _ *Context) bool { // TODO(tkelsey): next step, full function resolution return false } diff --git a/vendor/github.com/securego/gosec/v2/rule.go b/vendor/github.com/securego/gosec/v2/rule.go index c0429c4c23..490a25da02 100644 --- a/vendor/github.com/securego/gosec/v2/rule.go +++ b/vendor/github.com/securego/gosec/v2/rule.go @@ -15,12 +15,14 @@ package gosec import ( "go/ast" "reflect" + + "github.com/securego/gosec/v2/issue" ) // The Rule interface used by all rules supported by gosec. type Rule interface { ID() string - Match(ast.Node, *Context) (*Issue, error) + Match(ast.Node, *Context) (*issue.Issue, error) } // RuleBuilder is used to register a rule definition with the analyzer @@ -41,7 +43,7 @@ func NewRuleSet() RuleSet { return RuleSet{make(map[reflect.Type][]Rule), make(map[string]bool)} } -// Register adds a trigger for the supplied rule for the the +// Register adds a trigger for the supplied rule for the // specified ast nodes. 
func (r RuleSet) Register(rule Rule, isSuppressed bool, nodes ...ast.Node) { for _, n := range nodes { diff --git a/vendor/github.com/securego/gosec/v2/rules/archive.go b/vendor/github.com/securego/gosec/v2/rules/archive.go index 92c7e4481c..987047435b 100644 --- a/vendor/github.com/securego/gosec/v2/rules/archive.go +++ b/vendor/github.com/securego/gosec/v2/rules/archive.go @@ -5,10 +5,11 @@ import ( "go/types" "github.com/securego/gosec/v2" + "github.com/securego/gosec/v2/issue" ) type archive struct { - gosec.MetaData + issue.MetaData calls gosec.CallList argTypes []string } @@ -18,7 +19,7 @@ func (a *archive) ID() string { } // Match inspects AST nodes to determine if the filepath.Joins uses any argument derived from type zip.File or tar.Header -func (a *archive) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, error) { +func (a *archive) Match(n ast.Node, c *gosec.Context) (*issue.Issue, error) { if node := a.calls.ContainsPkgCallExpr(n, c, false); node != nil { for _, arg := range node.Args { var argType types.Type @@ -38,7 +39,7 @@ func (a *archive) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, error) { if argType != nil { for _, t := range a.argTypes { if argType.String() == t { - return gosec.NewIssue(c, n, a.ID(), a.What, a.Severity, a.Confidence), nil + return c.NewIssue(n, a.ID(), a.What, a.Severity, a.Confidence), nil } } } @@ -48,17 +49,17 @@ func (a *archive) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, error) { } // NewArchive creates a new rule which detects the file traversal when extracting zip/tar archives -func NewArchive(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { +func NewArchive(id string, _ gosec.Config) (gosec.Rule, []ast.Node) { calls := gosec.NewCallList() calls.Add("path/filepath", "Join") calls.Add("path", "Join") return &archive{ calls: calls, argTypes: []string{"*archive/zip.File", "*archive/tar.Header"}, - MetaData: gosec.MetaData{ + MetaData: issue.MetaData{ ID: id, - Severity: gosec.Medium, - Confidence: gosec.High, + Severity: issue.Medium, + Confidence: issue.High, What: "File traversal when extracting zip/tar archive", }, }, []ast.Node{(*ast.CallExpr)(nil)} diff --git a/vendor/github.com/securego/gosec/v2/rules/bad_defer.go b/vendor/github.com/securego/gosec/v2/rules/bad_defer.go deleted file mode 100644 index 13b42070da..0000000000 --- a/vendor/github.com/securego/gosec/v2/rules/bad_defer.go +++ /dev/null @@ -1,68 +0,0 @@ -package rules - -import ( - "fmt" - "go/ast" - "strings" - - "github.com/securego/gosec/v2" -) - -type deferType struct { - typ string - methods []string -} - -type badDefer struct { - gosec.MetaData - types []deferType -} - -func (r *badDefer) ID() string { - return r.MetaData.ID -} - -func normalize(typ string) string { - return strings.TrimPrefix(typ, "*") -} - -func contains(methods []string, method string) bool { - for _, m := range methods { - if m == method { - return true - } - } - return false -} - -func (r *badDefer) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, error) { - if deferStmt, ok := n.(*ast.DeferStmt); ok { - for _, deferTyp := range r.types { - if typ, method, err := gosec.GetCallInfo(deferStmt.Call, c); err == nil { - if normalize(typ) == deferTyp.typ && contains(deferTyp.methods, method) { - return gosec.NewIssue(c, n, r.ID(), fmt.Sprintf(r.What, method, typ), r.Severity, r.Confidence), nil - } - } - } - } - - return nil, nil -} - -// NewDeferredClosing detects unsafe defer of error returning methods -func NewDeferredClosing(id string, conf gosec.Config) (gosec.Rule, []ast.Node) 
{ - return &badDefer{ - types: []deferType{ - { - typ: "os.File", - methods: []string{"Close"}, - }, - }, - MetaData: gosec.MetaData{ - ID: id, - Severity: gosec.Medium, - Confidence: gosec.High, - What: "Deferring unsafe method %q on type %q", - }, - }, []ast.Node{(*ast.DeferStmt)(nil)} -} diff --git a/vendor/github.com/securego/gosec/v2/rules/bind.go b/vendor/github.com/securego/gosec/v2/rules/bind.go index 8f6af067ad..fef760c808 100644 --- a/vendor/github.com/securego/gosec/v2/rules/bind.go +++ b/vendor/github.com/securego/gosec/v2/rules/bind.go @@ -19,11 +19,12 @@ import ( "regexp" "github.com/securego/gosec/v2" + "github.com/securego/gosec/v2/issue" ) // Looks for net.Listen("0.0.0.0") or net.Listen(":8080") type bindsToAllNetworkInterfaces struct { - gosec.MetaData + issue.MetaData calls gosec.CallList pattern *regexp.Regexp } @@ -32,7 +33,7 @@ func (r *bindsToAllNetworkInterfaces) ID() string { return r.MetaData.ID } -func (r *bindsToAllNetworkInterfaces) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, error) { +func (r *bindsToAllNetworkInterfaces) Match(n ast.Node, c *gosec.Context) (*issue.Issue, error) { callExpr := r.calls.ContainsPkgCallExpr(n, c, false) if callExpr == nil { return nil, nil @@ -42,14 +43,14 @@ func (r *bindsToAllNetworkInterfaces) Match(n ast.Node, c *gosec.Context) (*gose if bl, ok := arg.(*ast.BasicLit); ok { if arg, err := gosec.GetString(bl); err == nil { if r.pattern.MatchString(arg) { - return gosec.NewIssue(c, n, r.ID(), r.What, r.Severity, r.Confidence), nil + return c.NewIssue(n, r.ID(), r.What, r.Severity, r.Confidence), nil } } } else if ident, ok := arg.(*ast.Ident); ok { values := gosec.GetIdentStringValues(ident) for _, value := range values { if r.pattern.MatchString(value) { - return gosec.NewIssue(c, n, r.ID(), r.What, r.Severity, r.Confidence), nil + return c.NewIssue(n, r.ID(), r.What, r.Severity, r.Confidence), nil } } } @@ -57,7 +58,7 @@ func (r *bindsToAllNetworkInterfaces) Match(n ast.Node, c *gosec.Context) (*gose values := gosec.GetCallStringArgsValues(callExpr.Args[0], c) for _, value := range values { if r.pattern.MatchString(value) { - return gosec.NewIssue(c, n, r.ID(), r.What, r.Severity, r.Confidence), nil + return c.NewIssue(n, r.ID(), r.What, r.Severity, r.Confidence), nil } } } @@ -66,17 +67,17 @@ func (r *bindsToAllNetworkInterfaces) Match(n ast.Node, c *gosec.Context) (*gose // NewBindsToAllNetworkInterfaces detects socket connections that are setup to // listen on all network interfaces. 
-func NewBindsToAllNetworkInterfaces(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { +func NewBindsToAllNetworkInterfaces(id string, _ gosec.Config) (gosec.Rule, []ast.Node) { calls := gosec.NewCallList() calls.Add("net", "Listen") calls.Add("crypto/tls", "Listen") return &bindsToAllNetworkInterfaces{ calls: calls, pattern: regexp.MustCompile(`^(0.0.0.0|:).*$`), - MetaData: gosec.MetaData{ + MetaData: issue.MetaData{ ID: id, - Severity: gosec.Medium, - Confidence: gosec.High, + Severity: issue.Medium, + Confidence: issue.High, What: "Binds to all network interfaces", }, }, []ast.Node{(*ast.CallExpr)(nil)} diff --git a/vendor/github.com/securego/gosec/v2/rules/blocklist.go b/vendor/github.com/securego/gosec/v2/rules/blocklist.go index afd4ee56b7..5e03cf7a0c 100644 --- a/vendor/github.com/securego/gosec/v2/rules/blocklist.go +++ b/vendor/github.com/securego/gosec/v2/rules/blocklist.go @@ -19,27 +19,28 @@ import ( "strings" "github.com/securego/gosec/v2" + "github.com/securego/gosec/v2/issue" ) type blocklistedImport struct { - gosec.MetaData + issue.MetaData Blocklisted map[string]string } func unquote(original string) string { - copy := strings.TrimSpace(original) - copy = strings.TrimLeft(copy, `"`) - return strings.TrimRight(copy, `"`) + cleaned := strings.TrimSpace(original) + cleaned = strings.TrimLeft(cleaned, `"`) + return strings.TrimRight(cleaned, `"`) } func (r *blocklistedImport) ID() string { return r.MetaData.ID } -func (r *blocklistedImport) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, error) { +func (r *blocklistedImport) Match(n ast.Node, c *gosec.Context) (*issue.Issue, error) { if node, ok := n.(*ast.ImportSpec); ok { if description, ok := r.Blocklisted[unquote(node.Path.Value)]; ok { - return gosec.NewIssue(c, node, r.ID(), description, r.Severity, r.Confidence), nil + return c.NewIssue(node, r.ID(), description, r.Severity, r.Confidence), nil } } return nil, nil @@ -47,12 +48,12 @@ func (r *blocklistedImport) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, e // NewBlocklistedImports reports when a blocklisted import is being used. // Typically when a deprecated technology is being used. 
-func NewBlocklistedImports(id string, conf gosec.Config, blocklist map[string]string) (gosec.Rule, []ast.Node) { +func NewBlocklistedImports(id string, _ gosec.Config, blocklist map[string]string) (gosec.Rule, []ast.Node) { return &blocklistedImport{ - MetaData: gosec.MetaData{ + MetaData: issue.MetaData{ ID: id, - Severity: gosec.Medium, - Confidence: gosec.High, + Severity: issue.Medium, + Confidence: issue.High, }, Blocklisted: blocklist, }, []ast.Node{(*ast.ImportSpec)(nil)} diff --git a/vendor/github.com/securego/gosec/v2/rules/decompression-bomb.go b/vendor/github.com/securego/gosec/v2/rules/decompression-bomb.go index 02256faa98..7e57f1a5b1 100644 --- a/vendor/github.com/securego/gosec/v2/rules/decompression-bomb.go +++ b/vendor/github.com/securego/gosec/v2/rules/decompression-bomb.go @@ -19,10 +19,11 @@ import ( "go/ast" "github.com/securego/gosec/v2" + "github.com/securego/gosec/v2/issue" ) type decompressionBombCheck struct { - gosec.MetaData + issue.MetaData readerCalls gosec.CallList copyCalls gosec.CallList } @@ -40,7 +41,7 @@ func containsReaderCall(node ast.Node, ctx *gosec.Context, list gosec.CallList) return list.Contains(s, idt) } -func (d *decompressionBombCheck) Match(node ast.Node, ctx *gosec.Context) (*gosec.Issue, error) { +func (d *decompressionBombCheck) Match(node ast.Node, ctx *gosec.Context) (*issue.Issue, error) { var readerVarObj map[*ast.Object]struct{} // To check multiple lines, ctx.PassedValues is used to store temporary data. @@ -72,7 +73,7 @@ func (d *decompressionBombCheck) Match(node ast.Node, ctx *gosec.Context) (*gose if idt, ok := n.Args[1].(*ast.Ident); ok { if _, ok := readerVarObj[idt.Obj]; ok { // Detect io.Copy(x, r) - return gosec.NewIssue(ctx, n, d.ID(), d.What, d.Severity, d.Confidence), nil + return ctx.NewIssue(n, d.ID(), d.What, d.Severity, d.Confidence), nil } } } @@ -82,7 +83,7 @@ func (d *decompressionBombCheck) Match(node ast.Node, ctx *gosec.Context) (*gose } // NewDecompressionBombCheck detects if there is potential DoS vulnerability via decompression bomb -func NewDecompressionBombCheck(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { +func NewDecompressionBombCheck(id string, _ gosec.Config) (gosec.Rule, []ast.Node) { readerCalls := gosec.NewCallList() readerCalls.Add("compress/gzip", "NewReader") readerCalls.AddAll("compress/zlib", "NewReader", "NewReaderDict") @@ -98,10 +99,10 @@ func NewDecompressionBombCheck(id string, conf gosec.Config) (gosec.Rule, []ast. 
copyCalls.Add("io", "CopyBuffer") return &decompressionBombCheck{ - MetaData: gosec.MetaData{ + MetaData: issue.MetaData{ ID: id, - Severity: gosec.Medium, - Confidence: gosec.Medium, + Severity: issue.Medium, + Confidence: issue.Medium, What: "Potential DoS vulnerability via decompression bomb", }, readerCalls: readerCalls, diff --git a/vendor/github.com/securego/gosec/v2/rules/directory-traversal.go b/vendor/github.com/securego/gosec/v2/rules/directory-traversal.go index c373427b8f..47bcb2dc4a 100644 --- a/vendor/github.com/securego/gosec/v2/rules/directory-traversal.go +++ b/vendor/github.com/securego/gosec/v2/rules/directory-traversal.go @@ -5,18 +5,19 @@ import ( "regexp" "github.com/securego/gosec/v2" + "github.com/securego/gosec/v2/issue" ) type traversal struct { pattern *regexp.Regexp - gosec.MetaData + issue.MetaData } func (r *traversal) ID() string { return r.MetaData.ID } -func (r *traversal) Match(n ast.Node, ctx *gosec.Context) (*gosec.Issue, error) { +func (r *traversal) Match(n ast.Node, ctx *gosec.Context) (*issue.Issue, error) { switch node := n.(type) { case *ast.CallExpr: return r.matchCallExpr(node, ctx) @@ -24,14 +25,14 @@ func (r *traversal) Match(n ast.Node, ctx *gosec.Context) (*gosec.Issue, error) return nil, nil } -func (r *traversal) matchCallExpr(assign *ast.CallExpr, ctx *gosec.Context) (*gosec.Issue, error) { +func (r *traversal) matchCallExpr(assign *ast.CallExpr, ctx *gosec.Context) (*issue.Issue, error) { for _, i := range assign.Args { if basiclit, ok1 := i.(*ast.BasicLit); ok1 { if fun, ok2 := assign.Fun.(*ast.SelectorExpr); ok2 { if x, ok3 := fun.X.(*ast.Ident); ok3 { - string := x.Name + "." + fun.Sel.Name + "(" + basiclit.Value + ")" - if r.pattern.MatchString(string) { - return gosec.NewIssue(ctx, assign, r.ID(), r.What, r.Severity, r.Confidence), nil + str := x.Name + "." 
+ fun.Sel.Name + "(" + basiclit.Value + ")" + if r.pattern.MatchString(str) { + return ctx.NewIssue(assign, r.ID(), r.What, r.Severity, r.Confidence), nil } } } @@ -54,11 +55,11 @@ func NewDirectoryTraversal(id string, conf gosec.Config) (gosec.Rule, []ast.Node return &traversal{ pattern: regexp.MustCompile(pattern), - MetaData: gosec.MetaData{ + MetaData: issue.MetaData{ ID: id, What: "Potential directory traversal", - Confidence: gosec.Medium, - Severity: gosec.Medium, + Confidence: issue.Medium, + Severity: issue.Medium, }, }, []ast.Node{(*ast.CallExpr)(nil)} } diff --git a/vendor/github.com/securego/gosec/v2/rules/errors.go b/vendor/github.com/securego/gosec/v2/rules/errors.go index 0838382b32..d31248ccb4 100644 --- a/vendor/github.com/securego/gosec/v2/rules/errors.go +++ b/vendor/github.com/securego/gosec/v2/rules/errors.go @@ -19,10 +19,11 @@ import ( "go/types" "github.com/securego/gosec/v2" + "github.com/securego/gosec/v2/issue" ) type noErrorCheck struct { - gosec.MetaData + issue.MetaData whitelist gosec.CallList } @@ -49,7 +50,7 @@ func returnsError(callExpr *ast.CallExpr, ctx *gosec.Context) int { return -1 } -func (r *noErrorCheck) Match(n ast.Node, ctx *gosec.Context) (*gosec.Issue, error) { +func (r *noErrorCheck) Match(n ast.Node, ctx *gosec.Context) (*issue.Issue, error) { switch stmt := n.(type) { case *ast.AssignStmt: cfg := ctx.Config @@ -61,7 +62,7 @@ func (r *noErrorCheck) Match(n ast.Node, ctx *gosec.Context) (*gosec.Issue, erro return nil, nil } if id, ok := stmt.Lhs[pos].(*ast.Ident); ok && id.Name == "_" { - return gosec.NewIssue(ctx, n, r.ID(), r.What, r.Severity, r.Confidence), nil + return ctx.NewIssue(n, r.ID(), r.What, r.Severity, r.Confidence), nil } } } @@ -70,7 +71,7 @@ func (r *noErrorCheck) Match(n ast.Node, ctx *gosec.Context) (*gosec.Issue, erro if callExpr, ok := stmt.X.(*ast.CallExpr); ok && r.whitelist.ContainsCallExpr(stmt.X, ctx) == nil { pos := returnsError(callExpr, ctx) if pos >= 0 { - return gosec.NewIssue(ctx, n, r.ID(), r.What, r.Severity, r.Confidence), nil + return ctx.NewIssue(n, r.ID(), r.What, r.Severity, r.Confidence), nil } } } @@ -100,10 +101,10 @@ func NewNoErrorCheck(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { } return &noErrorCheck{ - MetaData: gosec.MetaData{ + MetaData: issue.MetaData{ ID: id, - Severity: gosec.Low, - Confidence: gosec.High, + Severity: issue.Low, + Confidence: issue.High, What: "Errors unhandled.", }, whitelist: whitelist, diff --git a/vendor/github.com/securego/gosec/v2/rules/fileperms.go b/vendor/github.com/securego/gosec/v2/rules/fileperms.go index a379a8c0b6..5311f74c6c 100644 --- a/vendor/github.com/securego/gosec/v2/rules/fileperms.go +++ b/vendor/github.com/securego/gosec/v2/rules/fileperms.go @@ -20,15 +20,17 @@ import ( "strconv" "github.com/securego/gosec/v2" + "github.com/securego/gosec/v2/issue" ) type filePermissions struct { - gosec.MetaData + issue.MetaData mode int64 pkgs []string calls []string } +// ID returns the ID of the rule. func (r *filePermissions) ID() string { return r.MetaData.ID } @@ -50,12 +52,17 @@ func getConfiguredMode(conf map[string]interface{}, configKey string, defaultMod return mode } -func (r *filePermissions) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, error) { +func modeIsSubset(subset int64, superset int64) bool { + return (subset | superset) == superset +} + +// Match checks if the rule is matched. 
+func (r *filePermissions) Match(n ast.Node, c *gosec.Context) (*issue.Issue, error) { for _, pkg := range r.pkgs { if callexpr, matched := gosec.MatchCallByPackage(n, c, pkg, r.calls...); matched { modeArg := callexpr.Args[len(callexpr.Args)-1] - if mode, err := gosec.GetInt(modeArg); err == nil && mode > r.mode { - return gosec.NewIssue(c, n, r.ID(), r.What, r.Severity, r.Confidence), nil + if mode, err := gosec.GetInt(modeArg); err == nil && !modeIsSubset(mode, r.mode) { + return c.NewIssue(n, r.ID(), r.What, r.Severity, r.Confidence), nil } } } @@ -69,10 +76,10 @@ func NewWritePerms(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { mode: mode, pkgs: []string{"io/ioutil", "os"}, calls: []string{"WriteFile"}, - MetaData: gosec.MetaData{ + MetaData: issue.MetaData{ ID: id, - Severity: gosec.Medium, - Confidence: gosec.High, + Severity: issue.Medium, + Confidence: issue.High, What: fmt.Sprintf("Expect WriteFile permissions to be %#o or less", mode), }, }, []ast.Node{(*ast.CallExpr)(nil)} @@ -86,10 +93,10 @@ func NewFilePerms(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { mode: mode, pkgs: []string{"os"}, calls: []string{"OpenFile", "Chmod"}, - MetaData: gosec.MetaData{ + MetaData: issue.MetaData{ ID: id, - Severity: gosec.Medium, - Confidence: gosec.High, + Severity: issue.Medium, + Confidence: issue.High, What: fmt.Sprintf("Expect file permissions to be %#o or less", mode), }, }, []ast.Node{(*ast.CallExpr)(nil)} @@ -103,11 +110,55 @@ func NewMkdirPerms(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { mode: mode, pkgs: []string{"os"}, calls: []string{"Mkdir", "MkdirAll"}, - MetaData: gosec.MetaData{ + MetaData: issue.MetaData{ ID: id, - Severity: gosec.Medium, - Confidence: gosec.High, + Severity: issue.Medium, + Confidence: issue.High, What: fmt.Sprintf("Expect directory permissions to be %#o or less", mode), }, }, []ast.Node{(*ast.CallExpr)(nil)} } + +type osCreatePermissions struct { + issue.MetaData + mode int64 + pkgs []string + calls []string +} + +const defaultOsCreateMode = 0o666 + +// ID returns the ID of the rule. +func (r *osCreatePermissions) ID() string { + return r.MetaData.ID +} + +// Match checks if the rule is matched. +func (r *osCreatePermissions) Match(n ast.Node, c *gosec.Context) (*issue.Issue, error) { + for _, pkg := range r.pkgs { + if _, matched := gosec.MatchCallByPackage(n, c, pkg, r.calls...); matched { + if !modeIsSubset(defaultOsCreateMode, r.mode) { + return c.NewIssue(n, r.ID(), r.What, r.Severity, r.Confidence), nil + } + } + } + return nil, nil +} + +// NewOsCreatePerms creates a rule to detect file creation with a more permissive than configured +// permission mask.
+func NewOsCreatePerms(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { + mode := getConfiguredMode(conf, id, 0o666) + return &osCreatePermissions{ + mode: mode, + pkgs: []string{"os"}, + calls: []string{"Create"}, + MetaData: issue.MetaData{ + ID: id, + Severity: issue.Medium, + Confidence: issue.High, + What: fmt.Sprintf("Expect file permissions to be %#o or less but os.Create used with default permissions %#o", + mode, defaultOsCreateMode), + }, + }, []ast.Node{(*ast.CallExpr)(nil)} +} diff --git a/vendor/github.com/securego/gosec/v2/rules/hardcoded_credentials.go b/vendor/github.com/securego/gosec/v2/rules/hardcoded_credentials.go index cf2e6638d9..ed1fb947d1 100644 --- a/vendor/github.com/securego/gosec/v2/rules/hardcoded_credentials.go +++ b/vendor/github.com/securego/gosec/v2/rules/hardcoded_credentials.go @@ -15,17 +15,180 @@ package rules import ( + "fmt" "go/ast" "go/token" "regexp" "strconv" - zxcvbn "github.com/nbutton23/zxcvbn-go" + zxcvbn "github.com/ccojocar/zxcvbn-go" + "github.com/securego/gosec/v2" + "github.com/securego/gosec/v2/issue" ) +type secretPattern struct { + name string + regexp *regexp.Regexp +} + +var secretsPatterns = [...]secretPattern{ + { + name: "RSA private key", + regexp: regexp.MustCompile(`-----BEGIN RSA PRIVATE KEY-----`), + }, + { + name: "SSH (DSA) private key", + regexp: regexp.MustCompile(`-----BEGIN DSA PRIVATE KEY-----`), + }, + { + name: "SSH (EC) private key", + regexp: regexp.MustCompile(`-----BEGIN EC PRIVATE KEY-----`), + }, + { + name: "PGP private key block", + regexp: regexp.MustCompile(`-----BEGIN PGP PRIVATE KEY BLOCK-----`), + }, + { + name: "Slack Token", + regexp: regexp.MustCompile(`xox[pborsa]-[0-9]{12}-[0-9]{12}-[0-9]{12}-[a-z0-9]{32}`), + }, + { + name: "AWS API Key", + regexp: regexp.MustCompile(`AKIA[0-9A-Z]{16}`), + }, + { + name: "Amazon MWS Auth Token", + regexp: regexp.MustCompile(`amzn\.mws\.[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}`), + }, + { + name: "AWS AppSync GraphQL Key", + regexp: regexp.MustCompile(`da2-[a-z0-9]{26}`), + }, + { + name: "GitHub personal access token", + regexp: regexp.MustCompile(`ghp_[a-zA-Z0-9]{36}`), + }, + { + name: "GitHub fine-grained access token", + regexp: regexp.MustCompile(`github_pat_[a-zA-Z0-9]{22}_[a-zA-Z0-9]{59}`), + }, + { + name: "GitHub action temporary token", + regexp: regexp.MustCompile(`ghs_[a-zA-Z0-9]{36}`), + }, + { + name: "Google API Key", + regexp: regexp.MustCompile(`AIza[0-9A-Za-z\-_]{35}`), + }, + { + name: "Google Cloud Platform API Key", + regexp: regexp.MustCompile(`AIza[0-9A-Za-z\-_]{35}`), + }, + { + name: "Google Cloud Platform OAuth", + regexp: regexp.MustCompile(`[0-9]+-[0-9A-Za-z_]{32}\.apps\.googleusercontent\.com`), + }, + { + name: "Google Drive API Key", + regexp: regexp.MustCompile(`AIza[0-9A-Za-z\-_]{35}`), + }, + { + name: "Google Drive OAuth", + regexp: regexp.MustCompile(`[0-9]+-[0-9A-Za-z_]{32}\.apps\.googleusercontent\.com`), + }, + { + name: "Google (GCP) Service-account", + regexp: regexp.MustCompile(`"type": "service_account"`), + }, + { + name: "Google Gmail API Key", + regexp: regexp.MustCompile(`AIza[0-9A-Za-z\-_]{35}`), + }, + { + name: "Google Gmail OAuth", + regexp: regexp.MustCompile(`[0-9]+-[0-9A-Za-z_]{32}\.apps\.googleusercontent\.com`), + }, + { + name: "Google OAuth Access Token", + regexp: regexp.MustCompile(`ya29\.[0-9A-Za-z\-_]+`), + }, + { + name: "Google YouTube API Key", + regexp: regexp.MustCompile(`AIza[0-9A-Za-z\-_]{35}`), + }, + { + name: "Google YouTube OAuth", + regexp: 
regexp.MustCompile(`[0-9]+-[0-9A-Za-z_]{32}\.apps\.googleusercontent\.com`), + }, + { + name: "Generic API Key", + regexp: regexp.MustCompile(`[aA][pP][iI]_?[kK][eE][yY].*[''|"][0-9a-zA-Z]{32,45}[''|"]`), + }, + { + name: "Generic Secret", + regexp: regexp.MustCompile(`[sS][eE][cC][rR][eE][tT].*[''|"][0-9a-zA-Z]{32,45}[''|"]`), + }, + { + name: "Heroku API Key", + regexp: regexp.MustCompile(`[hH][eE][rR][oO][kK][uU].*[0-9A-F]{8}-[0-9A-F]{4}-[0-9A-F]{4}-[0-9A-F]{4}-[0-9A-F]{12}`), + }, + { + name: "MailChimp API Key", + regexp: regexp.MustCompile(`[0-9a-f]{32}-us[0-9]{1,2}`), + }, + { + name: "Mailgun API Key", + regexp: regexp.MustCompile(`key-[0-9a-zA-Z]{32}`), + }, + { + name: "Password in URL", + regexp: regexp.MustCompile(`[a-zA-Z]{3,10}://[^/\\s:@]{3,20}:[^/\\s:@]{3,20}@.{1,100}["'\\s]`), + }, + { + name: "Slack Webhook", + regexp: regexp.MustCompile(`https://hooks\.slack\.com/services/T[a-zA-Z0-9_]{8}/B[a-zA-Z0-9_]{8}/[a-zA-Z0-9_]{24}`), + }, + { + name: "Stripe API Key", + regexp: regexp.MustCompile(`sk_live_[0-9a-zA-Z]{24}`), + }, + { + name: "Stripe API Key", + regexp: regexp.MustCompile(`sk_live_[0-9a-zA-Z]{24}`), + }, + { + name: "Stripe Restricted API Key", + regexp: regexp.MustCompile(`rk_live_[0-9a-zA-Z]{24}`), + }, + { + name: "Square Access Token", + regexp: regexp.MustCompile(`sq0atp-[0-9A-Za-z\-_]{22}`), + }, + { + name: "Square OAuth Secret", + regexp: regexp.MustCompile(`sq0csp-[0-9A-Za-z\-_]{43}`), + }, + { + name: "Telegram Bot API Key", + regexp: regexp.MustCompile(`[0-9]+:AA[0-9A-Za-z\-_]{33}`), + }, + { + name: "Twilio API Key", + regexp: regexp.MustCompile(`SK[0-9a-fA-F]{32}`), + }, + { + name: "Twitter Access Token", + regexp: regexp.MustCompile(`[tT][wW][iI][tT][tT][eE][rR].*[1-9][0-9]+-[0-9a-zA-Z]{40}`), + }, + { + name: "Twitter OAuth", + regexp: regexp.MustCompile(`[tT][wW][iI][tT][tT][eE][rR].*[''|"][0-9a-zA-Z]{35,44}[''|"]`), + }, +} + type credentials struct { - gosec.MetaData + issue.MetaData pattern *regexp.Regexp entropyThreshold float64 perCharThreshold float64 @@ -53,7 +216,16 @@ func (r *credentials) isHighEntropyString(str string) bool { entropyPerChar >= r.perCharThreshold)) } -func (r *credentials) Match(n ast.Node, ctx *gosec.Context) (*gosec.Issue, error) { +func (r *credentials) isSecretPattern(str string) (bool, string) { + for _, pattern := range secretsPatterns { + if pattern.regexp.MatchString(str) { + return true, pattern.name + } + } + return false, "" +} + +func (r *credentials) Match(n ast.Node, ctx *gosec.Context) (*issue.Issue, error) { switch node := n.(type) { case *ast.AssignStmt: return r.matchAssign(node, ctx) @@ -65,24 +237,41 @@ func (r *credentials) Match(n ast.Node, ctx *gosec.Context) (*gosec.Issue, error return nil, nil } -func (r *credentials) matchAssign(assign *ast.AssignStmt, ctx *gosec.Context) (*gosec.Issue, error) { +func (r *credentials) matchAssign(assign *ast.AssignStmt, ctx *gosec.Context) (*issue.Issue, error) { for _, i := range assign.Lhs { if ident, ok := i.(*ast.Ident); ok { + // First check LHS to find anything being assigned to variables whose name appears to be a cred if r.pattern.MatchString(ident.Name) { for _, e := range assign.Rhs { if val, err := gosec.GetString(e); err == nil { if r.ignoreEntropy || (!r.ignoreEntropy && r.isHighEntropyString(val)) { - return gosec.NewIssue(ctx, assign, r.ID(), r.What, r.Severity, r.Confidence), nil + return ctx.NewIssue(assign, r.ID(), r.What, r.Severity, r.Confidence), nil } } } } + + // Now that no names were matched, match the RHS to see if the actual values 
being assigned are creds + for _, e := range assign.Rhs { + val, err := gosec.GetString(e) + if err != nil { + continue + } + + if r.ignoreEntropy || r.isHighEntropyString(val) { + if ok, patternName := r.isSecretPattern(val); ok { + return ctx.NewIssue(assign, r.ID(), fmt.Sprintf("%s: %s", r.What, patternName), r.Severity, r.Confidence), nil + } + } + } } } return nil, nil } -func (r *credentials) matchValueSpec(valueSpec *ast.ValueSpec, ctx *gosec.Context) (*gosec.Issue, error) { +func (r *credentials) matchValueSpec(valueSpec *ast.ValueSpec, ctx *gosec.Context) (*issue.Issue, error) { + // Running match against the variable name(s) first. Will catch any creds whose var name matches the pattern, + // then will go back over to check the values themselves. for index, ident := range valueSpec.Names { if r.pattern.MatchString(ident.Name) && valueSpec.Values != nil { // const foo, bar = "same value" @@ -91,22 +280,57 @@ func (r *credentials) matchValueSpec(valueSpec *ast.ValueSpec, ctx *gosec.Contex } if val, err := gosec.GetString(valueSpec.Values[index]); err == nil { if r.ignoreEntropy || (!r.ignoreEntropy && r.isHighEntropyString(val)) { - return gosec.NewIssue(ctx, valueSpec, r.ID(), r.What, r.Severity, r.Confidence), nil + return ctx.NewIssue(valueSpec, r.ID(), r.What, r.Severity, r.Confidence), nil + } + } + } + } + + // Now that no variable names have been matched, match the actual values to find any creds + for _, ident := range valueSpec.Values { + if val, err := gosec.GetString(ident); err == nil { + if r.ignoreEntropy || r.isHighEntropyString(val) { + if ok, patternName := r.isSecretPattern(val); ok { + return ctx.NewIssue(valueSpec, r.ID(), fmt.Sprintf("%s: %s", r.What, patternName), r.Severity, r.Confidence), nil } } } } + return nil, nil } -func (r *credentials) matchEqualityCheck(binaryExpr *ast.BinaryExpr, ctx *gosec.Context) (*gosec.Issue, error) { +func (r *credentials) matchEqualityCheck(binaryExpr *ast.BinaryExpr, ctx *gosec.Context) (*issue.Issue, error) { if binaryExpr.Op == token.EQL || binaryExpr.Op == token.NEQ { - if ident, ok := binaryExpr.X.(*ast.Ident); ok { - if r.pattern.MatchString(ident.Name) { - if val, err := gosec.GetString(binaryExpr.Y); err == nil { - if r.ignoreEntropy || (!r.ignoreEntropy && r.isHighEntropyString(val)) { - return gosec.NewIssue(ctx, binaryExpr, r.ID(), r.What, r.Severity, r.Confidence), nil - } + ident, ok := binaryExpr.X.(*ast.Ident) + if !ok { + ident, _ = binaryExpr.Y.(*ast.Ident) + } + + if ident != nil && r.pattern.MatchString(ident.Name) { + valueNode := binaryExpr.Y + if !ok { + valueNode = binaryExpr.X + } + if val, err := gosec.GetString(valueNode); err == nil { + if r.ignoreEntropy || (!r.ignoreEntropy && r.isHighEntropyString(val)) { + return ctx.NewIssue(binaryExpr, r.ID(), r.What, r.Severity, r.Confidence), nil + } + } + } + + // Now that the variable names have been checked, and no matches were found, make sure that + // either the left or right operands is a string literal so we can match the value. 
+ identStrConst, ok := binaryExpr.X.(*ast.BasicLit) + if !ok { + identStrConst, ok = binaryExpr.Y.(*ast.BasicLit) + } + + if ok && identStrConst.Kind == token.STRING { + s, _ := gosec.GetString(identStrConst) + if r.ignoreEntropy || r.isHighEntropyString(s) { + if ok, patternName := r.isSecretPattern(s); ok { + return ctx.NewIssue(binaryExpr, r.ID(), fmt.Sprintf("%s: %s", r.What, patternName), r.Severity, r.Confidence), nil } } } @@ -129,6 +353,7 @@ func NewHardcodedCredentials(id string, conf gosec.Config) (gosec.Rule, []ast.No pattern = cfgPattern } } + if configIgnoreEntropy, ok := conf["ignore_entropy"]; ok { if cfgIgnoreEntropy, ok := configIgnoreEntropy.(bool); ok { ignoreEntropy = cfgIgnoreEntropy @@ -163,11 +388,11 @@ func NewHardcodedCredentials(id string, conf gosec.Config) (gosec.Rule, []ast.No perCharThreshold: perCharThreshold, ignoreEntropy: ignoreEntropy, truncate: truncateString, - MetaData: gosec.MetaData{ + MetaData: issue.MetaData{ ID: id, What: "Potential hardcoded credentials", - Confidence: gosec.Low, - Severity: gosec.High, + Confidence: issue.Low, + Severity: issue.High, }, }, []ast.Node{(*ast.AssignStmt)(nil), (*ast.ValueSpec)(nil), (*ast.BinaryExpr)(nil)} } diff --git a/vendor/github.com/securego/gosec/v2/rules/http_serve.go b/vendor/github.com/securego/gosec/v2/rules/http_serve.go index e460b3a680..525ed4ebc7 100644 --- a/vendor/github.com/securego/gosec/v2/rules/http_serve.go +++ b/vendor/github.com/securego/gosec/v2/rules/http_serve.go @@ -4,10 +4,11 @@ import ( "go/ast" "github.com/securego/gosec/v2" + "github.com/securego/gosec/v2/issue" ) type httpServeWithoutTimeouts struct { - gosec.MetaData + issue.MetaData pkg string calls []string } @@ -16,23 +17,23 @@ func (r *httpServeWithoutTimeouts) ID() string { return r.MetaData.ID } -func (r *httpServeWithoutTimeouts) Match(n ast.Node, c *gosec.Context) (gi *gosec.Issue, err error) { +func (r *httpServeWithoutTimeouts) Match(n ast.Node, c *gosec.Context) (gi *issue.Issue, err error) { if _, matches := gosec.MatchCallByPackage(n, c, r.pkg, r.calls...); matches { - return gosec.NewIssue(c, n, r.ID(), r.What, r.Severity, r.Confidence), nil + return c.NewIssue(n, r.ID(), r.What, r.Severity, r.Confidence), nil } return nil, nil } // NewHTTPServeWithoutTimeouts detects use of net/http serve functions that have no support for setting timeouts. 
-func NewHTTPServeWithoutTimeouts(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { +func NewHTTPServeWithoutTimeouts(id string, _ gosec.Config) (gosec.Rule, []ast.Node) { return &httpServeWithoutTimeouts{ pkg: "net/http", calls: []string{"ListenAndServe", "ListenAndServeTLS", "Serve", "ServeTLS"}, - MetaData: gosec.MetaData{ + MetaData: issue.MetaData{ ID: id, What: "Use of net/http serve function that has no support for setting timeouts", - Severity: gosec.Medium, - Confidence: gosec.High, + Severity: issue.Medium, + Confidence: issue.High, }, }, []ast.Node{(*ast.CallExpr)(nil)} } diff --git a/vendor/github.com/securego/gosec/v2/rules/implicit_aliasing.go b/vendor/github.com/securego/gosec/v2/rules/implicit_aliasing.go index b2668dec88..a7eabb20b4 100644 --- a/vendor/github.com/securego/gosec/v2/rules/implicit_aliasing.go +++ b/vendor/github.com/securego/gosec/v2/rules/implicit_aliasing.go @@ -3,12 +3,14 @@ package rules import ( "go/ast" "go/token" + "go/types" "github.com/securego/gosec/v2" + "github.com/securego/gosec/v2/issue" ) type implicitAliasing struct { - gosec.MetaData + issue.MetaData aliases map[*ast.Object]struct{} rightBrace token.Pos acceptableAlias []*ast.UnaryExpr @@ -27,7 +29,24 @@ func containsUnary(exprs []*ast.UnaryExpr, expr *ast.UnaryExpr) bool { return false } -func (r *implicitAliasing) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, error) { +func getIdentExpr(expr ast.Expr) (*ast.Ident, bool) { + return doGetIdentExpr(expr, false) +} + +func doGetIdentExpr(expr ast.Expr, hasSelector bool) (*ast.Ident, bool) { + switch node := expr.(type) { + case *ast.Ident: + return node, hasSelector + case *ast.SelectorExpr: + return doGetIdentExpr(node.X, true) + case *ast.UnaryExpr: + return doGetIdentExpr(node.X, hasSelector) + default: + return nil, false + } +} + +func (r *implicitAliasing) Match(n ast.Node, c *gosec.Context) (*issue.Issue, error) { switch node := n.(type) { case *ast.RangeStmt: // When presented with a range statement, get the underlying Object bound to @@ -71,9 +90,13 @@ func (r *implicitAliasing) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, er } // If we find a unary op of & (reference) of an object within r.aliases, complain. - if ident, ok := node.X.(*ast.Ident); ok && node.Op.String() == "&" { - if _, contains := r.aliases[ident.Obj]; contains { - return gosec.NewIssue(c, n, r.ID(), r.What, r.Severity, r.Confidence), nil + if identExpr, hasSelector := getIdentExpr(node); identExpr != nil && node.Op.String() == "&" { + if _, contains := r.aliases[identExpr.Obj]; contains { + _, isPointer := c.Info.TypeOf(identExpr).(*types.Pointer) + + if !hasSelector || !isPointer { + return c.NewIssue(n, r.ID(), r.What, r.Severity, r.Confidence), nil + } } } case *ast.ReturnStmt: @@ -89,15 +112,15 @@ func (r *implicitAliasing) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, er } // NewImplicitAliasing detects implicit memory aliasing of type: for blah := SomeCall() {... 
SomeOtherCall(&blah) ...} -func NewImplicitAliasing(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { +func NewImplicitAliasing(id string, _ gosec.Config) (gosec.Rule, []ast.Node) { return &implicitAliasing{ aliases: make(map[*ast.Object]struct{}), rightBrace: token.NoPos, acceptableAlias: make([]*ast.UnaryExpr, 0), - MetaData: gosec.MetaData{ + MetaData: issue.MetaData{ ID: id, - Severity: gosec.Medium, - Confidence: gosec.Medium, + Severity: issue.Medium, + Confidence: issue.Medium, What: "Implicit memory aliasing in for loop.", }, }, []ast.Node{(*ast.RangeStmt)(nil), (*ast.UnaryExpr)(nil), (*ast.ReturnStmt)(nil)} diff --git a/vendor/github.com/securego/gosec/v2/rules/integer_overflow.go b/vendor/github.com/securego/gosec/v2/rules/integer_overflow.go index f55211a923..1d57906642 100644 --- a/vendor/github.com/securego/gosec/v2/rules/integer_overflow.go +++ b/vendor/github.com/securego/gosec/v2/rules/integer_overflow.go @@ -19,10 +19,11 @@ import ( "go/ast" "github.com/securego/gosec/v2" + "github.com/securego/gosec/v2/issue" ) type integerOverflowCheck struct { - gosec.MetaData + issue.MetaData calls gosec.CallList } @@ -30,7 +31,7 @@ func (i *integerOverflowCheck) ID() string { return i.MetaData.ID } -func (i *integerOverflowCheck) Match(node ast.Node, ctx *gosec.Context) (*gosec.Issue, error) { +func (i *integerOverflowCheck) Match(node ast.Node, ctx *gosec.Context) (*issue.Issue, error) { var atoiVarObj map[*ast.Object]ast.Node // To check multiple lines, ctx.PassedValues is used to store temporary data. @@ -63,7 +64,7 @@ func (i *integerOverflowCheck) Match(node ast.Node, ctx *gosec.Context) (*gosec. if idt, ok := n.Args[0].(*ast.Ident); ok { if _, ok := atoiVarObj[idt.Obj]; ok { // Detect int32(v) and int16(v) - return gosec.NewIssue(ctx, n, i.ID(), i.What, i.Severity, i.Confidence), nil + return ctx.NewIssue(n, i.ID(), i.What, i.Severity, i.Confidence), nil } } } @@ -74,14 +75,14 @@ func (i *integerOverflowCheck) Match(node ast.Node, ctx *gosec.Context) (*gosec. 
} // NewIntegerOverflowCheck detects if there is potential Integer OverFlow -func NewIntegerOverflowCheck(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { +func NewIntegerOverflowCheck(id string, _ gosec.Config) (gosec.Rule, []ast.Node) { calls := gosec.NewCallList() calls.Add("strconv", "Atoi") return &integerOverflowCheck{ - MetaData: gosec.MetaData{ + MetaData: issue.MetaData{ ID: id, - Severity: gosec.High, - Confidence: gosec.Medium, + Severity: issue.High, + Confidence: issue.Medium, What: "Potential Integer overflow made by strconv.Atoi result conversion to int16/32", }, calls: calls, diff --git a/vendor/github.com/securego/gosec/v2/rules/math_big_rat.go b/vendor/github.com/securego/gosec/v2/rules/math_big_rat.go index 69037e18f9..1aac1fa201 100644 --- a/vendor/github.com/securego/gosec/v2/rules/math_big_rat.go +++ b/vendor/github.com/securego/gosec/v2/rules/math_big_rat.go @@ -4,10 +4,11 @@ import ( "go/ast" "github.com/securego/gosec/v2" + "github.com/securego/gosec/v2/issue" ) type usingOldMathBig struct { - gosec.MetaData + issue.MetaData calls gosec.CallList } @@ -15,18 +16,18 @@ func (r *usingOldMathBig) ID() string { return r.MetaData.ID } -func (r *usingOldMathBig) Match(node ast.Node, ctx *gosec.Context) (gi *gosec.Issue, err error) { +func (r *usingOldMathBig) Match(node ast.Node, ctx *gosec.Context) (gi *issue.Issue, err error) { if callExpr := r.calls.ContainsPkgCallExpr(node, ctx, false); callExpr == nil { return nil, nil } - confidence := gosec.Low + confidence := issue.Low major, minor, build := gosec.GoVersion() if major == 1 && (minor == 16 && build < 14 || minor == 17 && build < 7) { - confidence = gosec.Medium + confidence = issue.Medium } - return gosec.NewIssue(ctx, node, r.ID(), r.What, r.Severity, confidence), nil + return ctx.NewIssue(node, r.ID(), r.What, r.Severity, confidence), nil } // NewUsingOldMathBig rule detects the use of Rat.SetString from math/big. 
@@ -35,10 +36,10 @@ func NewUsingOldMathBig(id string, _ gosec.Config) (gosec.Rule, []ast.Node) { calls.Add("math/big.Rat", "SetString") return &usingOldMathBig{ calls: calls, - MetaData: gosec.MetaData{ + MetaData: issue.MetaData{ ID: id, What: "Potential uncontrolled memory consumption in Rat.SetString (CVE-2022-23772)", - Severity: gosec.High, + Severity: issue.High, }, }, []ast.Node{(*ast.CallExpr)(nil)} } diff --git a/vendor/github.com/securego/gosec/v2/rules/pprof.go b/vendor/github.com/securego/gosec/v2/rules/pprof.go index 4c99af7523..68498dd5e0 100644 --- a/vendor/github.com/securego/gosec/v2/rules/pprof.go +++ b/vendor/github.com/securego/gosec/v2/rules/pprof.go @@ -4,10 +4,11 @@ import ( "go/ast" "github.com/securego/gosec/v2" + "github.com/securego/gosec/v2/issue" ) type pprofCheck struct { - gosec.MetaData + issue.MetaData importPath string importName string } @@ -18,22 +19,22 @@ func (p *pprofCheck) ID() string { } // Match checks for pprof imports -func (p *pprofCheck) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, error) { +func (p *pprofCheck) Match(n ast.Node, c *gosec.Context) (*issue.Issue, error) { if node, ok := n.(*ast.ImportSpec); ok { if p.importPath == unquote(node.Path.Value) && node.Name != nil && p.importName == node.Name.Name { - return gosec.NewIssue(c, node, p.ID(), p.What, p.Severity, p.Confidence), nil + return c.NewIssue(node, p.ID(), p.What, p.Severity, p.Confidence), nil } } return nil, nil } // NewPprofCheck detects when the profiling endpoint is automatically exposed -func NewPprofCheck(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { +func NewPprofCheck(id string, _ gosec.Config) (gosec.Rule, []ast.Node) { return &pprofCheck{ - MetaData: gosec.MetaData{ + MetaData: issue.MetaData{ ID: id, - Severity: gosec.High, - Confidence: gosec.High, + Severity: issue.High, + Confidence: issue.High, What: "Profiling endpoint is automatically exposed on /debug/pprof", }, importPath: "net/http/pprof", diff --git a/vendor/github.com/securego/gosec/v2/rules/rand.go b/vendor/github.com/securego/gosec/v2/rules/rand.go index 055adce4d4..4491fd9284 100644 --- a/vendor/github.com/securego/gosec/v2/rules/rand.go +++ b/vendor/github.com/securego/gosec/v2/rules/rand.go @@ -18,10 +18,11 @@ import ( "go/ast" "github.com/securego/gosec/v2" + "github.com/securego/gosec/v2/issue" ) type weakRand struct { - gosec.MetaData + issue.MetaData funcNames []string packagePath string } @@ -30,10 +31,10 @@ func (w *weakRand) ID() string { return w.MetaData.ID } -func (w *weakRand) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, error) { +func (w *weakRand) Match(n ast.Node, c *gosec.Context) (*issue.Issue, error) { for _, funcName := range w.funcNames { if _, matched := gosec.MatchCallByPackage(n, c, w.packagePath, funcName); matched { - return gosec.NewIssue(c, n, w.ID(), w.What, w.Severity, w.Confidence), nil + return c.NewIssue(n, w.ID(), w.What, w.Severity, w.Confidence), nil } } @@ -41,17 +42,17 @@ func (w *weakRand) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, error) { } // NewWeakRandCheck detects the use of random number generator that isn't cryptographically secure -func NewWeakRandCheck(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { +func NewWeakRandCheck(id string, _ gosec.Config) (gosec.Rule, []ast.Node) { return &weakRand{ funcNames: []string{ "New", "Read", "Float32", "Float64", "Int", "Int31", "Int31n", "Int63", "Int63n", "Intn", "NormalFloat64", "Uint32", "Uint64", }, packagePath: "math/rand", - MetaData: gosec.MetaData{ + MetaData: 
issue.MetaData{ ID: id, - Severity: gosec.High, - Confidence: gosec.Medium, + Severity: issue.High, + Confidence: issue.Medium, What: "Use of weak random number generator (math/rand instead of crypto/rand)", }, }, []ast.Node{(*ast.CallExpr)(nil)} diff --git a/vendor/github.com/securego/gosec/v2/rules/readfile.go b/vendor/github.com/securego/gosec/v2/rules/readfile.go index 579f2fa447..7ef4bbad13 100644 --- a/vendor/github.com/securego/gosec/v2/rules/readfile.go +++ b/vendor/github.com/securego/gosec/v2/rules/readfile.go @@ -19,13 +19,15 @@ import ( "go/types" "github.com/securego/gosec/v2" + "github.com/securego/gosec/v2/issue" ) type readfile struct { - gosec.MetaData + issue.MetaData gosec.CallList - pathJoin gosec.CallList - clean gosec.CallList + pathJoin gosec.CallList + clean gosec.CallList + cleanedVar map[any]ast.Node } // ID returns the identifier for this rule @@ -57,8 +59,11 @@ func (r *readfile) isJoinFunc(n ast.Node, c *gosec.Context) bool { return false } -// isFilepathClean checks if there is a filepath.Clean before assigning to a variable +// isFilepathClean checks if there is a filepath.Clean for given variable func (r *readfile) isFilepathClean(n *ast.Ident, c *gosec.Context) bool { + if _, ok := r.cleanedVar[n.Obj.Decl]; ok { + return true + } if n.Obj.Kind != ast.Var { return false } @@ -72,22 +77,38 @@ func (r *readfile) isFilepathClean(n *ast.Ident, c *gosec.Context) bool { return false } +// trackFilepathClean tracks back the declaration of variable from filepath.Clean argument +func (r *readfile) trackFilepathClean(n ast.Node) { + if clean, ok := n.(*ast.CallExpr); ok && len(clean.Args) > 0 { + if ident, ok := clean.Args[0].(*ast.Ident); ok { + // ident.Obj may be nil if the referenced declaration is in another file. It also may be incorrect. + // if it is nil, do not follow it. + if ident.Obj != nil { + r.cleanedVar[ident.Obj.Decl] = n + } + } + } +} + // Match inspects AST nodes to determine if the match the methods `os.Open` or `ioutil.ReadFile` -func (r *readfile) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, error) { - if node := r.ContainsPkgCallExpr(n, c, false); node != nil { +func (r *readfile) Match(n ast.Node, c *gosec.Context) (*issue.Issue, error) { + if node := r.clean.ContainsPkgCallExpr(n, c, false); node != nil { + r.trackFilepathClean(n) + return nil, nil + } else if node := r.ContainsPkgCallExpr(n, c, false); node != nil { for _, arg := range node.Args { // handles path joining functions in Arg // eg. os.Open(filepath.Join("/tmp/", file)) if callExpr, ok := arg.(*ast.CallExpr); ok { if r.isJoinFunc(callExpr, c) { - return gosec.NewIssue(c, n, r.ID(), r.What, r.Severity, r.Confidence), nil + return c.NewIssue(n, r.ID(), r.What, r.Severity, r.Confidence), nil } } // handles binary string concatenation eg. 
ioutil.Readfile("/tmp/" + file + "/blob") if binExp, ok := arg.(*ast.BinaryExpr); ok { // resolve all found identities from the BinaryExpr if _, ok := gosec.FindVarIdentities(binExp, c); ok { - return gosec.NewIssue(c, n, r.ID(), r.What, r.Severity, r.Confidence), nil + return c.NewIssue(n, r.ID(), r.What, r.Severity, r.Confidence), nil } } @@ -96,7 +117,7 @@ func (r *readfile) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, error) { if _, ok := obj.(*types.Var); ok && !gosec.TryResolve(ident, c) && !r.isFilepathClean(ident, c) { - return gosec.NewIssue(c, n, r.ID(), r.What, r.Severity, r.Confidence), nil + return c.NewIssue(n, r.ID(), r.What, r.Severity, r.Confidence), nil } } } @@ -105,17 +126,18 @@ func (r *readfile) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, error) { } // NewReadFile detects cases where we read files -func NewReadFile(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { +func NewReadFile(id string, _ gosec.Config) (gosec.Rule, []ast.Node) { rule := &readfile{ pathJoin: gosec.NewCallList(), clean: gosec.NewCallList(), CallList: gosec.NewCallList(), - MetaData: gosec.MetaData{ + MetaData: issue.MetaData{ ID: id, What: "Potential file inclusion via variable", - Severity: gosec.Medium, - Confidence: gosec.High, + Severity: issue.Medium, + Confidence: issue.High, }, + cleanedVar: map[any]ast.Node{}, } rule.pathJoin.Add("path/filepath", "Join") rule.pathJoin.Add("path", "Join") diff --git a/vendor/github.com/securego/gosec/v2/rules/rsa.go b/vendor/github.com/securego/gosec/v2/rules/rsa.go index f2ed5db53d..331e7fc80a 100644 --- a/vendor/github.com/securego/gosec/v2/rules/rsa.go +++ b/vendor/github.com/securego/gosec/v2/rules/rsa.go @@ -19,10 +19,11 @@ import ( "go/ast" "github.com/securego/gosec/v2" + "github.com/securego/gosec/v2/issue" ) type weakKeyStrength struct { - gosec.MetaData + issue.MetaData calls gosec.CallList bits int } @@ -31,27 +32,27 @@ func (w *weakKeyStrength) ID() string { return w.MetaData.ID } -func (w *weakKeyStrength) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, error) { +func (w *weakKeyStrength) Match(n ast.Node, c *gosec.Context) (*issue.Issue, error) { if callExpr := w.calls.ContainsPkgCallExpr(n, c, false); callExpr != nil { if bits, err := gosec.GetInt(callExpr.Args[1]); err == nil && bits < (int64)(w.bits) { - return gosec.NewIssue(c, n, w.ID(), w.What, w.Severity, w.Confidence), nil + return c.NewIssue(n, w.ID(), w.What, w.Severity, w.Confidence), nil } } return nil, nil } // NewWeakKeyStrength builds a rule that detects RSA keys < 2048 bits -func NewWeakKeyStrength(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { +func NewWeakKeyStrength(id string, _ gosec.Config) (gosec.Rule, []ast.Node) { calls := gosec.NewCallList() calls.Add("crypto/rsa", "GenerateKey") bits := 2048 return &weakKeyStrength{ calls: calls, bits: bits, - MetaData: gosec.MetaData{ + MetaData: issue.MetaData{ ID: id, - Severity: gosec.Medium, - Confidence: gosec.High, + Severity: issue.Medium, + Confidence: issue.High, What: fmt.Sprintf("RSA keys should be at least %d bits", bits), }, }, []ast.Node{(*ast.CallExpr)(nil)} diff --git a/vendor/github.com/securego/gosec/v2/rules/rulelist.go b/vendor/github.com/securego/gosec/v2/rules/rulelist.go index b97813ed02..f9ca4f52c4 100644 --- a/vendor/github.com/securego/gosec/v2/rules/rulelist.go +++ b/vendor/github.com/securego/gosec/v2/rules/rulelist.go @@ -91,7 +91,7 @@ func Generate(trackSuppressions bool, filters ...RuleFilter) RuleList { {"G304", "File path provided as taint input", NewReadFile}, 
{"G305", "File path traversal when extracting zip archive", NewArchive}, {"G306", "Poor file permissions used when writing to a file", NewWritePerms}, - {"G307", "Unsafe defer call of a method returning an error", NewDeferredClosing}, + {"G307", "Poor file permissions used when creating a file with os.Create", NewOsCreatePerms}, // crypto {"G401", "Detect the usage of DES, RC4, MD5 or SHA1", NewUsesWeakCryptography}, diff --git a/vendor/github.com/securego/gosec/v2/rules/slowloris.go b/vendor/github.com/securego/gosec/v2/rules/slowloris.go index 60b5e95211..70db73f5f3 100644 --- a/vendor/github.com/securego/gosec/v2/rules/slowloris.go +++ b/vendor/github.com/securego/gosec/v2/rules/slowloris.go @@ -18,10 +18,11 @@ import ( "go/ast" "github.com/securego/gosec/v2" + "github.com/securego/gosec/v2/issue" ) type slowloris struct { - gosec.MetaData + issue.MetaData } func (r *slowloris) ID() string { @@ -44,13 +45,13 @@ func containsReadHeaderTimeout(node *ast.CompositeLit) bool { return false } -func (r *slowloris) Match(n ast.Node, ctx *gosec.Context) (*gosec.Issue, error) { +func (r *slowloris) Match(n ast.Node, ctx *gosec.Context) (*issue.Issue, error) { switch node := n.(type) { case *ast.CompositeLit: actualType := ctx.Info.TypeOf(node.Type) if actualType != nil && actualType.String() == "net/http.Server" { if !containsReadHeaderTimeout(node) { - return gosec.NewIssue(ctx, node, r.ID(), r.What, r.Severity, r.Confidence), nil + return ctx.NewIssue(node, r.ID(), r.What, r.Severity, r.Confidence), nil } } } @@ -58,13 +59,13 @@ func (r *slowloris) Match(n ast.Node, ctx *gosec.Context) (*gosec.Issue, error) } // NewSlowloris attempts to find the http.Server struct and check if the ReadHeaderTimeout is configured. -func NewSlowloris(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { +func NewSlowloris(id string, _ gosec.Config) (gosec.Rule, []ast.Node) { return &slowloris{ - MetaData: gosec.MetaData{ + MetaData: issue.MetaData{ ID: id, What: "Potential Slowloris Attack because ReadHeaderTimeout is not configured in the http.Server", - Confidence: gosec.Low, - Severity: gosec.Medium, + Confidence: issue.Low, + Severity: issue.Medium, }, }, []ast.Node{(*ast.CompositeLit)(nil)} } diff --git a/vendor/github.com/securego/gosec/v2/rules/sql.go b/vendor/github.com/securego/gosec/v2/rules/sql.go index ee99737d64..61222bfdb3 100644 --- a/vendor/github.com/securego/gosec/v2/rules/sql.go +++ b/vendor/github.com/securego/gosec/v2/rules/sql.go @@ -20,10 +20,11 @@ import ( "regexp" "github.com/securego/gosec/v2" + "github.com/securego/gosec/v2/issue" ) type sqlStatement struct { - gosec.MetaData + issue.MetaData gosec.CallList // Contains a list of patterns which must all match for the rule to match. 
@@ -97,6 +98,32 @@ func (s *sqlStrConcat) ID() string { return s.MetaData.ID } +// findInjectionInBranch walks down a set of expressions, and will create new issues if it finds SQL injections +// This method assumes you've already verified that the branch contains SQL syntax +func (s *sqlStrConcat) findInjectionInBranch(ctx *gosec.Context, branch []ast.Expr) *ast.BinaryExpr { + for _, node := range branch { + be, ok := node.(*ast.BinaryExpr) + if !ok { + continue + } + + operands := gosec.GetBinaryExprOperands(be) + + for _, op := range operands { + if _, ok := op.(*ast.BasicLit); ok { + continue + } + + if ident, ok := op.(*ast.Ident); ok && s.checkObject(ident, ctx) { + continue + } + + return be + } + } + return nil +} + // see if we can figure out what it is func (s *sqlStrConcat) checkObject(n *ast.Ident, c *gosec.Context) bool { if n.Obj != nil { @@ -113,7 +140,7 @@ func (s *sqlStrConcat) checkObject(n *ast.Ident, c *gosec.Context) bool { } // checkQuery verifies if the query parameters is a string concatenation -func (s *sqlStrConcat) checkQuery(call *ast.CallExpr, ctx *gosec.Context) (*gosec.Issue, error) { +func (s *sqlStrConcat) checkQuery(call *ast.CallExpr, ctx *gosec.Context) (*issue.Issue, error) { query, err := findQueryArg(call, ctx) if err != nil { return nil, err @@ -134,7 +161,29 @@ func (s *sqlStrConcat) checkQuery(call *ast.CallExpr, ctx *gosec.Context) (*gose if op, ok := op.(*ast.Ident); ok && s.checkObject(op, ctx) { continue } - return gosec.NewIssue(ctx, be, s.ID(), s.What, s.Severity, s.Confidence), nil + return ctx.NewIssue(be, s.ID(), s.What, s.Severity, s.Confidence), nil + } + } + } + + // Handle the case where an injection occurs as an infixed string concatenation, ie "SELECT * FROM foo WHERE name = '" + os.Args[0] + "' AND 1=1" + if id, ok := query.(*ast.Ident); ok { + var match bool + for _, str := range gosec.GetIdentStringValuesRecursive(id) { + if s.MatchPatterns(str) { + match = true + break + } + } + + if !match { + return nil, nil + } + + switch decl := id.Obj.Decl.(type) { + case *ast.AssignStmt: + if injection := s.findInjectionInBranch(ctx, decl.Rhs); injection != nil { + return ctx.NewIssue(injection, s.ID(), s.What, s.Severity, s.Confidence), nil } } } @@ -143,7 +192,7 @@ func (s *sqlStrConcat) checkQuery(call *ast.CallExpr, ctx *gosec.Context) (*gose } // Checks SQL query concatenation issues such as "SELECT * FROM table WHERE " + " ' OR 1=1" -func (s *sqlStrConcat) Match(n ast.Node, ctx *gosec.Context) (*gosec.Issue, error) { +func (s *sqlStrConcat) Match(n ast.Node, ctx *gosec.Context) (*issue.Issue, error) { switch stmt := n.(type) { case *ast.AssignStmt: for _, expr := range stmt.Rhs { @@ -156,20 +205,21 @@ func (s *sqlStrConcat) Match(n ast.Node, ctx *gosec.Context) (*gosec.Issue, erro return s.checkQuery(sqlQueryCall, ctx) } } + return nil, nil } // NewSQLStrConcat looks for cases where we are building SQL strings via concatenation -func NewSQLStrConcat(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { +func NewSQLStrConcat(id string, _ gosec.Config) (gosec.Rule, []ast.Node) { rule := &sqlStrConcat{ sqlStatement: sqlStatement{ patterns: []*regexp.Regexp{ - regexp.MustCompile(`(?i)(SELECT|DELETE|INSERT|UPDATE|INTO|FROM|WHERE) `), + regexp.MustCompile("(?i)(SELECT|DELETE|INSERT|UPDATE|INTO|FROM|WHERE)( |\n|\r|\t)"), }, - MetaData: gosec.MetaData{ + MetaData: issue.MetaData{ ID: id, - Severity: gosec.Medium, - Confidence: gosec.High, + Severity: issue.Medium, + Confidence: issue.High, What: "SQL string concatenation", }, CallList: 
gosec.NewCallList(), @@ -212,7 +262,7 @@ func (s *sqlStrFormat) constObject(e ast.Expr, c *gosec.Context) bool { return false } -func (s *sqlStrFormat) checkQuery(call *ast.CallExpr, ctx *gosec.Context) (*gosec.Issue, error) { +func (s *sqlStrFormat) checkQuery(call *ast.CallExpr, ctx *gosec.Context) (*issue.Issue, error) { query, err := findQueryArg(call, ctx) if err != nil { return nil, err @@ -233,7 +283,7 @@ func (s *sqlStrFormat) checkQuery(call *ast.CallExpr, ctx *gosec.Context) (*gose return nil, nil } -func (s *sqlStrFormat) checkFormatting(n ast.Node, ctx *gosec.Context) *gosec.Issue { +func (s *sqlStrFormat) checkFormatting(n ast.Node, ctx *gosec.Context) *issue.Issue { // argIndex changes the function argument which gets matched to the regex argIndex := 0 if node := s.fmtCalls.ContainsPkgCallExpr(n, ctx, false); node != nil { @@ -286,14 +336,14 @@ func (s *sqlStrFormat) checkFormatting(n ast.Node, ctx *gosec.Context) *gosec.Is } } if s.MatchPatterns(formatter) { - return gosec.NewIssue(ctx, n, s.ID(), s.What, s.Severity, s.Confidence) + return ctx.NewIssue(n, s.ID(), s.What, s.Severity, s.Confidence) } } return nil } // Check SQL query formatting issues such as "fmt.Sprintf("SELECT * FROM foo where '%s', userInput)" -func (s *sqlStrFormat) Match(n ast.Node, ctx *gosec.Context) (*gosec.Issue, error) { +func (s *sqlStrFormat) Match(n ast.Node, ctx *gosec.Context) (*issue.Issue, error) { switch stmt := n.(type) { case *ast.AssignStmt: for _, expr := range stmt.Rhs { @@ -323,7 +373,7 @@ func (s *sqlStrFormat) Match(n ast.Node, ctx *gosec.Context) (*gosec.Issue, erro } // NewSQLStrFormat looks for cases where we're building SQL query strings using format strings -func NewSQLStrFormat(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { +func NewSQLStrFormat(id string, _ gosec.Config) (gosec.Rule, []ast.Node) { rule := &sqlStrFormat{ CallList: gosec.NewCallList(), fmtCalls: gosec.NewCallList(), @@ -334,10 +384,10 @@ func NewSQLStrFormat(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { regexp.MustCompile("(?i)(SELECT|DELETE|INSERT|UPDATE|INTO|FROM|WHERE)( |\n|\r|\t)"), regexp.MustCompile("%[^bdoxXfFp]"), }, - MetaData: gosec.MetaData{ + MetaData: issue.MetaData{ ID: id, - Severity: gosec.Medium, - Confidence: gosec.High, + Severity: issue.Medium, + Confidence: issue.High, What: "SQL string formatting", }, }, diff --git a/vendor/github.com/securego/gosec/v2/rules/ssh.go b/vendor/github.com/securego/gosec/v2/rules/ssh.go index 01f37da510..e2ba5a3f4e 100644 --- a/vendor/github.com/securego/gosec/v2/rules/ssh.go +++ b/vendor/github.com/securego/gosec/v2/rules/ssh.go @@ -4,10 +4,11 @@ import ( "go/ast" "github.com/securego/gosec/v2" + "github.com/securego/gosec/v2/issue" ) type sshHostKey struct { - gosec.MetaData + issue.MetaData pkg string calls []string } @@ -16,23 +17,23 @@ func (r *sshHostKey) ID() string { return r.MetaData.ID } -func (r *sshHostKey) Match(n ast.Node, c *gosec.Context) (gi *gosec.Issue, err error) { +func (r *sshHostKey) Match(n ast.Node, c *gosec.Context) (gi *issue.Issue, err error) { if _, matches := gosec.MatchCallByPackage(n, c, r.pkg, r.calls...); matches { - return gosec.NewIssue(c, n, r.ID(), r.What, r.Severity, r.Confidence), nil + return c.NewIssue(n, r.ID(), r.What, r.Severity, r.Confidence), nil } return nil, nil } // NewSSHHostKey rule detects the use of insecure ssh HostKeyCallback. 
-func NewSSHHostKey(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { +func NewSSHHostKey(id string, _ gosec.Config) (gosec.Rule, []ast.Node) { return &sshHostKey{ pkg: "golang.org/x/crypto/ssh", calls: []string{"InsecureIgnoreHostKey"}, - MetaData: gosec.MetaData{ + MetaData: issue.MetaData{ ID: id, What: "Use of ssh InsecureIgnoreHostKey should be audited", - Severity: gosec.Medium, - Confidence: gosec.High, + Severity: issue.Medium, + Confidence: issue.High, }, }, []ast.Node{(*ast.CallExpr)(nil)} } diff --git a/vendor/github.com/securego/gosec/v2/rules/ssrf.go b/vendor/github.com/securego/gosec/v2/rules/ssrf.go index 86bb8278d3..dbf01081b2 100644 --- a/vendor/github.com/securego/gosec/v2/rules/ssrf.go +++ b/vendor/github.com/securego/gosec/v2/rules/ssrf.go @@ -5,10 +5,11 @@ import ( "go/types" "github.com/securego/gosec/v2" + "github.com/securego/gosec/v2/issue" ) type ssrf struct { - gosec.MetaData + issue.MetaData gosec.CallList } @@ -40,25 +41,25 @@ func (r *ssrf) ResolveVar(n *ast.CallExpr, c *gosec.Context) bool { } // Match inspects AST nodes to determine if certain net/http methods are called with variable input -func (r *ssrf) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, error) { +func (r *ssrf) Match(n ast.Node, c *gosec.Context) (*issue.Issue, error) { // Call expression is using http package directly if node := r.ContainsPkgCallExpr(n, c, false); node != nil { if r.ResolveVar(node, c) { - return gosec.NewIssue(c, n, r.ID(), r.What, r.Severity, r.Confidence), nil + return c.NewIssue(n, r.ID(), r.What, r.Severity, r.Confidence), nil } } return nil, nil } // NewSSRFCheck detects cases where HTTP requests are sent -func NewSSRFCheck(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { +func NewSSRFCheck(id string, _ gosec.Config) (gosec.Rule, []ast.Node) { rule := &ssrf{ CallList: gosec.NewCallList(), - MetaData: gosec.MetaData{ + MetaData: issue.MetaData{ ID: id, What: "Potential HTTP request made with variable url", - Severity: gosec.Medium, - Confidence: gosec.Medium, + Severity: issue.Medium, + Confidence: issue.Medium, }, } rule.AddAll("net/http", "Do", "Get", "Head", "Post", "PostForm", "RoundTrip") diff --git a/vendor/github.com/securego/gosec/v2/rules/subproc.go b/vendor/github.com/securego/gosec/v2/rules/subproc.go index 2b6cb186cd..1e2cedaa58 100644 --- a/vendor/github.com/securego/gosec/v2/rules/subproc.go +++ b/vendor/github.com/securego/gosec/v2/rules/subproc.go @@ -19,10 +19,11 @@ import ( "go/types" "github.com/securego/gosec/v2" + "github.com/securego/gosec/v2/issue" ) type subprocess struct { - gosec.MetaData + issue.MetaData gosec.CallList } @@ -39,7 +40,7 @@ func (r *subprocess) ID() string { // is unsafe. 
For example: // // syscall.Exec("echo", "foobar" + tainted) -func (r *subprocess) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, error) { +func (r *subprocess) Match(n ast.Node, c *gosec.Context) (*issue.Issue, error) { if node := r.ContainsPkgCallExpr(n, c, false); node != nil { args := node.Args if r.isContext(n, c) { @@ -64,7 +65,7 @@ func (r *subprocess) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, error) { _, assignment := ident.Obj.Decl.(*ast.AssignStmt) if variable && assignment { if !gosec.TryResolve(ident, c) { - return gosec.NewIssue(c, n, r.ID(), "Subprocess launched with variable", gosec.Medium, gosec.High), nil + return c.NewIssue(n, r.ID(), "Subprocess launched with variable", issue.Medium, issue.High), nil } } case *ast.Field: @@ -74,21 +75,21 @@ func (r *subprocess) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, error) { vv, vvok := obj.(*types.Var) if vvok && vv.Parent().Lookup(ident.Name) == nil { - return gosec.NewIssue(c, n, r.ID(), "Subprocess launched with variable", gosec.Medium, gosec.High), nil + return c.NewIssue(n, r.ID(), "Subprocess launched with variable", issue.Medium, issue.High), nil } } case *ast.ValueSpec: _, valueSpec := ident.Obj.Decl.(*ast.ValueSpec) if variable && valueSpec { if !gosec.TryResolve(ident, c) { - return gosec.NewIssue(c, n, r.ID(), "Subprocess launched with variable", gosec.Medium, gosec.High), nil + return c.NewIssue(n, r.ID(), "Subprocess launched with variable", issue.Medium, issue.High), nil } } } } } else if !gosec.TryResolve(arg, c) { // the arg is not a constant or a variable but instead a function call or os.Args[i] - return gosec.NewIssue(c, n, r.ID(), "Subprocess launched with a potential tainted input or cmd arguments", gosec.Medium, gosec.High), nil + return c.NewIssue(n, r.ID(), "Subprocess launched with a potential tainted input or cmd arguments", issue.Medium, issue.High), nil } } } @@ -96,7 +97,7 @@ func (r *subprocess) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, error) { } // isContext checks whether or not the node is a CommandContext call or not -// Thi is required in order to skip the first argument from the check. +// This is required in order to skip the first argument from the check. 
func (r *subprocess) isContext(n ast.Node, ctx *gosec.Context) bool { selector, indent, err := gosec.GetCallInfo(n, ctx) if err != nil { @@ -109,8 +110,8 @@ func (r *subprocess) isContext(n ast.Node, ctx *gosec.Context) bool { } // NewSubproc detects cases where we are forking out to an external process -func NewSubproc(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { - rule := &subprocess{gosec.MetaData{ID: id}, gosec.NewCallList()} +func NewSubproc(id string, _ gosec.Config) (gosec.Rule, []ast.Node) { + rule := &subprocess{issue.MetaData{ID: id}, gosec.NewCallList()} rule.Add("os/exec", "Command") rule.Add("os/exec", "CommandContext") rule.Add("syscall", "Exec") diff --git a/vendor/github.com/securego/gosec/v2/rules/tempfiles.go b/vendor/github.com/securego/gosec/v2/rules/tempfiles.go index 63822c093c..6fef52a2cb 100644 --- a/vendor/github.com/securego/gosec/v2/rules/tempfiles.go +++ b/vendor/github.com/securego/gosec/v2/rules/tempfiles.go @@ -19,10 +19,11 @@ import ( "regexp" "github.com/securego/gosec/v2" + "github.com/securego/gosec/v2/issue" ) type badTempFile struct { - gosec.MetaData + issue.MetaData calls gosec.CallList args *regexp.Regexp argCalls gosec.CallList @@ -33,15 +34,15 @@ func (t *badTempFile) ID() string { return t.MetaData.ID } -func (t *badTempFile) findTempDirArgs(n ast.Node, c *gosec.Context, suspect ast.Node) *gosec.Issue { +func (t *badTempFile) findTempDirArgs(n ast.Node, c *gosec.Context, suspect ast.Node) *issue.Issue { if s, e := gosec.GetString(suspect); e == nil { if t.args.MatchString(s) { - return gosec.NewIssue(c, n, t.ID(), t.What, t.Severity, t.Confidence) + return c.NewIssue(n, t.ID(), t.What, t.Severity, t.Confidence) } return nil } if ce := t.argCalls.ContainsPkgCallExpr(suspect, c, false); ce != nil { - return gosec.NewIssue(c, n, t.ID(), t.What, t.Severity, t.Confidence) + return c.NewIssue(n, t.ID(), t.What, t.Severity, t.Confidence) } if be, ok := suspect.(*ast.BinaryExpr); ok { if ops := gosec.GetBinaryExprOperands(be); len(ops) != 0 { @@ -55,7 +56,7 @@ func (t *badTempFile) findTempDirArgs(n ast.Node, c *gosec.Context, suspect ast. 
return nil } -func (t *badTempFile) Match(n ast.Node, c *gosec.Context) (gi *gosec.Issue, err error) { +func (t *badTempFile) Match(n ast.Node, c *gosec.Context) (gi *issue.Issue, err error) { if node := t.calls.ContainsPkgCallExpr(n, c, false); node != nil { return t.findTempDirArgs(n, c, node.Args[0]), nil } @@ -63,7 +64,7 @@ func (t *badTempFile) Match(n ast.Node, c *gosec.Context) (gi *gosec.Issue, err } // NewBadTempFile detects direct writes to predictable path in temporary directory -func NewBadTempFile(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { +func NewBadTempFile(id string, _ gosec.Config) (gosec.Rule, []ast.Node) { calls := gosec.NewCallList() calls.Add("io/ioutil", "WriteFile") calls.AddAll("os", "Create", "WriteFile") @@ -77,10 +78,10 @@ func NewBadTempFile(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { args: regexp.MustCompile(`^(/(usr|var))?/tmp(/.*)?$`), argCalls: argCalls, nestedCalls: nestedCalls, - MetaData: gosec.MetaData{ + MetaData: issue.MetaData{ ID: id, - Severity: gosec.Medium, - Confidence: gosec.High, + Severity: issue.Medium, + Confidence: issue.High, What: "File creation in shared tmp directory without using ioutil.Tempfile", }, }, []ast.Node{(*ast.CallExpr)(nil)} diff --git a/vendor/github.com/securego/gosec/v2/rules/templates.go b/vendor/github.com/securego/gosec/v2/rules/templates.go index 1eec7fba10..728766f457 100644 --- a/vendor/github.com/securego/gosec/v2/rules/templates.go +++ b/vendor/github.com/securego/gosec/v2/rules/templates.go @@ -18,10 +18,11 @@ import ( "go/ast" "github.com/securego/gosec/v2" + "github.com/securego/gosec/v2/issue" ) type templateCheck struct { - gosec.MetaData + issue.MetaData calls gosec.CallList } @@ -29,11 +30,11 @@ func (t *templateCheck) ID() string { return t.MetaData.ID } -func (t *templateCheck) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, error) { +func (t *templateCheck) Match(n ast.Node, c *gosec.Context) (*issue.Issue, error) { if node := t.calls.ContainsPkgCallExpr(n, c, false); node != nil { for _, arg := range node.Args { if _, ok := arg.(*ast.BasicLit); !ok { // basic lits are safe - return gosec.NewIssue(c, n, t.ID(), t.What, t.Severity, t.Confidence), nil + return c.NewIssue(n, t.ID(), t.What, t.Severity, t.Confidence), nil } } } @@ -42,7 +43,7 @@ func (t *templateCheck) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, error // NewTemplateCheck constructs the template check rule. This rule is used to // find use of templates where HTML/JS escaping is not being used -func NewTemplateCheck(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { +func NewTemplateCheck(id string, _ gosec.Config) (gosec.Rule, []ast.Node) { calls := gosec.NewCallList() calls.Add("html/template", "HTML") calls.Add("html/template", "HTMLAttr") @@ -50,10 +51,10 @@ func NewTemplateCheck(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { calls.Add("html/template", "URL") return &templateCheck{ calls: calls, - MetaData: gosec.MetaData{ + MetaData: issue.MetaData{ ID: id, - Severity: gosec.Medium, - Confidence: gosec.Low, + Severity: issue.Medium, + Confidence: issue.Low, What: "The used method does not auto-escape HTML. 
This can potentially lead to 'Cross-site Scripting' vulnerabilities, in case the attacker controls the input.", }, }, []ast.Node{(*ast.CallExpr)(nil)} diff --git a/vendor/github.com/securego/gosec/v2/rules/tls.go b/vendor/github.com/securego/gosec/v2/rules/tls.go index 76dfd84ff6..65a0b5a33a 100644 --- a/vendor/github.com/securego/gosec/v2/rules/tls.go +++ b/vendor/github.com/securego/gosec/v2/rules/tls.go @@ -24,10 +24,11 @@ import ( "strconv" "github.com/securego/gosec/v2" + "github.com/securego/gosec/v2/issue" ) type insecureConfigTLS struct { - gosec.MetaData + issue.MetaData MinVersion int64 MaxVersion int64 requiredType string @@ -49,13 +50,13 @@ func stringInSlice(a string, list []string) bool { return false } -func (t *insecureConfigTLS) processTLSCipherSuites(n ast.Node, c *gosec.Context) *gosec.Issue { +func (t *insecureConfigTLS) processTLSCipherSuites(n ast.Node, c *gosec.Context) *issue.Issue { if ciphers, ok := n.(*ast.CompositeLit); ok { for _, cipher := range ciphers.Elts { if ident, ok := cipher.(*ast.SelectorExpr); ok { if !stringInSlice(ident.Sel.Name, t.goodCiphers) { err := fmt.Sprintf("TLS Bad Cipher Suite: %s", ident.Sel.Name) - return gosec.NewIssue(c, ident, t.ID(), err, gosec.High, gosec.High) + return c.NewIssue(ident, t.ID(), err, issue.High, issue.High) } } } @@ -63,31 +64,51 @@ func (t *insecureConfigTLS) processTLSCipherSuites(n ast.Node, c *gosec.Context) return nil } -func (t *insecureConfigTLS) processTLSConfVal(n *ast.KeyValueExpr, c *gosec.Context) *gosec.Issue { - if ident, ok := n.Key.(*ast.Ident); ok { +func (t *insecureConfigTLS) processTLSConf(n ast.Node, c *gosec.Context) *issue.Issue { + if kve, ok := n.(*ast.KeyValueExpr); ok { + issue := t.processTLSConfVal(kve.Key, kve.Value, c) + if issue != nil { + return issue + } + } else if assign, ok := n.(*ast.AssignStmt); ok { + if len(assign.Lhs) < 1 || len(assign.Rhs) < 1 { + return nil + } + if selector, ok := assign.Lhs[0].(*ast.SelectorExpr); ok { + issue := t.processTLSConfVal(selector.Sel, assign.Rhs[0], c) + if issue != nil { + return issue + } + } + } + return nil +} + +func (t *insecureConfigTLS) processTLSConfVal(key ast.Expr, value ast.Expr, c *gosec.Context) *issue.Issue { + if ident, ok := key.(*ast.Ident); ok { switch ident.Name { case "InsecureSkipVerify": - if node, ok := n.Value.(*ast.Ident); ok { + if node, ok := value.(*ast.Ident); ok { if node.Name != "false" { - return gosec.NewIssue(c, n, t.ID(), "TLS InsecureSkipVerify set true.", gosec.High, gosec.High) + return c.NewIssue(value, t.ID(), "TLS InsecureSkipVerify set true.", issue.High, issue.High) } } else { // TODO(tk): symbol tab look up to get the actual value - return gosec.NewIssue(c, n, t.ID(), "TLS InsecureSkipVerify may be true.", gosec.High, gosec.Low) + return c.NewIssue(value, t.ID(), "TLS InsecureSkipVerify may be true.", issue.High, issue.Low) } case "PreferServerCipherSuites": - if node, ok := n.Value.(*ast.Ident); ok { + if node, ok := value.(*ast.Ident); ok { if node.Name == "false" { - return gosec.NewIssue(c, n, t.ID(), "TLS PreferServerCipherSuites set false.", gosec.Medium, gosec.High) + return c.NewIssue(value, t.ID(), "TLS PreferServerCipherSuites set false.", issue.Medium, issue.High) } } else { // TODO(tk): symbol tab look up to get the actual value - return gosec.NewIssue(c, n, t.ID(), "TLS PreferServerCipherSuites may be false.", gosec.Medium, gosec.Low) + return c.NewIssue(value, t.ID(), "TLS PreferServerCipherSuites may be false.", issue.Medium, issue.Low) } case "MinVersion": - if d, ok := 
n.Value.(*ast.Ident); ok { + if d, ok := value.(*ast.Ident); ok { obj := d.Obj if obj == nil { for _, f := range c.PkgFiles { @@ -107,7 +128,7 @@ func (t *insecureConfigTLS) processTLSConfVal(n *ast.KeyValueExpr, c *gosec.Cont tObj := imp.Scope().Lookup(sel) if cst, ok := tObj.(*types.Const); ok { // ..got the value check if this can be translated - if minVersion, err := strconv.ParseInt(cst.Val().String(), 10, 64); err == nil { + if minVersion, err := strconv.ParseInt(cst.Val().String(), 0, 64); err == nil { t.actualMinVersion = minVersion } } @@ -118,10 +139,10 @@ func (t *insecureConfigTLS) processTLSConfVal(n *ast.KeyValueExpr, c *gosec.Cont t.actualMinVersion = ival } } - } else if ival, ierr := gosec.GetInt(n.Value); ierr == nil { + } else if ival, ierr := gosec.GetInt(value); ierr == nil { t.actualMinVersion = ival } else { - if se, ok := n.Value.(*ast.SelectorExpr); ok { + if se, ok := value.(*ast.SelectorExpr); ok { if pkg, ok := se.X.(*ast.Ident); ok { if ip, ok := gosec.GetImportPath(pkg.Name, c); ok && ip == "crypto/tls" { t.actualMinVersion = t.mapVersion(se.Sel.Name) @@ -131,10 +152,10 @@ func (t *insecureConfigTLS) processTLSConfVal(n *ast.KeyValueExpr, c *gosec.Cont } case "MaxVersion": - if ival, ierr := gosec.GetInt(n.Value); ierr == nil { + if ival, ierr := gosec.GetInt(value); ierr == nil { t.actualMaxVersion = ival } else { - if se, ok := n.Value.(*ast.SelectorExpr); ok { + if se, ok := value.(*ast.SelectorExpr); ok { if pkg, ok := se.X.(*ast.Ident); ok { if ip, ok := gosec.GetImportPath(pkg.Name, c); ok && ip == "crypto/tls" { t.actualMaxVersion = t.mapVersion(se.Sel.Name) @@ -144,7 +165,7 @@ func (t *insecureConfigTLS) processTLSConfVal(n *ast.KeyValueExpr, c *gosec.Cont } case "CipherSuites": - if ret := t.processTLSCipherSuites(n.Value, c); ret != nil { + if ret := t.processTLSCipherSuites(value, c); ret != nil { return ret } @@ -168,16 +189,16 @@ func (t *insecureConfigTLS) mapVersion(version string) int64 { return v } -func (t *insecureConfigTLS) checkVersion(n ast.Node, c *gosec.Context) *gosec.Issue { +func (t *insecureConfigTLS) checkVersion(n ast.Node, c *gosec.Context) *issue.Issue { if t.actualMaxVersion == 0 && t.actualMinVersion >= t.MinVersion { // no warning is generated since the min version is greater than the secure min version return nil } if t.actualMinVersion < t.MinVersion { - return gosec.NewIssue(c, n, t.ID(), "TLS MinVersion too low.", gosec.High, gosec.High) + return c.NewIssue(n, t.ID(), "TLS MinVersion too low.", issue.High, issue.High) } if t.actualMaxVersion < t.MaxVersion { - return gosec.NewIssue(c, n, t.ID(), "TLS MaxVersion too low.", gosec.High, gosec.High) + return c.NewIssue(n, t.ID(), "TLS MaxVersion too low.", issue.High, issue.High) } return nil } @@ -187,22 +208,32 @@ func (t *insecureConfigTLS) resetVersion() { t.actualMinVersion = 0 } -func (t *insecureConfigTLS) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, error) { +func (t *insecureConfigTLS) Match(n ast.Node, c *gosec.Context) (*issue.Issue, error) { if complit, ok := n.(*ast.CompositeLit); ok && complit.Type != nil { actualType := c.Info.TypeOf(complit.Type) if actualType != nil && actualType.String() == t.requiredType { for _, elt := range complit.Elts { - if kve, ok := elt.(*ast.KeyValueExpr); ok { - issue := t.processTLSConfVal(kve, c) - if issue != nil { - return issue, nil - } + issue := t.processTLSConf(elt, c) + if issue != nil { + return issue, nil } } issue := t.checkVersion(complit, c) t.resetVersion() return issue, nil } + } else { + if assign, ok := 
n.(*ast.AssignStmt); ok && len(assign.Lhs) > 0 { + if selector, ok := assign.Lhs[0].(*ast.SelectorExpr); ok { + actualType := c.Info.TypeOf(selector.X) + if actualType != nil && actualType.String() == t.requiredType { + issue := t.processTLSConf(assign, c) + if issue != nil { + return issue, nil + } + } + } + } } return nil, nil } diff --git a/vendor/github.com/securego/gosec/v2/rules/tls_config.go b/vendor/github.com/securego/gosec/v2/rules/tls_config.go index 5d68593d82..cbbdf7983a 100644 --- a/vendor/github.com/securego/gosec/v2/rules/tls_config.go +++ b/vendor/github.com/securego/gosec/v2/rules/tls_config.go @@ -4,13 +4,14 @@ import ( "go/ast" "github.com/securego/gosec/v2" + "github.com/securego/gosec/v2/issue" ) // NewModernTLSCheck creates a check for Modern TLS ciphers // DO NOT EDIT - generated by tlsconfig tool -func NewModernTLSCheck(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { +func NewModernTLSCheck(id string, _ gosec.Config) (gosec.Rule, []ast.Node) { return &insecureConfigTLS{ - MetaData: gosec.MetaData{ID: id}, + MetaData: issue.MetaData{ID: id}, requiredType: "crypto/tls.Config", MinVersion: 0x0304, MaxVersion: 0x0304, @@ -19,14 +20,14 @@ func NewModernTLSCheck(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { "TLS_AES_256_GCM_SHA384", "TLS_CHACHA20_POLY1305_SHA256", }, - }, []ast.Node{(*ast.CompositeLit)(nil)} + }, []ast.Node{(*ast.CompositeLit)(nil), (*ast.AssignStmt)(nil)} } // NewIntermediateTLSCheck creates a check for Intermediate TLS ciphers // DO NOT EDIT - generated by tlsconfig tool -func NewIntermediateTLSCheck(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { +func NewIntermediateTLSCheck(id string, _ gosec.Config) (gosec.Rule, []ast.Node) { return &insecureConfigTLS{ - MetaData: gosec.MetaData{ID: id}, + MetaData: issue.MetaData{ID: id}, requiredType: "crypto/tls.Config", MinVersion: 0x0303, MaxVersion: 0x0304, @@ -45,14 +46,14 @@ func NewIntermediateTLSCheck(id string, conf gosec.Config) (gosec.Rule, []ast.No "TLS_DHE_RSA_WITH_AES_128_GCM_SHA256", "TLS_DHE_RSA_WITH_AES_256_GCM_SHA384", }, - }, []ast.Node{(*ast.CompositeLit)(nil)} + }, []ast.Node{(*ast.CompositeLit)(nil), (*ast.AssignStmt)(nil)} } // NewOldTLSCheck creates a check for Old TLS ciphers // DO NOT EDIT - generated by tlsconfig tool -func NewOldTLSCheck(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { +func NewOldTLSCheck(id string, _ gosec.Config) (gosec.Rule, []ast.Node) { return &insecureConfigTLS{ - MetaData: gosec.MetaData{ID: id}, + MetaData: issue.MetaData{ID: id}, requiredType: "crypto/tls.Config", MinVersion: 0x0301, MaxVersion: 0x0304, @@ -88,5 +89,5 @@ func NewOldTLSCheck(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { "TLS_RSA_WITH_AES_256_CBC_SHA", "TLS_RSA_WITH_3DES_EDE_CBC_SHA", }, - }, []ast.Node{(*ast.CompositeLit)(nil)} + }, []ast.Node{(*ast.CompositeLit)(nil), (*ast.AssignStmt)(nil)} } diff --git a/vendor/github.com/securego/gosec/v2/rules/unsafe.go b/vendor/github.com/securego/gosec/v2/rules/unsafe.go index 88a298fb52..2e2adca7c7 100644 --- a/vendor/github.com/securego/gosec/v2/rules/unsafe.go +++ b/vendor/github.com/securego/gosec/v2/rules/unsafe.go @@ -18,10 +18,11 @@ import ( "go/ast" "github.com/securego/gosec/v2" + "github.com/securego/gosec/v2/issue" ) type usingUnsafe struct { - gosec.MetaData + issue.MetaData pkg string calls []string } @@ -30,24 +31,24 @@ func (r *usingUnsafe) ID() string { return r.MetaData.ID } -func (r *usingUnsafe) Match(n ast.Node, c *gosec.Context) (gi *gosec.Issue, err error) { +func (r *usingUnsafe) 
Match(n ast.Node, c *gosec.Context) (gi *issue.Issue, err error) { if _, matches := gosec.MatchCallByPackage(n, c, r.pkg, r.calls...); matches { - return gosec.NewIssue(c, n, r.ID(), r.What, r.Severity, r.Confidence), nil + return c.NewIssue(n, r.ID(), r.What, r.Severity, r.Confidence), nil } return nil, nil } // NewUsingUnsafe rule detects the use of the unsafe package. This is only // really useful for auditing purposes. -func NewUsingUnsafe(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { +func NewUsingUnsafe(id string, _ gosec.Config) (gosec.Rule, []ast.Node) { return &usingUnsafe{ pkg: "unsafe", - calls: []string{"Alignof", "Offsetof", "Sizeof", "Pointer"}, - MetaData: gosec.MetaData{ + calls: []string{"Pointer", "String", "StringData", "Slice", "SliceData"}, + MetaData: issue.MetaData{ ID: id, What: "Use of unsafe calls should be audited", - Severity: gosec.Low, - Confidence: gosec.High, + Severity: issue.Low, + Confidence: issue.High, }, }, []ast.Node{(*ast.CallExpr)(nil)} } diff --git a/vendor/github.com/securego/gosec/v2/rules/weakcrypto.go b/vendor/github.com/securego/gosec/v2/rules/weakcrypto.go index eecb88f046..4f2ab11d15 100644 --- a/vendor/github.com/securego/gosec/v2/rules/weakcrypto.go +++ b/vendor/github.com/securego/gosec/v2/rules/weakcrypto.go @@ -18,10 +18,11 @@ import ( "go/ast" "github.com/securego/gosec/v2" + "github.com/securego/gosec/v2/issue" ) type usesWeakCryptography struct { - gosec.MetaData + issue.MetaData blocklist map[string][]string } @@ -29,17 +30,17 @@ func (r *usesWeakCryptography) ID() string { return r.MetaData.ID } -func (r *usesWeakCryptography) Match(n ast.Node, c *gosec.Context) (*gosec.Issue, error) { +func (r *usesWeakCryptography) Match(n ast.Node, c *gosec.Context) (*issue.Issue, error) { for pkg, funcs := range r.blocklist { if _, matched := gosec.MatchCallByPackage(n, c, pkg, funcs...); matched { - return gosec.NewIssue(c, n, r.ID(), r.What, r.Severity, r.Confidence), nil + return c.NewIssue(n, r.ID(), r.What, r.Severity, r.Confidence), nil } } return nil, nil } // NewUsesWeakCryptography detects uses of des.* md5.* or rc4.* -func NewUsesWeakCryptography(id string, conf gosec.Config) (gosec.Rule, []ast.Node) { +func NewUsesWeakCryptography(id string, _ gosec.Config) (gosec.Rule, []ast.Node) { calls := make(map[string][]string) calls["crypto/des"] = []string{"NewCipher", "NewTripleDESCipher"} calls["crypto/md5"] = []string{"New", "Sum"} @@ -47,10 +48,10 @@ func NewUsesWeakCryptography(id string, conf gosec.Config) (gosec.Rule, []ast.No calls["crypto/rc4"] = []string{"NewCipher"} rule := &usesWeakCryptography{ blocklist: calls, - MetaData: gosec.MetaData{ + MetaData: issue.MetaData{ ID: id, - Severity: gosec.Medium, - Confidence: gosec.High, + Severity: issue.Medium, + Confidence: issue.High, What: "Use of weak cryptographic primitive", }, } diff --git a/vendor/github.com/sirupsen/logrus/README.md b/vendor/github.com/sirupsen/logrus/README.md index b042c896f2..d1d4a85fd7 100644 --- a/vendor/github.com/sirupsen/logrus/README.md +++ b/vendor/github.com/sirupsen/logrus/README.md @@ -9,7 +9,7 @@ the last thing you want from your Logging library (again...). This does not mean Logrus is dead. Logrus will continue to be maintained for security, (backwards compatible) bug fixes, and performance (where we are -limited by the interface). +limited by the interface). I believe Logrus' biggest contribution is to have played a part in today's widespread use of structured logging in Golang. 
There doesn't seem to be a @@ -43,7 +43,7 @@ plain text): With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash or Splunk: -```json +```text {"animal":"walrus","level":"info","msg":"A group of walrus emerges from the ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"} @@ -99,7 +99,7 @@ time="2015-03-26T01:27:38-04:00" level=fatal method=github.com/sirupsen/arcticcr ``` Note that this does add measurable overhead - the cost will depend on the version of Go, but is between 20 and 40% in recent tests with 1.6 and 1.7. You can validate this in your -environment via benchmarks: +environment via benchmarks: ``` go test -bench=.*CallerTracing ``` @@ -317,6 +317,8 @@ log.SetLevel(log.InfoLevel) It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose environment if your application has that. +Note: If you want different log levels for global (`log.SetLevel(...)`) and syslog logging, please check the [syslog hook README](hooks/syslog/README.md#different-log-levels-for-local-and-remote-logging). + #### Entries Besides the fields added with `WithField` or `WithFields` some fields are diff --git a/vendor/github.com/sirupsen/logrus/writer.go b/vendor/github.com/sirupsen/logrus/writer.go index 72e8e3a1b6..074fd4b8bd 100644 --- a/vendor/github.com/sirupsen/logrus/writer.go +++ b/vendor/github.com/sirupsen/logrus/writer.go @@ -4,6 +4,7 @@ import ( "bufio" "io" "runtime" + "strings" ) // Writer at INFO level. See WriterLevel for details. @@ -20,15 +21,18 @@ func (logger *Logger) WriterLevel(level Level) *io.PipeWriter { return NewEntry(logger).WriterLevel(level) } +// Writer returns an io.Writer that writes to the logger at the info log level func (entry *Entry) Writer() *io.PipeWriter { return entry.WriterLevel(InfoLevel) } +// WriterLevel returns an io.Writer that writes to the logger at the given log level func (entry *Entry) WriterLevel(level Level) *io.PipeWriter { reader, writer := io.Pipe() var printFunc func(args ...interface{}) + // Determine which log function to use based on the specified log level switch level { case TraceLevel: printFunc = entry.Trace @@ -48,23 +52,51 @@ func (entry *Entry) WriterLevel(level Level) *io.PipeWriter { printFunc = entry.Print } + // Start a new goroutine to scan the input and write it to the logger using the specified print function. + // It splits the input into chunks of up to 64KB to avoid buffer overflows. 
go entry.writerScanner(reader, printFunc) + + // Set a finalizer function to close the writer when it is garbage collected runtime.SetFinalizer(writer, writerFinalizer) return writer } +// writerScanner scans the input from the reader and writes it to the logger func (entry *Entry) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) { scanner := bufio.NewScanner(reader) + + // Set the buffer size to the maximum token size to avoid buffer overflows + scanner.Buffer(make([]byte, bufio.MaxScanTokenSize), bufio.MaxScanTokenSize) + + // Define a split function to split the input into chunks of up to 64KB + chunkSize := bufio.MaxScanTokenSize // 64KB + splitFunc := func(data []byte, atEOF bool) (int, []byte, error) { + if len(data) >= chunkSize { + return chunkSize, data[:chunkSize], nil + } + + return bufio.ScanLines(data, atEOF) + } + + // Use the custom split function to split the input + scanner.Split(splitFunc) + + // Scan the input and write it to the logger using the specified print function for scanner.Scan() { - printFunc(scanner.Text()) + printFunc(strings.TrimRight(scanner.Text(), "\r\n")) } + + // If there was an error while scanning the input, log an error if err := scanner.Err(); err != nil { entry.Errorf("Error while reading from Writer: %s", err) } + + // Close the reader when we are done reader.Close() } +// WriterFinalizer is a finalizer function that closes then given writer when it is garbage collected func writerFinalizer(writer *io.PipeWriter) { writer.Close() } diff --git a/vendor/github.com/sivchari/containedctx/.golangci.yml b/vendor/github.com/sivchari/containedctx/.golangci.yml new file mode 100644 index 0000000000..f687df8362 --- /dev/null +++ b/vendor/github.com/sivchari/containedctx/.golangci.yml @@ -0,0 +1,38 @@ +run: + timeout: 5m + skip-files: [] + +linters-settings: + govet: + enable-all: true + disable: + - fieldalignment + gocyclo: + min-complexity: 12 + misspell: + locale: US + godox: + keywords: + - FIXME + gofumpt: + extra-rules: true + +linters: + disable-all: true + enable: + - govet + - revive + - goimports + - staticcheck + - gosimple + - unused + - godox + - gofumpt + - misspell + - gocyclo + +issues: + exclude-use-default: true + max-per-linter: 0 + max-same-issues: 0 + exclude: [] diff --git a/vendor/github.com/sivchari/containedctx/LICENCE b/vendor/github.com/sivchari/containedctx/LICENCE new file mode 100644 index 0000000000..5185ec09a5 --- /dev/null +++ b/vendor/github.com/sivchari/containedctx/LICENCE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2021 sivchari + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/sivchari/containedctx/README.md b/vendor/github.com/sivchari/containedctx/README.md new file mode 100644 index 0000000000..0c2dd208d4 --- /dev/null +++ b/vendor/github.com/sivchari/containedctx/README.md @@ -0,0 +1,64 @@ +# containedctx + +[![test_and_lint](https://github.com/sivchari/containedctx/actions/workflows/ci.yml/badge.svg?branch=main)](https://github.com/sivchari/containedctx/actions/workflows/ci.yml) + +containedctx is a linter that detects struct contained context.Context field. +This is discouraged technique in favour of passing context as first argument of method or function. +For rationale please read [Contexts and structs](https://go.dev/blog/context-and-structs) the Go blog post. + +## Instruction + +```sh +go install github.com/sivchari/containedctx/cmd/containedctx +``` + +## Usage + +```go +package main + +import "context" + +type ok struct { + i int + s string +} + +type ng struct { + ctx context.Context +} + +type empty struct{} +``` + +```console +go vet -vettool=(which containedctx) ./... + +# a +./main.go:11:2: found a struct that contains a context.Context field +``` + + +## CI + +### CircleCI + +```yaml +- run: + name: install containedctx + command: go install github.com/sivchari/containedctx/cmd/containedctx + +- run: + name: run containedctx + command: go vet -vettool=`which containedctx` ./... +``` + +### GitHub Actions + +```yaml +- name: install containedctx + run: go install github.com/sivchari/containedctx/cmd/containedctx + +- name: run containedctx + run: go vet -vettool=`which containedctx` ./... 
+``` diff --git a/vendor/github.com/sivchari/containedctx/containedctx.go b/vendor/github.com/sivchari/containedctx/containedctx.go new file mode 100644 index 0000000000..0260d6a6eb --- /dev/null +++ b/vendor/github.com/sivchari/containedctx/containedctx.go @@ -0,0 +1,45 @@ +package containedctx + +import ( + "go/ast" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" +) + +const doc = "containedctx is a linter that detects struct contained context.Context field" + +// Analyzer is the contanedctx analyzer +var Analyzer = &analysis.Analyzer{ + Name: "containedctx", + Doc: doc, + Run: run, + Requires: []*analysis.Analyzer{ + inspect.Analyzer, + }, +} + +func run(pass *analysis.Pass) (interface{}, error) { + inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + + nodeFilter := []ast.Node{ + (*ast.StructType)(nil), + } + + inspect.Preorder(nodeFilter, func(n ast.Node) { + switch structTyp := n.(type) { + case *ast.StructType: + if structTyp.Fields.List == nil { + return + } + for _, field := range structTyp.Fields.List { + if pass.TypesInfo.TypeOf(field.Type).String() == "context.Context" { + pass.Reportf(field.Pos(), "found a struct that contains a context.Context field") + } + } + } + }) + + return nil, nil +} diff --git a/vendor/github.com/sivchari/nosnakecase/.gitignore b/vendor/github.com/sivchari/nosnakecase/.gitignore new file mode 100644 index 0000000000..66fd13c903 --- /dev/null +++ b/vendor/github.com/sivchari/nosnakecase/.gitignore @@ -0,0 +1,15 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +# vendor/ diff --git a/vendor/github.com/sivchari/nosnakecase/.golangci.yml b/vendor/github.com/sivchari/nosnakecase/.golangci.yml new file mode 100644 index 0000000000..31e05c4ee1 --- /dev/null +++ b/vendor/github.com/sivchari/nosnakecase/.golangci.yml @@ -0,0 +1,40 @@ +run: + timeout: 5m + skip-files: [] + go: '1.17' + +linters-settings: + govet: + enable-all: true + disable: + - fieldalignment + gocyclo: + min-complexity: 18 + misspell: + locale: US + godox: + keywords: + - FIXME + gofumpt: + extra-rules: true + +linters: + disable-all: true + enable: + - govet + - revive + - goimports + - staticcheck + - gosimple + - unused + - godox + - gofumpt + - misspell + - gocyclo + +issues: + exclude-use-default: true + max-per-linter: 0 + max-same-issues: 0 + exclude: [] + diff --git a/vendor/github.com/sivchari/nosnakecase/LICENSE b/vendor/github.com/sivchari/nosnakecase/LICENSE new file mode 100644 index 0000000000..fb41276774 --- /dev/null +++ b/vendor/github.com/sivchari/nosnakecase/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2022 sivchari + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/sivchari/nosnakecase/README.md b/vendor/github.com/sivchari/nosnakecase/README.md new file mode 100644 index 0000000000..69bb660462 --- /dev/null +++ b/vendor/github.com/sivchari/nosnakecase/README.md @@ -0,0 +1,224 @@ +# nosnakecase +nosnakecase is a linter that detects snake case of variable naming and function name. + +## Instruction + +```sh +go install github.com/sivchari/nosnakecase/cmd/nosnakecase@latest +``` + +## Usage + +```go +package sandbox + +// global variable name with underscore. +var v_v = 0 // want "v_v is used under score. You should use mixedCap or MixedCap." + +// global constant name with underscore. +const c_c = 0 // want "c_c is used under score. You should use mixedCap or MixedCap." + +// struct name with underscore. +type S_a struct { // want "S_a is used under score. You should use mixedCap or MixedCap." + fi int +} + +// non-exported struct field name with underscore. +type Sa struct { + fi_a int // // want "fi_a is used under score. You should use mixedCap or MixedCap." +} + +// function as struct field, with parameter name with underscore. +type Sb struct { + fib func(p_a int) // want "p_a is used under score. You should use mixedCap or MixedCap." +} + +// exported struct field with underscore. +type Sc struct { + Fi_A int // want "Fi_A is used under score. You should use mixedCap or MixedCap." +} + +// function as struct field, with return name with underscore. +type Sd struct { + fib func(p int) (r_a int) // want "r_a is used under score. You should use mixedCap or MixedCap." +} + +// interface name with underscore. +type I_a interface { // want "I_a is used under score. You should use mixedCap or MixedCap." + fn(p int) +} + +// interface with parameter name with underscore. +type Ia interface { + fn(p_a int) // want "p_a is used under score. You should use mixedCap or MixedCap." +} + +// interface with parameter name with underscore. +type Ib interface { + Fn(p_a int) // want "p_a is used under score. You should use mixedCap or MixedCap." +} + +// function as struct field, with return name with underscore. +type Ic interface { + Fn_a() // want "Fn_a is used under score. You should use mixedCap or MixedCap." +} + +// interface with return name with underscore. +type Id interface { + Fn() (r_a int) // want "r_a is used under score. You should use mixedCap or MixedCap." +} + +// function name with underscore. +func f_a() {} // want "f_a is used under score. You should use mixedCap or MixedCap." + +// function's parameter name with underscore. +func fb(p_a int) {} // want "p_a is used under score. You should use mixedCap or MixedCap." + +// named return with underscore. +func fc() (r_b int) { // want "r_b is used under score. You should use mixedCap or MixedCap." + return 0 +} + +// local variable (short declaration) with underscore. +func fd(p int) int { + v_b := p * 2 // want "v_b is used under score. You should use mixedCap or MixedCap." + + return v_b // want "v_b is used under score. You should use mixedCap or MixedCap." +} + +// local constant with underscore. 
+func fe(p int) int { + const v_b = 2 // want "v_b is used under score. You should use mixedCap or MixedCap." + + return v_b * p // want "v_b is used under score. You should use mixedCap or MixedCap." +} + +// local variable with underscore. +func ff(p int) int { + var v_b = 2 // want "v_b is used under score. You should use mixedCap or MixedCap." + + return v_b * p // want "v_b is used under score. You should use mixedCap or MixedCap." +} + +// inner function, parameter name with underscore. +func fg() { + fgl := func(p_a int) {} // want "p_a is used under score. You should use mixedCap or MixedCap." + fgl(1) +} + +type Foo struct{} + +// method name with underscore. +func (f Foo) f_a() {} // want "f_a is used under score. You should use mixedCap or MixedCap." + +// method's parameter name with underscore. +func (f Foo) fb(p_a int) {} // want "p_a is used under score. You should use mixedCap or MixedCap." + +// named return with underscore. +func (f Foo) fc() (r_b int) { return 0 } // want "r_b is used under score. You should use mixedCap or MixedCap." + +// local variable (short declaration) with underscore. +func (f Foo) fd(p int) int { + v_b := p * 2 // want "v_b is used under score. You should use mixedCap or MixedCap." + + return v_b // want "v_b is used under score. You should use mixedCap or MixedCap." +} + +// local constant with underscore. +func (f Foo) fe(p int) int { + const v_b = 2 // want "v_b is used under score. You should use mixedCap or MixedCap." + + return v_b * p // want "v_b is used under score. You should use mixedCap or MixedCap." +} + +// local variable with underscore. +func (f Foo) ff(p int) int { + var v_b = 2 // want "v_b is used under score. You should use mixedCap or MixedCap." + + return v_b * p // want "v_b is used under score. You should use mixedCap or MixedCap." +} + +func fna(a, p_a int) {} // want "p_a is used under score. You should use mixedCap or MixedCap." + +func fna1(a string, p_a int) {} // want "p_a is used under score. You should use mixedCap or MixedCap." + +func fnb(a, b, p_a int) {} // want "p_a is used under score. You should use mixedCap or MixedCap." + +func fnb1(a, b string, p_a int) {} // want "p_a is used under score. You should use mixedCap or MixedCap." + +func fnd( + p_a int, // want "p_a is used under score. You should use mixedCap or MixedCap." + p_b int, // want "p_b is used under score. You should use mixedCap or MixedCap." + p_c int, // want "p_c is used under score. You should use mixedCap or MixedCap." +) { +} +``` + +```console +go vet -vettool=(which nosnakecase) ./... + +# command-line-arguments +# a +./a.go:4:5: v_v is used under score. You should use mixedCap or MixedCap. +./a.go:7:7: c_c is used under score. You should use mixedCap or MixedCap. +./a.go:10:6: S_a is used under score. You should use mixedCap or MixedCap. +./a.go:16:2: fi_a is used under score. You should use mixedCap or MixedCap. +./a.go:21:11: p_a is used under score. You should use mixedCap or MixedCap. +./a.go:26:2: Fi_A is used under score. You should use mixedCap or MixedCap. +./a.go:31:19: r_a is used under score. You should use mixedCap or MixedCap. +./a.go:35:6: I_a is used under score. You should use mixedCap or MixedCap. +./a.go:41:5: p_a is used under score. You should use mixedCap or MixedCap. +./a.go:46:5: p_a is used under score. You should use mixedCap or MixedCap. +./a.go:51:2: Fn_a is used under score. You should use mixedCap or MixedCap. +./a.go:56:8: r_a is used under score. You should use mixedCap or MixedCap. 
+./a.go:60:6: f_a is used under score. You should use mixedCap or MixedCap. +./a.go:63:9: p_a is used under score. You should use mixedCap or MixedCap. +./a.go:66:12: r_b is used under score. You should use mixedCap or MixedCap. +./a.go:72:2: v_b is used under score. You should use mixedCap or MixedCap. +./a.go:74:9: v_b is used under score. You should use mixedCap or MixedCap. +./a.go:79:8: v_b is used under score. You should use mixedCap or MixedCap. +./a.go:81:9: v_b is used under score. You should use mixedCap or MixedCap. +./a.go:86:6: v_b is used under score. You should use mixedCap or MixedCap. +./a.go:88:9: v_b is used under score. You should use mixedCap or MixedCap. +./a.go:93:14: p_a is used under score. You should use mixedCap or MixedCap. +./a.go:100:14: f_a is used under score. You should use mixedCap or MixedCap. +./a.go:103:17: p_a is used under score. You should use mixedCap or MixedCap. +./a.go:106:20: r_b is used under score. You should use mixedCap or MixedCap. +./a.go:110:2: v_b is used under score. You should use mixedCap or MixedCap. +./a.go:112:9: v_b is used under score. You should use mixedCap or MixedCap. +./a.go:117:8: v_b is used under score. You should use mixedCap or MixedCap. +./a.go:119:9: v_b is used under score. You should use mixedCap or MixedCap. +./a.go:124:6: v_b is used under score. You should use mixedCap or MixedCap. +./a.go:126:9: v_b is used under score. You should use mixedCap or MixedCap. +./a.go:129:13: p_a is used under score. You should use mixedCap or MixedCap. +./a.go:131:21: p_a is used under score. You should use mixedCap or MixedCap. +./a.go:133:16: p_a is used under score. You should use mixedCap or MixedCap. +./a.go:135:24: p_a is used under score. You should use mixedCap or MixedCap. +./a.go:138:2: p_a is used under score. You should use mixedCap or MixedCap. +./a.go:139:2: p_b is used under score. You should use mixedCap or MixedCap. +./a.go:140:2: p_c is used under score. You should use mixedCap or MixedCap. +``` + +## CI + +### CircleCI + +```yaml +- run: + name: install nosnakecase + command: go install github.com/sivchari/nosnakecase/cmd/nosnakecase@latest + +- run: + name: run nosnakecase + command: go vet -vettool=`which nosnakecase` ./... +``` + +### GitHub Actions + +```yaml +- name: install nosnakecase + run: go install github.com/sivchari/nosnakecase/cmd/nosnakecase@latest + +- name: run nosnakecase + run: go vet -vettool=`which nosnakecase` ./... +``` diff --git a/vendor/github.com/sivchari/nosnakecase/nosnakecase.go b/vendor/github.com/sivchari/nosnakecase/nosnakecase.go new file mode 100644 index 0000000000..88cf70e3f0 --- /dev/null +++ b/vendor/github.com/sivchari/nosnakecase/nosnakecase.go @@ -0,0 +1,63 @@ +package nosnakecase + +import ( + "go/ast" + "go/token" + "strings" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" +) + +const doc = "nosnakecase is a linter that detects snake case of variable naming and function name." + +// Analyzer is a nosnakecase linter. 
+var Analyzer = &analysis.Analyzer{ + Name: "nosnakecase", + Doc: doc, + Run: run, + Requires: []*analysis.Analyzer{ + inspect.Analyzer, + }, +} + +func run(pass *analysis.Pass) (interface{}, error) { + result := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + + nodeFilter := []ast.Node{ + (*ast.Ident)(nil), + } + + result.Preorder(nodeFilter, func(n ast.Node) { + switch n := n.(type) { + case *ast.Ident: + report(pass, n.Pos(), n.Name) + } + }) + + return nil, nil +} + +func report(pass *analysis.Pass, pos token.Pos, name string) { + // skip import _ "xxx" + if name == "_" { + return + } + + // skip package xxx_test + if strings.Contains(name, "_test") { + return + } + + // If prefix is Test or Benchmark, Fuzz, skip + // FYI https://go.dev/blog/examples + if strings.HasPrefix(name, "Test") || strings.HasPrefix(name, "Benchmark") || strings.HasPrefix(name, "Fuzz") { + return + } + + if strings.Contains(name, "_") { + pass.Reportf(pos, "%s contains underscore. You should use mixedCap or MixedCap.", name) + return + } +} diff --git a/vendor/github.com/sivchari/tenv/.gitignore b/vendor/github.com/sivchari/tenv/.gitignore new file mode 100644 index 0000000000..83470100fc --- /dev/null +++ b/vendor/github.com/sivchari/tenv/.gitignore @@ -0,0 +1,17 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +.idea + +# Dependency directories (remove the comment below to include it) +# vendor/ diff --git a/vendor/github.com/sivchari/tenv/.golangci.yml b/vendor/github.com/sivchari/tenv/.golangci.yml new file mode 100644 index 0000000000..f687df8362 --- /dev/null +++ b/vendor/github.com/sivchari/tenv/.golangci.yml @@ -0,0 +1,38 @@ +run: + timeout: 5m + skip-files: [] + +linters-settings: + govet: + enable-all: true + disable: + - fieldalignment + gocyclo: + min-complexity: 12 + misspell: + locale: US + godox: + keywords: + - FIXME + gofumpt: + extra-rules: true + +linters: + disable-all: true + enable: + - govet + - revive + - goimports + - staticcheck + - gosimple + - unused + - godox + - gofumpt + - misspell + - gocyclo + +issues: + exclude-use-default: true + max-per-linter: 0 + max-same-issues: 0 + exclude: [] diff --git a/vendor/github.com/sivchari/tenv/LICENSE b/vendor/github.com/sivchari/tenv/LICENSE new file mode 100644 index 0000000000..5185ec09a5 --- /dev/null +++ b/vendor/github.com/sivchari/tenv/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2021 sivchari + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/sivchari/tenv/README.md b/vendor/github.com/sivchari/tenv/README.md new file mode 100644 index 0000000000..c5d0047734 --- /dev/null +++ b/vendor/github.com/sivchari/tenv/README.md @@ -0,0 +1,107 @@ +# tenv + +![tenv Gopher](./tenv.png "Gopher") + + +[![test_and_lint](https://github.com/sivchari/tenv/actions/workflows/workflows.yml/badge.svg?branch=main)](https://github.com/sivchari/tenv/actions/workflows/workflows.yml) + +tenv is analyzer that detects using os.Setenv instead of t.Setenv since Go1.17 + +## Instruction + +```sh +go install github.com/sivchari/tenv/cmd/tenv +``` + +## Usage + +```go +package main + +import ( + "fmt" + "os" + "testing" +) + +func TestMain(t *testing.T) { + fmt.Println(os.Getenv("GO")) + os.Setenv("GO", "HACKING GOPHER") +} + +func TestMain2(t *testing.T) { + fmt.Println(os.Getenv("GO")) +} + +func helper() { + os.Setenv("GO", "HACKING GOPHER") +} +``` + +```console +go vet -vettool=(which tenv) ./... + +# a +./main_test.go:11:2: os.Setenv() can be replaced by `t.Setenv()` in TestMain +``` + +### option + +The option `all` will run against whole test files (`_test.go`) regardless of method/function signatures. + +By default, only methods that take `*testing.T`, `*testing.B`, and `testing.TB` as arguments are checked. + +```go +package main + +import ( + "fmt" + "os" + "testing" +) + +func TestMain(t *testing.T) { + fmt.Println(os.Getenv("GO")) + os.Setenv("GO", "HACKING GOPHER") +} + +func TestMain2(t *testing.T) { + fmt.Println(os.Getenv("GO")) +} + +func helper() { + os.Setenv("GO", "HACKING GOPHER") +} +``` + +```console +go vet -vettool=(which tenv) -tenv.all ./... + +# a +./main_test.go:11:2: os.Setenv() can be replaced by `t.Setenv()` in TestMain +./main_test.go:19:2: os.Setenv() can be replaced by `testing.Setenv()` in helper +``` + +## CI + +### CircleCI + +```yaml +- run: + name: install tenv + command: go install github.com/sivchari/tenv + +- run: + name: run tenv + command: go vet -vettool=`which tenv` ./... +``` + +### GitHub Actions + +```yaml +- name: install tenv + run: go install github.com/sivchari/tenv + +- name: run tenv + run: go vet -vettool=`which tenv` ./... 
+``` diff --git a/vendor/github.com/sivchari/tenv/tenv.go b/vendor/github.com/sivchari/tenv/tenv.go new file mode 100644 index 0000000000..fcff98d058 --- /dev/null +++ b/vendor/github.com/sivchari/tenv/tenv.go @@ -0,0 +1,213 @@ +package tenv + +import ( + "go/ast" + "strings" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" +) + +const doc = "tenv is analyzer that detects using os.Setenv instead of t.Setenv since Go1.17" + +// Analyzer is tenv analyzer +var Analyzer = &analysis.Analyzer{ + Name: "tenv", + Doc: doc, + Run: run, + Requires: []*analysis.Analyzer{ + inspect.Analyzer, + }, +} + +var ( + A = "all" + aflag bool +) + +func init() { + Analyzer.Flags.BoolVar(&aflag, A, false, "the all option will run against all method in test file") +} + +func run(pass *analysis.Pass) (interface{}, error) { + inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + + nodeFilter := []ast.Node{ + (*ast.FuncDecl)(nil), + (*ast.FuncLit)(nil), + } + + inspect.Preorder(nodeFilter, func(n ast.Node) { + switch n := n.(type) { + case *ast.FuncDecl: + checkFuncDecl(pass, n, pass.Fset.File(n.Pos()).Name()) + case *ast.FuncLit: + checkFuncLit(pass, n, pass.Fset.File(n.Pos()).Name()) + } + }) + + return nil, nil +} + +func checkFuncDecl(pass *analysis.Pass, f *ast.FuncDecl, fileName string) { + argName, ok := targetRunner(f.Type.Params.List, fileName) + if !ok { + return + } + checkStmts(pass, f.Body.List, f.Name.Name, argName) +} + +func checkFuncLit(pass *analysis.Pass, f *ast.FuncLit, fileName string) { + argName, ok := targetRunner(f.Type.Params.List, fileName) + if !ok { + return + } + checkStmts(pass, f.Body.List, "anonymous function", argName) +} + +func checkStmts(pass *analysis.Pass, stmts []ast.Stmt, funcName, argName string) { + for _, stmt := range stmts { + switch stmt := stmt.(type) { + case *ast.ExprStmt: + if !checkExprStmt(pass, stmt, funcName, argName) { + continue + } + case *ast.IfStmt: + if !checkIfStmt(pass, stmt, funcName, argName) { + continue + } + case *ast.AssignStmt: + if !checkAssignStmt(pass, stmt, funcName, argName) { + continue + } + } + } +} + +func checkExprStmt(pass *analysis.Pass, stmt *ast.ExprStmt, funcName, argName string) bool { + callExpr, ok := stmt.X.(*ast.CallExpr) + if !ok { + return false + } + fun, ok := callExpr.Fun.(*ast.SelectorExpr) + if !ok { + return false + } + x, ok := fun.X.(*ast.Ident) + if !ok { + return false + } + targetName := x.Name + "." + fun.Sel.Name + if targetName == "os.Setenv" { + if argName == "" { + argName = "testing" + } + pass.Reportf(stmt.Pos(), "os.Setenv() can be replaced by `%s.Setenv()` in %s", argName, funcName) + } + return true +} + +func checkIfStmt(pass *analysis.Pass, stmt *ast.IfStmt, funcName, argName string) bool { + assignStmt, ok := stmt.Init.(*ast.AssignStmt) + if !ok { + return false + } + rhs, ok := assignStmt.Rhs[0].(*ast.CallExpr) + if !ok { + return false + } + fun, ok := rhs.Fun.(*ast.SelectorExpr) + if !ok { + return false + } + x, ok := fun.X.(*ast.Ident) + if !ok { + return false + } + targetName := x.Name + "." 
+ fun.Sel.Name + if targetName == "os.Setenv" { + if argName == "" { + argName = "testing" + } + pass.Reportf(stmt.Pos(), "os.Setenv() can be replaced by `%s.Setenv()` in %s", argName, funcName) + } + return true +} + +func checkAssignStmt(pass *analysis.Pass, stmt *ast.AssignStmt, funcName, argName string) bool { + rhs, ok := stmt.Rhs[0].(*ast.CallExpr) + if !ok { + return false + } + fun, ok := rhs.Fun.(*ast.SelectorExpr) + if !ok { + return false + } + x, ok := fun.X.(*ast.Ident) + if !ok { + return false + } + targetName := x.Name + "." + fun.Sel.Name + if targetName == "os.Setenv" { + if argName == "" { + argName = "testing" + } + pass.Reportf(stmt.Pos(), "os.Setenv() can be replaced by `%s.Setenv()` in %s", argName, funcName) + } + return true +} + +func targetRunner(params []*ast.Field, fileName string) (string, bool) { + for _, p := range params { + switch typ := p.Type.(type) { + case *ast.StarExpr: + if checkStarExprTarget(typ) { + if len(p.Names) == 0 { + return "", false + } + argName := p.Names[0].Name + return argName, true + } + case *ast.SelectorExpr: + if checkSelectorExprTarget(typ) { + if len(p.Names) == 0 { + return "", false + } + argName := p.Names[0].Name + return argName, true + } + } + } + if aflag && strings.HasSuffix(fileName, "_test.go") { + return "", true + } + return "", false +} + +func checkStarExprTarget(typ *ast.StarExpr) bool { + selector, ok := typ.X.(*ast.SelectorExpr) + if !ok { + return false + } + x, ok := selector.X.(*ast.Ident) + if !ok { + return false + } + targetName := x.Name + "." + selector.Sel.Name + switch targetName { + case "testing.T", "testing.B", "testing.F": + return true + default: + return false + } +} + +func checkSelectorExprTarget(typ *ast.SelectorExpr) bool { + x, ok := typ.X.(*ast.Ident) + if !ok { + return false + } + targetName := x.Name + "." + typ.Sel.Name + return targetName == "testing.TB" +} diff --git a/vendor/github.com/sivchari/tenv/tenv.png b/vendor/github.com/sivchari/tenv/tenv.png new file mode 100644 index 0000000000..96dc967e38 Binary files /dev/null and b/vendor/github.com/sivchari/tenv/tenv.png differ diff --git a/vendor/github.com/sonatard/noctx/.golangci.yml b/vendor/github.com/sonatard/noctx/.golangci.yml index 1580acde27..55ebeebdb0 100644 --- a/vendor/github.com/sonatard/noctx/.golangci.yml +++ b/vendor/github.com/sonatard/noctx/.golangci.yml @@ -1,20 +1,14 @@ run: - -linters-settings: - govet: + linters-settings: + govet: + enable-all: true + linters: enable-all: true - -linters: - enable-all: true - disable: - - gochecknoglobals - - gomnd - - gocognit - - nestif - -issues: - exclude-rules: - - path: reqwithoutctx/ssa.go - text: "Consider preallocating `exts`" - linters: - - prealloc + disable: + - gochecknoglobals + - gomnd + - gocognit + - nestif + - nilnil + - paralleltest + - varnamelen \ No newline at end of file diff --git a/vendor/github.com/sonatard/noctx/README.md b/vendor/github.com/sonatard/noctx/README.md index bfe9782c6d..b3793fc968 100644 --- a/vendor/github.com/sonatard/noctx/README.md +++ b/vendor/github.com/sonatard/noctx/README.md @@ -1,25 +1,60 @@ # noctx -![](https://github.com/sonatard/noctx/workflows/.github/workflows/ci.yml/badge.svg) +![](https://github.com/sonatard/noctx/workflows/CI/badge.svg) `noctx` finds sending http request without context.Context. -You should use `noctx` if sending http request in your library. +You should use `noctx` if sending http request in your library. 
Passing `context.Context` enables library user to cancel http request, getting trace information and so on. -## Install +## Usage + + +### noctx with go vet + +go vet is a Go standard tool for analyzing source code. +1. Install noctx. ```sh -$ go get -u github.com/sonatard/noctx/cmd/noctx +$ go install github.com/sonatard/noctx/cmd/noctx@latest ``` -## Usage - +2. noctx execute ```sh $ go vet -vettool=`which noctx` main.go ./main.go:6:11: net/http.Get must not be called ``` +### noctx with golangci-lint + +golangci-lint is a fast Go linters runner. + +1. Install golangci-lint. +[golangci-lint - Install](https://golangci-lint.run/usage/install/) + +2. Setup .golangci.yml +```yaml: +# Add noctx to enable linters. +linters: + enable: + - noctx + +# Or enable-all is true. +linters: + enable-all: true + disable: + - xxx # Add unused linter to disable linters. +``` + +3. noctx execute +```sh +# Use .golangci.yml +$ golangci-lint run + +# Only noctx execute +golangci-lint run --disable-all -E noctx +``` + ## Detection rules - Executing following functions - `net/http.Get` @@ -39,7 +74,51 @@ $ go vet -vettool=`which noctx` main.go `(http.Request).WithContext(ctx)` has a disadvantage of performance because it returns a copy of `http.Request`. Use `http.NewRequestWithContext` function if you only support Go1.13 or later. -## Sample Code + +If your library already provides functions that don't accept context, you define a new function that accepts context and make the existing function a wrapper for a new function. + + +```go +// Before fix code +// Sending an HTTP request but not accepting context +func Send(body io.Reader) error { + req,err := http.NewRequest(http.MethodPost, "http://example.com", body) + if err != nil { + return err + } + _, err = http.DefaultClient.Do(req) + if err != nil{ + return err + } + + return nil +} +``` + +```go +// After fix code +func Send(body io.Reader) error { + // Pass context.Background() to SendWithContext + return SendWithContext(context.Background(), body) +} + +// Sending an HTTP request and accepting context +func SendWithContext(ctx context.Context, body io.Reader) error { + // Change NewRequest to NewRequestWithContext and pass context it + req, err := http.NewRequestWithContext(ctx, http.MethodPost, "http://example.com", body) + if err != nil { + return nil + } + _, err = http.DefaultClient.Do(req) + if err != nil { + return err + } + + return nil +} +``` + +## Detection sample ```go package main diff --git a/vendor/github.com/sonatard/noctx/ngfunc/main.go b/vendor/github.com/sonatard/noctx/ngfunc/main.go index cfeb0f0010..46306218d2 100644 --- a/vendor/github.com/sonatard/noctx/ngfunc/main.go +++ b/vendor/github.com/sonatard/noctx/ngfunc/main.go @@ -1,6 +1,7 @@ package ngfunc import ( + "fmt" "go/types" "github.com/gostaticanalysis/analysisutil" @@ -34,8 +35,11 @@ func Run(pass *analysis.Pass) (interface{}, error) { func ngCalledFuncs(pass *analysis.Pass, ngFuncs []*types.Func) []*Report { var reports []*Report - srcFuncs := pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA).SrcFuncs - for _, sf := range srcFuncs { + ssa, ok := pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA) + if !ok { + panic(fmt.Sprintf("%T is not *buildssa.SSA", pass.ResultOf[buildssa.Analyzer])) + } + for _, sf := range ssa.SrcFuncs { for _, b := range sf.Blocks { for _, instr := range b.Instrs { for _, ngFunc := range ngFuncs { diff --git a/vendor/github.com/sonatard/noctx/noctx.go b/vendor/github.com/sonatard/noctx/noctx.go index 478ad8855d..89e0446ecd 100644 --- 
a/vendor/github.com/sonatard/noctx/noctx.go +++ b/vendor/github.com/sonatard/noctx/noctx.go @@ -1,6 +1,7 @@ package noctx import ( + "fmt" "github.com/sonatard/noctx/ngfunc" "github.com/sonatard/noctx/reqwithoutctx" "golang.org/x/tools/go/analysis" @@ -8,23 +9,26 @@ import ( ) var Analyzer = &analysis.Analyzer{ - Name: "noctx", - Doc: Doc, - Run: run, + Name: "noctx", + Doc: Doc, + Run: run, + RunDespiteErrors: false, Requires: []*analysis.Analyzer{ buildssa.Analyzer, }, + ResultType: nil, + FactTypes: nil, } const Doc = "noctx finds sending http request without context.Context" func run(pass *analysis.Pass) (interface{}, error) { if _, err := ngfunc.Run(pass); err != nil { - return nil, err + return nil, fmt.Errorf("run: %w", err) } if _, err := reqwithoutctx.Run(pass); err != nil { - return nil, err + return nil, fmt.Errorf("run: %w", err) } return nil, nil diff --git a/vendor/github.com/sonatard/noctx/reqwithoutctx/ssa.go b/vendor/github.com/sonatard/noctx/reqwithoutctx/ssa.go index 35751269ee..d7e0f5084d 100644 --- a/vendor/github.com/sonatard/noctx/reqwithoutctx/ssa.go +++ b/vendor/github.com/sonatard/noctx/reqwithoutctx/ssa.go @@ -1,6 +1,7 @@ package reqwithoutctx import ( + "fmt" "go/types" "github.com/gostaticanalysis/analysisutil" @@ -10,6 +11,7 @@ import ( "golang.org/x/tools/go/ssa" ) +//nolint:govet type Analyzer struct { Funcs []*ssa.Function newRequestType types.Type @@ -20,10 +22,13 @@ func NewAnalyzer(pass *analysis.Pass) *Analyzer { newRequestType := analysisutil.TypeOf(pass, "net/http", "NewRequest") requestType := analysisutil.TypeOf(pass, "net/http", "*Request") - srcFuncs := pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA).SrcFuncs + ssa, ok := pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA) + if !ok { + panic(fmt.Sprintf("%T is not *buildssa.SSA", pass.ResultOf[buildssa.Analyzer])) + } return &Analyzer{ - Funcs: srcFuncs, + Funcs: ssa.SrcFuncs, newRequestType: newRequestType, requestType: requestType, } @@ -88,14 +93,14 @@ func (a *Analyzer) usedReqs() map[string]*ssa.Extract { } func (a *Analyzer) usedReqByCall(call *ssa.Call) []*ssa.Extract { - var exts []*ssa.Extract + args := call.Common().Args + exts := make([]*ssa.Extract, 0, len(args)) // skip net/http.Request method call if call.Common().Signature().Recv() != nil && types.Identical(call.Value().Type(), a.requestType) { return exts } - args := call.Common().Args if len(args) == 0 { return exts } diff --git a/vendor/github.com/sourcegraph/go-diff/LICENSE b/vendor/github.com/sourcegraph/go-diff/LICENSE index 0733b6e5f2..5ba1c44360 100644 --- a/vendor/github.com/sourcegraph/go-diff/LICENSE +++ b/vendor/github.com/sourcegraph/go-diff/LICENSE @@ -33,3 +33,14 @@ in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE +OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/sourcegraph/go-diff/diff/diff.go b/vendor/github.com/sourcegraph/go-diff/diff/diff.go index 0f465b9e27..81aa655707 100644 --- a/vendor/github.com/sourcegraph/go-diff/diff/diff.go +++ b/vendor/github.com/sourcegraph/go-diff/diff/diff.go @@ -120,6 +120,10 @@ const onlyInMessage = "Only in %s: %s\n" // See https://www.gnu.org/software/diffutils/manual/html_node/Detailed-Unified.html. const diffTimeParseLayout = "2006-01-02 15:04:05 -0700" +// Apple's diff is based on freebsd diff, which uses a timestamp format that does +// not include the timezone offset. +const diffTimeParseWithoutTZLayout = "2006-01-02 15:04:05" + // diffTimeFormatLayout is the layout used to format (i.e., print) the time in unified diff file // header timestamps. // See https://www.gnu.org/software/diffutils/manual/html_node/Detailed-Unified.html. diff --git a/vendor/github.com/sourcegraph/go-diff/diff/parse.go b/vendor/github.com/sourcegraph/go-diff/diff/parse.go index 8d5cfc238e..48eeb96702 100644 --- a/vendor/github.com/sourcegraph/go-diff/diff/parse.go +++ b/vendor/github.com/sourcegraph/go-diff/diff/parse.go @@ -23,14 +23,14 @@ func ParseMultiFileDiff(diff []byte) ([]*FileDiff, error) { // NewMultiFileDiffReader returns a new MultiFileDiffReader that reads // a multi-file unified diff from r. func NewMultiFileDiffReader(r io.Reader) *MultiFileDiffReader { - return &MultiFileDiffReader{reader: bufio.NewReader(r)} + return &MultiFileDiffReader{reader: newLineReader(r)} } // MultiFileDiffReader reads a multi-file unified diff. type MultiFileDiffReader struct { line int offset int64 - reader *bufio.Reader + reader *lineReader // TODO(sqs): line and offset tracking in multi-file diffs is broken; add tests and fix @@ -46,6 +46,14 @@ type MultiFileDiffReader struct { // all hunks) from r. If there are no more files in the diff, it // returns error io.EOF. func (r *MultiFileDiffReader) ReadFile() (*FileDiff, error) { + fd, _, err := r.ReadFileWithTrailingContent() + return fd, err +} + +// ReadFileWithTrailingContent reads the next file unified diff (including +// headers and all hunks) from r, also returning any trailing content. If there +// are no more files in the diff, it returns error io.EOF. +func (r *MultiFileDiffReader) ReadFileWithTrailingContent() (*FileDiff, string, error) { fr := &FileDiffReader{ line: r.line, offset: r.offset, @@ -59,23 +67,33 @@ func (r *MultiFileDiffReader) ReadFile() (*FileDiff, error) { switch e := err.(type) { case *ParseError: if e.Err == ErrNoFileHeader || e.Err == ErrExtendedHeadersEOF { - return nil, io.EOF + // Any non-diff content preceding a valid diff is included in the + // extended headers of the following diff. In this way, mixed diff / + // non-diff content can be parsed. Trailing non-diff content is + // different: it doesn't make sense to return a FileDiff with only + // extended headers populated. Instead, we return any trailing content + // in case the caller needs it. 
+ trailing := "" + if fd != nil { + trailing = strings.Join(fd.Extended, "\n") + } + return nil, trailing, io.EOF } - return nil, err + return nil, "", err case OverflowError: r.nextFileFirstLine = []byte(e) - return fd, nil + return fd, "", nil default: - return nil, err + return nil, "", err } } // FileDiff is added/deleted file // No further collection of hunks needed if fd.NewName == "" { - return fd, nil + return fd, "", nil } // Before reading hunks, check to see if there are any. If there @@ -85,9 +103,9 @@ func (r *MultiFileDiffReader) ReadFile() (*FileDiff, error) { // caused by the lack of any hunks, or a malformatted hunk, so we // need to perform the check here. hr := fr.HunksReader() - line, err := readLine(r.reader) + line, err := r.reader.readLine() if err != nil && err != io.EOF { - return fd, err + return fd, "", err } line = bytes.TrimSuffix(line, []byte{'\n'}) if bytes.HasPrefix(line, hunkPrefix) { @@ -101,10 +119,10 @@ func (r *MultiFileDiffReader) ReadFile() (*FileDiff, error) { // This just means we finished reading the hunks for the // current file. See the ErrBadHunkLine doc for more info. r.nextFileFirstLine = e.Line - return fd, nil + return fd, "", nil } } - return nil, err + return nil, "", err } } else { // There weren't any hunks, so that line we peeked ahead at @@ -112,7 +130,7 @@ func (r *MultiFileDiffReader) ReadFile() (*FileDiff, error) { r.nextFileFirstLine = line } - return fd, nil + return fd, "", nil } // ReadAllFiles reads all file unified diffs (including headers and all @@ -141,14 +159,14 @@ func ParseFileDiff(diff []byte) (*FileDiff, error) { // NewFileDiffReader returns a new FileDiffReader that reads a file // unified diff. func NewFileDiffReader(r io.Reader) *FileDiffReader { - return &FileDiffReader{reader: bufio.NewReader(r)} + return &FileDiffReader{reader: &lineReader{reader: bufio.NewReader(r)}} } // FileDiffReader reads a unified file diff. type FileDiffReader struct { line int offset int64 - reader *bufio.Reader + reader *lineReader // fileHeaderLine is the first file header line, set by: // @@ -236,7 +254,6 @@ func (r *FileDiffReader) ReadFileHeaders() (origName, newName string, origTimest "", nil, nil, nil } } - origName, origTimestamp, err = r.readOneFileHeader([]byte("--- ")) if err != nil { return "", "", nil, nil, err @@ -266,7 +283,7 @@ func (r *FileDiffReader) readOneFileHeader(prefix []byte) (filename string, time if r.fileHeaderLine == nil { var err error - line, err = readLine(r.reader) + line, err = r.reader.readLine() if err == io.EOF { return "", nil, &ParseError{r.line, r.offset, ErrNoFileHeader} } else if err != nil { @@ -289,10 +306,16 @@ func (r *FileDiffReader) readOneFileHeader(prefix []byte) (filename string, time parts := strings.SplitN(trimmedLine, "\t", 2) filename = parts[0] if len(parts) == 2 { + var ts time.Time // Timestamp is optional, but this header has it. 
- ts, err := time.Parse(diffTimeParseLayout, parts[1]) + ts, err = time.Parse(diffTimeParseLayout, parts[1]) if err != nil { - return "", nil, err + var err1 error + ts, err1 = time.Parse(diffTimeParseWithoutTZLayout, parts[1]) + if err1 != nil { + return "", nil, err + } + err = nil } timestamp = &ts } @@ -318,7 +341,7 @@ func (r *FileDiffReader) ReadExtendedHeaders() ([]string, error) { var line []byte if r.fileHeaderLine == nil { var err error - line, err = readLine(r.reader) + line, err = r.reader.readLine() if err == io.EOF { return xheaders, &ParseError{r.line, r.offset, ErrExtendedHeadersEOF} } else if err != nil { @@ -354,65 +377,192 @@ func (r *FileDiffReader) ReadExtendedHeaders() ([]string, error) { } } +// readQuotedFilename extracts a quoted filename from the beginning of a string, +// returning the unquoted filename and any remaining text after the filename. +func readQuotedFilename(text string) (value string, remainder string, err error) { + if text == "" || text[0] != '"' { + return "", "", fmt.Errorf(`string must start with a '"': %s`, text) + } + + // The end quote is the first quote NOT preceeded by an uneven number of backslashes. + numberOfBackslashes := 0 + for i, c := range text { + if c == '"' && i > 0 && numberOfBackslashes%2 == 0 { + value, err = strconv.Unquote(text[:i+1]) + remainder = text[i+1:] + return + } else if c == '\\' { + numberOfBackslashes++ + } else { + numberOfBackslashes = 0 + } + } + return "", "", fmt.Errorf(`end of string found while searching for '"': %s`, text) +} + +// parseDiffGitArgs extracts the two filenames from a 'diff --git' line. +// Returns false on syntax error, true if syntax is valid. Even with a +// valid syntax, it may be impossible to extract filenames; if so, the +// function returns ("", "", true). +func parseDiffGitArgs(diffArgs string) (string, string, bool) { + length := len(diffArgs) + if length < 3 { + return "", "", false + } + + if diffArgs[0] != '"' && diffArgs[length-1] != '"' { + // Both filenames are unquoted. + firstSpace := strings.IndexByte(diffArgs, ' ') + if firstSpace <= 0 || firstSpace == length-1 { + return "", "", false + } + + secondSpace := strings.IndexByte(diffArgs[firstSpace+1:], ' ') + if secondSpace == -1 { + if diffArgs[firstSpace+1] == '"' { + // The second filename begins with '"', but doesn't end with one. + return "", "", false + } + return diffArgs[:firstSpace], diffArgs[firstSpace+1:], true + } + + // One or both filenames contain a space, but the names are + // unquoted. Here, the 'diff --git' syntax is ambiguous, and + // we have to obtain the filenames elsewhere (e.g. from the + // hunk headers or extended headers). HOWEVER, if the file + // is newly created and empty, there IS no other place to + // find the filename. In this case, the two filenames are + // identical (except for the leading 'a/' prefix), and we have + // to handle that case here. + first := diffArgs[:length/2] + second := diffArgs[length/2+1:] + + // If the two strings could be equal, based on length, proceed. + if length%2 == 1 { + // If the name minus the a/ b/ prefixes is equal, proceed. + if len(first) >= 3 && first[1] == '/' && first[1:] == second[1:] { + return first, second, true + } + // If the names don't have the a/ and b/ prefixes and they're equal, proceed. + if !(first[:2] == "a/" && second[:2] == "b/") && first == second { + return first, second, true + } + } + + // The syntax is (unfortunately) valid, but we could not extract + // the filenames. 
+ return "", "", true + } + + if diffArgs[0] == '"' { + first, remainder, err := readQuotedFilename(diffArgs) + if err != nil || len(remainder) < 2 || remainder[0] != ' ' { + return "", "", false + } + if remainder[1] == '"' { + second, remainder, err := readQuotedFilename(remainder[1:]) + if remainder != "" || err != nil { + return "", "", false + } + return first, second, true + } + return first, remainder[1:], true + } + + // In this case, second argument MUST be quoted (or it's a syntax error) + i := strings.IndexByte(diffArgs, '"') + if i == -1 || i+2 >= length || diffArgs[i-1] != ' ' { + return "", "", false + } + + second, remainder, err := readQuotedFilename(diffArgs[i:]) + if remainder != "" || err != nil { + return "", "", false + } + return diffArgs[:i-1], second, true +} + // handleEmpty detects when FileDiff was an empty diff and will not have any hunks // that follow. It updates fd fields from the parsed extended headers. func handleEmpty(fd *FileDiff) (wasEmpty bool) { - var err error lineCount := len(fd.Extended) if lineCount > 0 && !strings.HasPrefix(fd.Extended[0], "diff --git ") { return false } - switch { - case (lineCount == 3 || lineCount == 4 && strings.HasPrefix(fd.Extended[3], "Binary files ") || lineCount > 4 && strings.HasPrefix(fd.Extended[3], "GIT binary patch")) && - strings.HasPrefix(fd.Extended[1], "new file mode "): - names := strings.SplitN(fd.Extended[0][len("diff --git "):], " ", 2) + lineHasPrefix := func(idx int, prefix string) bool { + return strings.HasPrefix(fd.Extended[idx], prefix) + } + + linesHavePrefixes := func(idx1 int, prefix1 string, idx2 int, prefix2 string) bool { + return lineHasPrefix(idx1, prefix1) && lineHasPrefix(idx2, prefix2) + } + + isCopy := (lineCount == 4 && linesHavePrefixes(2, "copy from ", 3, "copy to ")) || + (lineCount == 6 && linesHavePrefixes(2, "copy from ", 3, "copy to ") && lineHasPrefix(5, "Binary files ")) || + (lineCount == 6 && linesHavePrefixes(1, "old mode ", 2, "new mode ") && linesHavePrefixes(4, "copy from ", 5, "copy to ")) + + isRename := (lineCount == 4 && linesHavePrefixes(2, "rename from ", 3, "rename to ")) || + (lineCount == 5 && linesHavePrefixes(2, "rename from ", 3, "rename to ") && lineHasPrefix(4, "Binary files ")) || + (lineCount == 6 && linesHavePrefixes(2, "rename from ", 3, "rename to ") && lineHasPrefix(5, "Binary files ")) || + (lineCount == 6 && linesHavePrefixes(1, "old mode ", 2, "new mode ") && linesHavePrefixes(4, "rename from ", 5, "rename to ")) + + isDeletedFile := (lineCount == 3 || lineCount == 4 && lineHasPrefix(3, "Binary files ") || lineCount > 4 && lineHasPrefix(3, "GIT binary patch")) && + lineHasPrefix(1, "deleted file mode ") + + isNewFile := (lineCount == 3 || lineCount == 4 && lineHasPrefix(3, "Binary files ") || lineCount > 4 && lineHasPrefix(3, "GIT binary patch")) && + lineHasPrefix(1, "new file mode ") + + isModeChange := lineCount == 3 && linesHavePrefixes(1, "old mode ", 2, "new mode ") + + isBinaryPatch := lineCount == 3 && lineHasPrefix(2, "Binary files ") || lineCount > 3 && lineHasPrefix(2, "GIT binary patch") + + if !isModeChange && !isCopy && !isRename && !isBinaryPatch && !isNewFile && !isDeletedFile { + return false + } + + var success bool + fd.OrigName, fd.NewName, success = parseDiffGitArgs(fd.Extended[0][len("diff --git "):]) + if isNewFile { fd.OrigName = "/dev/null" - fd.NewName, err = strconv.Unquote(names[1]) - if err != nil { - fd.NewName = names[1] - } - return true - case (lineCount == 3 || lineCount == 4 && strings.HasPrefix(fd.Extended[3], 
"Binary files ") || lineCount > 4 && strings.HasPrefix(fd.Extended[3], "GIT binary patch")) && - strings.HasPrefix(fd.Extended[1], "deleted file mode "): + } - names := strings.SplitN(fd.Extended[0][len("diff --git "):], " ", 2) - fd.OrigName, err = strconv.Unquote(names[0]) - if err != nil { - fd.OrigName = names[0] - } + if isDeletedFile { fd.NewName = "/dev/null" - return true - case lineCount == 4 && strings.HasPrefix(fd.Extended[2], "rename from ") && strings.HasPrefix(fd.Extended[3], "rename to "): - names := strings.SplitN(fd.Extended[0][len("diff --git "):], " ", 2) - fd.OrigName, err = strconv.Unquote(names[0]) - if err != nil { - fd.OrigName = names[0] - } - fd.NewName, err = strconv.Unquote(names[1]) - if err != nil { - fd.NewName = names[1] - } - return true - case lineCount == 6 && strings.HasPrefix(fd.Extended[5], "Binary files ") && strings.HasPrefix(fd.Extended[2], "rename from ") && strings.HasPrefix(fd.Extended[3], "rename to "): - names := strings.SplitN(fd.Extended[0][len("diff --git "):], " ", 2) - fd.OrigName = names[0] - fd.NewName = names[1] - return true - case lineCount == 3 && strings.HasPrefix(fd.Extended[2], "Binary files ") || lineCount > 3 && strings.HasPrefix(fd.Extended[2], "GIT binary patch"): - names := strings.SplitN(fd.Extended[0][len("diff --git "):], " ", 2) - fd.OrigName, err = strconv.Unquote(names[0]) - if err != nil { - fd.OrigName = names[0] + } + + // For ambiguous 'diff --git' lines, try to reconstruct filenames using extended headers. + if success && (isCopy || isRename) && fd.OrigName == "" && fd.NewName == "" { + diffArgs := fd.Extended[0][len("diff --git "):] + + tryReconstruct := func(header string, prefix string, whichFile int, result *string) { + if !strings.HasPrefix(header, prefix) { + return + } + rawFilename := header[len(prefix):] + + // extract the filename prefix (e.g. "a/") from the 'diff --git' line. + var prefixLetterIndex int + if whichFile == 1 { + prefixLetterIndex = 0 + } else if whichFile == 2 { + prefixLetterIndex = len(diffArgs) - len(rawFilename) - 2 + } + if prefixLetterIndex < 0 || diffArgs[prefixLetterIndex+1] != '/' { + return + } + + *result = diffArgs[prefixLetterIndex:prefixLetterIndex+2] + rawFilename } - fd.NewName, err = strconv.Unquote(names[1]) - if err != nil { - fd.NewName = names[1] + + for _, header := range fd.Extended { + tryReconstruct(header, "copy from ", 1, &fd.OrigName) + tryReconstruct(header, "copy to ", 2, &fd.NewName) + tryReconstruct(header, "rename from ", 1, &fd.OrigName) + tryReconstruct(header, "rename to ", 2, &fd.NewName) } - return true - default: - return false } + return success } var ( @@ -447,7 +597,7 @@ func ParseHunks(diff []byte) ([]*Hunk, error) { // NewHunksReader returns a new HunksReader that reads unified diff hunks // from r. func NewHunksReader(r io.Reader) *HunksReader { - return &HunksReader{reader: bufio.NewReader(r)} + return &HunksReader{reader: &lineReader{reader: bufio.NewReader(r)}} } // A HunksReader reads hunks from a unified diff. 
@@ -455,7 +605,7 @@ type HunksReader struct { line int offset int64 hunk *Hunk - reader *bufio.Reader + reader *lineReader nextHunkHeaderLine []byte } @@ -474,7 +624,7 @@ func (r *HunksReader) ReadHunk() (*Hunk, error) { line = r.nextHunkHeaderLine r.nextHunkHeaderLine = nil } else { - line, err = readLine(r.reader) + line, err = r.reader.readLine() if err != nil { if err == io.EOF && r.hunk != nil { return r.hunk, nil @@ -518,12 +668,15 @@ func (r *HunksReader) ReadHunk() (*Hunk, error) { // If the line starts with `---` and the next one with `+++` we're // looking at a non-extended file header and need to abort. if bytes.HasPrefix(line, []byte("---")) { - ok, err := peekPrefix(r.reader, "+++") + ok, err := r.reader.nextLineStartsWith("+++") if err != nil { return r.hunk, err } if ok { - return r.hunk, &ParseError{r.line, r.offset, &ErrBadHunkLine{Line: line}} + ok2, _ := r.reader.nextNextLineStartsWith(string(hunkPrefix)) + if ok2 { + return r.hunk, &ParseError{r.line, r.offset, &ErrBadHunkLine{Line: line}} + } } } @@ -593,19 +746,6 @@ func linePrefix(c byte) bool { return false } -// peekPrefix peeks into the given reader to check whether the next -// bytes match the given prefix. -func peekPrefix(reader *bufio.Reader, prefix string) (bool, error) { - next, err := reader.Peek(len(prefix)) - if err != nil { - if err == io.EOF { - return false, nil - } - return false, err - } - return bytes.HasPrefix(next, []byte(prefix)), nil -} - // normalizeHeader takes a header of the form: // "@@ -linestart[,chunksize] +linestart[,chunksize] @@ section" // and returns two strings, with the first in the form: diff --git a/vendor/github.com/sourcegraph/go-diff/diff/reader_util.go b/vendor/github.com/sourcegraph/go-diff/diff/reader_util.go index 395fb7baf9..45300252b7 100644 --- a/vendor/github.com/sourcegraph/go-diff/diff/reader_util.go +++ b/vendor/github.com/sourcegraph/go-diff/diff/reader_util.go @@ -2,9 +2,92 @@ package diff import ( "bufio" + "bytes" + "errors" "io" ) +var ErrLineReaderUninitialized = errors.New("line reader not initialized") + +func newLineReader(r io.Reader) *lineReader { + return &lineReader{reader: bufio.NewReader(r)} +} + +// lineReader is a wrapper around a bufio.Reader that caches the next line to +// provide lookahead functionality for the next two lines. +type lineReader struct { + reader *bufio.Reader + + cachedNextLine []byte + cachedNextLineErr error +} + +// readLine returns the next unconsumed line and advances the internal cache of +// the lineReader. +func (l *lineReader) readLine() ([]byte, error) { + if l.cachedNextLine == nil && l.cachedNextLineErr == nil { + l.cachedNextLine, l.cachedNextLineErr = readLine(l.reader) + } + + if l.cachedNextLineErr != nil { + return nil, l.cachedNextLineErr + } + + next := l.cachedNextLine + + l.cachedNextLine, l.cachedNextLineErr = readLine(l.reader) + + return next, nil +} + +// nextLineStartsWith looks at the line that would be returned by the next call +// to readLine to check whether it has the given prefix. +// +// io.EOF and bufio.ErrBufferFull errors are ignored so that the function can +// be used when at the end of the file. +func (l *lineReader) nextLineStartsWith(prefix string) (bool, error) { + if l.cachedNextLine == nil && l.cachedNextLineErr == nil { + l.cachedNextLine, l.cachedNextLineErr = readLine(l.reader) + } + + return l.lineHasPrefix(l.cachedNextLine, prefix, l.cachedNextLineErr) +} + +// nextNextLineStartsWith checks the prefix of the line *after* the line that +// would be returned by the next readLine. 
+// +// io.EOF and bufio.ErrBufferFull errors are ignored so that the function can +// be used when at the end of the file. +// +// The lineReader MUST be initialized by calling readLine at least once before +// calling nextLineStartsWith. Otherwise ErrLineReaderUninitialized will be +// returned. +func (l *lineReader) nextNextLineStartsWith(prefix string) (bool, error) { + if l.cachedNextLine == nil && l.cachedNextLineErr == nil { + l.cachedNextLine, l.cachedNextLineErr = readLine(l.reader) + } + + next, err := l.reader.Peek(len(prefix)) + return l.lineHasPrefix(next, prefix, err) +} + +// lineHasPrefix checks whether the given line has the given prefix with +// bytes.HasPrefix. +// +// The readErr should be the error that was returned when the line was read. +// lineHasPrefix checks the error to adjust its return value to, e.g., return +// false and ignore the error when readErr is io.EOF. +func (l *lineReader) lineHasPrefix(line []byte, prefix string, readErr error) (bool, error) { + if readErr != nil { + if readErr == io.EOF || readErr == bufio.ErrBufferFull { + return false, nil + } + return false, readErr + } + + return bytes.HasPrefix(line, []byte(prefix)), nil +} + // readLine is a helper that mimics the functionality of calling bufio.Scanner.Scan() and // bufio.Scanner.Bytes(), but without the token size limitation. It will read and return // the next line in the Reader with the trailing newline stripped. It will return an diff --git a/vendor/github.com/spf13/afero/.travis.yml b/vendor/github.com/spf13/afero/.travis.yml deleted file mode 100644 index 1459644981..0000000000 --- a/vendor/github.com/spf13/afero/.travis.yml +++ /dev/null @@ -1,22 +0,0 @@ -sudo: false -language: go - -go: - - "1.13" - - "1.14" - - tip - -os: - - linux - - osx - -matrix: - allow_failures: - - go: tip - fast_finish: true - -script: - - go build -v ./... - - go test -count=1 -cover -race -v ./... - - go vet ./... - - FILES=$(gofmt -s -l . zipfs sftpfs mem tarfs); if [[ -n "${FILES}" ]]; then echo "You have go format errors; gofmt your changes"; exit 1; fi diff --git a/vendor/github.com/spf13/afero/README.md b/vendor/github.com/spf13/afero/README.md index c3e807aef8..3bafbfdfca 100644 --- a/vendor/github.com/spf13/afero/README.md +++ b/vendor/github.com/spf13/afero/README.md @@ -2,7 +2,7 @@ A FileSystem Abstraction System for Go -[![Build Status](https://travis-ci.org/spf13/afero.svg)](https://travis-ci.org/spf13/afero) [![Build status](https://ci.appveyor.com/api/projects/status/github/spf13/afero?branch=master&svg=true)](https://ci.appveyor.com/project/spf13/afero) [![GoDoc](https://godoc.org/github.com/spf13/afero?status.svg)](https://godoc.org/github.com/spf13/afero) [![Join the chat at https://gitter.im/spf13/afero](https://badges.gitter.im/Dev%20Chat.svg)](https://gitter.im/spf13/afero?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) +[![Test](https://github.com/spf13/afero/actions/workflows/test.yml/badge.svg)](https://github.com/spf13/afero/actions/workflows/test.yml) [![GoDoc](https://godoc.org/github.com/spf13/afero?status.svg)](https://godoc.org/github.com/spf13/afero) [![Join the chat at https://gitter.im/spf13/afero](https://badges.gitter.im/Dev%20Chat.svg)](https://gitter.im/spf13/afero?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) # Overview @@ -33,7 +33,7 @@ filesystem for full interoperability. 
* Support for compositional (union) file systems by combining multiple file systems acting as one * Specialized backends which modify existing filesystems (Read Only, Regexp filtered) * A set of utility functions ported from io, ioutil & hugo to be afero aware - +* Wrapper for go 1.16 filesystem abstraction `io/fs.FS` # Using Afero @@ -79,11 +79,11 @@ would. So if my application before had: ```go -os.Open('/tmp/foo') +os.Open("/tmp/foo") ``` We would replace it with: ```go -AppFs.Open('/tmp/foo') +AppFs.Open("/tmp/foo") ``` `AppFs` being the variable we defined above. @@ -94,6 +94,7 @@ AppFs.Open('/tmp/foo') File System Methods Available: ```go Chmod(name string, mode os.FileMode) : error +Chown(name string, uid, gid int) : error Chtimes(name string, atime time.Time, mtime time.Time) : error Create(name string) : File, error Mkdir(name string, perm os.FileMode) : error @@ -258,6 +259,18 @@ system using InMemoryFile. Afero has experimental support for the secure file transfer protocol (sftp), which can be used to perform file operations over an encrypted channel. +### GCSFs + +Afero has experimental support for Google Cloud Storage (GCS). You can either set the +`GOOGLE_APPLICATION_CREDENTIALS_JSON` env variable to your JSON credentials or use `opts` in +`NewGcsFS` to configure access to your GCS bucket. + +Some known limitations of the existing implementation: +* No Chmod support - The GCS ACL could probably be mapped to *nix style permissions but that would add another level of complexity and is ignored in this version. +* No Chtimes support - Could be simulated with attributes (gcs a/m-times are set implicitly) but that is left for another version. +* Not thread safe - Also assumes all file operations are done through the same instance of the GcsFs. File operations between different GcsFs instances are not guaranteed to be consistent. + + ## Filtering Backends ### BasePathFs diff --git a/vendor/github.com/spf13/afero/afero.go b/vendor/github.com/spf13/afero/afero.go index f5b5e127cd..39f6585209 100644 --- a/vendor/github.com/spf13/afero/afero.go +++ b/vendor/github.com/spf13/afero/afero.go @@ -91,17 +91,20 @@ type Fs interface { // The name of this FileSystem Name() string - //Chmod changes the mode of the named file to mode. + // Chmod changes the mode of the named file to mode. Chmod(name string, mode os.FileMode) error - //Chtimes changes the access and modification times of the named file + // Chown changes the uid and gid of the named file. + Chown(name string, uid, gid int) error + + // Chtimes changes the access and modification times of the named file Chtimes(name string, atime time.Time, mtime time.Time) error } var ( ErrFileClosed = errors.New("File is closed") - ErrOutOfRange = errors.New("Out of range") - ErrTooLarge = errors.New("Too large") + ErrOutOfRange = errors.New("out of range") + ErrTooLarge = errors.New("too large") ErrFileNotFound = os.ErrNotExist ErrFileExists = os.ErrExist ErrDestinationExists = os.ErrExist diff --git a/vendor/github.com/spf13/afero/appveyor.yml b/vendor/github.com/spf13/afero/appveyor.yml index 5d2f34bf16..65e20e8ca3 100644 --- a/vendor/github.com/spf13/afero/appveyor.yml +++ b/vendor/github.com/spf13/afero/appveyor.yml @@ -1,3 +1,5 @@ +# This currently does nothing. We have moved to GitHub Actions, but this is kept +# until spf13 has disabled this project in AppVeyor.
version: '{build}' clone_folder: C:\gopath\src\github.com\spf13\afero environment: @@ -6,10 +8,3 @@ build_script: - cmd: >- go version - go env - - go get -v github.com/spf13/afero/... - - go build -v github.com/spf13/afero/... -test_script: -- cmd: go test -count=1 -cover -race -v github.com/spf13/afero/... diff --git a/vendor/github.com/spf13/afero/basepath.go b/vendor/github.com/spf13/afero/basepath.go index 3a14b833e4..2e72793a3e 100644 --- a/vendor/github.com/spf13/afero/basepath.go +++ b/vendor/github.com/spf13/afero/basepath.go @@ -1,6 +1,7 @@ package afero import ( + "io/fs" "os" "path/filepath" "runtime" @@ -8,7 +9,10 @@ import ( "time" ) -var _ Lstater = (*BasePathFs)(nil) +var ( + _ Lstater = (*BasePathFs)(nil) + _ fs.ReadDirFile = (*BasePathFile)(nil) +) // The BasePathFs restricts all operations to a given path within an Fs. // The given file name to the operations on this Fs will be prepended with @@ -33,6 +37,13 @@ func (f *BasePathFile) Name() string { return strings.TrimPrefix(sourcename, filepath.Clean(f.path)) } +func (f *BasePathFile) ReadDir(n int) ([]fs.DirEntry, error) { + if rdf, ok := f.File.(fs.ReadDirFile); ok { + return rdf.ReadDir(n) + } + return readDirFile{f.File}.ReadDir(n) +} + func NewBasePathFs(source Fs, path string) Fs { return &BasePathFs{source: source, path: path} } @@ -83,6 +94,13 @@ func (b *BasePathFs) Chmod(name string, mode os.FileMode) (err error) { return b.source.Chmod(name, mode) } +func (b *BasePathFs) Chown(name string, uid, gid int) (err error) { + if name, err = b.RealPath(name); err != nil { + return &os.PathError{Op: "chown", Path: name, Err: err} + } + return b.source.Chown(name, uid, gid) +} + func (b *BasePathFs) Name() string { return "BasePathFs" } @@ -202,5 +220,3 @@ func (b *BasePathFs) ReadlinkIfPossible(name string) (string, error) { } return "", &os.PathError{Op: "readlink", Path: name, Err: ErrNoReadlink} } - -// vim: ts=4 sw=4 noexpandtab nolist syn=go diff --git a/vendor/github.com/spf13/afero/cacheOnReadFs.go b/vendor/github.com/spf13/afero/cacheOnReadFs.go index 29a26c67dd..017d344fd5 100644 --- a/vendor/github.com/spf13/afero/cacheOnReadFs.go +++ b/vendor/github.com/spf13/afero/cacheOnReadFs.go @@ -75,6 +75,10 @@ func (u *CacheOnReadFs) copyToLayer(name string) error { return copyToLayer(u.base, u.layer, name) } +func (u *CacheOnReadFs) copyFileToLayer(name string, flag int, perm os.FileMode) error { + return copyFileToLayer(u.base, u.layer, name, flag, perm) +} + func (u *CacheOnReadFs) Chtimes(name string, atime, mtime time.Time) error { st, _, err := u.cacheStatus(name) if err != nil { @@ -117,6 +121,27 @@ func (u *CacheOnReadFs) Chmod(name string, mode os.FileMode) error { return u.layer.Chmod(name, mode) } +func (u *CacheOnReadFs) Chown(name string, uid, gid int) error { + st, _, err := u.cacheStatus(name) + if err != nil { + return err + } + switch st { + case cacheLocal: + case cacheHit: + err = u.base.Chown(name, uid, gid) + case cacheStale, cacheMiss: + if err := u.copyToLayer(name); err != nil { + return err + } + err = u.base.Chown(name, uid, gid) + } + if err != nil { + return err + } + return u.layer.Chown(name, uid, gid) +} + func (u *CacheOnReadFs) Stat(name string) (os.FileInfo, error) { st, fi, err := u.cacheStatus(name) if err != nil { @@ -191,7 +216,7 @@ func (u *CacheOnReadFs) OpenFile(name string, flag int, perm os.FileMode) (File, switch st { case cacheLocal, cacheHit: default: - if err := u.copyToLayer(name); err != nil { + if err := u.copyFileToLayer(name, flag, perm); err != nil { return nil, 
err } } diff --git a/vendor/github.com/spf13/afero/const_bsds.go b/vendor/github.com/spf13/afero/const_bsds.go index 18b45824be..30855de572 100644 --- a/vendor/github.com/spf13/afero/const_bsds.go +++ b/vendor/github.com/spf13/afero/const_bsds.go @@ -11,7 +11,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build aix darwin openbsd freebsd netbsd dragonfly +//go:build aix || darwin || openbsd || freebsd || netbsd || dragonfly || zos +// +build aix darwin openbsd freebsd netbsd dragonfly zos package afero diff --git a/vendor/github.com/spf13/afero/const_win_unix.go b/vendor/github.com/spf13/afero/const_win_unix.go index 2b850e4ddb..12792d21e2 100644 --- a/vendor/github.com/spf13/afero/const_win_unix.go +++ b/vendor/github.com/spf13/afero/const_win_unix.go @@ -10,12 +10,8 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// +build !darwin -// +build !openbsd -// +build !freebsd -// +build !dragonfly -// +build !netbsd -// +build !aix +//go:build !darwin && !openbsd && !freebsd && !dragonfly && !netbsd && !aix && !zos +// +build !darwin,!openbsd,!freebsd,!dragonfly,!netbsd,!aix,!zos package afero diff --git a/vendor/github.com/spf13/afero/copyOnWriteFs.go b/vendor/github.com/spf13/afero/copyOnWriteFs.go index 96b7701261..184d6dd702 100644 --- a/vendor/github.com/spf13/afero/copyOnWriteFs.go +++ b/vendor/github.com/spf13/afero/copyOnWriteFs.go @@ -14,7 +14,7 @@ var _ Lstater = (*CopyOnWriteFs)(nil) // a possibly writeable layer on top. Changes to the file system will only // be made in the overlay: Changing an existing file in the base layer which // is not present in the overlay will copy the file to the overlay ("changing" -// includes also calls to e.g. Chtimes() and Chmod()). +// includes also calls to e.g. Chtimes(), Chmod() and Chown()). // // Reading directories is currently only supported via Open(), not OpenFile(). type CopyOnWriteFs struct { @@ -75,6 +75,19 @@ func (u *CopyOnWriteFs) Chmod(name string, mode os.FileMode) error { return u.layer.Chmod(name, mode) } +func (u *CopyOnWriteFs) Chown(name string, uid, gid int) error { + b, err := u.isBaseFile(name) + if err != nil { + return err + } + if b { + if err := u.copyToLayer(name); err != nil { + return err + } + } + return u.layer.Chown(name, uid, gid) +} + func (u *CopyOnWriteFs) Stat(name string) (os.FileInfo, error) { fi, err := u.layer.Stat(name) if err != nil { @@ -210,7 +223,7 @@ func (u *CopyOnWriteFs) OpenFile(name string, flag int, perm os.FileMode) (File, return nil, err } if isaDir { - if err = u.layer.MkdirAll(dir, 0777); err != nil { + if err = u.layer.MkdirAll(dir, 0o777); err != nil { return nil, err } return u.layer.OpenFile(name, flag, perm) @@ -234,8 +247,9 @@ func (u *CopyOnWriteFs) OpenFile(name string, flag int, perm os.FileMode) (File, // This function handles the 9 different possibilities caused // by the union which are the intersection of the following... 
-// layer: doesn't exist, exists as a file, and exists as a directory -// base: doesn't exist, exists as a file, and exists as a directory +// +// layer: doesn't exist, exists as a file, and exists as a directory +// base: doesn't exist, exists as a file, and exists as a directory func (u *CopyOnWriteFs) Open(name string) (File, error) { // Since the overlay overrides the base we check that first b, err := u.isBaseFile(name) @@ -309,5 +323,5 @@ func (u *CopyOnWriteFs) MkdirAll(name string, perm os.FileMode) error { } func (u *CopyOnWriteFs) Create(name string) (File, error) { - return u.OpenFile(name, os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0666) + return u.OpenFile(name, os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0o666) } diff --git a/vendor/github.com/spf13/afero/httpFs.go b/vendor/github.com/spf13/afero/httpFs.go index c42193688c..ac0de6d51f 100644 --- a/vendor/github.com/spf13/afero/httpFs.go +++ b/vendor/github.com/spf13/afero/httpFs.go @@ -29,7 +29,7 @@ type httpDir struct { } func (d httpDir) Open(name string) (http.File, error) { - if filepath.Separator != '/' && strings.IndexRune(name, filepath.Separator) >= 0 || + if filepath.Separator != '/' && strings.ContainsRune(name, filepath.Separator) || strings.Contains(name, "\x00") { return nil, errors.New("http: invalid character in file path") } @@ -67,6 +67,10 @@ func (h HttpFs) Chmod(name string, mode os.FileMode) error { return h.source.Chmod(name, mode) } +func (h HttpFs) Chown(name string, uid, gid int) error { + return h.source.Chown(name, uid, gid) +} + func (h HttpFs) Chtimes(name string, atime time.Time, mtime time.Time) error { return h.source.Chtimes(name, atime, mtime) } diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/metadata.go b/vendor/github.com/spf13/afero/internal/common/adapters.go similarity index 53% rename from vendor/cloud.google.com/go/storage/internal/apiv2/metadata.go rename to vendor/github.com/spf13/afero/internal/common/adapters.go index 6ff86c4fb4..60685caa54 100644 --- a/vendor/cloud.google.com/go/storage/internal/apiv2/metadata.go +++ b/vendor/github.com/spf13/afero/internal/common/adapters.go @@ -1,10 +1,9 @@ -// Copyright 2022 Google LLC +// Copyright © 2022 Steve Francia . // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -12,15 +11,17 @@ // See the License for the specific language governing permissions and // limitations under the License. -package storage - -import ( - "context" +package common - "google.golang.org/grpc/metadata" -) +import "io/fs" -// InsertMetadata inserts the given gRPC metadata into the outgoing context. -func InsertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { - return insertMetadata(ctx, mds...) 
+// FileInfoDirEntry provides an adapter from os.FileInfo to fs.DirEntry +type FileInfoDirEntry struct { + fs.FileInfo } + +var _ fs.DirEntry = FileInfoDirEntry{} + +func (d FileInfoDirEntry) Type() fs.FileMode { return d.FileInfo.Mode().Type() } + +func (d FileInfoDirEntry) Info() (fs.FileInfo, error) { return d.FileInfo, nil } diff --git a/vendor/github.com/spf13/afero/iofs.go b/vendor/github.com/spf13/afero/iofs.go new file mode 100644 index 0000000000..938b9316e6 --- /dev/null +++ b/vendor/github.com/spf13/afero/iofs.go @@ -0,0 +1,298 @@ +//go:build go1.16 +// +build go1.16 + +package afero + +import ( + "io" + "io/fs" + "os" + "path" + "sort" + "time" + + "github.com/spf13/afero/internal/common" +) + +// IOFS adopts afero.Fs to stdlib io/fs.FS +type IOFS struct { + Fs +} + +func NewIOFS(fs Fs) IOFS { + return IOFS{Fs: fs} +} + +var ( + _ fs.FS = IOFS{} + _ fs.GlobFS = IOFS{} + _ fs.ReadDirFS = IOFS{} + _ fs.ReadFileFS = IOFS{} + _ fs.StatFS = IOFS{} + _ fs.SubFS = IOFS{} +) + +func (iofs IOFS) Open(name string) (fs.File, error) { + const op = "open" + + // by convention for fs.FS implementations we should perform this check + if !fs.ValidPath(name) { + return nil, iofs.wrapError(op, name, fs.ErrInvalid) + } + + file, err := iofs.Fs.Open(name) + if err != nil { + return nil, iofs.wrapError(op, name, err) + } + + // file should implement fs.ReadDirFile + if _, ok := file.(fs.ReadDirFile); !ok { + file = readDirFile{file} + } + + return file, nil +} + +func (iofs IOFS) Glob(pattern string) ([]string, error) { + const op = "glob" + + // afero.Glob does not perform this check but it's required for implementations + if _, err := path.Match(pattern, ""); err != nil { + return nil, iofs.wrapError(op, pattern, err) + } + + items, err := Glob(iofs.Fs, pattern) + if err != nil { + return nil, iofs.wrapError(op, pattern, err) + } + + return items, nil +} + +func (iofs IOFS) ReadDir(name string) ([]fs.DirEntry, error) { + f, err := iofs.Fs.Open(name) + if err != nil { + return nil, iofs.wrapError("readdir", name, err) + } + + defer f.Close() + + if rdf, ok := f.(fs.ReadDirFile); ok { + items, err := rdf.ReadDir(-1) + if err != nil { + return nil, iofs.wrapError("readdir", name, err) + } + sort.Slice(items, func(i, j int) bool { return items[i].Name() < items[j].Name() }) + return items, nil + } + + items, err := f.Readdir(-1) + if err != nil { + return nil, iofs.wrapError("readdir", name, err) + } + sort.Sort(byName(items)) + + ret := make([]fs.DirEntry, len(items)) + for i := range items { + ret[i] = common.FileInfoDirEntry{FileInfo: items[i]} + } + + return ret, nil +} + +func (iofs IOFS) ReadFile(name string) ([]byte, error) { + const op = "readfile" + + if !fs.ValidPath(name) { + return nil, iofs.wrapError(op, name, fs.ErrInvalid) + } + + bytes, err := ReadFile(iofs.Fs, name) + if err != nil { + return nil, iofs.wrapError(op, name, err) + } + + return bytes, nil +} + +func (iofs IOFS) Sub(dir string) (fs.FS, error) { return IOFS{NewBasePathFs(iofs.Fs, dir)}, nil } + +func (IOFS) wrapError(op, path string, err error) error { + if _, ok := err.(*fs.PathError); ok { + return err // don't need to wrap again + } + + return &fs.PathError{ + Op: op, + Path: path, + Err: err, + } +} + +// readDirFile provides adapter from afero.File to fs.ReadDirFile needed for correct Open +type readDirFile struct { + File +} + +var _ fs.ReadDirFile = readDirFile{} + +func (r readDirFile) ReadDir(n int) ([]fs.DirEntry, error) { + items, err := r.File.Readdir(n) + if err != nil { + return nil, err + } + + ret := 
make([]fs.DirEntry, len(items)) + for i := range items { + ret[i] = common.FileInfoDirEntry{FileInfo: items[i]} + } + + return ret, nil +} + +// FromIOFS adopts io/fs.FS to use it as afero.Fs +// Note that io/fs.FS is read-only so all mutating methods will return fs.PathError with fs.ErrPermission +// To store modifications you may use afero.CopyOnWriteFs +type FromIOFS struct { + fs.FS +} + +var _ Fs = FromIOFS{} + +func (f FromIOFS) Create(name string) (File, error) { return nil, notImplemented("create", name) } + +func (f FromIOFS) Mkdir(name string, perm os.FileMode) error { return notImplemented("mkdir", name) } + +func (f FromIOFS) MkdirAll(path string, perm os.FileMode) error { + return notImplemented("mkdirall", path) +} + +func (f FromIOFS) Open(name string) (File, error) { + file, err := f.FS.Open(name) + if err != nil { + return nil, err + } + + return fromIOFSFile{File: file, name: name}, nil +} + +func (f FromIOFS) OpenFile(name string, flag int, perm os.FileMode) (File, error) { + return f.Open(name) +} + +func (f FromIOFS) Remove(name string) error { + return notImplemented("remove", name) +} + +func (f FromIOFS) RemoveAll(path string) error { + return notImplemented("removeall", path) +} + +func (f FromIOFS) Rename(oldname, newname string) error { + return notImplemented("rename", oldname) +} + +func (f FromIOFS) Stat(name string) (os.FileInfo, error) { return fs.Stat(f.FS, name) } + +func (f FromIOFS) Name() string { return "fromiofs" } + +func (f FromIOFS) Chmod(name string, mode os.FileMode) error { + return notImplemented("chmod", name) +} + +func (f FromIOFS) Chown(name string, uid, gid int) error { + return notImplemented("chown", name) +} + +func (f FromIOFS) Chtimes(name string, atime time.Time, mtime time.Time) error { + return notImplemented("chtimes", name) +} + +type fromIOFSFile struct { + fs.File + name string +} + +func (f fromIOFSFile) ReadAt(p []byte, off int64) (n int, err error) { + readerAt, ok := f.File.(io.ReaderAt) + if !ok { + return -1, notImplemented("readat", f.name) + } + + return readerAt.ReadAt(p, off) +} + +func (f fromIOFSFile) Seek(offset int64, whence int) (int64, error) { + seeker, ok := f.File.(io.Seeker) + if !ok { + return -1, notImplemented("seek", f.name) + } + + return seeker.Seek(offset, whence) +} + +func (f fromIOFSFile) Write(p []byte) (n int, err error) { + return -1, notImplemented("write", f.name) +} + +func (f fromIOFSFile) WriteAt(p []byte, off int64) (n int, err error) { + return -1, notImplemented("writeat", f.name) +} + +func (f fromIOFSFile) Name() string { return f.name } + +func (f fromIOFSFile) Readdir(count int) ([]os.FileInfo, error) { + rdfile, ok := f.File.(fs.ReadDirFile) + if !ok { + return nil, notImplemented("readdir", f.name) + } + + entries, err := rdfile.ReadDir(count) + if err != nil { + return nil, err + } + + ret := make([]os.FileInfo, len(entries)) + for i := range entries { + ret[i], err = entries[i].Info() + + if err != nil { + return nil, err + } + } + + return ret, nil +} + +func (f fromIOFSFile) Readdirnames(n int) ([]string, error) { + rdfile, ok := f.File.(fs.ReadDirFile) + if !ok { + return nil, notImplemented("readdir", f.name) + } + + entries, err := rdfile.ReadDir(n) + if err != nil { + return nil, err + } + + ret := make([]string, len(entries)) + for i := range entries { + ret[i] = entries[i].Name() + } + + return ret, nil +} + +func (f fromIOFSFile) Sync() error { return nil } + +func (f fromIOFSFile) Truncate(size int64) error { + return notImplemented("truncate", f.name) +} + +func (f 
fromIOFSFile) WriteString(s string) (ret int, err error) { + return -1, notImplemented("writestring", f.name) +} + +func notImplemented(op, path string) error { + return &fs.PathError{Op: op, Path: path, Err: fs.ErrPermission} +} diff --git a/vendor/github.com/spf13/afero/ioutil.go b/vendor/github.com/spf13/afero/ioutil.go index a403133e27..fa6abe1eee 100644 --- a/vendor/github.com/spf13/afero/ioutil.go +++ b/vendor/github.com/spf13/afero/ioutil.go @@ -141,8 +141,10 @@ func WriteFile(fs Fs, filename string, data []byte, perm os.FileMode) error { // We generate random temporary file names so that there's a good // chance the file doesn't exist yet - keeps the number of tries in // TempFile to a minimum. -var rand uint32 -var randmu sync.Mutex +var ( + randNum uint32 + randmu sync.Mutex +) func reseed() uint32 { return uint32(time.Now().UnixNano() + int64(os.Getpid())) @@ -150,12 +152,12 @@ func reseed() uint32 { func nextRandom() string { randmu.Lock() - r := rand + r := randNum if r == 0 { r = reseed() } r = r*1664525 + 1013904223 // constants from Numerical Recipes - rand = r + randNum = r randmu.Unlock() return strconv.Itoa(int(1e9 + r%1e9))[1:] } @@ -190,11 +192,11 @@ func TempFile(fs Fs, dir, pattern string) (f File, err error) { nconflict := 0 for i := 0; i < 10000; i++ { name := filepath.Join(dir, prefix+nextRandom()+suffix) - f, err = fs.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600) + f, err = fs.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0o600) if os.IsExist(err) { if nconflict++; nconflict > 10 { randmu.Lock() - rand = reseed() + randNum = reseed() randmu.Unlock() } continue @@ -214,6 +216,7 @@ func TempFile(fs Fs, dir, pattern string) (f File, err error) { func (a Afero) TempDir(dir, prefix string) (name string, err error) { return TempDir(a.Fs, dir, prefix) } + func TempDir(fs Fs, dir, prefix string) (name string, err error) { if dir == "" { dir = os.TempDir() @@ -222,11 +225,11 @@ func TempDir(fs Fs, dir, prefix string) (name string, err error) { nconflict := 0 for i := 0; i < 10000; i++ { try := filepath.Join(dir, prefix+nextRandom()) - err = fs.Mkdir(try, 0700) + err = fs.Mkdir(try, 0o700) if os.IsExist(err) { if nconflict++; nconflict > 10 { randmu.Lock() - rand = reseed() + randNum = reseed() randmu.Unlock() } continue diff --git a/vendor/github.com/spf13/afero/mem/file.go b/vendor/github.com/spf13/afero/mem/file.go index 07b2e12ae5..62fe4498e1 100644 --- a/vendor/github.com/spf13/afero/mem/file.go +++ b/vendor/github.com/spf13/afero/mem/file.go @@ -18,16 +18,20 @@ import ( "bytes" "errors" "io" + "io/fs" "os" "path/filepath" "sync" "sync/atomic" -) + "time" -import "time" + "github.com/spf13/afero/internal/common" +) const FilePathSeparator = string(filepath.Separator) +var _ fs.ReadDirFile = &File{} + type File struct { // atomic requires 64-bit alignment for struct field access at int64 @@ -57,6 +61,8 @@ type FileData struct { dir bool mode os.FileMode modtime time.Time + uid int + gid int } func (d *FileData) Name() string { @@ -70,7 +76,7 @@ func CreateFile(name string) *FileData { } func CreateDir(name string) *FileData { - return &FileData{name: name, memDir: &DirMap{}, dir: true} + return &FileData{name: name, memDir: &DirMap{}, dir: true, modtime: time.Now()} } func ChangeFileName(f *FileData, newname string) { @@ -95,6 +101,18 @@ func setModTime(f *FileData, mtime time.Time) { f.modtime = mtime } +func SetUID(f *FileData, uid int) { + f.Lock() + f.uid = uid + f.Unlock() +} + +func SetGID(f *FileData, gid int) { + f.Lock() + f.gid = gid + f.Unlock() +} 
+ func GetFileInfo(f *FileData) *FileInfo { return &FileInfo{f} } @@ -170,10 +188,23 @@ func (f *File) Readdirnames(n int) (names []string, err error) { return names, err } +// Implements fs.ReadDirFile +func (f *File) ReadDir(n int) ([]fs.DirEntry, error) { + fi, err := f.Readdir(n) + if err != nil { + return nil, err + } + di := make([]fs.DirEntry, len(fi)) + for i, f := range fi { + di[i] = common.FileInfoDirEntry{FileInfo: f} + } + return di, nil +} + func (f *File) Read(b []byte) (n int, err error) { f.fileData.Lock() defer f.fileData.Unlock() - if f.closed == true { + if f.closed { return 0, ErrFileClosed } if len(b) > 0 && int(f.at) == len(f.fileData.data) { @@ -201,7 +232,7 @@ func (f *File) ReadAt(b []byte, off int64) (n int, err error) { } func (f *File) Truncate(size int64) error { - if f.closed == true { + if f.closed { return ErrFileClosed } if f.readOnly { @@ -210,9 +241,11 @@ func (f *File) Truncate(size int64) error { if size < 0 { return ErrOutOfRange } + f.fileData.Lock() + defer f.fileData.Unlock() if size > int64(len(f.fileData.data)) { diff := size - int64(len(f.fileData.data)) - f.fileData.data = append(f.fileData.data, bytes.Repeat([]byte{00}, int(diff))...) + f.fileData.data = append(f.fileData.data, bytes.Repeat([]byte{0o0}, int(diff))...) } else { f.fileData.data = f.fileData.data[0:size] } @@ -221,7 +254,7 @@ func (f *File) Truncate(size int64) error { } func (f *File) Seek(offset int64, whence int) (int64, error) { - if f.closed == true { + if f.closed { return 0, ErrFileClosed } switch whence { @@ -236,7 +269,7 @@ func (f *File) Seek(offset int64, whence int) (int64, error) { } func (f *File) Write(b []byte) (n int, err error) { - if f.closed == true { + if f.closed { return 0, ErrFileClosed } if f.readOnly { @@ -252,7 +285,7 @@ func (f *File) Write(b []byte) (n int, err error) { tail = f.fileData.data[n+int(cur):] } if diff > 0 { - f.fileData.data = append(bytes.Repeat([]byte{00}, int(diff)), b...) + f.fileData.data = append(f.fileData.data, append(bytes.Repeat([]byte{0o0}, int(diff)), b...)...) f.fileData.data = append(f.fileData.data, tail...) } else { f.fileData.data = append(f.fileData.data[:cur], b...) @@ -288,16 +321,19 @@ func (s *FileInfo) Name() string { s.Unlock() return name } + func (s *FileInfo) Mode() os.FileMode { s.Lock() defer s.Unlock() return s.mode } + func (s *FileInfo) ModTime() time.Time { s.Lock() defer s.Unlock() return s.modtime } + func (s *FileInfo) IsDir() bool { s.Lock() defer s.Unlock() @@ -315,8 +351,8 @@ func (s *FileInfo) Size() int64 { var ( ErrFileClosed = errors.New("File is closed") - ErrOutOfRange = errors.New("Out of range") - ErrTooLarge = errors.New("Too large") + ErrOutOfRange = errors.New("out of range") + ErrTooLarge = errors.New("too large") ErrFileNotFound = os.ErrNotExist ErrFileExists = os.ErrExist ErrDestinationExists = os.ErrExist diff --git a/vendor/github.com/spf13/afero/memmap.go b/vendor/github.com/spf13/afero/memmap.go index 0fa9592499..d6c744e8d5 100644 --- a/vendor/github.com/spf13/afero/memmap.go +++ b/vendor/github.com/spf13/afero/memmap.go @@ -15,9 +15,13 @@ package afero import ( "fmt" + "io" + "log" "os" "path/filepath" + + "sort" "strings" "sync" "time" @@ -43,7 +47,7 @@ func (m *MemMapFs) getData() map[string]*mem.FileData { // Root should always exist, right? // TODO: what about windows? 
root := mem.CreateDir(FilePathSeparator) - mem.SetMode(root, os.ModeDir|0755) + mem.SetMode(root, os.ModeDir|0o755) m.data[FilePathSeparator] = root }) return m.data @@ -87,6 +91,24 @@ func (m *MemMapFs) findParent(f *mem.FileData) *mem.FileData { return pfile } +func (m *MemMapFs) findDescendants(name string) []*mem.FileData { + fData := m.getData() + descendants := make([]*mem.FileData, 0, len(fData)) + for p, dFile := range fData { + if strings.HasPrefix(p, name+FilePathSeparator) { + descendants = append(descendants, dFile) + } + } + + sort.Slice(descendants, func(i, j int) bool { + cur := len(strings.Split(descendants[i].Name(), FilePathSeparator)) + next := len(strings.Split(descendants[j].Name(), FilePathSeparator)) + return cur < next + }) + + return descendants +} + func (m *MemMapFs) registerWithParent(f *mem.FileData, perm os.FileMode) { if f == nil { return @@ -96,12 +118,12 @@ func (m *MemMapFs) registerWithParent(f *mem.FileData, perm os.FileMode) { pdir := filepath.Dir(filepath.Clean(f.Name())) err := m.lockfreeMkdir(pdir, perm) if err != nil { - //log.Println("Mkdir error:", err) + // log.Println("Mkdir error:", err) return } parent, err = m.lockfreeOpen(pdir) if err != nil { - //log.Println("Open after Mkdir error:", err) + // log.Println("Open after Mkdir error:", err) return } } @@ -142,6 +164,11 @@ func (m *MemMapFs) Mkdir(name string, perm os.FileMode) error { } m.mu.Lock() + // Dobule check that it doesn't exist. + if _, ok := m.getData()[name]; ok { + m.mu.Unlock() + return &os.PathError{Op: "mkdir", Path: name, Err: ErrFileExists} + } item := mem.CreateDir(name) mem.SetMode(item, os.ModeDir|perm) m.getData()[name] = item @@ -232,7 +259,7 @@ func (m *MemMapFs) OpenFile(name string, flag int, perm os.FileMode) (File, erro file = mem.NewReadOnlyFileHandle(file.(*mem.File).Data()) } if flag&os.O_APPEND > 0 { - _, err = file.Seek(0, os.SEEK_END) + _, err = file.Seek(0, io.SeekEnd) if err != nil { file.Close() return nil, err @@ -279,7 +306,7 @@ func (m *MemMapFs) RemoveAll(path string) error { defer m.mu.RUnlock() for p := range m.getData() { - if strings.HasPrefix(p, path) { + if p == path || strings.HasPrefix(p, path+FilePathSeparator) { m.mu.RUnlock() m.mu.Lock() delete(m.getData(), p) @@ -303,11 +330,22 @@ func (m *MemMapFs) Rename(oldname, newname string) error { if _, ok := m.getData()[oldname]; ok { m.mu.RUnlock() m.mu.Lock() - m.unRegisterWithParent(oldname) + err := m.unRegisterWithParent(oldname) + if err != nil { + return err + } + fileData := m.getData()[oldname] - delete(m.getData(), oldname) mem.ChangeFileName(fileData, newname) m.getData()[newname] = fileData + + err = m.renameDescendants(oldname, newname) + if err != nil { + return err + } + + delete(m.getData(), oldname) + m.registerWithParent(fileData, 0) m.mu.Unlock() m.mu.RLock() @@ -317,6 +355,29 @@ func (m *MemMapFs) Rename(oldname, newname string) error { return nil } +func (m *MemMapFs) renameDescendants(oldname, newname string) error { + descendants := m.findDescendants(oldname) + removes := make([]string, 0, len(descendants)) + for _, desc := range descendants { + descNewName := strings.Replace(desc.Name(), oldname, newname, 1) + err := m.unRegisterWithParent(desc.Name()) + if err != nil { + return err + } + + removes = append(removes, desc.Name()) + mem.ChangeFileName(desc, descNewName) + m.getData()[descNewName] = desc + + m.registerWithParent(desc, 0) + } + for _, r := range removes { + delete(m.getData(), r) + } + + return nil +} + func (m *MemMapFs) LstatIfPossible(name string) 
(os.FileInfo, bool, error) { fileInfo, err := m.Stat(name) return fileInfo, false, err @@ -363,6 +424,22 @@ func (m *MemMapFs) setFileMode(name string, mode os.FileMode) error { return nil } +func (m *MemMapFs) Chown(name string, uid, gid int) error { + name = normalizePath(name) + + m.mu.RLock() + f, ok := m.getData()[name] + m.mu.RUnlock() + if !ok { + return &os.PathError{Op: "chown", Path: name, Err: ErrFileNotFound} + } + + mem.SetUID(f, uid) + mem.SetGID(f, gid) + + return nil +} + func (m *MemMapFs) Chtimes(name string, atime time.Time, mtime time.Time) error { name = normalizePath(name) @@ -386,9 +463,3 @@ func (m *MemMapFs) List() { fmt.Println(x.Name(), y.Size()) } } - -// func debugMemMapList(fs Fs) { -// if x, ok := fs.(*MemMapFs); ok { -// x.List() -// } -// } diff --git a/vendor/github.com/spf13/afero/os.go b/vendor/github.com/spf13/afero/os.go index 4761db5d72..f1366321ec 100644 --- a/vendor/github.com/spf13/afero/os.go +++ b/vendor/github.com/spf13/afero/os.go @@ -91,6 +91,10 @@ func (OsFs) Chmod(name string, mode os.FileMode) error { return os.Chmod(name, mode) } +func (OsFs) Chown(name string, uid, gid int) error { + return os.Chown(name, uid, gid) +} + func (OsFs) Chtimes(name string, atime time.Time, mtime time.Time) error { return os.Chtimes(name, atime, mtime) } diff --git a/vendor/github.com/spf13/afero/readonlyfs.go b/vendor/github.com/spf13/afero/readonlyfs.go index f94b181b6c..bd8f9264dd 100644 --- a/vendor/github.com/spf13/afero/readonlyfs.go +++ b/vendor/github.com/spf13/afero/readonlyfs.go @@ -28,6 +28,10 @@ func (r *ReadOnlyFs) Chmod(n string, m os.FileMode) error { return syscall.EPERM } +func (r *ReadOnlyFs) Chown(n string, uid, gid int) error { + return syscall.EPERM +} + func (r *ReadOnlyFs) Name() string { return "ReadOnlyFilter" } diff --git a/vendor/github.com/spf13/afero/regexpfs.go b/vendor/github.com/spf13/afero/regexpfs.go index c8fc008670..218f3b235b 100644 --- a/vendor/github.com/spf13/afero/regexpfs.go +++ b/vendor/github.com/spf13/afero/regexpfs.go @@ -10,7 +10,6 @@ import ( // The RegexpFs filters files (not directories) by regular expression. Only // files matching the given regexp will be allowed, all others get a ENOENT error ( // "No such file or directory"). -// type RegexpFs struct { re *regexp.Regexp source Fs @@ -60,6 +59,13 @@ func (r *RegexpFs) Chmod(name string, mode os.FileMode) error { return r.source.Chmod(name, mode) } +func (r *RegexpFs) Chown(name string, uid, gid int) error { + if err := r.dirOrMatches(name); err != nil { + return err + } + return r.source.Chown(name, uid, gid) +} + func (r *RegexpFs) Name() string { return "RegexpFs" } diff --git a/vendor/github.com/spf13/afero/symlink.go b/vendor/github.com/spf13/afero/symlink.go index d1c6ea53d9..aa6ae125b6 100644 --- a/vendor/github.com/spf13/afero/symlink.go +++ b/vendor/github.com/spf13/afero/symlink.go @@ -21,9 +21,9 @@ import ( // filesystems saying so. 
// It indicates support for 3 symlink related interfaces that implement the // behaviors of the os methods: -// - Lstat -// - Symlink, and -// - Readlink +// - Lstat +// - Symlink, and +// - Readlink type Symlinker interface { Lstater Linker diff --git a/vendor/github.com/spf13/afero/unionFile.go b/vendor/github.com/spf13/afero/unionFile.go index 985363eea7..62dd6c93c8 100644 --- a/vendor/github.com/spf13/afero/unionFile.go +++ b/vendor/github.com/spf13/afero/unionFile.go @@ -47,7 +47,7 @@ func (f *UnionFile) Read(s []byte) (int, error) { if (err == nil || err == io.EOF) && f.Base != nil { // advance the file position also in the base file, the next // call may be a write at this position (or a seek with SEEK_CUR) - if _, seekErr := f.Base.Seek(int64(n), os.SEEK_CUR); seekErr != nil { + if _, seekErr := f.Base.Seek(int64(n), io.SeekCurrent); seekErr != nil { // only overwrite err in case the seek fails: we need to // report an eventual io.EOF to the caller err = seekErr @@ -65,7 +65,7 @@ func (f *UnionFile) ReadAt(s []byte, o int64) (int, error) { if f.Layer != nil { n, err := f.Layer.ReadAt(s, o) if (err == nil || err == io.EOF) && f.Base != nil { - _, err = f.Base.Seek(o+int64(n), os.SEEK_SET) + _, err = f.Base.Seek(o+int64(n), io.SeekStart) } return n, err } @@ -130,7 +130,7 @@ func (f *UnionFile) Name() string { type DirsMerger func(lofi, bofi []os.FileInfo) ([]os.FileInfo, error) var defaultUnionMergeDirsFn = func(lofi, bofi []os.FileInfo) ([]os.FileInfo, error) { - var files = make(map[string]os.FileInfo) + files := make(map[string]os.FileInfo) for _, fi := range lofi { files[fi.Name()] = fi @@ -151,7 +151,6 @@ var defaultUnionMergeDirsFn = func(lofi, bofi []os.FileInfo) ([]os.FileInfo, err } return rfi, nil - } // Readdir will weave the two directories together and @@ -268,20 +267,14 @@ func (f *UnionFile) WriteString(s string) (n int, err error) { return 0, BADFD } -func copyToLayer(base Fs, layer Fs, name string) error { - bfh, err := base.Open(name) - if err != nil { - return err - } - defer bfh.Close() - +func copyFile(base Fs, layer Fs, name string, bfh File) error { // First make sure the directory exists exists, err := Exists(layer, filepath.Dir(name)) if err != nil { return err } if !exists { - err = layer.MkdirAll(filepath.Dir(name), 0777) // FIXME? + err = layer.MkdirAll(filepath.Dir(name), 0o777) // FIXME? 
if err != nil { return err } @@ -315,3 +308,23 @@ func copyToLayer(base Fs, layer Fs, name string) error { } return layer.Chtimes(name, bfi.ModTime(), bfi.ModTime()) } + +func copyToLayer(base Fs, layer Fs, name string) error { + bfh, err := base.Open(name) + if err != nil { + return err + } + defer bfh.Close() + + return copyFile(base, layer, name, bfh) +} + +func copyFileToLayer(base Fs, layer Fs, name string, flag int, perm os.FileMode) error { + bfh, err := base.OpenFile(name, flag, perm) + if err != nil { + return err + } + defer bfh.Close() + + return copyFile(base, layer, name, bfh) +} diff --git a/vendor/github.com/spf13/afero/util.go b/vendor/github.com/spf13/afero/util.go index 4f253f481e..9e4cba2746 100644 --- a/vendor/github.com/spf13/afero/util.go +++ b/vendor/github.com/spf13/afero/util.go @@ -25,6 +25,7 @@ import ( "strings" "unicode" + "golang.org/x/text/runes" "golang.org/x/text/transform" "golang.org/x/text/unicode/norm" ) @@ -42,7 +43,7 @@ func WriteReader(fs Fs, path string, r io.Reader) (err error) { ospath := filepath.FromSlash(dir) if ospath != "" { - err = fs.MkdirAll(ospath, 0777) // rwx, rw, r + err = fs.MkdirAll(ospath, 0o777) // rwx, rw, r if err != nil { if err != os.ErrExist { return err @@ -70,7 +71,7 @@ func SafeWriteReader(fs Fs, path string, r io.Reader) (err error) { ospath := filepath.FromSlash(dir) if ospath != "" { - err = fs.MkdirAll(ospath, 0777) // rwx, rw, r + err = fs.MkdirAll(ospath, 0o777) // rwx, rw, r if err != nil { return } @@ -123,7 +124,7 @@ func GetTempDir(fs Fs, subPath string) string { return addSlash(dir) } - err := fs.MkdirAll(dir, 0777) + err := fs.MkdirAll(dir, 0o777) if err != nil { panic(err) } @@ -158,16 +159,12 @@ func UnicodeSanitize(s string) string { // Transform characters with accents into plain forms. func NeuterAccents(s string) string { - t := transform.Chain(norm.NFD, transform.RemoveFunc(isMn), norm.NFC) + t := transform.Chain(norm.NFD, runes.Remove(runes.In(unicode.Mn)), norm.NFC) result, _, _ := transform.String(t, string(s)) return result } -func isMn(r rune) bool { - return unicode.Is(unicode.Mn, r) // Mn: nonspacing marks -} - func (a Afero) FileContainsBytes(filename string, subslice []byte) (bool, error) { return FileContainsBytes(a.Fs, filename, subslice) } @@ -200,7 +197,6 @@ func FileContainsAnyBytes(fs Fs, filename string, subslices [][]byte) (bool, err // readerContains reports whether any of the subslices is within r. func readerContainsAny(r io.Reader, subslices ...[]byte) bool { - if r == nil || len(subslices) == 0 { return false } @@ -299,6 +295,9 @@ func IsEmpty(fs Fs, path string) (bool, error) { } defer f.Close() list, err := f.Readdir(-1) + if err != nil { + return false, err + } return len(list) == 0, nil } return fi.Size() == 0, nil diff --git a/vendor/github.com/spf13/cobra/.golangci.yml b/vendor/github.com/spf13/cobra/.golangci.yml index 439d3e1de4..2578d94b5e 100644 --- a/vendor/github.com/spf13/cobra/.golangci.yml +++ b/vendor/github.com/spf13/cobra/.golangci.yml @@ -1,4 +1,4 @@ -# Copyright 2013-2022 The Cobra Authors +# Copyright 2013-2023 The Cobra Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
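For reference, the `Chown` support added across the afero backends above (`MemMapFs`, `OsFs`, `RegexpFs`, with `ReadOnlyFs` rejecting it with `EPERM`) can be exercised roughly as follows. This is a minimal sketch, not part of the vendored code, and it assumes the vendored afero version exposes `Chown` on the `Fs` interface; the file path and uid/gid values are placeholders.

```go
package main

import (
	"fmt"

	"github.com/spf13/afero"
)

func main() {
	// In-memory filesystem: Chown records the uid/gid on the in-memory file.
	memFs := afero.NewMemMapFs()
	if err := afero.WriteFile(memFs, "/tmp/example.txt", []byte("hello"), 0o644); err != nil {
		panic(err)
	}
	if err := memFs.Chown("/tmp/example.txt", 1000, 1000); err != nil {
		fmt.Println("chown on MemMapFs failed:", err)
	}

	// Read-only wrapper: Chown (like Chmod) is rejected with syscall.EPERM.
	roFs := afero.NewReadOnlyFs(memFs)
	if err := roFs.Chown("/tmp/example.txt", 0, 0); err != nil {
		fmt.Println("chown on ReadOnlyFs rejected as expected:", err)
	}
}
```

Per the MemMapFs hunk, calling `Chown` on a path that does not exist returns an `*os.PathError` with `Op: "chown"` wrapping `ErrFileNotFound`.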
diff --git a/vendor/github.com/spf13/cobra/Makefile b/vendor/github.com/spf13/cobra/Makefile index c433a01bce..0da8d7aa08 100644 --- a/vendor/github.com/spf13/cobra/Makefile +++ b/vendor/github.com/spf13/cobra/Makefile @@ -5,10 +5,6 @@ ifeq (, $(shell which golangci-lint)) $(warning "could not find golangci-lint in $(PATH), run: curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh") endif -ifeq (, $(shell which richgo)) -$(warning "could not find richgo in $(PATH), run: go install github.com/kyoh86/richgo@latest") -endif - .PHONY: fmt lint test install_deps clean default: all @@ -25,6 +21,10 @@ lint: test: install_deps $(info ******************** running tests ********************) + go test -v ./... + +richtest: install_deps + $(info ******************** running tests with kyoh86/richgo ********************) richgo test -v ./... install_deps: diff --git a/vendor/github.com/spf13/cobra/README.md b/vendor/github.com/spf13/cobra/README.md index 7cc726beb4..592c0b8ab0 100644 --- a/vendor/github.com/spf13/cobra/README.md +++ b/vendor/github.com/spf13/cobra/README.md @@ -1,4 +1,4 @@ -![cobra logo](https://cloud.githubusercontent.com/assets/173412/10886352/ad566232-814f-11e5-9cd0-aa101788c117.png) +![cobra logo](assets/CobraMain.png) Cobra is a library for creating powerful modern CLI applications. @@ -6,7 +6,7 @@ Cobra is used in many Go projects such as [Kubernetes](https://kubernetes.io/), [Hugo](https://gohugo.io), and [GitHub CLI](https://github.com/cli/cli) to name a few. [This list](./projects_using_cobra.md) contains a more extensive list of projects using Cobra. -[![](https://img.shields.io/github/workflow/status/spf13/cobra/Test?longCache=tru&label=Test&logo=github%20actions&logoColor=fff)](https://github.com/spf13/cobra/actions?query=workflow%3ATest) +[![](https://img.shields.io/github/actions/workflow/status/spf13/cobra/test.yml?branch=main&longCache=true&label=Test&logo=github%20actions&logoColor=fff)](https://github.com/spf13/cobra/actions?query=workflow%3ATest) [![Go Reference](https://pkg.go.dev/badge/github.com/spf13/cobra.svg)](https://pkg.go.dev/github.com/spf13/cobra) [![Go Report Card](https://goreportcard.com/badge/github.com/spf13/cobra)](https://goreportcard.com/report/github.com/spf13/cobra) [![Slack](https://img.shields.io/badge/Slack-cobra-brightgreen)](https://gophers.slack.com/archives/CD3LP1199) diff --git a/vendor/github.com/spf13/cobra/active_help.go b/vendor/github.com/spf13/cobra/active_help.go index 95e03aecb6..2d0239437a 100644 --- a/vendor/github.com/spf13/cobra/active_help.go +++ b/vendor/github.com/spf13/cobra/active_help.go @@ -1,4 +1,4 @@ -// Copyright 2013-2022 The Cobra Authors +// Copyright 2013-2023 The Cobra Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/spf13/cobra/args.go b/vendor/github.com/spf13/cobra/args.go index 2c1f99e787..e79ec33a81 100644 --- a/vendor/github.com/spf13/cobra/args.go +++ b/vendor/github.com/spf13/cobra/args.go @@ -1,4 +1,4 @@ -// Copyright 2013-2022 The Cobra Authors +// Copyright 2013-2023 The Cobra Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -21,7 +21,7 @@ import ( type PositionalArgs func(cmd *Command, args []string) error -// Legacy arg validation has the following behaviour: +// legacyArgs validation has the following behaviour: // - root commands with no subcommands can take arbitrary arguments // - root commands with subcommands will do subcommand validity checking // - subcommands will always accept arbitrary arguments diff --git a/vendor/github.com/spf13/cobra/bash_completions.go b/vendor/github.com/spf13/cobra/bash_completions.go index 3acdb27974..10c78847de 100644 --- a/vendor/github.com/spf13/cobra/bash_completions.go +++ b/vendor/github.com/spf13/cobra/bash_completions.go @@ -1,4 +1,4 @@ -// Copyright 2013-2022 The Cobra Authors +// Copyright 2013-2023 The Cobra Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -532,7 +532,7 @@ func writeLocalNonPersistentFlag(buf io.StringWriter, flag *pflag.Flag) { } } -// Setup annotations for go completions for registered flags +// prepareCustomAnnotationsForFlags setup annotations for go completions for registered flags func prepareCustomAnnotationsForFlags(cmd *Command) { flagCompletionMutex.RLock() defer flagCompletionMutex.RUnlock() diff --git a/vendor/github.com/spf13/cobra/bash_completionsV2.go b/vendor/github.com/spf13/cobra/bash_completionsV2.go index bb4b71892c..19b09560c1 100644 --- a/vendor/github.com/spf13/cobra/bash_completionsV2.go +++ b/vendor/github.com/spf13/cobra/bash_completionsV2.go @@ -1,4 +1,4 @@ -// Copyright 2013-2022 The Cobra Authors +// Copyright 2013-2023 The Cobra Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -38,7 +38,7 @@ func genBashComp(buf io.StringWriter, name string, includeDesc bool) { __%[1]s_debug() { - if [[ -n ${BASH_COMP_DEBUG_FILE:-} ]]; then + if [[ -n ${BASH_COMP_DEBUG_FILE-} ]]; then echo "$*" >> "${BASH_COMP_DEBUG_FILE}" fi } @@ -65,7 +65,7 @@ __%[1]s_get_completion_results() { lastChar=${lastParam:$((${#lastParam}-1)):1} __%[1]s_debug "lastParam ${lastParam}, lastChar ${lastChar}" - if [ -z "${cur}" ] && [ "${lastChar}" != "=" ]; then + if [[ -z ${cur} && ${lastChar} != = ]]; then # If the last parameter is complete (there is a space following it) # We add an extra empty parameter so we can indicate this to the go method. __%[1]s_debug "Adding extra empty parameter" @@ -75,7 +75,7 @@ __%[1]s_get_completion_results() { # When completing a flag with an = (e.g., %[1]s -n=) # bash focuses on the part after the =, so we need to remove # the flag part from $cur - if [[ "${cur}" == -*=* ]]; then + if [[ ${cur} == -*=* ]]; then cur="${cur#*=}" fi @@ -87,7 +87,7 @@ __%[1]s_get_completion_results() { directive=${out##*:} # Remove the directive out=${out%%:*} - if [ "${directive}" = "${out}" ]; then + if [[ ${directive} == "${out}" ]]; then # There is not directive specified directive=0 fi @@ -101,22 +101,36 @@ __%[1]s_process_completion_results() { local shellCompDirectiveNoFileComp=%[5]d local shellCompDirectiveFilterFileExt=%[6]d local shellCompDirectiveFilterDirs=%[7]d + local shellCompDirectiveKeepOrder=%[8]d - if [ $((directive & shellCompDirectiveError)) -ne 0 ]; then + if (((directive & shellCompDirectiveError) != 0)); then # Error code. No completion. 
__%[1]s_debug "Received error from custom completion go code" return else - if [ $((directive & shellCompDirectiveNoSpace)) -ne 0 ]; then - if [[ $(type -t compopt) = "builtin" ]]; then + if (((directive & shellCompDirectiveNoSpace) != 0)); then + if [[ $(type -t compopt) == builtin ]]; then __%[1]s_debug "Activating no space" compopt -o nospace else __%[1]s_debug "No space directive not supported in this version of bash" fi fi - if [ $((directive & shellCompDirectiveNoFileComp)) -ne 0 ]; then - if [[ $(type -t compopt) = "builtin" ]]; then + if (((directive & shellCompDirectiveKeepOrder) != 0)); then + if [[ $(type -t compopt) == builtin ]]; then + # no sort isn't supported for bash less than < 4.4 + if [[ ${BASH_VERSINFO[0]} -lt 4 || ( ${BASH_VERSINFO[0]} -eq 4 && ${BASH_VERSINFO[1]} -lt 4 ) ]]; then + __%[1]s_debug "No sort directive not supported in this version of bash" + else + __%[1]s_debug "Activating keep order" + compopt -o nosort + fi + else + __%[1]s_debug "No sort directive not supported in this version of bash" + fi + fi + if (((directive & shellCompDirectiveNoFileComp) != 0)); then + if [[ $(type -t compopt) == builtin ]]; then __%[1]s_debug "Activating no file completion" compopt +o default else @@ -130,7 +144,7 @@ __%[1]s_process_completion_results() { local activeHelp=() __%[1]s_extract_activeHelp - if [ $((directive & shellCompDirectiveFilterFileExt)) -ne 0 ]; then + if (((directive & shellCompDirectiveFilterFileExt) != 0)); then # File extension filtering local fullFilter filter filteringCmd @@ -143,13 +157,12 @@ __%[1]s_process_completion_results() { filteringCmd="_filedir $fullFilter" __%[1]s_debug "File filtering command: $filteringCmd" $filteringCmd - elif [ $((directive & shellCompDirectiveFilterDirs)) -ne 0 ]; then + elif (((directive & shellCompDirectiveFilterDirs) != 0)); then # File completion for directories only - # Use printf to strip any trailing newline local subdir - subdir=$(printf "%%s" "${completions[0]}") - if [ -n "$subdir" ]; then + subdir=${completions[0]} + if [[ -n $subdir ]]; then __%[1]s_debug "Listing directories in $subdir" pushd "$subdir" >/dev/null 2>&1 && _filedir -d && popd >/dev/null 2>&1 || return else @@ -164,7 +177,7 @@ __%[1]s_process_completion_results() { __%[1]s_handle_special_char "$cur" = # Print the activeHelp statements before we finish - if [ ${#activeHelp[*]} -ne 0 ]; then + if ((${#activeHelp[*]} != 0)); then printf "\n"; printf "%%s\n" "${activeHelp[@]}" printf "\n" @@ -184,21 +197,21 @@ __%[1]s_process_completion_results() { # Separate activeHelp lines from real completions. # Fills the $activeHelp and $completions arrays. 
__%[1]s_extract_activeHelp() { - local activeHelpMarker="%[8]s" + local activeHelpMarker="%[9]s" local endIndex=${#activeHelpMarker} while IFS='' read -r comp; do - if [ "${comp:0:endIndex}" = "$activeHelpMarker" ]; then + if [[ ${comp:0:endIndex} == $activeHelpMarker ]]; then comp=${comp:endIndex} __%[1]s_debug "ActiveHelp found: $comp" - if [ -n "$comp" ]; then + if [[ -n $comp ]]; then activeHelp+=("$comp") fi else # Not an activeHelp line but a normal completion completions+=("$comp") fi - done < <(printf "%%s\n" "${out}") + done <<<"${out}" } __%[1]s_handle_completion_types() { @@ -254,7 +267,7 @@ __%[1]s_handle_standard_completion_case() { done < <(printf "%%s\n" "${completions[@]}") # If there is a single completion left, remove the description text - if [ ${#COMPREPLY[*]} -eq 1 ]; then + if ((${#COMPREPLY[*]} == 1)); then __%[1]s_debug "COMPREPLY[0]: ${COMPREPLY[0]}" comp="${COMPREPLY[0]%%%%$tab*}" __%[1]s_debug "Removed description from single completion, which is now: ${comp}" @@ -271,8 +284,8 @@ __%[1]s_handle_special_char() if [[ "$comp" == *${char}* && "$COMP_WORDBREAKS" == *${char}* ]]; then local word=${comp%%"${comp##*${char}}"} local idx=${#COMPREPLY[*]} - while [[ $((--idx)) -ge 0 ]]; do - COMPREPLY[$idx]=${COMPREPLY[$idx]#"$word"} + while ((--idx >= 0)); do + COMPREPLY[idx]=${COMPREPLY[idx]#"$word"} done fi } @@ -298,7 +311,7 @@ __%[1]s_format_comp_descriptions() # Make sure we can fit a description of at least 8 characters # if we are to align the descriptions. - if [[ $maxdesclength -gt 8 ]]; then + if ((maxdesclength > 8)); then # Add the proper number of spaces to align the descriptions for ((i = ${#comp} ; i < longest ; i++)); do comp+=" " @@ -310,8 +323,8 @@ __%[1]s_format_comp_descriptions() # If there is enough space for any description text, # truncate the descriptions that are too long for the shell width - if [ $maxdesclength -gt 0 ]; then - if [ ${#desc} -gt $maxdesclength ]; then + if ((maxdesclength > 0)); then + if ((${#desc} > maxdesclength)); then desc=${desc:0:$(( maxdesclength - 1 ))} desc+="…" fi @@ -332,9 +345,9 @@ __start_%[1]s() # Call _init_completion from the bash-completion package # to prepare the arguments properly if declare -F _init_completion >/dev/null 2>&1; then - _init_completion -n "=:" || return + _init_completion -n =: || return else - __%[1]s_init_completion -n "=:" || return + __%[1]s_init_completion -n =: || return fi __%[1]s_debug @@ -361,7 +374,7 @@ fi # ex: ts=4 sw=4 et filetype=sh `, name, compCmd, ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp, - ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, + ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, ShellCompDirectiveKeepOrder, activeHelpMarker)) } diff --git a/vendor/github.com/spf13/cobra/cobra.go b/vendor/github.com/spf13/cobra/cobra.go index fe44bc8a07..b07b44a0ce 100644 --- a/vendor/github.com/spf13/cobra/cobra.go +++ b/vendor/github.com/spf13/cobra/cobra.go @@ -1,4 +1,4 @@ -// Copyright 2013-2022 The Cobra Authors +// Copyright 2013-2023 The Cobra Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -167,8 +167,8 @@ func appendIfNotPresent(s, stringToAppend string) string { // rpad adds padding to the right of a string. 
func rpad(s string, padding int) string { - template := fmt.Sprintf("%%-%ds", padding) - return fmt.Sprintf(template, s) + formattedString := fmt.Sprintf("%%-%ds", padding) + return fmt.Sprintf(formattedString, s) } // tmpl executes the given template text on data, writing the result to w. diff --git a/vendor/github.com/spf13/cobra/command.go b/vendor/github.com/spf13/cobra/command.go index 9d5e9cf5eb..01f7c6f1c5 100644 --- a/vendor/github.com/spf13/cobra/command.go +++ b/vendor/github.com/spf13/cobra/command.go @@ -1,4 +1,4 @@ -// Copyright 2013-2022 The Cobra Authors +// Copyright 2013-2023 The Cobra Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -35,7 +35,7 @@ const FlagSetByCobraAnnotation = "cobra_annotation_flag_set_by_cobra" // FParseErrWhitelist configures Flag parse errors to be ignored type FParseErrWhitelist flag.ParseErrorsWhitelist -// Structure to manage groups for commands +// Group Structure to manage groups for commands type Group struct { ID string Title string @@ -47,7 +47,7 @@ type Group struct { // definition to ensure usability. type Command struct { // Use is the one-line usage message. - // Recommended syntax is as follow: + // Recommended syntax is as follows: // [ ] identifies an optional argument. Arguments that are not enclosed in brackets are required. // ... indicates that you can specify multiple values for the previous argument. // | indicates mutually exclusive information. You can use the argument to the left of the separator or the @@ -321,7 +321,7 @@ func (c *Command) SetHelpCommand(cmd *Command) { c.helpCommand = cmd } -// SetHelpCommandGroup sets the group id of the help command. +// SetHelpCommandGroupID sets the group id of the help command. func (c *Command) SetHelpCommandGroupID(groupID string) { if c.helpCommand != nil { c.helpCommand.GroupID = groupID @@ -330,7 +330,7 @@ func (c *Command) SetHelpCommandGroupID(groupID string) { c.helpCommandGroupID = groupID } -// SetCompletionCommandGroup sets the group id of the completion command. +// SetCompletionCommandGroupID sets the group id of the completion command. func (c *Command) SetCompletionCommandGroupID(groupID string) { // completionCommandGroupID is used if no completion command is defined by the user c.Root().completionCommandGroupID = groupID @@ -655,20 +655,44 @@ Loop: // argsMinusFirstX removes only the first x from args. Otherwise, commands that look like // openshift admin policy add-role-to-user admin my-user, lose the admin argument (arg[4]). -func argsMinusFirstX(args []string, x string) []string { - for i, y := range args { - if x == y { - ret := []string{} - ret = append(ret, args[:i]...) - ret = append(ret, args[i+1:]...) - return ret +// Special care needs to be taken not to remove a flag value. +func (c *Command) argsMinusFirstX(args []string, x string) []string { + if len(args) == 0 { + return args + } + c.mergePersistentFlags() + flags := c.Flags() + +Loop: + for pos := 0; pos < len(args); pos++ { + s := args[pos] + switch { + case s == "--": + // -- means we have reached the end of the parseable args. Break out of the loop now. + break Loop + case strings.HasPrefix(s, "--") && !strings.Contains(s, "=") && !hasNoOptDefVal(s[2:], flags): + fallthrough + case strings.HasPrefix(s, "-") && !strings.Contains(s, "=") && len(s) == 2 && !shortHasNoOptDefVal(s[1:], flags): + // This is a flag without a default value, and an equal sign is not used. 
Increment pos in order to skip + // over the next arg, because that is the value of this flag. + pos++ + continue + case !strings.HasPrefix(s, "-"): + // This is not a flag or a flag value. Check to see if it matches what we're looking for, and if so, + // return the args, excluding the one at this position. + if s == x { + ret := []string{} + ret = append(ret, args[:pos]...) + ret = append(ret, args[pos+1:]...) + return ret + } } } return args } func isFlagArg(arg string) bool { - return ((len(arg) >= 3 && arg[1] == '-') || + return ((len(arg) >= 3 && arg[0:2] == "--") || (len(arg) >= 2 && arg[0] == '-' && arg[1] != '-')) } @@ -686,7 +710,7 @@ func (c *Command) Find(args []string) (*Command, []string, error) { cmd := c.findNext(nextSubCmd) if cmd != nil { - return innerfind(cmd, argsMinusFirstX(innerArgs, nextSubCmd)) + return innerfind(cmd, c.argsMinusFirstX(innerArgs, nextSubCmd)) } return c, innerArgs } @@ -998,6 +1022,10 @@ func (c *Command) ExecuteC() (cmd *Command, err error) { // initialize completion at the last point to allow for user overriding c.InitDefaultCompletionCmd() + // Now that all commands have been created, let's make sure all groups + // are properly created also + c.checkCommandGroups() + args := c.args // Workaround FAIL with "go test -v" or "cobra.test -test.v", see #155 @@ -1092,6 +1120,19 @@ func (c *Command) ValidateRequiredFlags() error { return nil } +// checkCommandGroups checks if a command has been added to a group that does not exists. +// If so, we panic because it indicates a coding error that should be corrected. +func (c *Command) checkCommandGroups() { + for _, sub := range c.commands { + // if Group is not defined let the developer know right away + if sub.GroupID != "" && !c.ContainsGroup(sub.GroupID) { + panic(fmt.Sprintf("group id '%s' is not defined for subcommand '%s'", sub.GroupID, sub.CommandPath())) + } + + sub.checkCommandGroups() + } +} + // InitDefaultHelpFlag adds default help flag to c. // It is called automatically by executing the c or by calling help and usage. // If c already has help flag, it will do nothing. @@ -1218,10 +1259,6 @@ func (c *Command) AddCommand(cmds ...*Command) { panic("Command can't be a child of itself") } cmds[i].parent = c - // if Group is not defined let the developer know right away - if x.GroupID != "" && !c.ContainsGroup(x.GroupID) { - panic(fmt.Sprintf("Group id '%s' is not defined for subcommand '%s'", x.GroupID, cmds[i].CommandPath())) - } // update max lengths usageLen := len(x.Use) if usageLen > c.commandsMaxUseLen { @@ -1259,7 +1296,7 @@ func (c *Command) AllChildCommandsHaveGroup() bool { return true } -// ContainGroups return if groupID exists in the list of command groups. +// ContainsGroup return if groupID exists in the list of command groups. func (c *Command) ContainsGroup(groupID string) bool { for _, x := range c.commandgroups { if x.ID == groupID { diff --git a/vendor/github.com/spf13/cobra/command_notwin.go b/vendor/github.com/spf13/cobra/command_notwin.go index 2b77f8f019..307f0c127f 100644 --- a/vendor/github.com/spf13/cobra/command_notwin.go +++ b/vendor/github.com/spf13/cobra/command_notwin.go @@ -1,4 +1,4 @@ -// Copyright 2013-2022 The Cobra Authors +// Copyright 2013-2023 The Cobra Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
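The command.go hunks above move group validation out of `AddCommand` and into a recursive `checkCommandGroups` call made from `ExecuteC`, so a subcommand may reference a group before that group is registered, as long as the group exists by the time `Execute` runs. A minimal sketch of that behavior change (command and group names are illustrative only):

```go
package main

import "github.com/spf13/cobra"

func main() {
	rootCmd := &cobra.Command{Use: "app"}

	subCmd := &cobra.Command{
		Use:     "sub",
		GroupID: "mgmt", // group referenced before it is defined
		Run:     func(cmd *cobra.Command, args []string) {},
	}

	// With the vendored cobra, adding the command no longer panics here...
	rootCmd.AddCommand(subCmd)

	// ...provided the group is registered before Execute runs; otherwise
	// checkCommandGroups panics with
	// "group id 'mgmt' is not defined for subcommand 'app sub'".
	rootCmd.AddGroup(&cobra.Group{ID: "mgmt", Title: "Management commands:"})

	_ = rootCmd.Execute()
}
```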
diff --git a/vendor/github.com/spf13/cobra/command_win.go b/vendor/github.com/spf13/cobra/command_win.go index 520f23abf0..adbef395c2 100644 --- a/vendor/github.com/spf13/cobra/command_win.go +++ b/vendor/github.com/spf13/cobra/command_win.go @@ -1,4 +1,4 @@ -// Copyright 2013-2022 The Cobra Authors +// Copyright 2013-2023 The Cobra Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/spf13/cobra/completions.go b/vendor/github.com/spf13/cobra/completions.go index e8a0206db1..ee38c4d0b8 100644 --- a/vendor/github.com/spf13/cobra/completions.go +++ b/vendor/github.com/spf13/cobra/completions.go @@ -1,4 +1,4 @@ -// Copyright 2013-2022 The Cobra Authors +// Copyright 2013-2023 The Cobra Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -77,6 +77,10 @@ const ( // obtain the same behavior but only for flags. ShellCompDirectiveFilterDirs + // ShellCompDirectiveKeepOrder indicates that the shell should preserve the order + // in which the completions are provided + ShellCompDirectiveKeepOrder + // =========================================================================== // All directives using iota should be above this one. @@ -159,6 +163,9 @@ func (d ShellCompDirective) string() string { if d&ShellCompDirectiveFilterDirs != 0 { directives = append(directives, "ShellCompDirectiveFilterDirs") } + if d&ShellCompDirectiveKeepOrder != 0 { + directives = append(directives, "ShellCompDirectiveKeepOrder") + } if len(directives) == 0 { directives = append(directives, "ShellCompDirectiveDefault") } @@ -169,7 +176,7 @@ func (d ShellCompDirective) string() string { return strings.Join(directives, ", ") } -// Adds a special hidden command that can be used to request custom completions. +// initCompleteCmd adds a special hidden command that can be used to request custom completions. func (c *Command) initCompleteCmd(args []string) { completeCmd := &Command{ Use: fmt.Sprintf("%s [command-line]", ShellCompRequestCmd), @@ -727,7 +734,7 @@ to enable it. You can execute the following once: To load completions in your current shell session: - source <(%[1]s completion zsh); compdef _%[1]s %[1]s + source <(%[1]s completion zsh) To load completions for every new session, execute once: diff --git a/vendor/github.com/spf13/cobra/fish_completions.go b/vendor/github.com/spf13/cobra/fish_completions.go index 97112a17b2..12ca0d2b11 100644 --- a/vendor/github.com/spf13/cobra/fish_completions.go +++ b/vendor/github.com/spf13/cobra/fish_completions.go @@ -1,4 +1,4 @@ -// Copyright 2013-2022 The Cobra Authors +// Copyright 2013-2023 The Cobra Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -53,7 +53,7 @@ function __%[1]s_perform_completion __%[1]s_debug "last arg: $lastArg" # Disable ActiveHelp which is not supported for fish shell - set -l requestComp "%[9]s=0 $args[1] %[3]s $args[2..-1] $lastArg" + set -l requestComp "%[10]s=0 $args[1] %[3]s $args[2..-1] $lastArg" __%[1]s_debug "Calling $requestComp" set -l results (eval $requestComp 2> /dev/null) @@ -89,6 +89,60 @@ function __%[1]s_perform_completion printf "%%s\n" "$directiveLine" end +# this function limits calls to __%[1]s_perform_completion, by caching the result behind $__%[1]s_perform_completion_once_result +function __%[1]s_perform_completion_once + __%[1]s_debug "Starting __%[1]s_perform_completion_once" + + if test -n "$__%[1]s_perform_completion_once_result" + __%[1]s_debug "Seems like a valid result already exists, skipping __%[1]s_perform_completion" + return 0 + end + + set --global __%[1]s_perform_completion_once_result (__%[1]s_perform_completion) + if test -z "$__%[1]s_perform_completion_once_result" + __%[1]s_debug "No completions, probably due to a failure" + return 1 + end + + __%[1]s_debug "Performed completions and set __%[1]s_perform_completion_once_result" + return 0 +end + +# this function is used to clear the $__%[1]s_perform_completion_once_result variable after completions are run +function __%[1]s_clear_perform_completion_once_result + __%[1]s_debug "" + __%[1]s_debug "========= clearing previously set __%[1]s_perform_completion_once_result variable ==========" + set --erase __%[1]s_perform_completion_once_result + __%[1]s_debug "Succesfully erased the variable __%[1]s_perform_completion_once_result" +end + +function __%[1]s_requires_order_preservation + __%[1]s_debug "" + __%[1]s_debug "========= checking if order preservation is required ==========" + + __%[1]s_perform_completion_once + if test -z "$__%[1]s_perform_completion_once_result" + __%[1]s_debug "Error determining if order preservation is required" + return 1 + end + + set -l directive (string sub --start 2 $__%[1]s_perform_completion_once_result[-1]) + __%[1]s_debug "Directive is: $directive" + + set -l shellCompDirectiveKeepOrder %[9]d + set -l keeporder (math (math --scale 0 $directive / $shellCompDirectiveKeepOrder) %% 2) + __%[1]s_debug "Keeporder is: $keeporder" + + if test $keeporder -ne 0 + __%[1]s_debug "This does require order preservation" + return 0 + end + + __%[1]s_debug "This doesn't require order preservation" + return 1 +end + + # This function does two things: # - Obtain the completions and store them in the global __%[1]s_comp_results # - Return false if file completion should be performed @@ -99,17 +153,17 @@ function __%[1]s_prepare_completions # Start fresh set --erase __%[1]s_comp_results - set -l results (__%[1]s_perform_completion) - __%[1]s_debug "Completion results: $results" + __%[1]s_perform_completion_once + __%[1]s_debug "Completion results: $__%[1]s_perform_completion_once_result" - if test -z "$results" + if test -z "$__%[1]s_perform_completion_once_result" __%[1]s_debug "No completion, probably due to a failure" # Might as well do file completion, in case it helps return 1 end - set -l directive (string sub --start 2 $results[-1]) - set --global __%[1]s_comp_results $results[1..-2] + set -l directive (string sub --start 2 $__%[1]s_perform_completion_once_result[-1]) + set --global __%[1]s_comp_results $__%[1]s_perform_completion_once_result[1..-2] __%[1]s_debug "Completions are: $__%[1]s_comp_results" __%[1]s_debug "Directive is: $directive" @@ -205,13 +259,17 @@ end # Remove any pre-existing 
completions for the program since we will be handling all of them. complete -c %[2]s -e +# this will get called after the two calls below and clear the $__%[1]s_perform_completion_once_result global +complete -c %[2]s -n '__%[1]s_clear_perform_completion_once_result' # The call to __%[1]s_prepare_completions will setup __%[1]s_comp_results # which provides the program's completion choices. -complete -c %[2]s -n '__%[1]s_prepare_completions' -f -a '$__%[1]s_comp_results' - +# If this doesn't require order preservation, we don't use the -k flag +complete -c %[2]s -n 'not __%[1]s_requires_order_preservation && __%[1]s_prepare_completions' -f -a '$__%[1]s_comp_results' +# otherwise we use the -k flag +complete -k -c %[2]s -n '__%[1]s_requires_order_preservation && __%[1]s_prepare_completions' -f -a '$__%[1]s_comp_results' `, nameForVar, name, compCmd, ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp, - ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, activeHelpEnvVar(name))) + ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, ShellCompDirectiveKeepOrder, activeHelpEnvVar(name))) } // GenFishCompletion generates fish completion file and writes to the passed writer. diff --git a/vendor/github.com/spf13/cobra/flag_groups.go b/vendor/github.com/spf13/cobra/flag_groups.go index 9c377aaf9c..b35fde1554 100644 --- a/vendor/github.com/spf13/cobra/flag_groups.go +++ b/vendor/github.com/spf13/cobra/flag_groups.go @@ -1,4 +1,4 @@ -// Copyright 2013-2022 The Cobra Authors +// Copyright 2013-2023 The Cobra Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/spf13/cobra/powershell_completions.go b/vendor/github.com/spf13/cobra/powershell_completions.go index 004de42e41..177d2755f2 100644 --- a/vendor/github.com/spf13/cobra/powershell_completions.go +++ b/vendor/github.com/spf13/cobra/powershell_completions.go @@ -1,4 +1,4 @@ -// Copyright 2013-2022 The Cobra Authors +// Copyright 2013-2023 The Cobra Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -77,6 +77,7 @@ filter __%[1]s_escapeStringWithSpecialChars { $ShellCompDirectiveNoFileComp=%[6]d $ShellCompDirectiveFilterFileExt=%[7]d $ShellCompDirectiveFilterDirs=%[8]d + $ShellCompDirectiveKeepOrder=%[9]d # Prepare the command to request completions for the program. # Split the command at the first space to separate the program and arguments. @@ -106,13 +107,22 @@ filter __%[1]s_escapeStringWithSpecialChars { # If the last parameter is complete (there is a space following it) # We add an extra empty parameter so we can indicate this to the go method. 
__%[1]s_debug "Adding extra empty parameter" -`+" # We need to use `\"`\" to pass an empty argument a \"\" or '' does not work!!!"+` -`+" $RequestComp=\"$RequestComp\" + ' `\"`\"'"+` + # PowerShell 7.2+ changed the way how the arguments are passed to executables, + # so for pre-7.2 or when Legacy argument passing is enabled we need to use +`+" # `\"`\" to pass an empty argument, a \"\" or '' does not work!!!"+` + if ($PSVersionTable.PsVersion -lt [version]'7.2.0' -or + ($PSVersionTable.PsVersion -lt [version]'7.3.0' -and -not [ExperimentalFeature]::IsEnabled("PSNativeCommandArgumentPassing")) -or + (($PSVersionTable.PsVersion -ge [version]'7.3.0' -or [ExperimentalFeature]::IsEnabled("PSNativeCommandArgumentPassing")) -and + $PSNativeCommandArgumentPassing -eq 'Legacy')) { +`+" $RequestComp=\"$RequestComp\" + ' `\"`\"'"+` + } else { + $RequestComp="$RequestComp" + ' ""' + } } __%[1]s_debug "Calling $RequestComp" # First disable ActiveHelp which is not supported for Powershell - $env:%[9]s=0 + $env:%[10]s=0 #call the command store the output in $out and redirect stderr and stdout to null # $Out is an array contains each line per element @@ -137,7 +147,7 @@ filter __%[1]s_escapeStringWithSpecialChars { } $Longest = 0 - $Values = $Out | ForEach-Object { + [Array]$Values = $Out | ForEach-Object { #Split the output in name and description `+" $Name, $Description = $_.Split(\"`t\",2)"+` __%[1]s_debug "Name: $Name Description: $Description" @@ -182,6 +192,11 @@ filter __%[1]s_escapeStringWithSpecialChars { } } + # we sort the values in ascending order by name if keep order isn't passed + if (($Directive -band $ShellCompDirectiveKeepOrder) -eq 0 ) { + $Values = $Values | Sort-Object -Property Name + } + if (($Directive -band $ShellCompDirectiveNoFileComp) -ne 0 ) { __%[1]s_debug "ShellCompDirectiveNoFileComp is called" @@ -267,7 +282,7 @@ filter __%[1]s_escapeStringWithSpecialChars { Register-ArgumentCompleter -CommandName '%[1]s' -ScriptBlock $__%[2]sCompleterBlock `, name, nameForVar, compCmd, ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp, - ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, activeHelpEnvVar(name))) + ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, ShellCompDirectiveKeepOrder, activeHelpEnvVar(name))) } func (c *Command) genPowerShellCompletion(w io.Writer, includeDesc bool) error { diff --git a/vendor/github.com/spf13/cobra/projects_using_cobra.md b/vendor/github.com/spf13/cobra/projects_using_cobra.md index 6865f88e79..8a291eb20e 100644 --- a/vendor/github.com/spf13/cobra/projects_using_cobra.md +++ b/vendor/github.com/spf13/cobra/projects_using_cobra.md @@ -1,11 +1,13 @@ ## Projects using Cobra - [Allero](https://github.com/allero-io/allero) +- [Arewefastyet](https://benchmark.vitess.io) - [Arduino CLI](https://github.com/arduino/arduino-cli) - [Bleve](https://blevesearch.com/) - [Cilium](https://cilium.io/) - [CloudQuery](https://github.com/cloudquery/cloudquery) - [CockroachDB](https://www.cockroachlabs.com/) +- [Constellation](https://github.com/edgelesssys/constellation) - [Cosmos SDK](https://github.com/cosmos/cosmos-sdk) - [Datree](https://github.com/datreeio/datree) - [Delve](https://github.com/derekparker/delve) @@ -25,7 +27,7 @@ - [Istio](https://istio.io) - [Kool](https://github.com/kool-dev/kool) - [Kubernetes](https://kubernetes.io/) -- [Kubescape](https://github.com/armosec/kubescape) +- [Kubescape](https://github.com/kubescape/kubescape) - [KubeVirt](https://github.com/kubevirt/kubevirt) - 
[Linkerd](https://linkerd.io/) - [Mattermost-server](https://github.com/mattermost/mattermost-server) @@ -51,10 +53,12 @@ - [Random](https://github.com/erdaltsksn/random) - [Rclone](https://rclone.org/) - [Scaleway CLI](https://github.com/scaleway/scaleway-cli) +- [Sia](https://github.com/SiaFoundation/siad) - [Skaffold](https://skaffold.dev/) - [Tendermint](https://github.com/tendermint/tendermint) - [Twitch CLI](https://github.com/twitchdev/twitch-cli) - [UpCloud CLI (`upctl`)](https://github.com/UpCloudLtd/upcloud-cli) +- [Vitess](https://vitess.io) - VMware's [Tanzu Community Edition](https://github.com/vmware-tanzu/community-edition) & [Tanzu Framework](https://github.com/vmware-tanzu/tanzu-framework) - [Werf](https://werf.io/) - [ZITADEL](https://github.com/zitadel/zitadel) diff --git a/vendor/github.com/spf13/cobra/shell_completions.go b/vendor/github.com/spf13/cobra/shell_completions.go index 126e83c307..b035742d39 100644 --- a/vendor/github.com/spf13/cobra/shell_completions.go +++ b/vendor/github.com/spf13/cobra/shell_completions.go @@ -1,4 +1,4 @@ -// Copyright 2013-2022 The Cobra Authors +// Copyright 2013-2023 The Cobra Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/spf13/cobra/shell_completions.md b/vendor/github.com/spf13/cobra/shell_completions.md index 553ee5df8a..065c0621d4 100644 --- a/vendor/github.com/spf13/cobra/shell_completions.md +++ b/vendor/github.com/spf13/cobra/shell_completions.md @@ -71,7 +71,7 @@ PowerShell: `,cmd.Root().Name()), DisableFlagsInUseLine: true, ValidArgs: []string{"bash", "zsh", "fish", "powershell"}, - Args: cobra.ExactValidArgs(1), + Args: cobra.MatchAll(cobra.ExactArgs(1), cobra.OnlyValidArgs), Run: func(cmd *cobra.Command, args []string) { switch args[0] { case "bash": @@ -162,16 +162,7 @@ cmd := &cobra.Command{ } ``` -The aliases are not shown to the user on tab completion, but they are accepted as valid nouns by -the completion algorithm if entered manually, e.g. in: - -```bash -$ kubectl get rc [tab][tab] -backend frontend database -``` - -Note that without declaring `rc` as an alias, the completion algorithm would not know to show the list of -replication controllers following `rc`. +The aliases are shown to the user on tab completion only if no completions were found within sub-commands or `ValidArgs`. ### Dynamic completion of nouns @@ -237,6 +228,10 @@ ShellCompDirectiveFilterFileExt // return []string{"themes"}, ShellCompDirectiveFilterDirs // ShellCompDirectiveFilterDirs + +// ShellCompDirectiveKeepOrder indicates that the shell should preserve the order +// in which the completions are provided +ShellCompDirectiveKeepOrder ``` ***Note***: When using the `ValidArgsFunction`, Cobra will call your registered function after having parsed all flags and arguments provided in the command-line. You therefore don't need to do this parsing yourself. For example, when a user calls `helm status --namespace my-rook-ns [tab][tab]`, Cobra will call your registered `ValidArgsFunction` after having parsed the `--namespace` flag, as it would have done when calling the `RunE` function. 
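One way the new `ShellCompDirectiveKeepOrder` directive documented above could be used from a `ValidArgsFunction` is sketched below. This is an illustrative example rather than upstream documentation; the `deploy` command and environment names are hypothetical, and the directive is combined with `ShellCompDirectiveNoFileComp` since the directives are bit flags.

```go
deployCmd := &cobra.Command{
	Use:   "deploy [environment]",
	Short: "Deploy to an environment",
	// Return completions in a deliberate order; ShellCompDirectiveKeepOrder
	// asks the shell not to re-sort them alphabetically.
	ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
		envs := []string{"dev\tDevelopment", "staging\tStaging", "prod\tProduction"}
		return envs, cobra.ShellCompDirectiveKeepOrder | cobra.ShellCompDirectiveNoFileComp
	},
	Run: func(cmd *cobra.Command, args []string) {},
}
```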
@@ -385,6 +380,19 @@ or ```go ValidArgs: []string{"bash\tCompletions for bash", "zsh\tCompletions for zsh"} ``` + +If you don't want to show descriptions in the completions, you can add `--no-descriptions` to the default `completion` command to disable them, like: + +```bash +$ source <(helm completion bash) +$ helm completion [tab][tab] +bash (generate autocompletion script for bash) powershell (generate autocompletion script for powershell) +fish (generate autocompletion script for fish) zsh (generate autocompletion script for zsh) + +$ source <(helm completion bash --no-descriptions) +$ helm completion [tab][tab] +bash fish powershell zsh +``` ## Bash completions ### Dependencies diff --git a/vendor/github.com/spf13/cobra/user_guide.md b/vendor/github.com/spf13/cobra/user_guide.md index 977306aa8c..85201d840c 100644 --- a/vendor/github.com/spf13/cobra/user_guide.md +++ b/vendor/github.com/spf13/cobra/user_guide.md @@ -188,6 +188,37 @@ var versionCmd = &cobra.Command{ } ``` +### Organizing subcommands + +A command may have subcommands which in turn may have other subcommands. This is achieved by using +`AddCommand`. In some cases, especially in larger applications, each subcommand may be defined in +its own go package. + +The suggested approach is for the parent command to use `AddCommand` to add its most immediate +subcommands. For example, consider the following directory structure: + +```text +├── cmd +│   ├── root.go +│   └── sub1 +│   ├── sub1.go +│   └── sub2 +│   ├── leafA.go +│   ├── leafB.go +│   └── sub2.go +└── main.go +``` + +In this case: + +* The `init` function of `root.go` adds the command defined in `sub1.go` to the root command. +* The `init` function of `sub1.go` adds the command defined in `sub2.go` to the sub1 command. +* The `init` function of `sub2.go` adds the commands defined in `leafA.go` and `leafB.go` to the + sub2 command. + +This approach ensures the subcommands are always included at compile time while avoiding cyclic +references. + ### Returning and handling errors If you wish to return an error to the caller of a command, `RunE` can be used. @@ -313,8 +344,8 @@ rootCmd.MarkFlagsRequiredTogether("username", "password") You can also prevent different flags from being provided together if they represent mutually exclusive options such as specifying an output format as either `--json` or `--yaml` but never both: ```go -rootCmd.Flags().BoolVar(&u, "json", false, "Output in JSON") -rootCmd.Flags().BoolVar(&pw, "yaml", false, "Output in YAML") +rootCmd.Flags().BoolVar(&ofJson, "json", false, "Output in JSON") +rootCmd.Flags().BoolVar(&ofYaml, "yaml", false, "Output in YAML") rootCmd.MarkFlagsMutuallyExclusive("json", "yaml") ``` @@ -349,7 +380,7 @@ shown below: ```go var cmd = &cobra.Command{ Short: "hello", - Args: MatchAll(ExactArgs(2), OnlyValidArgs), + Args: cobra.MatchAll(cobra.ExactArgs(2), cobra.OnlyValidArgs), Run: func(cmd *cobra.Command, args []string) { fmt.Println("Hello, World!") }, @@ -492,10 +523,11 @@ around it. In fact, you can provide your own if you want. ### Grouping commands in help -Cobra supports grouping of available commands. Groups must be explicitly defined by `AddGroup` and set by -the `GroupId` element of a subcommand. The groups will appear in the same order as they are defined. -If you use the generated `help` or `completion` commands, you can set the group ids by `SetHelpCommandGroupId` -and `SetCompletionCommandGroupId`, respectively. +Cobra supports grouping of available commands in the help output. 
To group commands, each group must be explicitly +defined using `AddGroup()` on the parent command. Then a subcommand can be added to a group using the `GroupID` element +of that subcommand. The groups will appear in the help output in the same order as they are defined using different +calls to `AddGroup()`. If you use the generated `help` or `completion` commands, you can set their group ids using +`SetHelpCommandGroupId()` and `SetCompletionCommandGroupId()` on the root command, respectively. ### Defining your own help diff --git a/vendor/github.com/spf13/cobra/zsh_completions.go b/vendor/github.com/spf13/cobra/zsh_completions.go index 84cec76fde..1856e4c7f6 100644 --- a/vendor/github.com/spf13/cobra/zsh_completions.go +++ b/vendor/github.com/spf13/cobra/zsh_completions.go @@ -1,4 +1,4 @@ -// Copyright 2013-2022 The Cobra Authors +// Copyright 2013-2023 The Cobra Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -90,6 +90,7 @@ func genZshComp(buf io.StringWriter, name string, includeDesc bool) { compCmd = ShellCompNoDescRequestCmd } WriteStringAndCheck(buf, fmt.Sprintf(`#compdef %[1]s +compdef _%[1]s %[1]s # zsh completion for %-36[1]s -*- shell-script -*- @@ -108,8 +109,9 @@ _%[1]s() local shellCompDirectiveNoFileComp=%[5]d local shellCompDirectiveFilterFileExt=%[6]d local shellCompDirectiveFilterDirs=%[7]d + local shellCompDirectiveKeepOrder=%[8]d - local lastParam lastChar flagPrefix requestComp out directive comp lastComp noSpace + local lastParam lastChar flagPrefix requestComp out directive comp lastComp noSpace keepOrder local -a completions __%[1]s_debug "\n========= starting completion logic ==========" @@ -177,7 +179,7 @@ _%[1]s() return fi - local activeHelpMarker="%[8]s" + local activeHelpMarker="%[9]s" local endIndex=${#activeHelpMarker} local startIndex=$((${#activeHelpMarker}+1)) local hasActiveHelp=0 @@ -227,6 +229,11 @@ _%[1]s() noSpace="-S ''" fi + if [ $((directive & shellCompDirectiveKeepOrder)) -ne 0 ]; then + __%[1]s_debug "Activating keep order." 
+ keepOrder="-V" + fi + if [ $((directive & shellCompDirectiveFilterFileExt)) -ne 0 ]; then # File extension filtering local filteringCmd @@ -262,7 +269,7 @@ _%[1]s() return $result else __%[1]s_debug "Calling _describe" - if eval _describe "completions" completions $flagPrefix $noSpace; then + if eval _describe $keepOrder "completions" completions $flagPrefix $noSpace; then __%[1]s_debug "_describe found some completions" # Return the success of having called _describe @@ -296,6 +303,6 @@ if [ "$funcstack[1]" = "_%[1]s" ]; then fi `, name, compCmd, ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp, - ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, + ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, ShellCompDirectiveKeepOrder, activeHelpMarker)) } diff --git a/vendor/github.com/spf13/viper/.editorconfig b/vendor/github.com/spf13/viper/.editorconfig index 63afcbcdd4..6d0b6d356b 100644 --- a/vendor/github.com/spf13/viper/.editorconfig +++ b/vendor/github.com/spf13/viper/.editorconfig @@ -11,5 +11,5 @@ trim_trailing_whitespace = true [*.go] indent_style = tab -[{Makefile, *.mk}] +[{Makefile,*.mk}] indent_style = tab diff --git a/vendor/github.com/spf13/viper/.golangci.yaml b/vendor/github.com/spf13/viper/.golangci.yaml new file mode 100644 index 0000000000..16e039652e --- /dev/null +++ b/vendor/github.com/spf13/viper/.golangci.yaml @@ -0,0 +1,96 @@ +run: + timeout: 5m + +linters-settings: + gci: + sections: + - standard + - default + - prefix(github.com/spf13/viper) + golint: + min-confidence: 0 + goimports: + local-prefixes: github.com/spf13/viper + +linters: + disable-all: true + enable: + - bodyclose + - deadcode + - dogsled + - dupl + - durationcheck + - exhaustive + - exportloopref + - gci + - gofmt + - gofumpt + - goimports + - gomoddirectives + - goprintffuncname + - govet + - importas + - ineffassign + - makezero + - misspell + - nakedret + - nilerr + - noctx + - nolintlint + - prealloc + - predeclared + - revive + - rowserrcheck + - sqlclosecheck + - staticcheck + - structcheck + - stylecheck + - tparallel + - typecheck + - unconvert + - unparam + - unused + - varcheck + - wastedassign + - whitespace + + # fixme + # - cyclop + # - errcheck + # - errorlint + # - exhaustivestruct + # - forbidigo + # - forcetypeassert + # - gochecknoglobals + # - gochecknoinits + # - gocognit + # - goconst + # - gocritic + # - gocyclo + # - godot + # - gosec + # - gosimple + # - ifshort + # - lll + # - nlreturn + # - paralleltest + # - scopelint + # - thelper + # - wrapcheck + + # unused + # - depguard + # - goheader + # - gomodguard + + # don't enable: + # - asciicheck + # - funlen + # - godox + # - goerr113 + # - gomnd + # - interfacer + # - maligned + # - nestif + # - testpackage + # - wsl diff --git a/vendor/github.com/spf13/viper/.golangci.yml b/vendor/github.com/spf13/viper/.golangci.yml deleted file mode 100644 index a0755ce7e1..0000000000 --- a/vendor/github.com/spf13/viper/.golangci.yml +++ /dev/null @@ -1,27 +0,0 @@ -linters-settings: - golint: - min-confidence: 0.1 - goimports: - local-prefixes: github.com/spf13/viper - -linters: - enable-all: true - disable: - - funlen - - maligned - - # TODO: fix me - - wsl - - gochecknoinits - - gosimple - - gochecknoglobals - - errcheck - - lll - - godox - - scopelint - - gocyclo - - gocognit - - gocritic - -service: - golangci-lint-version: 1.21.x diff --git a/vendor/github.com/spf13/viper/Makefile b/vendor/github.com/spf13/viper/Makefile index 1c2cab03f4..02d3e3715a 100644 --- 
a/vendor/github.com/spf13/viper/Makefile +++ b/vendor/github.com/spf13/viper/Makefile @@ -15,8 +15,8 @@ TEST_FORMAT = short-verbose endif # Dependency versions -GOTESTSUM_VERSION = 0.4.0 -GOLANGCI_VERSION = 1.21.0 +GOTESTSUM_VERSION = 1.8.0 +GOLANGCI_VERSION = 1.45.2 # Add the ability to override some variables # Use with care @@ -49,7 +49,7 @@ bin/golangci-lint: bin/golangci-lint-${GOLANGCI_VERSION} bin/golangci-lint-${GOLANGCI_VERSION}: @mkdir -p bin curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | bash -s -- -b ./bin/ v${GOLANGCI_VERSION} - @mv bin/golangci-lint $@ + @mv bin/golangci-lint "$@" .PHONY: lint lint: bin/golangci-lint ## Run linter diff --git a/vendor/github.com/spf13/viper/README.md b/vendor/github.com/spf13/viper/README.md index dfd8034fd5..c14e8927a1 100644 --- a/vendor/github.com/spf13/viper/README.md +++ b/vendor/github.com/spf13/viper/README.md @@ -1,11 +1,18 @@ +> ## Viper v2 feedback +> Viper is heading towards v2 and we would love to hear what _**you**_ would like to see in it. Share your thoughts here: https://forms.gle/R6faU74qPRPAzchZ9 +> +> **Thank you!** + ![Viper](.github/logo.png?raw=true) [![Mentioned in Awesome Go](https://awesome.re/mentioned-badge-flat.svg)](https://github.com/avelino/awesome-go#configuration) +[![run on repl.it](https://repl.it/badge/github/sagikazarmark/Viper-example)](https://repl.it/@sagikazarmark/Viper-example#main.go) [![GitHub Workflow Status](https://img.shields.io/github/workflow/status/spf13/viper/CI?style=flat-square)](https://github.com/spf13/viper/actions?query=workflow%3ACI) [![Join the chat at https://gitter.im/spf13/viper](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/spf13/viper?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) [![Go Report Card](https://goreportcard.com/badge/github.com/spf13/viper?style=flat-square)](https://goreportcard.com/report/github.com/spf13/viper) -[![go.dev reference](https://img.shields.io/badge/go.dev-reference-007d9c?logo=go&logoColor=white&style=flat-square)](https://pkg.go.dev/mod/github.com/spf13/viper) +![Go Version](https://img.shields.io/badge/go%20version-%3E=1.15-61CFDD.svg?style=flat-square) +[![PkgGoDev](https://pkg.go.dev/badge/mod/github.com/spf13/viper)](https://pkg.go.dev/mod/github.com/spf13/viper) **Go configuration with fangs!** @@ -24,10 +31,12 @@ Many Go projects are built using Viper including: ## Install -```console +```shell go get github.com/spf13/viper ``` +**Note:** Viper uses [Go Modules](https://github.com/golang/go/wiki/Modules) to manage dependencies. + ## What is Viper? 
@@ -110,7 +119,7 @@ viper.AddConfigPath("$HOME/.appname") // call multiple times to add many search viper.AddConfigPath(".") // optionally look for config in the working directory err := viper.ReadInConfig() // Find and read the config file if err != nil { // Handle errors reading the config file - panic(fmt.Errorf("Fatal error config file: %s \n", err)) + panic(fmt.Errorf("Fatal error config file: %w \n", err)) } ``` @@ -118,11 +127,11 @@ You can handle the specific case where no config file is found like this: ```go if err := viper.ReadInConfig(); err != nil { - if _, ok := err.(viper.ConfigFileNotFoundError); ok { - // Config file not found; ignore error if desired - } else { - // Config file was found but another error was produced - } + if _, ok := err.(viper.ConfigFileNotFoundError); ok { + // Config file not found; ignore error if desired + } else { + // Config file was found but another error was produced + } } // Config file found and successfully parsed @@ -166,10 +175,10 @@ Optionally you can provide a function for Viper to run each time a change occurs **Make sure you add all of the configPaths prior to calling `WatchConfig()`** ```go -viper.WatchConfig() viper.OnConfigChange(func(e fsnotify.Event) { fmt.Println("Config file changed:", e.Name) }) +viper.WatchConfig() ``` ### Reading Config from io.Reader @@ -245,9 +254,10 @@ using `SetEnvPrefix`, you can tell Viper to use a prefix while reading from the environment variables. Both `BindEnv` and `AutomaticEnv` will use this prefix. -`BindEnv` takes one or two parameters. The first parameter is the key name, the -second is the name of the environment variable. The name of the environment -variable is case sensitive. If the ENV variable name is not provided, then +`BindEnv` takes one or more parameters. The first parameter is the key name, the +rest are the name of the environment variables to bind to this key. If more than +one are provided, they will take precedence in the specified order. The name of +the environment variable is case sensitive. If the ENV variable name is not provided, then Viper will automatically assume that the ENV variable matches the following format: prefix + "_" + the key name in ALL CAPS. When you explicitly provide the ENV variable name (the second parameter), it **does not** automatically add the prefix. For example if the second parameter is "id", Viper will look for the ENV variable "ID". @@ -259,7 +269,7 @@ the `BindEnv` is called. `AutomaticEnv` is a powerful helper especially when combined with `SetEnvPrefix`. When called, Viper will check for an environment variable any time a `viper.Get` request is made. It will apply the following rules. It will -check for a environment variable with a name matching the key uppercased and +check for an environment variable with a name matching the key uppercased and prefixed with the `EnvPrefix` if set. `SetEnvKeyReplacer` allows you to use a `strings.Replacer` object to rewrite Env @@ -344,7 +354,7 @@ func main() { i := viper.GetInt("flagname") // retrieve value from viper - ... + // ... } ``` @@ -493,18 +503,18 @@ runtime_viper.Unmarshal(&runtime_conf) // open a goroutine to watch remote changes forever go func(){ for { - time.Sleep(time.Second * 5) // delay after each request - - // currently, only tested with etcd support - err := runtime_viper.WatchRemoteConfig() - if err != nil { - log.Errorf("unable to read remote config: %v", err) - continue - } - - // unmarshal new config into our runtime config struct. 
you can also use channel - // to implement a signal to notify the system of the changes - runtime_viper.Unmarshal(&runtime_conf) + time.Sleep(time.Second * 5) // delay after each request + + // currently, only tested with etcd support + err := runtime_viper.WatchRemoteConfig() + if err != nil { + log.Errorf("unable to read remote config: %v", err) + continue + } + + // unmarshal new config into our runtime config struct. you can also use channel + // to implement a signal to notify the system of the changes + runtime_viper.Unmarshal(&runtime_conf) } }() ``` @@ -536,7 +546,7 @@ Example: ```go viper.GetString("logfile") // case-insensitive Setting & Getting if viper.GetBool("verbose") { - fmt.Println("verbose enabled") + fmt.Println("verbose enabled") } ``` ### Accessing nested keys @@ -582,6 +592,33 @@ the `Set()` method, …) with an immediate value, then all sub-keys of `datastore.metric` become undefined, they are “shadowed” by the higher-priority configuration level. +Viper can access array indices by using numbers in the path. For example: + +```json +{ + "host": { + "address": "localhost", + "ports": [ + 5799, + 6029 + ] + }, + "datastore": { + "metric": { + "host": "127.0.0.1", + "port": 3099 + }, + "warehouse": { + "host": "198.0.0.1", + "port": 2112 + } + } +} + +GetInt("host.ports.1") // returns 6029 + +``` + Lastly, if there exists a key that matches the delimited key path, its value will be returned instead. E.g. @@ -607,14 +644,15 @@ will be returned instead. E.g. GetString("datastore.metric.host") // returns "0.0.0.0" ``` -### Extract sub-tree +### Extracting a sub-tree -Extract sub-tree from Viper. +When developing reusable modules, it's often useful to extract a subset of the configuration +and pass it to a module. This way the module can be instantiated more than once, with different configurations. -For example, `viper` represents: +For example, an application might use multiple different cache stores for different purposes: -```json -app: +```yaml +cache: cache1: max-items: 100 item-size: 64 @@ -623,35 +661,36 @@ app: item-size: 80 ``` -After executing: +We could pass the cache name to a module (eg. `NewCache("cache1")`), +but it would require weird concatenation for accessing config keys and would be less separated from the global config. -```go -subv := viper.Sub("app.cache1") -``` +So instead of doing that let's pass a Viper instance to the constructor that represents a subset of the configuration: -`subv` represents: +```go +cache1Config := viper.Sub("cache.cache1") +if cache1Config == nil { // Sub returns nil if the key cannot be found + panic("cache configuration not found") +} -```json -max-items: 100 -item-size: 64 +cache1 := NewCache(cache1Config) ``` -Suppose we have: +**Note:** Always check the return value of `Sub`. It returns `nil` if a key cannot be found. + +Internally, the `NewCache` function can address `max-items` and `item-size` keys directly: ```go -func NewCache(cfg *Viper) *Cache {...} +func NewCache(v *Viper) *Cache { + return &Cache{ + MaxItems: v.GetInt("max-items"), + ItemSize: v.GetInt("item-size"), + } +} ``` -which creates a cache based on config information formatted as `subv`. -Now it’s easy to create these 2 caches separately as: +The resulting code is easy to test, since it's decoupled from the main config structure, +and easier to reuse (for the same reason). 
-```go -cfg1 := viper.Sub("app.cache1") -cache1 := NewCache(cfg1) - -cfg2 := viper.Sub("app.cache2") -cache2 := NewCache(cfg2) -``` ### Unmarshaling @@ -687,18 +726,18 @@ you have to change the delimiter: v := viper.NewWithOptions(viper.KeyDelimiter("::")) v.SetDefault("chart::values", map[string]interface{}{ - "ingress": map[string]interface{}{ - "annotations": map[string]interface{}{ - "traefik.frontend.rule.type": "PathPrefix", - "traefik.ingress.kubernetes.io/ssl-redirect": "true", - }, - }, + "ingress": map[string]interface{}{ + "annotations": map[string]interface{}{ + "traefik.frontend.rule.type": "PathPrefix", + "traefik.ingress.kubernetes.io/ssl-redirect": "true", + }, + }, }) type config struct { Chart struct{ - Values map[string]interface{} - } + Values map[string]interface{} + } } var C config @@ -739,6 +778,15 @@ if err != nil { Viper uses [github.com/mitchellh/mapstructure](https://github.com/mitchellh/mapstructure) under the hood for unmarshaling values which uses `mapstructure` tags by default. +### Decoding custom formats + +A frequently requested feature for Viper is adding more value formats and decoders. +For example, parsing character (dot, comma, semicolon, etc) separated strings into slices. + +This is already available in Viper using mapstructure decode hooks. + +Read more about the details in [this blog post](https://sagikazarmark.hu/blog/decoding-custom-formats-with-viper/). + ### Marshalling to string You may need to marshal all the settings held in viper into a string rather than write them to a file. @@ -746,17 +794,17 @@ You can use your favorite format's marshaller with the config returned by `AllSe ```go import ( - yaml "gopkg.in/yaml.v2" - // ... + yaml "gopkg.in/yaml.v2" + // ... ) func yamlStringSettings() string { - c := viper.AllSettings() - bs, err := yaml.Marshal(c) - if err != nil { - log.Fatalf("unable to marshal config to YAML: %v", err) - } - return string(bs) + c := viper.AllSettings() + bs, err := yaml.Marshal(c) + if err != nil { + log.Fatalf("unable to marshal config to YAML: %v", err) + } + return string(bs) } ``` @@ -792,15 +840,35 @@ y.SetDefault("ContentDir", "foobar") When working with multiple vipers, it is up to the user to keep track of the different vipers. + ## Q & A -Q: Why is it called “Viper”? +### Why is it called “Viper”? A: Viper is designed to be a [companion](http://en.wikipedia.org/wiki/Viper_(G.I._Joe)) to [Cobra](https://github.com/spf13/cobra). While both can operate completely independently, together they make a powerful pair to handle much of your application foundation needs. -Q: Why is it called “Cobra”? +### Why is it called “Cobra”? + +Is there a better name for a [commander](http://en.wikipedia.org/wiki/Cobra_Commander)? + +### Does Viper support case sensitive keys? + +**tl;dr:** No. + +Viper merges configuration from various sources, many of which are either case insensitive or uses different casing than the rest of the sources (eg. env vars). +In order to provide the best experience when using multiple sources, the decision has been made to make all keys case insensitive. + +There has been several attempts to implement case sensitivity, but unfortunately it's not that trivial. We might take a stab at implementing it in [Viper v2](https://github.com/spf13/viper/issues/772), but despite the initial noise, it does not seem to be requested that much. + +You can vote for case sensitivity by filling out this feedback form: https://forms.gle/R6faU74qPRPAzchZ9 + +### Is it safe to concurrently read and write to a viper? 
+ +No, you will need to synchronize access to the viper yourself (for example by using the `sync` package). Concurrent reads and writes can cause a panic. + +## Troubleshooting -A: Is there a better name for a [commander](http://en.wikipedia.org/wiki/Cobra_Commander)? +See [TROUBLESHOOTING.md](TROUBLESHOOTING.md). diff --git a/vendor/github.com/spf13/viper/TROUBLESHOOTING.md b/vendor/github.com/spf13/viper/TROUBLESHOOTING.md new file mode 100644 index 0000000000..c4e36c6860 --- /dev/null +++ b/vendor/github.com/spf13/viper/TROUBLESHOOTING.md @@ -0,0 +1,32 @@ +# Troubleshooting + +## Unmarshaling doesn't work + +The most common reason for this issue is improper use of struct tags (eg. `yaml` or `json`). Viper uses [github.com/mitchellh/mapstructure](https://github.com/mitchellh/mapstructure) under the hood for unmarshaling values which uses `mapstructure` tags by default. Please refer to the library's documentation for using other struct tags. + +## Cannot find package + +Viper installation seems to fail a lot lately with the following (or a similar) error: + +``` +cannot find package "github.com/hashicorp/hcl/tree/hcl1" in any of: +/usr/local/Cellar/go/1.15.7_1/libexec/src/github.com/hashicorp/hcl/tree/hcl1 (from $GOROOT) +/Users/user/go/src/github.com/hashicorp/hcl/tree/hcl1 (from $GOPATH) +``` + +As the error message suggests, Go tries to look up dependencies in `GOPATH` mode (as it's commonly called) from the `GOPATH`. +Viper opted to use [Go Modules](https://github.com/golang/go/wiki/Modules) to manage its dependencies. While in many cases the two methods are interchangeable, once a dependency releases new (major) versions, `GOPATH` mode is no longer able to decide which version to use, so it'll either use one that's already present or pick a version (usually the `master` branch). + +The solution is easy: switch to using Go Modules. +Please refer to the [wiki](https://github.com/golang/go/wiki/Modules) on how to do that. + +**tl;dr* `export GO111MODULE=on` + +## Unquoted 'y' and 'n' characters get replaced with _true_ and _false_ when reading a YAML file + +This is a YAML 1.1 feature according to [go-yaml/yaml#740](https://github.com/go-yaml/yaml/issues/740). + +Potential solutions are: + +1. Quoting values resolved as boolean +1. Upgrading to YAML v3 (for the time being this is possible by passing the `viper_yaml3` tag to your build) diff --git a/vendor/github.com/spf13/viper/experimental_logger.go b/vendor/github.com/spf13/viper/experimental_logger.go new file mode 100644 index 0000000000..206dad6a0c --- /dev/null +++ b/vendor/github.com/spf13/viper/experimental_logger.go @@ -0,0 +1,11 @@ +//go:build viper_logger +// +build viper_logger + +package viper + +// WithLogger sets a custom logger. 
+func WithLogger(l Logger) Option { + return optionFunc(func(v *Viper) { + v.logger = l + }) +} diff --git a/vendor/github.com/spf13/viper/fs.go b/vendor/github.com/spf13/viper/fs.go new file mode 100644 index 0000000000..ecb1769e52 --- /dev/null +++ b/vendor/github.com/spf13/viper/fs.go @@ -0,0 +1,65 @@ +//go:build go1.16 && finder +// +build go1.16,finder + +package viper + +import ( + "errors" + "io/fs" + "path" +) + +type finder struct { + paths []string + fileNames []string + extensions []string + + withoutExtension bool +} + +func (f finder) Find(fsys fs.FS) (string, error) { + for _, searchPath := range f.paths { + for _, fileName := range f.fileNames { + for _, extension := range f.extensions { + filePath := path.Join(searchPath, fileName+"."+extension) + + ok, err := fileExists(fsys, filePath) + if err != nil { + return "", err + } + + if ok { + return filePath, nil + } + } + + if f.withoutExtension { + filePath := path.Join(searchPath, fileName) + + ok, err := fileExists(fsys, filePath) + if err != nil { + return "", err + } + + if ok { + return filePath, nil + } + } + } + } + + return "", nil +} + +func fileExists(fsys fs.FS, filePath string) (bool, error) { + fileInfo, err := fs.Stat(fsys, filePath) + if err == nil { + return !fileInfo.IsDir(), nil + } + + if errors.Is(err, fs.ErrNotExist) { + return false, nil + } + + return false, err +} diff --git a/vendor/github.com/spf13/viper/internal/encoding/decoder.go b/vendor/github.com/spf13/viper/internal/encoding/decoder.go new file mode 100644 index 0000000000..f472e9ff1a --- /dev/null +++ b/vendor/github.com/spf13/viper/internal/encoding/decoder.go @@ -0,0 +1,61 @@ +package encoding + +import ( + "sync" +) + +// Decoder decodes the contents of b into v. +// It's primarily used for decoding contents of a file into a map[string]interface{}. +type Decoder interface { + Decode(b []byte, v map[string]interface{}) error +} + +const ( + // ErrDecoderNotFound is returned when there is no decoder registered for a format. + ErrDecoderNotFound = encodingError("decoder not found for this format") + + // ErrDecoderFormatAlreadyRegistered is returned when an decoder is already registered for a format. + ErrDecoderFormatAlreadyRegistered = encodingError("decoder already registered for this format") +) + +// DecoderRegistry can choose an appropriate Decoder based on the provided format. +type DecoderRegistry struct { + decoders map[string]Decoder + + mu sync.RWMutex +} + +// NewDecoderRegistry returns a new, initialized DecoderRegistry. +func NewDecoderRegistry() *DecoderRegistry { + return &DecoderRegistry{ + decoders: make(map[string]Decoder), + } +} + +// RegisterDecoder registers a Decoder for a format. +// Registering a Decoder for an already existing format is not supported. +func (e *DecoderRegistry) RegisterDecoder(format string, enc Decoder) error { + e.mu.Lock() + defer e.mu.Unlock() + + if _, ok := e.decoders[format]; ok { + return ErrDecoderFormatAlreadyRegistered + } + + e.decoders[format] = enc + + return nil +} + +// Decode calls the underlying Decoder based on the format. 
+func (e *DecoderRegistry) Decode(format string, b []byte, v map[string]interface{}) error { + e.mu.RLock() + decoder, ok := e.decoders[format] + e.mu.RUnlock() + + if !ok { + return ErrDecoderNotFound + } + + return decoder.Decode(b, v) +} diff --git a/vendor/github.com/spf13/viper/internal/encoding/dotenv/codec.go b/vendor/github.com/spf13/viper/internal/encoding/dotenv/codec.go new file mode 100644 index 0000000000..4485063b61 --- /dev/null +++ b/vendor/github.com/spf13/viper/internal/encoding/dotenv/codec.go @@ -0,0 +1,61 @@ +package dotenv + +import ( + "bytes" + "fmt" + "sort" + "strings" + + "github.com/subosito/gotenv" +) + +const keyDelimiter = "_" + +// Codec implements the encoding.Encoder and encoding.Decoder interfaces for encoding data containing environment variables +// (commonly called as dotenv format). +type Codec struct{} + +func (Codec) Encode(v map[string]interface{}) ([]byte, error) { + flattened := map[string]interface{}{} + + flattened = flattenAndMergeMap(flattened, v, "", keyDelimiter) + + keys := make([]string, 0, len(flattened)) + + for key := range flattened { + keys = append(keys, key) + } + + sort.Strings(keys) + + var buf bytes.Buffer + + for _, key := range keys { + _, err := buf.WriteString(fmt.Sprintf("%v=%v\n", strings.ToUpper(key), flattened[key])) + if err != nil { + return nil, err + } + } + + return buf.Bytes(), nil +} + +func (Codec) Decode(b []byte, v map[string]interface{}) error { + var buf bytes.Buffer + + _, err := buf.Write(b) + if err != nil { + return err + } + + env, err := gotenv.StrictParse(&buf) + if err != nil { + return err + } + + for key, value := range env { + v[key] = value + } + + return nil +} diff --git a/vendor/github.com/spf13/viper/internal/encoding/dotenv/map_utils.go b/vendor/github.com/spf13/viper/internal/encoding/dotenv/map_utils.go new file mode 100644 index 0000000000..ce6e6efa3e --- /dev/null +++ b/vendor/github.com/spf13/viper/internal/encoding/dotenv/map_utils.go @@ -0,0 +1,41 @@ +package dotenv + +import ( + "strings" + + "github.com/spf13/cast" +) + +// flattenAndMergeMap recursively flattens the given map into a new map +// Code is based on the function with the same name in tha main package. +// TODO: move it to a common place +func flattenAndMergeMap(shadow map[string]interface{}, m map[string]interface{}, prefix string, delimiter string) map[string]interface{} { + if shadow != nil && prefix != "" && shadow[prefix] != nil { + // prefix is shadowed => nothing more to flatten + return shadow + } + if shadow == nil { + shadow = make(map[string]interface{}) + } + + var m2 map[string]interface{} + if prefix != "" { + prefix += delimiter + } + for k, val := range m { + fullKey := prefix + k + switch val.(type) { + case map[string]interface{}: + m2 = val.(map[string]interface{}) + case map[interface{}]interface{}: + m2 = cast.ToStringMap(val) + default: + // immediate value + shadow[strings.ToLower(fullKey)] = val + continue + } + // recursively merge to shadow map + shadow = flattenAndMergeMap(shadow, m2, fullKey, delimiter) + } + return shadow +} diff --git a/vendor/github.com/spf13/viper/internal/encoding/encoder.go b/vendor/github.com/spf13/viper/internal/encoding/encoder.go new file mode 100644 index 0000000000..2341bf2350 --- /dev/null +++ b/vendor/github.com/spf13/viper/internal/encoding/encoder.go @@ -0,0 +1,60 @@ +package encoding + +import ( + "sync" +) + +// Encoder encodes the contents of v into a byte representation. +// It's primarily used for encoding a map[string]interface{} into a file format. 
+type Encoder interface { + Encode(v map[string]interface{}) ([]byte, error) +} + +const ( + // ErrEncoderNotFound is returned when there is no encoder registered for a format. + ErrEncoderNotFound = encodingError("encoder not found for this format") + + // ErrEncoderFormatAlreadyRegistered is returned when an encoder is already registered for a format. + ErrEncoderFormatAlreadyRegistered = encodingError("encoder already registered for this format") +) + +// EncoderRegistry can choose an appropriate Encoder based on the provided format. +type EncoderRegistry struct { + encoders map[string]Encoder + + mu sync.RWMutex +} + +// NewEncoderRegistry returns a new, initialized EncoderRegistry. +func NewEncoderRegistry() *EncoderRegistry { + return &EncoderRegistry{ + encoders: make(map[string]Encoder), + } +} + +// RegisterEncoder registers an Encoder for a format. +// Registering a Encoder for an already existing format is not supported. +func (e *EncoderRegistry) RegisterEncoder(format string, enc Encoder) error { + e.mu.Lock() + defer e.mu.Unlock() + + if _, ok := e.encoders[format]; ok { + return ErrEncoderFormatAlreadyRegistered + } + + e.encoders[format] = enc + + return nil +} + +func (e *EncoderRegistry) Encode(format string, v map[string]interface{}) ([]byte, error) { + e.mu.RLock() + encoder, ok := e.encoders[format] + e.mu.RUnlock() + + if !ok { + return nil, ErrEncoderNotFound + } + + return encoder.Encode(v) +} diff --git a/vendor/github.com/spf13/viper/internal/encoding/error.go b/vendor/github.com/spf13/viper/internal/encoding/error.go new file mode 100644 index 0000000000..e4cde02d7b --- /dev/null +++ b/vendor/github.com/spf13/viper/internal/encoding/error.go @@ -0,0 +1,7 @@ +package encoding + +type encodingError string + +func (e encodingError) Error() string { + return string(e) +} diff --git a/vendor/github.com/spf13/viper/internal/encoding/hcl/codec.go b/vendor/github.com/spf13/viper/internal/encoding/hcl/codec.go new file mode 100644 index 0000000000..7fde8e4bc6 --- /dev/null +++ b/vendor/github.com/spf13/viper/internal/encoding/hcl/codec.go @@ -0,0 +1,40 @@ +package hcl + +import ( + "bytes" + "encoding/json" + + "github.com/hashicorp/hcl" + "github.com/hashicorp/hcl/hcl/printer" +) + +// Codec implements the encoding.Encoder and encoding.Decoder interfaces for HCL encoding. +// TODO: add printer config to the codec? +type Codec struct{} + +func (Codec) Encode(v map[string]interface{}) ([]byte, error) { + b, err := json.Marshal(v) + if err != nil { + return nil, err + } + + // TODO: use printer.Format? Is the trailing newline an issue? + + ast, err := hcl.Parse(string(b)) + if err != nil { + return nil, err + } + + var buf bytes.Buffer + + err = printer.Fprint(&buf, ast.Node) + if err != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +func (Codec) Decode(b []byte, v map[string]interface{}) error { + return hcl.Unmarshal(b, &v) +} diff --git a/vendor/github.com/spf13/viper/internal/encoding/ini/codec.go b/vendor/github.com/spf13/viper/internal/encoding/ini/codec.go new file mode 100644 index 0000000000..9acd87fc3c --- /dev/null +++ b/vendor/github.com/spf13/viper/internal/encoding/ini/codec.go @@ -0,0 +1,99 @@ +package ini + +import ( + "bytes" + "sort" + "strings" + + "github.com/spf13/cast" + "gopkg.in/ini.v1" +) + +// LoadOptions contains all customized options used for load data source(s). +// This type is added here for convenience: this way consumers can import a single package called "ini". 
+type LoadOptions = ini.LoadOptions + +// Codec implements the encoding.Encoder and encoding.Decoder interfaces for INI encoding. +type Codec struct { + KeyDelimiter string + LoadOptions LoadOptions +} + +func (c Codec) Encode(v map[string]interface{}) ([]byte, error) { + cfg := ini.Empty() + ini.PrettyFormat = false + + flattened := map[string]interface{}{} + + flattened = flattenAndMergeMap(flattened, v, "", c.keyDelimiter()) + + keys := make([]string, 0, len(flattened)) + + for key := range flattened { + keys = append(keys, key) + } + + sort.Strings(keys) + + for _, key := range keys { + sectionName, keyName := "", key + + lastSep := strings.LastIndex(key, ".") + if lastSep != -1 { + sectionName = key[:(lastSep)] + keyName = key[(lastSep + 1):] + } + + // TODO: is this a good idea? + if sectionName == "default" { + sectionName = "" + } + + cfg.Section(sectionName).Key(keyName).SetValue(cast.ToString(flattened[key])) + } + + var buf bytes.Buffer + + _, err := cfg.WriteTo(&buf) + if err != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +func (c Codec) Decode(b []byte, v map[string]interface{}) error { + cfg := ini.Empty(c.LoadOptions) + + err := cfg.Append(b) + if err != nil { + return err + } + + sections := cfg.Sections() + + for i := 0; i < len(sections); i++ { + section := sections[i] + keys := section.Keys() + + for j := 0; j < len(keys); j++ { + key := keys[j] + value := cfg.Section(section.Name()).Key(key.Name()).String() + + deepestMap := deepSearch(v, strings.Split(section.Name(), c.keyDelimiter())) + + // set innermost value + deepestMap[key.Name()] = value + } + } + + return nil +} + +func (c Codec) keyDelimiter() string { + if c.KeyDelimiter == "" { + return "." + } + + return c.KeyDelimiter +} diff --git a/vendor/github.com/spf13/viper/internal/encoding/ini/map_utils.go b/vendor/github.com/spf13/viper/internal/encoding/ini/map_utils.go new file mode 100644 index 0000000000..8329856b5b --- /dev/null +++ b/vendor/github.com/spf13/viper/internal/encoding/ini/map_utils.go @@ -0,0 +1,74 @@ +package ini + +import ( + "strings" + + "github.com/spf13/cast" +) + +// THIS CODE IS COPIED HERE: IT SHOULD NOT BE MODIFIED +// AT SOME POINT IT WILL BE MOVED TO A COMMON PLACE +// deepSearch scans deep maps, following the key indexes listed in the +// sequence "path". +// The last value is expected to be another map, and is returned. +// +// In case intermediate keys do not exist, or map to a non-map value, +// a new map is created and inserted, and the search continues from there: +// the initial map "m" may be modified! +func deepSearch(m map[string]interface{}, path []string) map[string]interface{} { + for _, k := range path { + m2, ok := m[k] + if !ok { + // intermediate key does not exist + // => create it and continue from there + m3 := make(map[string]interface{}) + m[k] = m3 + m = m3 + continue + } + m3, ok := m2.(map[string]interface{}) + if !ok { + // intermediate key is a value + // => replace with a new map + m3 = make(map[string]interface{}) + m[k] = m3 + } + // continue search from here + m = m3 + } + return m +} + +// flattenAndMergeMap recursively flattens the given map into a new map +// Code is based on the function with the same name in tha main package. 
+// TODO: move it to a common place +func flattenAndMergeMap(shadow map[string]interface{}, m map[string]interface{}, prefix string, delimiter string) map[string]interface{} { + if shadow != nil && prefix != "" && shadow[prefix] != nil { + // prefix is shadowed => nothing more to flatten + return shadow + } + if shadow == nil { + shadow = make(map[string]interface{}) + } + + var m2 map[string]interface{} + if prefix != "" { + prefix += delimiter + } + for k, val := range m { + fullKey := prefix + k + switch val.(type) { + case map[string]interface{}: + m2 = val.(map[string]interface{}) + case map[interface{}]interface{}: + m2 = cast.ToStringMap(val) + default: + // immediate value + shadow[strings.ToLower(fullKey)] = val + continue + } + // recursively merge to shadow map + shadow = flattenAndMergeMap(shadow, m2, fullKey, delimiter) + } + return shadow +} diff --git a/vendor/github.com/spf13/viper/internal/encoding/javaproperties/codec.go b/vendor/github.com/spf13/viper/internal/encoding/javaproperties/codec.go new file mode 100644 index 0000000000..b8a2251c11 --- /dev/null +++ b/vendor/github.com/spf13/viper/internal/encoding/javaproperties/codec.go @@ -0,0 +1,86 @@ +package javaproperties + +import ( + "bytes" + "sort" + "strings" + + "github.com/magiconair/properties" + "github.com/spf13/cast" +) + +// Codec implements the encoding.Encoder and encoding.Decoder interfaces for Java properties encoding. +type Codec struct { + KeyDelimiter string + + // Store read properties on the object so that we can write back in order with comments. + // This will only be used if the configuration read is a properties file. + // TODO: drop this feature in v2 + // TODO: make use of the global properties object optional + Properties *properties.Properties +} + +func (c *Codec) Encode(v map[string]interface{}) ([]byte, error) { + if c.Properties == nil { + c.Properties = properties.NewProperties() + } + + flattened := map[string]interface{}{} + + flattened = flattenAndMergeMap(flattened, v, "", c.keyDelimiter()) + + keys := make([]string, 0, len(flattened)) + + for key := range flattened { + keys = append(keys, key) + } + + sort.Strings(keys) + + for _, key := range keys { + _, _, err := c.Properties.Set(key, cast.ToString(flattened[key])) + if err != nil { + return nil, err + } + } + + var buf bytes.Buffer + + _, err := c.Properties.WriteComment(&buf, "#", properties.UTF8) + if err != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +func (c *Codec) Decode(b []byte, v map[string]interface{}) error { + var err error + c.Properties, err = properties.Load(b, properties.UTF8) + if err != nil { + return err + } + + for _, key := range c.Properties.Keys() { + // ignore existence check: we know it's there + value, _ := c.Properties.Get(key) + + // recursively build nested maps + path := strings.Split(key, c.keyDelimiter()) + lastKey := strings.ToLower(path[len(path)-1]) + deepestMap := deepSearch(v, path[0:len(path)-1]) + + // set innermost value + deepestMap[lastKey] = value + } + + return nil +} + +func (c Codec) keyDelimiter() string { + if c.KeyDelimiter == "" { + return "." 
+ } + + return c.KeyDelimiter +} diff --git a/vendor/github.com/spf13/viper/internal/encoding/javaproperties/map_utils.go b/vendor/github.com/spf13/viper/internal/encoding/javaproperties/map_utils.go new file mode 100644 index 0000000000..93755cac1a --- /dev/null +++ b/vendor/github.com/spf13/viper/internal/encoding/javaproperties/map_utils.go @@ -0,0 +1,74 @@ +package javaproperties + +import ( + "strings" + + "github.com/spf13/cast" +) + +// THIS CODE IS COPIED HERE: IT SHOULD NOT BE MODIFIED +// AT SOME POINT IT WILL BE MOVED TO A COMMON PLACE +// deepSearch scans deep maps, following the key indexes listed in the +// sequence "path". +// The last value is expected to be another map, and is returned. +// +// In case intermediate keys do not exist, or map to a non-map value, +// a new map is created and inserted, and the search continues from there: +// the initial map "m" may be modified! +func deepSearch(m map[string]interface{}, path []string) map[string]interface{} { + for _, k := range path { + m2, ok := m[k] + if !ok { + // intermediate key does not exist + // => create it and continue from there + m3 := make(map[string]interface{}) + m[k] = m3 + m = m3 + continue + } + m3, ok := m2.(map[string]interface{}) + if !ok { + // intermediate key is a value + // => replace with a new map + m3 = make(map[string]interface{}) + m[k] = m3 + } + // continue search from here + m = m3 + } + return m +} + +// flattenAndMergeMap recursively flattens the given map into a new map +// Code is based on the function with the same name in tha main package. +// TODO: move it to a common place +func flattenAndMergeMap(shadow map[string]interface{}, m map[string]interface{}, prefix string, delimiter string) map[string]interface{} { + if shadow != nil && prefix != "" && shadow[prefix] != nil { + // prefix is shadowed => nothing more to flatten + return shadow + } + if shadow == nil { + shadow = make(map[string]interface{}) + } + + var m2 map[string]interface{} + if prefix != "" { + prefix += delimiter + } + for k, val := range m { + fullKey := prefix + k + switch val.(type) { + case map[string]interface{}: + m2 = val.(map[string]interface{}) + case map[interface{}]interface{}: + m2 = cast.ToStringMap(val) + default: + // immediate value + shadow[strings.ToLower(fullKey)] = val + continue + } + // recursively merge to shadow map + shadow = flattenAndMergeMap(shadow, m2, fullKey, delimiter) + } + return shadow +} diff --git a/vendor/github.com/spf13/viper/internal/encoding/json/codec.go b/vendor/github.com/spf13/viper/internal/encoding/json/codec.go new file mode 100644 index 0000000000..1b7caaceb5 --- /dev/null +++ b/vendor/github.com/spf13/viper/internal/encoding/json/codec.go @@ -0,0 +1,17 @@ +package json + +import ( + "encoding/json" +) + +// Codec implements the encoding.Encoder and encoding.Decoder interfaces for JSON encoding. +type Codec struct{} + +func (Codec) Encode(v map[string]interface{}) ([]byte, error) { + // TODO: expose prefix and indent in the Codec as setting? 
+ return json.MarshalIndent(v, "", " ") +} + +func (Codec) Decode(b []byte, v map[string]interface{}) error { + return json.Unmarshal(b, &v) +} diff --git a/vendor/github.com/spf13/viper/internal/encoding/toml/codec.go b/vendor/github.com/spf13/viper/internal/encoding/toml/codec.go new file mode 100644 index 0000000000..45fddc8b59 --- /dev/null +++ b/vendor/github.com/spf13/viper/internal/encoding/toml/codec.go @@ -0,0 +1,39 @@ +//go:build viper_toml1 +// +build viper_toml1 + +package toml + +import ( + "github.com/pelletier/go-toml" +) + +// Codec implements the encoding.Encoder and encoding.Decoder interfaces for TOML encoding. +type Codec struct{} + +func (Codec) Encode(v map[string]interface{}) ([]byte, error) { + t, err := toml.TreeFromMap(v) + if err != nil { + return nil, err + } + + s, err := t.ToTomlString() + if err != nil { + return nil, err + } + + return []byte(s), nil +} + +func (Codec) Decode(b []byte, v map[string]interface{}) error { + tree, err := toml.LoadBytes(b) + if err != nil { + return err + } + + tmap := tree.ToMap() + for key, value := range tmap { + v[key] = value + } + + return nil +} diff --git a/vendor/github.com/spf13/viper/internal/encoding/toml/codec2.go b/vendor/github.com/spf13/viper/internal/encoding/toml/codec2.go new file mode 100644 index 0000000000..112c6d3725 --- /dev/null +++ b/vendor/github.com/spf13/viper/internal/encoding/toml/codec2.go @@ -0,0 +1,19 @@ +//go:build !viper_toml1 +// +build !viper_toml1 + +package toml + +import ( + "github.com/pelletier/go-toml/v2" +) + +// Codec implements the encoding.Encoder and encoding.Decoder interfaces for TOML encoding. +type Codec struct{} + +func (Codec) Encode(v map[string]interface{}) ([]byte, error) { + return toml.Marshal(v) +} + +func (Codec) Decode(b []byte, v map[string]interface{}) error { + return toml.Unmarshal(b, &v) +} diff --git a/vendor/github.com/spf13/viper/internal/encoding/yaml/codec.go b/vendor/github.com/spf13/viper/internal/encoding/yaml/codec.go new file mode 100644 index 0000000000..24cc19dfca --- /dev/null +++ b/vendor/github.com/spf13/viper/internal/encoding/yaml/codec.go @@ -0,0 +1,14 @@ +package yaml + +// import "gopkg.in/yaml.v2" + +// Codec implements the encoding.Encoder and encoding.Decoder interfaces for YAML encoding. 
+type Codec struct{} + +func (Codec) Encode(v map[string]interface{}) ([]byte, error) { + return yaml.Marshal(v) +} + +func (Codec) Decode(b []byte, v map[string]interface{}) error { + return yaml.Unmarshal(b, &v) +} diff --git a/vendor/github.com/spf13/viper/internal/encoding/yaml/yaml2.go b/vendor/github.com/spf13/viper/internal/encoding/yaml/yaml2.go new file mode 100644 index 0000000000..4c398c2f42 --- /dev/null +++ b/vendor/github.com/spf13/viper/internal/encoding/yaml/yaml2.go @@ -0,0 +1,14 @@ +//go:build viper_yaml2 +// +build viper_yaml2 + +package yaml + +import yamlv2 "gopkg.in/yaml.v2" + +var yaml = struct { + Marshal func(in interface{}) (out []byte, err error) + Unmarshal func(in []byte, out interface{}) (err error) +}{ + Marshal: yamlv2.Marshal, + Unmarshal: yamlv2.Unmarshal, +} diff --git a/vendor/github.com/spf13/viper/internal/encoding/yaml/yaml3.go b/vendor/github.com/spf13/viper/internal/encoding/yaml/yaml3.go new file mode 100644 index 0000000000..3a4775ced9 --- /dev/null +++ b/vendor/github.com/spf13/viper/internal/encoding/yaml/yaml3.go @@ -0,0 +1,14 @@ +//go:build !viper_yaml2 +// +build !viper_yaml2 + +package yaml + +import yamlv3 "gopkg.in/yaml.v3" + +var yaml = struct { + Marshal func(in interface{}) (out []byte, err error) + Unmarshal func(in []byte, out interface{}) (err error) +}{ + Marshal: yamlv3.Marshal, + Unmarshal: yamlv3.Unmarshal, +} diff --git a/vendor/github.com/spf13/viper/logger.go b/vendor/github.com/spf13/viper/logger.go new file mode 100644 index 0000000000..0115067ae6 --- /dev/null +++ b/vendor/github.com/spf13/viper/logger.go @@ -0,0 +1,77 @@ +package viper + +import ( + "fmt" + + jww "github.com/spf13/jwalterweatherman" +) + +// Logger is a unified interface for various logging use cases and practices, including: +// - leveled logging +// - structured logging +type Logger interface { + // Trace logs a Trace event. + // + // Even more fine-grained information than Debug events. + // Loggers not supporting this level should fall back to Debug. + Trace(msg string, keyvals ...interface{}) + + // Debug logs a Debug event. + // + // A verbose series of information events. + // They are useful when debugging the system. + Debug(msg string, keyvals ...interface{}) + + // Info logs an Info event. + // + // General information about what's happening inside the system. + Info(msg string, keyvals ...interface{}) + + // Warn logs a Warn(ing) event. + // + // Non-critical events that should be looked at. + Warn(msg string, keyvals ...interface{}) + + // Error logs an Error event. + // + // Critical events that require immediate attention. + // Loggers commonly provide Fatal and Panic levels above Error level, + // but exiting and panicing is out of scope for a logging library. 
+ Error(msg string, keyvals ...interface{}) +} + +type jwwLogger struct{} + +func (jwwLogger) Trace(msg string, keyvals ...interface{}) { + jww.TRACE.Printf(jwwLogMessage(msg, keyvals...)) +} + +func (jwwLogger) Debug(msg string, keyvals ...interface{}) { + jww.DEBUG.Printf(jwwLogMessage(msg, keyvals...)) +} + +func (jwwLogger) Info(msg string, keyvals ...interface{}) { + jww.INFO.Printf(jwwLogMessage(msg, keyvals...)) +} + +func (jwwLogger) Warn(msg string, keyvals ...interface{}) { + jww.WARN.Printf(jwwLogMessage(msg, keyvals...)) +} + +func (jwwLogger) Error(msg string, keyvals ...interface{}) { + jww.ERROR.Printf(jwwLogMessage(msg, keyvals...)) +} + +func jwwLogMessage(msg string, keyvals ...interface{}) string { + out := msg + + if len(keyvals) > 0 && len(keyvals)%2 == 1 { + keyvals = append(keyvals, nil) + } + + for i := 0; i <= len(keyvals)-2; i += 2 { + out = fmt.Sprintf("%s %v=%v", out, keyvals[i], keyvals[i+1]) + } + + return out +} diff --git a/vendor/github.com/spf13/viper/util.go b/vendor/github.com/spf13/viper/util.go index cee6b24296..ee7a86d9df 100644 --- a/vendor/github.com/spf13/viper/util.go +++ b/vendor/github.com/spf13/viper/util.go @@ -18,9 +18,7 @@ import ( "strings" "unicode" - "github.com/spf13/afero" "github.com/spf13/cast" - jww "github.com/spf13/jwalterweatherman" ) // ConfigParseError denotes failing to parse configuration file. @@ -88,26 +86,14 @@ func insensitiviseMap(m map[string]interface{}) { } } -func absPathify(inPath string) string { - jww.INFO.Println("Trying to resolve absolute path to", inPath) +func absPathify(logger Logger, inPath string) string { + logger.Info("trying to resolve absolute path", "path", inPath) if inPath == "$HOME" || strings.HasPrefix(inPath, "$HOME"+string(os.PathSeparator)) { inPath = userHomeDir() + inPath[5:] } - if strings.HasPrefix(inPath, "$") { - end := strings.Index(inPath, string(os.PathSeparator)) - - var value, suffix string - if end == -1 { - value = os.Getenv(inPath[1:]) - } else { - value = os.Getenv(inPath[1:end]) - suffix = inPath[end:] - } - - inPath = value + suffix - } + inPath = os.ExpandEnv(inPath) if filepath.IsAbs(inPath) { return filepath.Clean(inPath) @@ -118,21 +104,9 @@ func absPathify(inPath string) string { return filepath.Clean(p) } - jww.ERROR.Println("Couldn't discover absolute path") - jww.ERROR.Println(err) - return "" -} + logger.Error(fmt.Errorf("could not discover absolute path: %w", err).Error()) -// Check if file Exists -func exists(fs afero.Fs, path string) (bool, error) { - stat, err := fs.Stat(path) - if err == nil { - return !stat.IsDir(), nil - } - if os.IsNotExist(err) { - return false, nil - } - return false, err + return "" } func stringInSlice(a string, list []string) bool { diff --git a/vendor/github.com/spf13/viper/viper.go b/vendor/github.com/spf13/viper/viper.go index 405dc20fe3..a3812e92f3 100644 --- a/vendor/github.com/spf13/viper/viper.go +++ b/vendor/github.com/spf13/viper/viper.go @@ -22,7 +22,6 @@ package viper import ( "bytes" "encoding/csv" - "encoding/json" "errors" "fmt" "io" @@ -30,23 +29,25 @@ import ( "os" "path/filepath" "reflect" + "strconv" "strings" "sync" "time" "github.com/fsnotify/fsnotify" - "github.com/hashicorp/hcl" - "github.com/hashicorp/hcl/hcl/printer" - "github.com/magiconair/properties" "github.com/mitchellh/mapstructure" - "github.com/pelletier/go-toml" "github.com/spf13/afero" "github.com/spf13/cast" - jww "github.com/spf13/jwalterweatherman" "github.com/spf13/pflag" - "github.com/subosito/gotenv" - "gopkg.in/ini.v1" - "gopkg.in/yaml.v2" + + 
"github.com/spf13/viper/internal/encoding" + "github.com/spf13/viper/internal/encoding/dotenv" + "github.com/spf13/viper/internal/encoding/hcl" + "github.com/spf13/viper/internal/encoding/ini" + "github.com/spf13/viper/internal/encoding/javaproperties" + "github.com/spf13/viper/internal/encoding/json" + "github.com/spf13/viper/internal/encoding/toml" + "github.com/spf13/viper/internal/encoding/yaml" ) // ConfigMarshalError happens when failing to marshal the configuration. @@ -175,6 +176,8 @@ func DecodeHook(hook mapstructure.DecodeHookFunc) DecoderConfigOption { // "user": "root", // "endpoint": "https://localhost" // } +// +// Note: Vipers are not safe for concurrent Get() and Set() operations. type Viper struct { // Delimiter that separates a list of keys // used to access a nested value in one go @@ -196,6 +199,9 @@ type Viper struct { configPermissions os.FileMode envPrefix string + // Specific commands for ini parsing + iniLoadOptions ini.LoadOptions + automaticEnvApplied bool envKeyReplacer StringReplacer allowEmptyEnv bool @@ -205,15 +211,17 @@ type Viper struct { defaults map[string]interface{} kvstore map[string]interface{} pflags map[string]FlagValue - env map[string]string + env map[string][]string aliases map[string]string typeByDefValue bool - // Store read properties on the object so that we can write back in order with comments. - // This will only be used if the configuration read is a properties file. - properties *properties.Properties - onConfigChange func(fsnotify.Event) + + logger Logger + + // TODO: should probably be protected with a mutex + encoderRegistry *encoding.EncoderRegistry + decoderRegistry *encoding.DecoderRegistry } // New returns an initialized Viper instance. @@ -221,16 +229,19 @@ func New() *Viper { v := new(Viper) v.keyDelim = "." v.configName = "config" - v.configPermissions = os.FileMode(0644) + v.configPermissions = os.FileMode(0o644) v.fs = afero.NewOsFs() v.config = make(map[string]interface{}) v.override = make(map[string]interface{}) v.defaults = make(map[string]interface{}) v.kvstore = make(map[string]interface{}) v.pflags = make(map[string]FlagValue) - v.env = make(map[string]string) + v.env = make(map[string][]string) v.aliases = make(map[string]string) v.typeByDefValue = false + v.logger = jwwLogger{} + + v.resetEncoding() return v } @@ -278,6 +289,8 @@ func NewWithOptions(opts ...Option) *Viper { opt.apply(v) } + v.resetEncoding() + return v } @@ -286,10 +299,88 @@ func NewWithOptions(opts ...Option) *Viper { // can use it in their testing as well. 
func Reset() { v = New() - SupportedExts = []string{"json", "toml", "yaml", "yml", "properties", "props", "prop", "hcl", "dotenv", "env", "ini"} + SupportedExts = []string{"json", "toml", "yaml", "yml", "properties", "props", "prop", "hcl", "tfvars", "dotenv", "env", "ini"} SupportedRemoteProviders = []string{"etcd", "consul", "firestore"} } +// TODO: make this lazy initialization instead +func (v *Viper) resetEncoding() { + encoderRegistry := encoding.NewEncoderRegistry() + decoderRegistry := encoding.NewDecoderRegistry() + + { + codec := yaml.Codec{} + + encoderRegistry.RegisterEncoder("yaml", codec) + decoderRegistry.RegisterDecoder("yaml", codec) + + encoderRegistry.RegisterEncoder("yml", codec) + decoderRegistry.RegisterDecoder("yml", codec) + } + + { + codec := json.Codec{} + + encoderRegistry.RegisterEncoder("json", codec) + decoderRegistry.RegisterDecoder("json", codec) + } + + { + codec := toml.Codec{} + + encoderRegistry.RegisterEncoder("toml", codec) + decoderRegistry.RegisterDecoder("toml", codec) + } + + { + codec := hcl.Codec{} + + encoderRegistry.RegisterEncoder("hcl", codec) + decoderRegistry.RegisterDecoder("hcl", codec) + + encoderRegistry.RegisterEncoder("tfvars", codec) + decoderRegistry.RegisterDecoder("tfvars", codec) + } + + { + codec := ini.Codec{ + KeyDelimiter: v.keyDelim, + LoadOptions: v.iniLoadOptions, + } + + encoderRegistry.RegisterEncoder("ini", codec) + decoderRegistry.RegisterDecoder("ini", codec) + } + + { + codec := &javaproperties.Codec{ + KeyDelimiter: v.keyDelim, + } + + encoderRegistry.RegisterEncoder("properties", codec) + decoderRegistry.RegisterDecoder("properties", codec) + + encoderRegistry.RegisterEncoder("props", codec) + decoderRegistry.RegisterDecoder("props", codec) + + encoderRegistry.RegisterEncoder("prop", codec) + decoderRegistry.RegisterDecoder("prop", codec) + } + + { + codec := &dotenv.Codec{} + + encoderRegistry.RegisterEncoder("dotenv", codec) + decoderRegistry.RegisterDecoder("dotenv", codec) + + encoderRegistry.RegisterEncoder("env", codec) + decoderRegistry.RegisterDecoder("env", codec) + } + + v.encoderRegistry = encoderRegistry + v.decoderRegistry = decoderRegistry +} + type defaultRemoteProvider struct { provider string endpoint string @@ -325,7 +416,7 @@ type RemoteProvider interface { } // SupportedExts are universally supported extensions. -var SupportedExts = []string{"json", "toml", "yaml", "yml", "properties", "props", "prop", "hcl", "dotenv", "env", "ini"} +var SupportedExts = []string{"json", "toml", "yaml", "yml", "properties", "props", "prop", "hcl", "tfvars", "dotenv", "env", "ini"} // SupportedRemoteProviders are universally supported remote providers. var SupportedRemoteProviders = []string{"etcd", "consul", "firestore"} @@ -341,7 +432,7 @@ func (v *Viper) WatchConfig() { initWG := sync.WaitGroup{} initWG.Add(1) go func() { - watcher, err := fsnotify.NewWatcher() + watcher, err := newWatcher() if err != nil { log.Fatal(err) } @@ -385,7 +476,7 @@ func (v *Viper) WatchConfig() { v.onConfigChange(event) } } else if filepath.Clean(event.Name) == configFile && - event.Op&fsnotify.Remove&fsnotify.Remove != 0 { + event.Op&fsnotify.Remove != 0 { eventsWG.Done() return } @@ -409,6 +500,7 @@ func (v *Viper) WatchConfig() { // SetConfigFile explicitly defines the path, name and extension of the config file. // Viper will use this and not check any of the config paths. 
func SetConfigFile(in string) { v.SetConfigFile(in) } + func (v *Viper) SetConfigFile(in string) { if in != "" { v.configFile = in @@ -419,6 +511,7 @@ func (v *Viper) SetConfigFile(in string) { // E.g. if your prefix is "spf", the env registry will look for env // variables that start with "SPF_". func SetEnvPrefix(in string) { v.SetEnvPrefix(in) } + func (v *Viper) SetEnvPrefix(in string) { if in != "" { v.envPrefix = in @@ -437,6 +530,7 @@ func (v *Viper) mergeWithEnvPrefix(in string) string { // but empty environment variables as valid values instead of falling back. // For backward compatibility reasons this is false by default. func AllowEmptyEnv(allowEmptyEnv bool) { v.AllowEmptyEnv(allowEmptyEnv) } + func (v *Viper) AllowEmptyEnv(allowEmptyEnv bool) { v.allowEmptyEnv = allowEmptyEnv } @@ -465,10 +559,12 @@ func (v *Viper) ConfigFileUsed() string { return v.configFile } // AddConfigPath adds a path for Viper to search for the config file in. // Can be called multiple times to define multiple search paths. func AddConfigPath(in string) { v.AddConfigPath(in) } + func (v *Viper) AddConfigPath(in string) { if in != "" { - absin := absPathify(in) - jww.INFO.Println("adding", absin, "to paths to search") + absin := absPathify(v.logger, in) + + v.logger.Info("adding path to search paths", "path", absin) if !stringInSlice(absin, v.configPaths) { v.configPaths = append(v.configPaths, absin) } @@ -486,12 +582,14 @@ func (v *Viper) AddConfigPath(in string) { func AddRemoteProvider(provider, endpoint, path string) error { return v.AddRemoteProvider(provider, endpoint, path) } + func (v *Viper) AddRemoteProvider(provider, endpoint, path string) error { if !stringInSlice(provider, SupportedRemoteProviders) { return UnsupportedRemoteProviderError(provider) } if provider != "" && endpoint != "" { - jww.INFO.Printf("adding %s:%s to remote provider list", provider, endpoint) + v.logger.Info("adding remote provider", "provider", provider, "endpoint", endpoint) + rp := &defaultRemoteProvider{ endpoint: endpoint, provider: provider, @@ -523,7 +621,8 @@ func (v *Viper) AddSecureRemoteProvider(provider, endpoint, path, secretkeyring return UnsupportedRemoteProviderError(provider) } if provider != "" && endpoint != "" { - jww.INFO.Printf("adding %s:%s to remote provider list", provider, endpoint) + v.logger.Info("adding remote provider", "provider", provider, "endpoint", endpoint) + rp := &defaultRemoteProvider{ endpoint: endpoint, provider: provider, @@ -577,9 +676,9 @@ func (v *Viper) searchMap(source map[string]interface{}, path []string) interfac return nil } -// searchMapWithPathPrefixes recursively searches for a value for path in source map. +// searchIndexableWithPathPrefixes recursively searches for a value for path in source map/slice. // -// While searchMap() considers each path element as a single map key, this +// While searchMap() considers each path element as a single map key or slice index, this // function searches for, and prioritizes, merged path elements. // e.g., if in the source, "foo" is defined with a sub-key "bar", and "foo.bar" // is also defined, this latter value is returned for path ["foo", "bar"]. @@ -588,7 +687,7 @@ func (v *Viper) searchMap(source map[string]interface{}, path []string) interfac // in their keys). // // Note: This assumes that the path entries and map keys are lower cased. 
-func (v *Viper) searchMapWithPathPrefixes(source map[string]interface{}, path []string) interface{} { +func (v *Viper) searchIndexableWithPathPrefixes(source interface{}, path []string) interface{} { if len(path) == 0 { return source } @@ -597,28 +696,15 @@ func (v *Viper) searchMapWithPathPrefixes(source map[string]interface{}, path [] for i := len(path); i > 0; i-- { prefixKey := strings.ToLower(strings.Join(path[0:i], v.keyDelim)) - next, ok := source[prefixKey] - if ok { - // Fast path - if i == len(path) { - return next - } - - // Nested case - var val interface{} - switch next.(type) { - case map[interface{}]interface{}: - val = v.searchMapWithPathPrefixes(cast.ToStringMap(next), path[i:]) - case map[string]interface{}: - // Type assertion is safe here since it is only reached - // if the type of `next` is the same as the type being asserted - val = v.searchMapWithPathPrefixes(next.(map[string]interface{}), path[i:]) - default: - // got a value but nested key expected, do nothing and look for next prefix - } - if val != nil { - return val - } + var val interface{} + switch sourceIndexable := source.(type) { + case []interface{}: + val = v.searchSliceWithPathPrefixes(sourceIndexable, prefixKey, i, path) + case map[string]interface{}: + val = v.searchMapWithPathPrefixes(sourceIndexable, prefixKey, i, path) + } + if val != nil { + return val } } @@ -626,6 +712,76 @@ func (v *Viper) searchMapWithPathPrefixes(source map[string]interface{}, path [] return nil } +// searchSliceWithPathPrefixes searches for a value for path in sourceSlice +// +// This function is part of the searchIndexableWithPathPrefixes recurring search and +// should not be called directly from functions other than searchIndexableWithPathPrefixes. +func (v *Viper) searchSliceWithPathPrefixes( + sourceSlice []interface{}, + prefixKey string, + pathIndex int, + path []string, +) interface{} { + // if the prefixKey is not a number or it is out of bounds of the slice + index, err := strconv.Atoi(prefixKey) + if err != nil || len(sourceSlice) <= index { + return nil + } + + next := sourceSlice[index] + + // Fast path + if pathIndex == len(path) { + return next + } + + switch n := next.(type) { + case map[interface{}]interface{}: + return v.searchIndexableWithPathPrefixes(cast.ToStringMap(n), path[pathIndex:]) + case map[string]interface{}, []interface{}: + return v.searchIndexableWithPathPrefixes(n, path[pathIndex:]) + default: + // got a value but nested key expected, do nothing and look for next prefix + } + + // not found + return nil +} + +// searchMapWithPathPrefixes searches for a value for path in sourceMap +// +// This function is part of the searchIndexableWithPathPrefixes recurring search and +// should not be called directly from functions other than searchIndexableWithPathPrefixes. 
+func (v *Viper) searchMapWithPathPrefixes( + sourceMap map[string]interface{}, + prefixKey string, + pathIndex int, + path []string, +) interface{} { + next, ok := sourceMap[prefixKey] + if !ok { + return nil + } + + // Fast path + if pathIndex == len(path) { + return next + } + + // Nested case + switch n := next.(type) { + case map[interface{}]interface{}: + return v.searchIndexableWithPathPrefixes(cast.ToStringMap(n), path[pathIndex:]) + case map[string]interface{}, []interface{}: + return v.searchIndexableWithPathPrefixes(n, path[pathIndex:]) + default: + // got a value but nested key expected, do nothing and look for next prefix + } + + // not found + return nil +} + // isPathShadowedInDeepMap makes sure the given path is not shadowed somewhere // on its path in the map. // e.g., if "foo.bar" has a value in the given map, it “shadows” @@ -706,6 +862,7 @@ func (v *Viper) isPathShadowedInAutoEnv(path []string) string { // // "a b c" func SetTypeByDefaultValue(enable bool) { v.SetTypeByDefaultValue(enable) } + func (v *Viper) SetTypeByDefaultValue(enable bool) { v.typeByDefValue = enable } @@ -723,6 +880,7 @@ func GetViper() *Viper { // // Get returns an interface. For a specific value use one of the Get____ methods. func Get(key string) interface{} { return v.Get(key) } + func (v *Viper) Get(key string) interface{} { lcaseKey := strings.ToLower(key) val := v.find(lcaseKey, true) @@ -773,6 +931,7 @@ func (v *Viper) Get(key string) interface{} { // Sub returns new Viper instance representing a sub tree of this instance. // Sub is case-insensitive for a key. func Sub(key string) *Viper { return v.Sub(key) } + func (v *Viper) Sub(key string) *Viper { subv := New() data := v.Get(key) @@ -789,96 +948,112 @@ func (v *Viper) Sub(key string) *Viper { // GetString returns the value associated with the key as a string. func GetString(key string) string { return v.GetString(key) } + func (v *Viper) GetString(key string) string { return cast.ToString(v.Get(key)) } // GetBool returns the value associated with the key as a boolean. func GetBool(key string) bool { return v.GetBool(key) } + func (v *Viper) GetBool(key string) bool { return cast.ToBool(v.Get(key)) } // GetInt returns the value associated with the key as an integer. func GetInt(key string) int { return v.GetInt(key) } + func (v *Viper) GetInt(key string) int { return cast.ToInt(v.Get(key)) } // GetInt32 returns the value associated with the key as an integer. func GetInt32(key string) int32 { return v.GetInt32(key) } + func (v *Viper) GetInt32(key string) int32 { return cast.ToInt32(v.Get(key)) } // GetInt64 returns the value associated with the key as an integer. func GetInt64(key string) int64 { return v.GetInt64(key) } + func (v *Viper) GetInt64(key string) int64 { return cast.ToInt64(v.Get(key)) } // GetUint returns the value associated with the key as an unsigned integer. func GetUint(key string) uint { return v.GetUint(key) } + func (v *Viper) GetUint(key string) uint { return cast.ToUint(v.Get(key)) } // GetUint32 returns the value associated with the key as an unsigned integer. func GetUint32(key string) uint32 { return v.GetUint32(key) } + func (v *Viper) GetUint32(key string) uint32 { return cast.ToUint32(v.Get(key)) } // GetUint64 returns the value associated with the key as an unsigned integer. func GetUint64(key string) uint64 { return v.GetUint64(key) } + func (v *Viper) GetUint64(key string) uint64 { return cast.ToUint64(v.Get(key)) } // GetFloat64 returns the value associated with the key as a float64. 
func GetFloat64(key string) float64 { return v.GetFloat64(key) } + func (v *Viper) GetFloat64(key string) float64 { return cast.ToFloat64(v.Get(key)) } // GetTime returns the value associated with the key as time. func GetTime(key string) time.Time { return v.GetTime(key) } + func (v *Viper) GetTime(key string) time.Time { return cast.ToTime(v.Get(key)) } // GetDuration returns the value associated with the key as a duration. func GetDuration(key string) time.Duration { return v.GetDuration(key) } + func (v *Viper) GetDuration(key string) time.Duration { return cast.ToDuration(v.Get(key)) } // GetIntSlice returns the value associated with the key as a slice of int values. func GetIntSlice(key string) []int { return v.GetIntSlice(key) } + func (v *Viper) GetIntSlice(key string) []int { return cast.ToIntSlice(v.Get(key)) } // GetStringSlice returns the value associated with the key as a slice of strings. func GetStringSlice(key string) []string { return v.GetStringSlice(key) } + func (v *Viper) GetStringSlice(key string) []string { return cast.ToStringSlice(v.Get(key)) } // GetStringMap returns the value associated with the key as a map of interfaces. func GetStringMap(key string) map[string]interface{} { return v.GetStringMap(key) } + func (v *Viper) GetStringMap(key string) map[string]interface{} { return cast.ToStringMap(v.Get(key)) } // GetStringMapString returns the value associated with the key as a map of strings. func GetStringMapString(key string) map[string]string { return v.GetStringMapString(key) } + func (v *Viper) GetStringMapString(key string) map[string]string { return cast.ToStringMapString(v.Get(key)) } // GetStringMapStringSlice returns the value associated with the key as a map to a slice of strings. func GetStringMapStringSlice(key string) map[string][]string { return v.GetStringMapStringSlice(key) } + func (v *Viper) GetStringMapStringSlice(key string) map[string][]string { return cast.ToStringMapStringSlice(v.Get(key)) } @@ -886,6 +1061,7 @@ func (v *Viper) GetStringMapStringSlice(key string) map[string][]string { // GetSizeInBytes returns the size of the value associated with the given key // in bytes. func GetSizeInBytes(key string) uint { return v.GetSizeInBytes(key) } + func (v *Viper) GetSizeInBytes(key string) uint { sizeStr := cast.ToString(v.Get(key)) return parseSizeInBytes(sizeStr) @@ -895,6 +1071,7 @@ func (v *Viper) GetSizeInBytes(key string) uint { func UnmarshalKey(key string, rawVal interface{}, opts ...DecoderConfigOption) error { return v.UnmarshalKey(key, rawVal, opts...) } + func (v *Viper) UnmarshalKey(key string, rawVal interface{}, opts ...DecoderConfigOption) error { return decode(v.Get(key), defaultDecoderConfig(rawVal, opts...)) } @@ -904,6 +1081,7 @@ func (v *Viper) UnmarshalKey(key string, rawVal interface{}, opts ...DecoderConf func Unmarshal(rawVal interface{}, opts ...DecoderConfigOption) error { return v.Unmarshal(rawVal, opts...) } + func (v *Viper) Unmarshal(rawVal interface{}, opts ...DecoderConfigOption) error { return decode(v.AllSettings(), defaultDecoderConfig(rawVal, opts...)) } @@ -940,6 +1118,7 @@ func decode(input interface{}, config *mapstructure.DecoderConfig) error { func UnmarshalExact(rawVal interface{}, opts ...DecoderConfigOption) error { return v.UnmarshalExact(rawVal, opts...) } + func (v *Viper) UnmarshalExact(rawVal interface{}, opts ...DecoderConfigOption) error { config := defaultDecoderConfig(rawVal, opts...) 
config.ErrorUnused = true @@ -950,6 +1129,7 @@ func (v *Viper) UnmarshalExact(rawVal interface{}, opts ...DecoderConfigOption) // BindPFlags binds a full flag set to the configuration, using each flag's long // name as the config key. func BindPFlags(flags *pflag.FlagSet) error { return v.BindPFlags(flags) } + func (v *Viper) BindPFlags(flags *pflag.FlagSet) error { return v.BindFlagValues(pflagValueSet{flags}) } @@ -961,13 +1141,18 @@ func (v *Viper) BindPFlags(flags *pflag.FlagSet) error { // Viper.BindPFlag("port", serverCmd.Flags().Lookup("port")) // func BindPFlag(key string, flag *pflag.Flag) error { return v.BindPFlag(key, flag) } + func (v *Viper) BindPFlag(key string, flag *pflag.Flag) error { + if flag == nil { + return fmt.Errorf("flag for %q is nil", key) + } return v.BindFlagValue(key, pflagValue{flag}) } // BindFlagValues binds a full FlagValue set to the configuration, using each flag's long // name as the config key. func BindFlagValues(flags FlagValueSet) error { return v.BindFlagValues(flags) } + func (v *Viper) BindFlagValues(flags FlagValueSet) (err error) { flags.VisitAll(func(flag FlagValue) { if err = v.BindFlagValue(flag.Name(), flag); err != nil { @@ -979,6 +1164,7 @@ func (v *Viper) BindFlagValues(flags FlagValueSet) (err error) { // BindFlagValue binds a specific key to a FlagValue. func BindFlagValue(key string, flag FlagValue) error { return v.BindFlagValue(key, flag) } + func (v *Viper) BindFlagValue(key string, flag FlagValue) error { if flag == nil { return fmt.Errorf("flag for %q is nil", key) @@ -990,27 +1176,38 @@ func (v *Viper) BindFlagValue(key string, flag FlagValue) error { // BindEnv binds a Viper key to a ENV variable. // ENV variables are case sensitive. // If only a key is provided, it will use the env key matching the key, uppercased. +// If more arguments are provided, they will represent the env variable names that +// should bind to this key and will be taken in the specified order. // EnvPrefix will be used when set when env name is not provided. func BindEnv(input ...string) error { return v.BindEnv(input...) } + func (v *Viper) BindEnv(input ...string) error { - var key, envkey string if len(input) == 0 { return fmt.Errorf("missing key to bind to") } - key = strings.ToLower(input[0]) + key := strings.ToLower(input[0]) if len(input) == 1 { - envkey = v.mergeWithEnvPrefix(key) + v.env[key] = append(v.env[key], v.mergeWithEnvPrefix(key)) } else { - envkey = input[1] + v.env[key] = append(v.env[key], input[1:]...) } - v.env[key] = envkey - return nil } +// MustBindEnv wraps BindEnv in a panic. +// If there is an error binding an environment variable, MustBindEnv will +// panic. +func MustBindEnv(input ...string) { v.MustBindEnv(input...) } + +func (v *Viper) MustBindEnv(input ...string) { + if err := v.BindEnv(input...); err != nil { + panic(fmt.Sprintf("error while binding environment variable: %v", err)) + } +} + // Given a key, find the value. // // Viper will check to see if an alias exists first. 
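A minimal usage sketch of the multi-variable `BindEnv` and the new `MustBindEnv` introduced in this hunk (variable names and values are illustrative; per the README text above, explicitly named variables are consulted in the order given and do not receive the prefix):

```go
package main

import (
	"fmt"
	"os"

	"github.com/spf13/viper"
)

func main() {
	viper.SetEnvPrefix("spf")

	// Single argument: Viper derives the variable name itself (SPF_ID).
	viper.BindEnv("id")

	// Multiple arguments: APP_PORT is consulted before PORT.
	// The prefix is not applied to explicitly named variables.
	viper.BindEnv("port", "APP_PORT", "PORT")

	// MustBindEnv panics instead of returning an error.
	viper.MustBindEnv("datadir", "APP_DATA_DIR")

	os.Setenv("APP_PORT", "8080")
	os.Setenv("PORT", "9090")

	fmt.Println(viper.GetInt("port")) // 8080: APP_PORT takes precedence
}
```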
@@ -1055,7 +1252,7 @@ func (v *Viper) find(lcaseKey string, flagDefault bool) interface{} { return cast.ToInt(flag.ValueString()) case "bool": return cast.ToBool(flag.ValueString()) - case "stringSlice": + case "stringSlice", "stringArray": s := strings.TrimPrefix(flag.ValueString(), "[") s = strings.TrimSuffix(s, "]") res, _ := readAsCSV(s) @@ -1086,10 +1283,12 @@ func (v *Viper) find(lcaseKey string, flagDefault bool) interface{} { return nil } } - envkey, exists := v.env[lcaseKey] + envkeys, exists := v.env[lcaseKey] if exists { - if val, ok := v.getEnv(envkey); ok { - return val + for _, envkey := range envkeys { + if val, ok := v.getEnv(envkey); ok { + return val + } } } if nested && v.isPathShadowedInFlatMap(path, v.env) != "" { @@ -1097,7 +1296,7 @@ func (v *Viper) find(lcaseKey string, flagDefault bool) interface{} { } // Config file next - val = v.searchMapWithPathPrefixes(v.config, path) + val = v.searchIndexableWithPathPrefixes(v.config, path) if val != nil { return val } @@ -1132,7 +1331,7 @@ func (v *Viper) find(lcaseKey string, flagDefault bool) interface{} { return cast.ToInt(flag.ValueString()) case "bool": return cast.ToBool(flag.ValueString()) - case "stringSlice": + case "stringSlice", "stringArray": s := strings.TrimPrefix(flag.ValueString(), "[") s = strings.TrimSuffix(s, "]") res, _ := readAsCSV(s) @@ -1190,15 +1389,17 @@ func stringToStringConv(val string) interface{} { // IsSet checks to see if the key has been set in any of the data locations. // IsSet is case-insensitive for a key. func IsSet(key string) bool { return v.IsSet(key) } + func (v *Viper) IsSet(key string) bool { lcaseKey := strings.ToLower(key) val := v.find(lcaseKey, false) return val != nil } -// AutomaticEnv has Viper check ENV variables for all. -// keys set in config, default & flags +// AutomaticEnv makes Viper check if environment variables match any of the existing keys +// (config, default or flags). If matching env vars are found, they are loaded into Viper. func AutomaticEnv() { v.AutomaticEnv() } + func (v *Viper) AutomaticEnv() { v.automaticEnvApplied = true } @@ -1207,6 +1408,7 @@ func (v *Viper) AutomaticEnv() { // Useful for mapping an environmental variable to a key that does // not match it. func SetEnvKeyReplacer(r *strings.Replacer) { v.SetEnvKeyReplacer(r) } + func (v *Viper) SetEnvKeyReplacer(r *strings.Replacer) { v.envKeyReplacer = r } @@ -1214,6 +1416,7 @@ func (v *Viper) SetEnvKeyReplacer(r *strings.Replacer) { // RegisterAlias creates an alias that provides another accessor for the same key. // This enables one to change a name without breaking the application. func RegisterAlias(alias string, key string) { v.RegisterAlias(alias, key) } + func (v *Viper) RegisterAlias(alias string, key string) { v.registerAlias(alias, strings.ToLower(key)) } @@ -1246,14 +1449,15 @@ func (v *Viper) registerAlias(alias string, key string) { v.aliases[alias] = key } } else { - jww.WARN.Println("Creating circular reference alias", alias, key, v.realKey(key)) + v.logger.Warn("creating circular reference alias", "alias", alias, "key", key, "real_key", v.realKey(key)) } } func (v *Viper) realKey(key string) string { newkey, exists := v.aliases[key] if exists { - jww.DEBUG.Println("Alias", key, "to", newkey) + v.logger.Debug("key is an alias", "alias", key, "to", newkey) + return v.realKey(newkey) } return key @@ -1261,18 +1465,22 @@ func (v *Viper) realKey(key string) string { // InConfig checks to see if the given key (or an alias) is in the config file. 
func InConfig(key string) bool { return v.InConfig(key) } + func (v *Viper) InConfig(key string) bool { + lcaseKey := strings.ToLower(key) + // if the requested key is an alias, then return the proper key - key = v.realKey(key) + lcaseKey = v.realKey(lcaseKey) + path := strings.Split(lcaseKey, v.keyDelim) - _, exists := v.config[key] - return exists + return v.searchIndexableWithPathPrefixes(v.config, path) != nil } // SetDefault sets the default value for this key. // SetDefault is case-insensitive for a key. // Default only used when no value is provided by the user via flag, config or ENV. func SetDefault(key string, value interface{}) { v.SetDefault(key, value) } + func (v *Viper) SetDefault(key string, value interface{}) { // If alias passed in, then set the proper default key = v.realKey(strings.ToLower(key)) @@ -1291,6 +1499,7 @@ func (v *Viper) SetDefault(key string, value interface{}) { // Will be used instead of values obtained via // flags, config file, ENV, default, or key/value store. func Set(key string, value interface{}) { v.Set(key, value) } + func (v *Viper) Set(key string, value interface{}) { // If alias passed in, then set the proper override key = v.realKey(strings.ToLower(key)) @@ -1307,8 +1516,9 @@ func (v *Viper) Set(key string, value interface{}) { // ReadInConfig will discover and load the configuration file from disk // and key/value stores, searching in one of the defined paths. func ReadInConfig() error { return v.ReadInConfig() } + func (v *Viper) ReadInConfig() error { - jww.INFO.Println("Attempting to read in config file") + v.logger.Info("attempting to read in config file") filename, err := v.getConfigFile() if err != nil { return err @@ -1318,7 +1528,7 @@ func (v *Viper) ReadInConfig() error { return UnsupportedConfigError(v.getConfigType()) } - jww.DEBUG.Println("Reading file: ", filename) + v.logger.Debug("reading file", "file", filename) file, err := afero.ReadFile(v.fs, filename) if err != nil { return err @@ -1337,8 +1547,9 @@ func (v *Viper) ReadInConfig() error { // MergeInConfig merges a new configuration with an existing config. func MergeInConfig() error { return v.MergeInConfig() } + func (v *Viper) MergeInConfig() error { - jww.INFO.Println("Attempting to merge in config file") + v.logger.Info("attempting to merge in config file") filename, err := v.getConfigFile() if err != nil { return err @@ -1359,6 +1570,7 @@ func (v *Viper) MergeInConfig() error { // ReadConfig will read a configuration file, setting existing keys to nil if the // key does not exist in the file. func ReadConfig(in io.Reader) error { return v.ReadConfig(in) } + func (v *Viper) ReadConfig(in io.Reader) error { v.config = make(map[string]interface{}) return v.unmarshalReader(in, v.config) @@ -1366,6 +1578,7 @@ func (v *Viper) ReadConfig(in io.Reader) error { // MergeConfig merges a new configuration with an existing config. func MergeConfig(in io.Reader) error { return v.MergeConfig(in) } + func (v *Viper) MergeConfig(in io.Reader) error { cfg := make(map[string]interface{}) if err := v.unmarshalReader(in, cfg); err != nil { @@ -1377,6 +1590,7 @@ func (v *Viper) MergeConfig(in io.Reader) error { // MergeConfigMap merges the configuration from the map given with an existing config. // Note that the map given may be modified. 
func MergeConfigMap(cfg map[string]interface{}) error { return v.MergeConfigMap(cfg) } + func (v *Viper) MergeConfigMap(cfg map[string]interface{}) error { if v.config == nil { v.config = make(map[string]interface{}) @@ -1388,6 +1602,7 @@ func (v *Viper) MergeConfigMap(cfg map[string]interface{}) error { // WriteConfig writes the current configuration to a file. func WriteConfig() error { return v.WriteConfig() } + func (v *Viper) WriteConfig() error { filename, err := v.getConfigFile() if err != nil { @@ -1398,6 +1613,7 @@ func (v *Viper) WriteConfig() error { // SafeWriteConfig writes current configuration to file only if the file does not exist. func SafeWriteConfig() error { return v.SafeWriteConfig() } + func (v *Viper) SafeWriteConfig() error { if len(v.configPaths) < 1 { return errors.New("missing configuration for 'configPath'") @@ -1407,12 +1623,14 @@ func (v *Viper) SafeWriteConfig() error { // WriteConfigAs writes current configuration to a given filename. func WriteConfigAs(filename string) error { return v.WriteConfigAs(filename) } + func (v *Viper) WriteConfigAs(filename string) error { return v.writeConfig(filename, true) } // SafeWriteConfigAs writes current configuration to a given filename if it does not exist. func SafeWriteConfigAs(filename string) error { return v.SafeWriteConfigAs(filename) } + func (v *Viper) SafeWriteConfigAs(filename string) error { alreadyExists, err := afero.Exists(v.fs, filename) if alreadyExists && err == nil { @@ -1422,11 +1640,12 @@ func (v *Viper) SafeWriteConfigAs(filename string) error { } func (v *Viper) writeConfig(filename string, force bool) error { - jww.INFO.Println("Attempting to write configuration to file.") + v.logger.Info("attempting to write configuration to file") + var configType string ext := filepath.Ext(filename) - if ext != "" { + if ext != "" && ext != filepath.Base(filename) { configType = ext[1:] } else { configType = v.configType @@ -1463,81 +1682,17 @@ func (v *Viper) writeConfig(filename string, force bool) error { func unmarshalReader(in io.Reader, c map[string]interface{}) error { return v.unmarshalReader(in, c) } + func (v *Viper) unmarshalReader(in io.Reader, c map[string]interface{}) error { buf := new(bytes.Buffer) buf.ReadFrom(in) - switch strings.ToLower(v.getConfigType()) { - case "yaml", "yml": - if err := yaml.Unmarshal(buf.Bytes(), &c); err != nil { - return ConfigParseError{err} - } - - case "json": - if err := json.Unmarshal(buf.Bytes(), &c); err != nil { - return ConfigParseError{err} - } - - case "hcl": - obj, err := hcl.Parse(buf.String()) + switch format := strings.ToLower(v.getConfigType()); format { + case "yaml", "yml", "json", "toml", "hcl", "tfvars", "ini", "properties", "props", "prop", "dotenv", "env": + err := v.decoderRegistry.Decode(format, buf.Bytes(), c) if err != nil { return ConfigParseError{err} } - if err = hcl.DecodeObject(&c, obj); err != nil { - return ConfigParseError{err} - } - - case "toml": - tree, err := toml.LoadReader(buf) - if err != nil { - return ConfigParseError{err} - } - tmap := tree.ToMap() - for k, v := range tmap { - c[k] = v - } - - case "dotenv", "env": - env, err := gotenv.StrictParse(buf) - if err != nil { - return ConfigParseError{err} - } - for k, v := range env { - c[k] = v - } - - case "properties", "props", "prop": - v.properties = properties.NewProperties() - var err error - if v.properties, err = properties.Load(buf.Bytes(), properties.UTF8); err != nil { - return ConfigParseError{err} - } - for _, key := range v.properties.Keys() { - value, _ := 
v.properties.Get(key) - // recursively build nested maps - path := strings.Split(key, ".") - lastKey := strings.ToLower(path[len(path)-1]) - deepestMap := deepSearch(c, path[0:len(path)-1]) - // set innermost value - deepestMap[lastKey] = value - } - - case "ini": - cfg := ini.Empty() - err := cfg.Append(buf.Bytes()) - if err != nil { - return ConfigParseError{err} - } - sections := cfg.Sections() - for i := 0; i < len(sections); i++ { - section := sections[i] - keys := section.Keys() - for j := 0; j < len(keys); j++ { - key := keys[j] - value := cfg.Section(section.Name()).Key(key.Name()).String() - c[section.Name()+"."+key.Name()] = value - } - } } insensitiviseMap(c) @@ -1548,92 +1703,16 @@ func (v *Viper) unmarshalReader(in io.Reader, c map[string]interface{}) error { func (v *Viper) marshalWriter(f afero.File, configType string) error { c := v.AllSettings() switch configType { - case "json": - b, err := json.MarshalIndent(c, "", " ") - if err != nil { - return ConfigMarshalError{err} - } - _, err = f.WriteString(string(b)) + case "yaml", "yml", "json", "toml", "hcl", "tfvars", "ini", "prop", "props", "properties", "dotenv", "env": + b, err := v.encoderRegistry.Encode(configType, c) if err != nil { return ConfigMarshalError{err} } - case "hcl": - b, err := json.Marshal(c) - if err != nil { - return ConfigMarshalError{err} - } - ast, err := hcl.Parse(string(b)) - if err != nil { - return ConfigMarshalError{err} - } - err = printer.Fprint(f, ast.Node) - if err != nil { - return ConfigMarshalError{err} - } - - case "prop", "props", "properties": - if v.properties == nil { - v.properties = properties.NewProperties() - } - p := v.properties - for _, key := range v.AllKeys() { - _, _, err := p.Set(key, v.GetString(key)) - if err != nil { - return ConfigMarshalError{err} - } - } - _, err := p.WriteComment(f, "#", properties.UTF8) - if err != nil { - return ConfigMarshalError{err} - } - - case "dotenv", "env": - lines := []string{} - for _, key := range v.AllKeys() { - envName := strings.ToUpper(strings.Replace(key, ".", "_", -1)) - val := v.Get(key) - lines = append(lines, fmt.Sprintf("%v=%v", envName, val)) - } - s := strings.Join(lines, "\n") - if _, err := f.WriteString(s); err != nil { - return ConfigMarshalError{err} - } - - case "toml": - t, err := toml.TreeFromMap(c) - if err != nil { - return ConfigMarshalError{err} - } - s := t.String() - if _, err := f.WriteString(s); err != nil { - return ConfigMarshalError{err} - } - - case "yaml", "yml": - b, err := yaml.Marshal(c) + _, err = f.WriteString(string(b)) if err != nil { return ConfigMarshalError{err} } - if _, err = f.WriteString(string(b)); err != nil { - return ConfigMarshalError{err} - } - - case "ini": - keys := v.AllKeys() - cfg := ini.Empty() - ini.PrettyFormat = false - for i := 0; i < len(keys); i++ { - key := keys[i] - lastSep := strings.LastIndex(key, ".") - sectionName := key[:(lastSep)] - keyName := key[(lastSep + 1):] - if sectionName == "default" { - sectionName = "" - } - cfg.Section(sectionName).Key(keyName).SetValue(v.Get(key).(string)) - } - cfg.WriteTo(f) } return nil } @@ -1650,7 +1729,8 @@ func keyExists(k string, m map[string]interface{}) string { } func castToMapStringInterface( - src map[interface{}]interface{}) map[string]interface{} { + src map[interface{}]interface{}, +) map[string]interface{} { tgt := map[string]interface{}{} for k, v := range src { tgt[fmt.Sprintf("%v", k)] = v @@ -1658,6 +1738,14 @@ func castToMapStringInterface( return tgt } +func castMapStringSliceToMapInterface(src 
map[string][]string) map[string]interface{} { + tgt := map[string]interface{}{} + for k, v := range src { + tgt[k] = v + } + return tgt +} + func castMapStringToMapInterface(src map[string]string) map[string]interface{} { tgt := map[string]interface{}{} for k, v := range src { @@ -1680,11 +1768,12 @@ func castMapFlagToMapInterface(src map[string]FlagValue) map[string]interface{} // deep. Both map types are supported as there is a go-yaml fork that uses // `map[string]interface{}` instead. func mergeMaps( - src, tgt map[string]interface{}, itgt map[interface{}]interface{}) { + src, tgt map[string]interface{}, itgt map[interface{}]interface{}, +) { for sk, sv := range src { tk := keyExists(sk, tgt) if tk == "" { - jww.TRACE.Printf("tk=\"\", tgt[%s]=%v", sk, sv) + v.logger.Trace("", "tk", "\"\"", fmt.Sprintf("tgt[%s]", sk), sv) tgt[sk] = sv if itgt != nil { itgt[sk] = sv @@ -1694,7 +1783,7 @@ func mergeMaps( tv, ok := tgt[tk] if !ok { - jww.TRACE.Printf("tgt[%s] != ok, tgt[%s]=%v", tk, sk, sv) + v.logger.Trace("", fmt.Sprintf("ok[%s]", tk), false, fmt.Sprintf("tgt[%s]", sk), sv) tgt[sk] = sv if itgt != nil { itgt[sk] = sv @@ -1704,28 +1793,52 @@ func mergeMaps( svType := reflect.TypeOf(sv) tvType := reflect.TypeOf(tv) - if svType != tvType { - jww.ERROR.Printf( - "svType != tvType; key=%s, st=%v, tt=%v, sv=%v, tv=%v", - sk, svType, tvType, sv, tv) - continue - } - jww.TRACE.Printf("processing key=%s, st=%v, tt=%v, sv=%v, tv=%v", - sk, svType, tvType, sv, tv) + v.logger.Trace( + "processing", + "key", sk, + "st", svType, + "tt", tvType, + "sv", sv, + "tv", tv, + ) switch ttv := tv.(type) { case map[interface{}]interface{}: - jww.TRACE.Printf("merging maps (must convert)") - tsv := sv.(map[interface{}]interface{}) + v.logger.Trace("merging maps (must convert)") + tsv, ok := sv.(map[interface{}]interface{}) + if !ok { + v.logger.Error( + "Could not cast sv to map[interface{}]interface{}", + "key", sk, + "st", svType, + "tt", tvType, + "sv", sv, + "tv", tv, + ) + continue + } + ssv := castToMapStringInterface(tsv) stv := castToMapStringInterface(ttv) mergeMaps(ssv, stv, ttv) case map[string]interface{}: - jww.TRACE.Printf("merging maps") - mergeMaps(sv.(map[string]interface{}), ttv, nil) + v.logger.Trace("merging maps") + tsv, ok := sv.(map[string]interface{}) + if !ok { + v.logger.Error( + "Could not cast sv to map[string]interface{}", + "key", sk, + "st", svType, + "tt", tvType, + "sv", sv, + "tv", tv, + ) + continue + } + mergeMaps(tsv, ttv, nil) default: - jww.TRACE.Printf("setting value") + v.logger.Trace("setting value") tgt[tk] = sv if itgt != nil { itgt[tk] = sv @@ -1737,6 +1850,7 @@ func mergeMaps( // ReadRemoteConfig attempts to get configuration from a remote source // and read it in the remote configuration registry. func ReadRemoteConfig() error { return v.ReadRemoteConfig() } + func (v *Viper) ReadRemoteConfig() error { return v.getKeyValueConfig() } @@ -1759,9 +1873,13 @@ func (v *Viper) getKeyValueConfig() error { for _, rp := range v.remoteProviders { val, err := v.getRemoteConfig(rp) if err != nil { + v.logger.Error(fmt.Errorf("get remote config: %w", err).Error()) + continue } + v.kvstore = val + return nil } return RemoteConfigError("No Files Found") @@ -1818,13 +1936,14 @@ func (v *Viper) watchRemoteConfig(provider RemoteProvider) (map[string]interface // AllKeys returns all keys holding a value, regardless of where they are set. 
// Nested keys are returned with a v.keyDelim separator func AllKeys() []string { return v.AllKeys() } + func (v *Viper) AllKeys() []string { m := map[string]bool{} // add all paths, by order of descending priority to ensure correct shadowing m = v.flattenAndMergeMap(m, castMapStringToMapInterface(v.aliases), "") m = v.flattenAndMergeMap(m, v.override, "") m = v.mergeFlatMap(m, castMapFlagToMapInterface(v.pflags)) - m = v.mergeFlatMap(m, castMapStringToMapInterface(v.env)) + m = v.mergeFlatMap(m, castMapStringSliceToMapInterface(v.env)) m = v.flattenAndMergeMap(m, v.config, "") m = v.flattenAndMergeMap(m, v.kvstore, "") m = v.flattenAndMergeMap(m, v.defaults, "") @@ -1898,6 +2017,7 @@ outer: // AllSettings merges all settings and returns them as a map[string]interface{}. func AllSettings() map[string]interface{} { return v.AllSettings() } + func (v *Viper) AllSettings() map[string]interface{} { m := map[string]interface{}{} // start from the list of keys, and construct the map one value at a time @@ -1919,6 +2039,7 @@ func (v *Viper) AllSettings() map[string]interface{} { // SetFs sets the filesystem to use to read configuration. func SetFs(fs afero.Fs) { v.SetFs(fs) } + func (v *Viper) SetFs(fs afero.Fs) { v.fs = fs } @@ -1926,6 +2047,7 @@ func (v *Viper) SetFs(fs afero.Fs) { // SetConfigName sets name for the config file. // Does not include extension. func SetConfigName(in string) { v.SetConfigName(in) } + func (v *Viper) SetConfigName(in string) { if in != "" { v.configName = in @@ -1936,6 +2058,7 @@ func (v *Viper) SetConfigName(in string) { // SetConfigType sets the type of the configuration returned by the // remote source, e.g. "json". func SetConfigType(in string) { v.SetConfigType(in) } + func (v *Viper) SetConfigType(in string) { if in != "" { v.configType = in @@ -1944,10 +2067,18 @@ func (v *Viper) SetConfigType(in string) { // SetConfigPermissions sets the permissions for the config file. func SetConfigPermissions(perm os.FileMode) { v.SetConfigPermissions(perm) } + func (v *Viper) SetConfigPermissions(perm os.FileMode) { v.configPermissions = perm.Perm() } +// IniLoadOptions sets the load options for ini parsing. +func IniLoadOptions(in ini.LoadOptions) Option { + return optionFunc(func(v *Viper) { + v.iniLoadOptions = in + }) +} + func (v *Viper) getConfigType() string { if v.configType != "" { return v.configType @@ -1978,42 +2109,10 @@ func (v *Viper) getConfigFile() (string, error) { return v.configFile, nil } -func (v *Viper) searchInPath(in string) (filename string) { - jww.DEBUG.Println("Searching for config in ", in) - for _, ext := range SupportedExts { - jww.DEBUG.Println("Checking for", filepath.Join(in, v.configName+"."+ext)) - if b, _ := exists(v.fs, filepath.Join(in, v.configName+"."+ext)); b { - jww.DEBUG.Println("Found: ", filepath.Join(in, v.configName+"."+ext)) - return filepath.Join(in, v.configName+"."+ext) - } - } - - if v.configType != "" { - if b, _ := exists(v.fs, filepath.Join(in, v.configName)); b { - return filepath.Join(in, v.configName) - } - } - - return "" -} - -// Search all configPaths for any config file. -// Returns the first path that exists (and is a config file). 
-func (v *Viper) findConfigFile() (string, error) { - jww.INFO.Println("Searching for config in ", v.configPaths) - - for _, cp := range v.configPaths { - file := v.searchInPath(cp) - if file != "" { - return file, nil - } - } - return "", ConfigFileNotFoundError{v.configName, fmt.Sprintf("%s", v.configPaths)} -} - // Debug prints all configuration registries for debugging // purposes. func Debug() { v.Debug() } + func (v *Viper) Debug() { fmt.Printf("Aliases:\n%#v\n", v.aliases) fmt.Printf("Override:\n%#v\n", v.override) diff --git a/vendor/github.com/spf13/viper/viper_go1_15.go b/vendor/github.com/spf13/viper/viper_go1_15.go new file mode 100644 index 0000000000..19a771cbda --- /dev/null +++ b/vendor/github.com/spf13/viper/viper_go1_15.go @@ -0,0 +1,57 @@ +//go:build !go1.16 || !finder +// +build !go1.16 !finder + +package viper + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/spf13/afero" +) + +// Search all configPaths for any config file. +// Returns the first path that exists (and is a config file). +func (v *Viper) findConfigFile() (string, error) { + v.logger.Info("searching for config in paths", "paths", v.configPaths) + + for _, cp := range v.configPaths { + file := v.searchInPath(cp) + if file != "" { + return file, nil + } + } + return "", ConfigFileNotFoundError{v.configName, fmt.Sprintf("%s", v.configPaths)} +} + +func (v *Viper) searchInPath(in string) (filename string) { + v.logger.Debug("searching for config in path", "path", in) + for _, ext := range SupportedExts { + v.logger.Debug("checking if file exists", "file", filepath.Join(in, v.configName+"."+ext)) + if b, _ := exists(v.fs, filepath.Join(in, v.configName+"."+ext)); b { + v.logger.Debug("found file", "file", filepath.Join(in, v.configName+"."+ext)) + return filepath.Join(in, v.configName+"."+ext) + } + } + + if v.configType != "" { + if b, _ := exists(v.fs, filepath.Join(in, v.configName)); b { + return filepath.Join(in, v.configName) + } + } + + return "" +} + +// Check if file Exists +func exists(fs afero.Fs, path string) (bool, error) { + stat, err := fs.Stat(path) + if err == nil { + return !stat.IsDir(), nil + } + if os.IsNotExist(err) { + return false, nil + } + return false, err +} diff --git a/vendor/github.com/spf13/viper/viper_go1_16.go b/vendor/github.com/spf13/viper/viper_go1_16.go new file mode 100644 index 0000000000..e10172fa3f --- /dev/null +++ b/vendor/github.com/spf13/viper/viper_go1_16.go @@ -0,0 +1,32 @@ +//go:build go1.16 && finder +// +build go1.16,finder + +package viper + +import ( + "fmt" + + "github.com/spf13/afero" +) + +// Search all configPaths for any config file. +// Returns the first path that exists (and is a config file). 
+func (v *Viper) findConfigFile() (string, error) { + finder := finder{ + paths: v.configPaths, + fileNames: []string{v.configName}, + extensions: SupportedExts, + withoutExtension: v.configType != "", + } + + file, err := finder.Find(afero.NewIOFS(v.fs)) + if err != nil { + return "", err + } + + if file == "" { + return "", ConfigFileNotFoundError{v.configName, fmt.Sprintf("%s", v.configPaths)} + } + + return file, nil +} diff --git a/vendor/github.com/spf13/viper/watch.go b/vendor/github.com/spf13/viper/watch.go new file mode 100644 index 0000000000..b5523b8f9d --- /dev/null +++ b/vendor/github.com/spf13/viper/watch.go @@ -0,0 +1,12 @@ +//go:build !js +// +build !js + +package viper + +import "github.com/fsnotify/fsnotify" + +type watcher = fsnotify.Watcher + +func newWatcher() (*watcher, error) { + return fsnotify.NewWatcher() +} diff --git a/vendor/github.com/spf13/viper/watch_wasm.go b/vendor/github.com/spf13/viper/watch_wasm.go new file mode 100644 index 0000000000..8e47e6a910 --- /dev/null +++ b/vendor/github.com/spf13/viper/watch_wasm.go @@ -0,0 +1,30 @@ +// +build js,wasm + +package viper + +import ( + "errors" + + "github.com/fsnotify/fsnotify" +) + +type watcher struct { + Events chan fsnotify.Event + Errors chan error +} + +func (*watcher) Close() error { + return nil +} + +func (*watcher) Add(name string) error { + return nil +} + +func (*watcher) Remove(name string) error { + return nil +} + +func newWatcher() (*watcher, error) { + return &watcher{}, errors.New("fsnotify is not supported on WASM") +} diff --git a/vendor/github.com/stbenjam/no-sprintf-host-port/LICENSE b/vendor/github.com/stbenjam/no-sprintf-host-port/LICENSE new file mode 100644 index 0000000000..586dfd8cce --- /dev/null +++ b/vendor/github.com/stbenjam/no-sprintf-host-port/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2022 Stephen Benjamin + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
\ No newline at end of file diff --git a/vendor/github.com/stbenjam/no-sprintf-host-port/pkg/analyzer/analyzer.go b/vendor/github.com/stbenjam/no-sprintf-host-port/pkg/analyzer/analyzer.go new file mode 100644 index 0000000000..374bb0d242 --- /dev/null +++ b/vendor/github.com/stbenjam/no-sprintf-host-port/pkg/analyzer/analyzer.go @@ -0,0 +1,96 @@ +package analyzer + +import ( + "fmt" + "go/ast" + "go/token" + "regexp" + + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" + + "golang.org/x/tools/go/analysis" +) + +var Analyzer = &analysis.Analyzer{ + Name: "nosprintfhostport", + Doc: "Checks for misuse of Sprintf to construct a host with port in a URL.", + Run: run, + Requires: []*analysis.Analyzer{inspect.Analyzer}, +} + +func run(pass *analysis.Pass) (interface{}, error) { + inspector := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + nodeFilter := []ast.Node{ + (*ast.CallExpr)(nil), + } + + inspector.Preorder(nodeFilter, func(node ast.Node) { + callExpr := node.(*ast.CallExpr) + if p, f, ok := getCallExprFunction(callExpr); ok && p == "fmt" && f == "Sprintf" { + if err := checkForHostPortConstruction(callExpr); err != nil { + pass.Reportf(node.Pos(), err.Error()) + } + } + }) + + return nil, nil +} + +// getCallExprFunction returns the package and function name from a callExpr, if any. +func getCallExprFunction(callExpr *ast.CallExpr) (pkg string, fn string, result bool) { + selector, ok := callExpr.Fun.(*ast.SelectorExpr) + if !ok { + return "", "", false + } + gopkg, ok := selector.X.(*ast.Ident) + if !ok { + return "", "", false + } + return gopkg.Name, selector.Sel.Name, true +} + +// getStringLiteral returns the value at a position if it's a string literal. +func getStringLiteral(args []ast.Expr, pos int) (string, bool) { + if len(args) < pos + 1 { + return "", false + } + + // Let's see if our format string is a string literal. + fsRaw, ok := args[pos].(*ast.BasicLit) + if !ok { + return "", false + } + if fsRaw.Kind == token.STRING && len(fsRaw.Value) >= 2 { + return fsRaw.Value[1 : len(fsRaw.Value)-1], true + } else { + return "", false + } +} + +// checkForHostPortConstruction checks to see if a sprintf call looks like a URI with a port, +// essentially scheme://%s:, or scheme://user:pass@%s:. +// +// Matching requirements: +// - Scheme as per RFC3986 is ALPHA *( ALPHA / DIGIT / "+" / "-" / "." 
) +// - A format string substitution in the host portion, preceded by an optional username/password@ +// - A colon indicating a port will be specified +func checkForHostPortConstruction(sprintf *ast.CallExpr) error { + fs, ok := getStringLiteral(sprintf.Args, 0) + if !ok { + return nil + } + + regexes := []*regexp.Regexp{ + regexp.MustCompile(`^[a-zA-Z][a-zA-Z0-9+-.]*://%s:[^@]*$`), // URL without basic auth user + regexp.MustCompile(`^[a-zA-Z][a-zA-Z0-9+-.]*://[^/]*@%s:.*$`), // URL with basic auth + } + + for _, re := range regexes { + if re.MatchString(fs) { + return fmt.Errorf("host:port in url should be constructed with net.JoinHostPort and not directly with fmt.Sprintf") + } + } + + return nil +} \ No newline at end of file diff --git a/vendor/github.com/t-yuki/gocover-cobertura/.travis.yml b/vendor/github.com/t-yuki/gocover-cobertura/.travis.yml new file mode 100644 index 0000000000..f17bb61693 --- /dev/null +++ b/vendor/github.com/t-yuki/gocover-cobertura/.travis.yml @@ -0,0 +1,12 @@ +language: go +go: + - 1.6 + - 1.7 + - 1.8 + - tip + +sudo: false +before_install: + - go get github.com/mattn/goveralls +script: + - $GOPATH/bin/goveralls -service=travis-ci diff --git a/vendor/github.com/t-yuki/gocover-cobertura/LICENSE b/vendor/github.com/t-yuki/gocover-cobertura/LICENSE new file mode 100644 index 0000000000..7ec1b3d853 --- /dev/null +++ b/vendor/github.com/t-yuki/gocover-cobertura/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2013 Yukinari Toyota + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/t-yuki/gocover-cobertura/README.md b/vendor/github.com/t-yuki/gocover-cobertura/README.md new file mode 100644 index 0000000000..60ab1dbe72 --- /dev/null +++ b/vendor/github.com/t-yuki/gocover-cobertura/README.md @@ -0,0 +1,35 @@ +[![Build Status](https://travis-ci.org/t-yuki/gocover-cobertura.svg?branch=master)](https://travis-ci.org/t-yuki/gocover-cobertura) +[![Coverage Status](https://coveralls.io/repos/github/t-yuki/gocover-cobertura/badge.svg?branch=master)](https://coveralls.io/github/t-yuki/gocover-cobertura?branch=master) + +go tool cover XML (Cobertura) export +==================================== + +This is a simple helper tool for generating XML output in [Cobertura](http://cobertura.sourceforge.net/) format +for CIs like [Jenkins](https://wiki.jenkins-ci.org/display/JENKINS/Cobertura+Plugin) and others +from [go tool cover](https://code.google.com/p/go.tools/) output. 
+ +Installation +------------ + +Just type the following to install the program and its dependencies: + + $ go get code.google.com/p/go.tools/cmd/cover + $ go get github.com/t-yuki/gocover-cobertura + +Usage +----- + +`gocover-cobertura` reads from the standard input: + + $ go test -coverprofile=coverage.txt -covermode count github.com/gorilla/mux + $ gocover-cobertura < coverage.txt > coverage.xml + +Authors +------- + +* [Yukinari Toyota (t-yuki)](https://github.com/t-yuki) + +Thanks +------ + +This tool is originated from [gocov-xml](https://github.com/AlekSi/gocov-xml) by [Alexey Palazhchenko (AlekSi)](https://github.com/AlekSi) diff --git a/vendor/github.com/t-yuki/gocover-cobertura/cobertura.go b/vendor/github.com/t-yuki/gocover-cobertura/cobertura.go new file mode 100644 index 0000000000..8556dc5636 --- /dev/null +++ b/vendor/github.com/t-yuki/gocover-cobertura/cobertura.go @@ -0,0 +1,178 @@ +package main + +import ( + "encoding/xml" +) + +type Coverage struct { + XMLName xml.Name `xml:"coverage"` + LineRate float32 `xml:"line-rate,attr"` + BranchRate float32 `xml:"branch-rate,attr"` + Version string `xml:"version,attr"` + Timestamp int64 `xml:"timestamp,attr"` + LinesCovered int64 `xml:"lines-covered,attr"` + LinesValid int64 `xml:"lines-valid,attr"` + BranchesCovered int64 `xml:"branches-covered,attr"` + BranchesValid int64 `xml:"branches-valid,attr"` + Complexity float32 `xml:"complexity,attr"` + Sources []*Source `xml:"sources>source"` + Packages []*Package `xml:"packages>package"` +} + +type Source struct { + Path string `xml:",chardata"` +} + +type Package struct { + Name string `xml:"name,attr"` + LineRate float32 `xml:"line-rate,attr"` + BranchRate float32 `xml:"branch-rate,attr"` + Complexity float32 `xml:"complexity,attr"` + Classes []*Class `xml:"classes>class"` +} + +type Class struct { + Name string `xml:"name,attr"` + Filename string `xml:"filename,attr"` + LineRate float32 `xml:"line-rate,attr"` + BranchRate float32 `xml:"branch-rate,attr"` + Complexity float32 `xml:"complexity,attr"` + Methods []*Method `xml:"methods>method"` + Lines Lines `xml:"lines>line"` +} + +type Method struct { + Name string `xml:"name,attr"` + Signature string `xml:"signature,attr"` + LineRate float32 `xml:"line-rate,attr"` + BranchRate float32 `xml:"branch-rate,attr"` + Complexity float32 `xml:"complexity,attr"` + Lines Lines `xml:"lines>line"` +} + +type Line struct { + Number int `xml:"number,attr"` + Hits int64 `xml:"hits,attr"` +} + +// Lines is a slice of Line pointers, with some convenience methods +type Lines []*Line + +// HitRate returns a float32 from 0.0 to 1.0 representing what fraction of lines +// have hits +func (lines Lines) HitRate() (hitRate float32) { + return float32(lines.NumLinesWithHits()) / float32(len(lines)) +} + +// NumLines returns the number of lines +func (lines Lines) NumLines() int64 { + return int64(len(lines)) +} + +// NumLinesWithHits returns the number of lines with a hit count > 0 +func (lines Lines) NumLinesWithHits() (numLinesWithHits int64) { + for _, line := range lines { + if line.Hits > 0 { + numLinesWithHits++ + } + } + return numLinesWithHits +} + +// AddOrUpdateLine adds a line if it is a different line than the last line recorded. 
+// If it's the same line as the last line recorded then we update the hits down +// if the new hits is less; otherwise just leave it as-is +func (lines *Lines) AddOrUpdateLine(lineNumber int, hits int64) { + if len(*lines) > 0 { + lastLine := (*lines)[len(*lines)-1] + if lineNumber == lastLine.Number { + if hits < lastLine.Hits { + lastLine.Hits = hits + } + return + } + } + *lines = append(*lines, &Line{Number: lineNumber, Hits: hits}) +} + +// HitRate returns a float32 from 0.0 to 1.0 representing what fraction of lines +// have hits +func (method Method) HitRate() float32 { + return method.Lines.HitRate() +} + +// NumLines returns the number of lines +func (method Method) NumLines() int64 { + return method.Lines.NumLines() +} + +// NumLinesWithHits returns the number of lines with a hit count > 0 +func (method Method) NumLinesWithHits() int64 { + return method.Lines.NumLinesWithHits() +} + +// HitRate returns a float32 from 0.0 to 1.0 representing what fraction of lines +// have hits +func (class Class) HitRate() float32 { + return float32(class.NumLinesWithHits()) / float32(class.NumLines()) +} + +// NumLines returns the number of lines +func (class Class) NumLines() (numLines int64) { + for _, method := range class.Methods { + numLines += method.NumLines() + } + return numLines +} + +// NumLinesWithHits returns the number of lines with a hit count > 0 +func (class Class) NumLinesWithHits() (numLinesWithHits int64) { + for _, method := range class.Methods { + numLinesWithHits += method.NumLinesWithHits() + } + return numLinesWithHits +} + +// HitRate returns a float32 from 0.0 to 1.0 representing what fraction of lines +// have hits +func (pkg Package) HitRate() float32 { + return float32(pkg.NumLinesWithHits()) / float32(pkg.NumLines()) +} + +// NumLines returns the number of lines +func (pkg Package) NumLines() (numLines int64) { + for _, class := range pkg.Classes { + numLines += class.NumLines() + } + return numLines +} + +// NumLinesWithHits returns the number of lines with a hit count > 0 +func (pkg Package) NumLinesWithHits() (numLinesWithHits int64) { + for _, class := range pkg.Classes { + numLinesWithHits += class.NumLinesWithHits() + } + return numLinesWithHits +} + +// HitRate returns a float32 from 0.0 to 1.0 representing what fraction of lines +// have hits +func (cov Coverage) HitRate() float32 { + return float32(cov.NumLinesWithHits()) / float32(cov.NumLines()) +} + +// NumLines returns the number of lines +func (cov Coverage) NumLines() (numLines int64) { + for _, pkg := range cov.Packages { + numLines += pkg.NumLines() + } + return numLines +} + +// NumLinesWithHits returns the number of lines with a hit count > 0 +func (cov Coverage) NumLinesWithHits() (numLinesWithHits int64) { + for _, pkg := range cov.Packages { + numLinesWithHits += pkg.NumLinesWithHits() + } + return numLinesWithHits +} diff --git a/vendor/github.com/t-yuki/gocover-cobertura/gocover-cobertura.go b/vendor/github.com/t-yuki/gocover-cobertura/gocover-cobertura.go new file mode 100644 index 0000000000..e64b5de029 --- /dev/null +++ b/vendor/github.com/t-yuki/gocover-cobertura/gocover-cobertura.go @@ -0,0 +1,176 @@ +package main + +import ( + "encoding/xml" + "fmt" + "go/ast" + "go/build" + "go/parser" + "go/token" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + "time" +) + +const coberturaDTDDecl = "\n" + +func main() { + convert(os.Stdin, os.Stdout) +} + +func convert(in io.Reader, out io.Writer) { + profiles, err := ParseProfiles(in) + if err != nil { + panic("Can't parse profiles") + 
} + + srcDirs := build.Default.SrcDirs() + sources := make([]*Source, len(srcDirs)) + for i, dir := range srcDirs { + sources[i] = &Source{dir} + } + + coverage := Coverage{Sources: sources, Packages: nil, Timestamp: time.Now().UnixNano() / int64(time.Millisecond)} + coverage.parseProfiles(profiles) + + fmt.Fprintf(out, xml.Header) + fmt.Fprintf(out, coberturaDTDDecl) + + encoder := xml.NewEncoder(out) + encoder.Indent("", "\t") + err = encoder.Encode(coverage) + if err != nil { + panic(err) + } + + fmt.Fprintln(out) +} + +func (cov *Coverage) parseProfiles(profiles []*Profile) error { + cov.Packages = []*Package{} + for _, profile := range profiles { + cov.parseProfile(profile) + } + cov.LinesValid = cov.NumLines() + cov.LinesCovered = cov.NumLinesWithHits() + cov.LineRate = cov.HitRate() + return nil +} + +func (cov *Coverage) parseProfile(profile *Profile) error { + fileName := profile.FileName + absFilePath, err := findFile(fileName) + if err != nil { + return err + } + fset := token.NewFileSet() + parsed, err := parser.ParseFile(fset, absFilePath, nil, 0) + if err != nil { + return err + } + data, err := ioutil.ReadFile(absFilePath) + if err != nil { + return err + } + + pkgPath, _ := filepath.Split(fileName) + pkgPath = strings.TrimRight(pkgPath, string(os.PathSeparator)) + + var pkg *Package + for _, p := range cov.Packages { + if p.Name == pkgPath { + pkg = p + } + } + if pkg == nil { + pkg = &Package{Name: pkgPath, Classes: []*Class{}} + cov.Packages = append(cov.Packages, pkg) + } + visitor := &fileVisitor{ + fset: fset, + fileName: fileName, + fileData: data, + classes: make(map[string]*Class), + pkg: pkg, + profile: profile, + } + ast.Walk(visitor, parsed) + pkg.LineRate = pkg.HitRate() + return nil +} + +type fileVisitor struct { + fset *token.FileSet + fileName string + fileData []byte + pkg *Package + classes map[string]*Class + profile *Profile +} + +func (v *fileVisitor) Visit(node ast.Node) ast.Visitor { + switch n := node.(type) { + case *ast.FuncDecl: + class := v.class(n) + method := v.method(n) + method.LineRate = method.Lines.HitRate() + class.Methods = append(class.Methods, method) + for _, line := range method.Lines { + class.Lines = append(class.Lines, line) + } + class.LineRate = class.Lines.HitRate() + } + return v +} + +func (v *fileVisitor) method(n *ast.FuncDecl) *Method { + method := &Method{Name: n.Name.Name} + method.Lines = []*Line{} + + start := v.fset.Position(n.Pos()) + end := v.fset.Position(n.End()) + startLine := start.Line + startCol := start.Column + endLine := end.Line + endCol := end.Column + // The blocks are sorted, so we can stop counting as soon as we reach the end of the relevant block. + for _, b := range v.profile.Blocks { + if b.StartLine > endLine || (b.StartLine == endLine && b.StartCol >= endCol) { + // Past the end of the function. 
+ break + } + if b.EndLine < startLine || (b.EndLine == startLine && b.EndCol <= startCol) { + // Before the beginning of the function + continue + } + for i := b.StartLine; i <= b.EndLine; i++ { + method.Lines.AddOrUpdateLine(i, int64(b.Count)) + } + } + return method +} + +func (v *fileVisitor) class(n *ast.FuncDecl) *Class { + className := v.recvName(n) + var class *Class = v.classes[className] + if class == nil { + class = &Class{Name: className, Filename: v.fileName, Methods: []*Method{}, Lines: []*Line{}} + v.classes[className] = class + v.pkg.Classes = append(v.pkg.Classes, class) + } + return class +} + +func (v *fileVisitor) recvName(n *ast.FuncDecl) string { + if n.Recv == nil { + return "-" + } + recv := n.Recv.List[0].Type + start := v.fset.Position(recv.Pos()) + end := v.fset.Position(recv.End()) + name := string(v.fileData[start.Offset:end.Offset]) + return strings.TrimSpace(strings.TrimLeft(name, "*")) +} diff --git a/vendor/github.com/t-yuki/gocover-cobertura/profile.go b/vendor/github.com/t-yuki/gocover-cobertura/profile.go new file mode 100644 index 0000000000..99cbac234c --- /dev/null +++ b/vendor/github.com/t-yuki/gocover-cobertura/profile.go @@ -0,0 +1,202 @@ +// Imported from https://code.google.com/p/go/source/browse/cmd/cover/profile.go?repo=tools&r=c10a9dd5e0b0a859a8385b6f004584cb083a3934 + +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "bufio" + "fmt" + "go/build" + "io" + "math" + "os" + "path/filepath" + "regexp" + "sort" + "strconv" + "strings" +) + +// Profile represents the profiling data for a specific file. +type Profile struct { + FileName string + Mode string + Blocks []ProfileBlock +} + +// ProfileBlock represents a single block of profiling data. +type ProfileBlock struct { + StartLine, StartCol int + EndLine, EndCol int + NumStmt, Count int +} + +type byFileName []*Profile + +func (p byFileName) Len() int { return len(p) } +func (p byFileName) Less(i, j int) bool { return p[i].FileName < p[j].FileName } +func (p byFileName) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +// ParseProfiles parses profile data from the given Reader and returns a +// Profile for each file. +func ParseProfiles(in io.Reader) ([]*Profile, error) { + files := make(map[string]*Profile) + // First line is "mode: foo", where foo is "set", "count", or "atomic". + // Rest of file is in the format + // encoding/base64/base64.go:34.44,37.40 3 1 + // where the fields are: name.go:line.column,line.column numberOfStatements count + s := bufio.NewScanner(in) + mode := "" + for s.Scan() { + line := s.Text() + if mode == "" { + const p = "mode: " + if !strings.HasPrefix(line, p) || line == p { + return nil, fmt.Errorf("bad mode line: %v", line) + } + mode = line[len(p):] + continue + } + m := lineRe.FindStringSubmatch(line) + if m == nil { + return nil, fmt.Errorf("line %q doesn't match expected format: %v", m, lineRe) + } + fn := m[1] + p := files[fn] + if p == nil { + p = &Profile{ + FileName: fn, + Mode: mode, + } + files[fn] = p + } + p.Blocks = append(p.Blocks, ProfileBlock{ + StartLine: toInt(m[2]), + StartCol: toInt(m[3]), + EndLine: toInt(m[4]), + EndCol: toInt(m[5]), + NumStmt: toInt(m[6]), + Count: toInt(m[7]), + }) + } + if err := s.Err(); err != nil { + return nil, err + } + for _, p := range files { + sort.Sort(blocksByStart(p.Blocks)) + } + // Generate a sorted slice. 
+ profiles := make([]*Profile, 0, len(files)) + for _, profile := range files { + profiles = append(profiles, profile) + } + sort.Sort(byFileName(profiles)) + return profiles, nil +} + +type blocksByStart []ProfileBlock + +func (b blocksByStart) Len() int { return len(b) } +func (b blocksByStart) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b blocksByStart) Less(i, j int) bool { + bi, bj := b[i], b[j] + return bi.StartLine < bj.StartLine || bi.StartLine == bj.StartLine && bi.StartCol < bj.StartCol +} + +var lineRe = regexp.MustCompile(`^(.+):([0-9]+).([0-9]+),([0-9]+).([0-9]+) ([0-9]+) ([0-9]+)$`) + +func toInt(s string) int { + i, err := strconv.Atoi(s) + if err != nil { + panic(err) + } + return i +} + +// Boundary represents the position in a source file of the beginning or end of a +// block as reported by the coverage profile. In HTML mode, it will correspond to +// the opening or closing of a tag and will be used to colorize the source +type Boundary struct { + Offset int // Location as a byte offset in the source file. + Start bool // Is this the start of a block? + Count int // Event count from the cover profile. + Norm float64 // Count normalized to [0..1]. +} + +// Boundaries returns a Profile as a set of Boundary objects within the provided src. +func (p *Profile) Boundaries(src []byte) (boundaries []Boundary) { + // Find maximum count. + max := 0 + for _, b := range p.Blocks { + if b.Count > max { + max = b.Count + } + } + // Divisor for normalization. + divisor := math.Log(float64(max)) + + // boundary returns a Boundary, populating the Norm field with a normalized Count. + boundary := func(offset int, start bool, count int) Boundary { + b := Boundary{Offset: offset, Start: start, Count: count} + if !start || count == 0 { + return b + } + if max <= 1 { + b.Norm = 0.8 // Profile is in"set" mode; we want a heat map. Use cov8 in the CSS. + } else if count > 0 { + b.Norm = math.Log(float64(count)) / divisor + } + return b + } + + line, col := 1, 2 // TODO: Why is this 2? + for si, bi := 0, 0; si < len(src) && bi < len(p.Blocks); { + b := p.Blocks[bi] + if b.StartLine == line && b.StartCol == col { + boundaries = append(boundaries, boundary(si, true, b.Count)) + } + if b.EndLine == line && b.EndCol == col { + boundaries = append(boundaries, boundary(si, false, 0)) + bi++ + continue // Don't advance through src; maybe the next block starts here. + } + if src[si] == '\n' { + line++ + col = 0 + } + col++ + si++ + } + sort.Sort(boundariesByPos(boundaries)) + return +} + +type boundariesByPos []Boundary + +func (b boundariesByPos) Len() int { return len(b) } +func (b boundariesByPos) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b boundariesByPos) Less(i, j int) bool { + if b[i].Offset == b[j].Offset { + return !b[i].Start && b[j].Start + } + return b[i].Offset < b[j].Offset +} + +// findFile finds the location of the named file in GOROOT, GOPATH etc. 
+func findFile(file string) (string, error) { + if strings.HasPrefix(file, "_") { + file = file[1:] + } + if _, err := os.Stat(file); err == nil { + return file, nil + } + dir, file := filepath.Split(file) + pkg, err := build.Import(dir, ".", build.FindOnly) + if err != nil { + return "", fmt.Errorf("can't find %q: %v", file, err) + } + return filepath.Join(pkg.Dir, file), nil +} diff --git a/vendor/github.com/tetafro/godot/.golangci.yml b/vendor/github.com/tetafro/godot/.golangci.yml index 2b799b2653..ea380eb83d 100644 --- a/vendor/github.com/tetafro/godot/.golangci.yml +++ b/vendor/github.com/tetafro/godot/.golangci.yml @@ -8,60 +8,73 @@ skip-dirs: linters: disable-all: true enable: - - deadcode - - errcheck - - gosimple - - govet - - ineffassign - - staticcheck - - structcheck - - typecheck - - unused - - varcheck + - asciicheck - bodyclose - - depguard + - cyclop - dogsled - - dupl - - funlen + - durationcheck + - errcheck + - errname + - errorlint + - exhaustive + - exportloopref + - exportloopref - gochecknoinits + - gocognit - goconst - gocritic - gocyclo - godot + - goerr113 - gofmt - gofumpt - goimports - - golint - - gomnd - - gomodguard - goprintffuncname - gosec + - gosimple + - govet + - importas + - ineffassign - lll - - maligned - misspell - nakedret - nestif + - noctx + - nolintlint - prealloc + - revive - rowserrcheck - - scopelint + - sqlclosecheck + - sqlclosecheck + - staticcheck - stylecheck + - typecheck - unconvert - unparam + - unused + - wastedassign - whitespace + - wrapcheck linters-settings: godot: - check-all: true + scope: toplevel issues: exclude-use-default: false + exclude: + - "do not define dynamic errors, use wrapped static errors instead" exclude-rules: - path: _test\.go linters: - dupl - errcheck - funlen + - gocognit + - cyclop - gosec - - path: cmd/godot/main\.go + - noctx + - path: main\.go linters: + - cyclop - gomnd diff --git a/vendor/github.com/tetafro/godot/README.md b/vendor/github.com/tetafro/godot/README.md index 3f97b0e395..6b2e530b93 100644 --- a/vendor/github.com/tetafro/godot/README.md +++ b/vendor/github.com/tetafro/godot/README.md @@ -1,7 +1,7 @@ # godot [![License](http://img.shields.io/badge/license-MIT-green.svg?style=flat)](https://raw.githubusercontent.com/tetafro/godot/master/LICENSE) -[![Github CI](https://img.shields.io/github/workflow/status/tetafro/godot/Test)](https://github.com/tetafro/godot/actions?query=workflow%3ATest) +[![Github CI](https://img.shields.io/github/actions/workflow/status/tetafro/godot/push.yml)](https://github.com/tetafro/godot/actions) [![Go Report](https://goreportcard.com/badge/github.com/tetafro/godot)](https://goreportcard.com/report/github.com/tetafro/godot) [![Codecov](https://codecov.io/gh/tetafro/godot/branch/master/graph/badge.svg)](https://codecov.io/gh/tetafro/godot) @@ -21,7 +21,7 @@ end of the last sentence if needed. Build from source ```sh -go get -u github.com/tetafro/godot/cmd/godot +go install github.com/tetafro/godot/cmd/godot@latest ``` or download binary from [releases page](https://github.com/tetafro/godot/releases). diff --git a/vendor/github.com/tetafro/godot/checks.go b/vendor/github.com/tetafro/godot/checks.go index cba54f310c..0e53c220a2 100644 --- a/vendor/github.com/tetafro/godot/checks.go +++ b/vendor/github.com/tetafro/godot/checks.go @@ -21,7 +21,7 @@ var ( // Abbreviations to exclude from capital letters check. abbreviations = []string{"i.e.", "i. e.", "e.g.", "e. g.", "etc."} - // Special tags in comments like "// nolint:", or "// +k8s:". 
+ // Special tags in comments like "//nolint:", or "//+k8s:". tags = regexp.MustCompile(`^\+?[a-z0-9]+:`) // Special hashtags in comments like "// #nosec". @@ -31,18 +31,24 @@ var ( endURL = regexp.MustCompile(`[a-z]+://[^\s]+$`) ) +// position is a position inside a comment (might be multiline comment). +type position struct { + line int // starts at 1 + column int // starts at 1, byte count +} + // checkComments checks every comment accordings to the rules from // `settings` argument. func checkComments(comments []comment, settings Settings) []Issue { - var issues []Issue // nolint: prealloc + var issues []Issue for _, c := range comments { if settings.Period { - if iss := checkCommentForPeriod(c); iss != nil { + if iss := checkPeriod(c); iss != nil { issues = append(issues, *iss) } } if settings.Capital { - if iss := checkCommentForCapital(c); len(iss) > 0 { + if iss := checkCapital(c); len(iss) > 0 { issues = append(issues, iss...) } } @@ -50,14 +56,34 @@ func checkComments(comments []comment, settings Settings) []Issue { return issues } -// checkCommentForPeriod checks that the last sentense of the comment ends +// checkPeriod checks that the last sentense of the comment ends // in a period. -func checkCommentForPeriod(c comment) *Issue { - pos, ok := checkPeriod(c.text) - if ok { +func checkPeriod(c comment) *Issue { + // Check last non-empty line + var found bool + var line string + var pos position + lines := strings.Split(c.text, "\n") + for i := len(lines) - 1; i >= 0; i-- { + line = strings.TrimRightFunc(lines[i], unicode.IsSpace) + if line == "" { + continue + } + found = true + pos.line = i + 1 + break + } + // All lines are empty + if !found { + return nil + } + // Correct line + if hasSuffix(line, lastChars) { return nil } + pos.column = len(line) + 1 + // Shift position to its real value. `c.text` doesn't contain comment's // special symbols: /* or //, and line indentations inside. It also // contains */ in the end in case of block comment. @@ -94,95 +120,15 @@ func checkCommentForPeriod(c comment) *Issue { return &iss } -// checkCommentForCapital checks that each sentense of the comment starts with -// a capital letter. -// nolint: unparam -func checkCommentForCapital(c comment) []Issue { - pp := checkCapital(c.text, c.decl) - if len(pp) == 0 { - return nil - } - - issues := make([]Issue, len(pp)) - for i, pos := range pp { - // Shift position by the length of comment's special symbols: /* or // - isBlock := strings.HasPrefix(c.lines[0], "/*") - if (isBlock && pos.line == 1) || !isBlock { - pos.column += 2 - } - - iss := Issue{ - Pos: token.Position{ - Filename: c.start.Filename, - Offset: c.start.Offset, - Line: pos.line + c.start.Line - 1, - Column: pos.column + c.start.Column - 1, - }, - Message: noCapitalMessage, - } - - // Make a replacement. Use `pos.original` to get an original original from - // attached lines. Use `iss.Pos.Column` because it's a position in - // the original original. - original := c.lines[pos.line-1] - col := byteToRuneColumn(original, iss.Pos.Column) - 1 - rep := string(unicode.ToTitle([]rune(original)[col])) // capital letter - if len(original) < iss.Pos.Column-1+len(rep) { - // This should never happen. Avoid panics, skip this check. 
- continue - } - iss.Replacement = original[:iss.Pos.Column-1] + rep + - original[iss.Pos.Column-1+len(rep):] - - // Save replacement to raw lines to be able to combine it with - // further replacements - c.lines[pos.line-1] = iss.Replacement - - issues[i] = iss - } - - return issues -} - -// checkPeriod checks that the last sentense of the text ends in a period. -// NOTE: Returned position is a position inside given text, not in the -// original file. -func checkPeriod(comment string) (pos position, ok bool) { - // Check last non-empty line - var found bool - var line string - lines := strings.Split(comment, "\n") - for i := len(lines) - 1; i >= 0; i-- { - line = strings.TrimRightFunc(lines[i], unicode.IsSpace) - if line == "" { - continue - } - found = true - pos.line = i + 1 - break - } - // All lines are empty - if !found { - return position{}, true - } - // Correct line - if hasSuffix(line, lastChars) { - return position{}, true - } - - pos.column = len(line) + 1 - return pos, false -} - -// checkCapital checks that each sentense of the text starts with +// checkCapital checks that each sentense of the comment starts with // a capital letter. -// NOTE: First letter is not checked in declaration comments, because they -// can describe unexported functions, which start with small letter. -func checkCapital(comment string, skipFirst bool) (pp []position) { +// +//nolint:cyclop,funlen +func checkCapital(c comment) []Issue { // Remove common abbreviations from the comment for _, abbr := range abbreviations { repl := strings.ReplaceAll(abbr, ".", "_") - comment = strings.ReplaceAll(comment, abbr, repl) + c.text = strings.ReplaceAll(c.text, abbr, repl) } // List of states during the scan: `empty` - nothing special, @@ -190,12 +136,14 @@ func checkCapital(comment string, skipFirst bool) (pp []position) { // `endOfSentence` - found `endChar`, and then space or newline. const empty, endChar, endOfSentence = 1, 2, 3 + var pp []position pos := position{line: 1} state := endOfSentence - if skipFirst { + if c.decl { + // Skip first state = empty } - for _, r := range comment { + for _, r := range c.text { s := string(r) pos.column++ @@ -223,12 +171,54 @@ func checkCapital(comment string, skipFirst bool) (pp []position) { if state == endOfSentence && unicode.IsLower(r) { pp = append(pp, position{ line: pos.line, - column: runeToByteColumn(comment, pos.column), + column: runeToByteColumn(c.text, pos.column), }) } state = empty } - return pp + if len(pp) == 0 { + return nil + } + + issues := make([]Issue, len(pp)) + for i, pos := range pp { + // Shift position by the length of comment's special symbols: /* or // + isBlock := strings.HasPrefix(c.lines[0], "/*") + if (isBlock && pos.line == 1) || !isBlock { + pos.column += 2 + } + + iss := Issue{ + Pos: token.Position{ + Filename: c.start.Filename, + Offset: c.start.Offset, + Line: pos.line + c.start.Line - 1, + Column: pos.column + c.start.Column - 1, + }, + Message: noCapitalMessage, + } + + // Make a replacement. Use `pos.original` to get an original original from + // attached lines. Use `iss.Pos.Column` because it's a position in + // the original original. + original := c.lines[pos.line-1] + col := byteToRuneColumn(original, iss.Pos.Column) - 1 + rep := string(unicode.ToTitle([]rune(original)[col])) // capital letter + if len(original) < iss.Pos.Column-1+len(rep) { + // This should never happen. Avoid panics, skip this check. 
+ continue + } + iss.Replacement = original[:iss.Pos.Column-1] + rep + + original[iss.Pos.Column-1+len(rep):] + + // Save replacement to raw lines to be able to combine it with + // further replacements + c.lines[pos.line-1] = iss.Replacement + + issues[i] = iss + } + + return issues } // isSpecialBlock checks that given block of comment lines is special and @@ -240,10 +230,13 @@ func isSpecialBlock(comment string) bool { strings.Contains(comment, "#define")) { return true } + if strings.HasPrefix(comment, "// Output: ") { + return true + } return false } -// isSpecialBlock checks that given comment line is special and +// isSpecialLine checks that given comment line is special and // shouldn't be checked as a regular sentence. func isSpecialLine(comment string) bool { // Skip cgo export tags: https://golang.org/cmd/cgo/#hdr-C_references_to_Go diff --git a/vendor/github.com/tetafro/godot/getters.go b/vendor/github.com/tetafro/godot/getters.go index 6153772bdf..7d3d22fb13 100644 --- a/vendor/github.com/tetafro/godot/getters.go +++ b/vendor/github.com/tetafro/godot/getters.go @@ -5,7 +5,7 @@ import ( "fmt" "go/ast" "go/token" - "io/ioutil" + "os" "regexp" "strings" ) @@ -44,7 +44,7 @@ func newParsedFile(file *ast.File, fset *token.FileSet) (*parsedFile, error) { // from "go/format" won't help here if the original file is not gofmt-ed. pf.lines, err = readFile(file, fset) if err != nil { - return nil, fmt.Errorf("read file: %v", err) + return nil, fmt.Errorf("read file: %w", err) } // Dirty hack. For some cases Go generates temporary files during @@ -58,9 +58,13 @@ func newParsedFile(file *ast.File, fset *token.FileSet) (*parsedFile, error) { return nil, errUnsuitableInput } - // Check consistency to avoid checking slice indexes in each function + // Check consistency to avoid checking slice indexes in each function. + // Note that `PositionFor` is used with `adjusted=false` to skip `//line` + // directives that can set references to other files (e.g. templates) + // instead of the real ones, and break consistency here. + // Issue: https://github.com/tetafro/godot/issues/32 lastComment := pf.file.Comments[len(pf.file.Comments)-1] - if p := pf.fset.Position(lastComment.End()); len(pf.lines) < p.Line { + if p := pf.fset.PositionFor(lastComment.End(), false); len(pf.lines) < p.Line { return nil, fmt.Errorf("inconsistency between file and AST: %s", p.Filename) } @@ -82,7 +86,7 @@ func (pf *parsedFile) getComments(scope Scope, exclude []*regexp.Regexp) []comme pf.getBlockComments(exclude), pf.getTopLevelComments(exclude)..., ) - default: + case DeclScope: // Top level declaration comments and comments from the inside // of top level blocks comments = append(pf.getBlockComments(exclude), decl...) @@ -118,7 +122,7 @@ func (pf *parsedFile) getBlockComments(exclude []*regexp.Regexp) []comment { // Skip comments that are not top-level for this block // (the block itself is top level, so comments inside this block // would be on column 2) - // nolint: gomnd + //nolint:gomnd if pf.fset.Position(c.Pos()).Column != 2 { continue } @@ -136,7 +140,7 @@ func (pf *parsedFile) getBlockComments(exclude []*regexp.Regexp) []comment { // getTopLevelComments gets all top level comments. 
func (pf *parsedFile) getTopLevelComments(exclude []*regexp.Regexp) []comment { - var comments []comment // nolint: prealloc + var comments []comment //nolint:prealloc for _, c := range pf.file.Comments { if c == nil || len(c.List) == 0 { continue @@ -157,7 +161,7 @@ func (pf *parsedFile) getTopLevelComments(exclude []*regexp.Regexp) []comment { // getDeclarationComments gets top level declaration comments. func (pf *parsedFile) getDeclarationComments(exclude []*regexp.Regexp) []comment { - var comments []comment // nolint: prealloc + var comments []comment //nolint:prealloc for _, decl := range pf.file.Decls { var cg *ast.CommentGroup switch d := decl.(type) { @@ -184,7 +188,7 @@ func (pf *parsedFile) getDeclarationComments(exclude []*regexp.Regexp) []comment // getAllComments gets every single comment from the file. func (pf *parsedFile) getAllComments(exclude []*regexp.Regexp) []comment { - var comments []comment //nolint: prealloc + var comments []comment //nolint:prealloc for _, c := range pf.file.Comments { if c == nil || len(c.List) == 0 { continue @@ -200,11 +204,13 @@ func (pf *parsedFile) getAllComments(exclude []*regexp.Regexp) []comment { return comments } -// getText extracts text from comment. If comment is a special block +// getText extracts text from comment. If the comment is a special block // (e.g., CGO code), a block of empty lines is returned. If comment contains // special lines (e.g., tags or indented code examples), they are replaced -// with `specialReplacer` to skip checks for it. +// with `specialReplacer` to skip checks for them. // The result can be multiline. +// +//nolint:cyclop func getText(comment *ast.CommentGroup, exclude []*regexp.Regexp) (s string) { if len(comment.List) == 1 && strings.HasPrefix(comment.List[0].Text, "/*") && @@ -241,12 +247,12 @@ func getText(comment *ast.CommentGroup, exclude []*regexp.Regexp) (s string) { return s[:len(s)-1] // trim last "\n" } -// readFile reads file and returns it's lines as strings. +// readFile reads file and returns its lines as strings. func readFile(file *ast.File, fset *token.FileSet) ([]string, error) { fname := fset.File(file.Package) - f, err := ioutil.ReadFile(fname.Name()) + f, err := os.ReadFile(fname.Name()) if err != nil { - return nil, err + return nil, err //nolint:wrapcheck } return strings.Split(string(f), "\n"), nil } diff --git a/vendor/github.com/tetafro/godot/godot.go b/vendor/github.com/tetafro/godot/godot.go index 3a360a214b..e825e9a6de 100644 --- a/vendor/github.com/tetafro/godot/godot.go +++ b/vendor/github.com/tetafro/godot/godot.go @@ -3,10 +3,10 @@ package godot import ( + "errors" "fmt" "go/ast" "go/token" - "io/ioutil" "os" "regexp" "sort" @@ -25,12 +25,6 @@ type Issue struct { Replacement string } -// position is a position inside a comment (might be multiline comment). -type position struct { - line int // starts at 1 - column int // starts at 1, byte count -} - // comment is an internal representation of AST comment entity with additional // data attached. The latter is used for creating a full replacement for // the line with issues. @@ -44,18 +38,18 @@ type comment struct { // Run runs this linter on the provided code. 
func Run(file *ast.File, fset *token.FileSet, settings Settings) ([]Issue, error) { pf, err := newParsedFile(file, fset) - if err == errEmptyInput || err == errUnsuitableInput { + if errors.Is(err, errEmptyInput) || errors.Is(err, errUnsuitableInput) { return nil, nil } if err != nil { - return nil, fmt.Errorf("parse input file: %v", err) + return nil, fmt.Errorf("parse input file: %w", err) } exclude := make([]*regexp.Regexp, len(settings.Exclude)) for i := 0; i < len(settings.Exclude); i++ { exclude[i], err = regexp.Compile(settings.Exclude[i]) if err != nil { - return nil, fmt.Errorf("invalid regexp: %v", err) + return nil, fmt.Errorf("invalid regexp: %w", err) } } @@ -69,9 +63,9 @@ func Run(file *ast.File, fset *token.FileSet, settings Settings) ([]Issue, error // Fix fixes all issues and returns new version of file content. func Fix(path string, file *ast.File, fset *token.FileSet, settings Settings) ([]byte, error) { // Read file - content, err := ioutil.ReadFile(path) // nolint: gosec + content, err := os.ReadFile(path) //nolint:gosec if err != nil { - return nil, fmt.Errorf("read file: %v", err) + return nil, fmt.Errorf("read file: %w", err) } if len(content) == 0 { return nil, nil @@ -79,7 +73,7 @@ func Fix(path string, file *ast.File, fset *token.FileSet, settings Settings) ([ issues, err := Run(file, fset, settings) if err != nil { - return nil, fmt.Errorf("run linter: %v", err) + return nil, fmt.Errorf("run linter: %w", err) } // slice -> map @@ -102,21 +96,21 @@ func Fix(path string, file *ast.File, fset *token.FileSet, settings Settings) ([ return fixed, nil } -// Replace rewrites original file with it's fixed version. +// Replace rewrites original file with its fixed version. func Replace(path string, file *ast.File, fset *token.FileSet, settings Settings) error { info, err := os.Stat(path) if err != nil { - return fmt.Errorf("check file: %v", err) + return fmt.Errorf("check file: %w", err) } mode := info.Mode() fixed, err := Fix(path, file, fset, settings) if err != nil { - return fmt.Errorf("fix issues: %v", err) + return fmt.Errorf("fix issues: %w", err) } - if err := ioutil.WriteFile(path, fixed, mode); err != nil { - return fmt.Errorf("write file: %v", err) + if err := os.WriteFile(path, fixed, mode); err != nil { + return fmt.Errorf("write file: %w", err) } return nil } diff --git a/vendor/github.com/timakin/bodyclose/passes/bodyclose/bodyclose.go b/vendor/github.com/timakin/bodyclose/passes/bodyclose/bodyclose.go index a7ff30b499..21bb485b4e 100644 --- a/vendor/github.com/timakin/bodyclose/passes/bodyclose/bodyclose.go +++ b/vendor/github.com/timakin/bodyclose/passes/bodyclose/bodyclose.go @@ -64,6 +64,8 @@ func (r runner) run(pass *analysis.Pass) (interface{}, error) { field := resStruct.Field(i) if field.Id() == "Body" { r.bodyObj = field + + break } } if r.bodyObj == nil { @@ -75,21 +77,20 @@ func (r runner) run(pass *analysis.Pass) (interface{}, error) { bmthd := bodyItrf.Method(i) if bmthd.Id() == closeMethod { r.closeMthd = bmthd + + break } } r.skipFile = map[*ast.File]bool{} +FuncLoop: for _, f := range funcs { // skip if the function is just referenced - var isreffunc bool for i := 0; i < f.Signature.Results().Len(); i++ { if f.Signature.Results().At(i).Type().String() == r.resTyp.String() { - isreffunc = true + continue FuncLoop } } - if isreffunc { - continue - } for _, b := range f.Blocks { for i := range b.Instrs { @@ -126,7 +127,12 @@ func (r *runner) isopen(b *ssa.BasicBlock, i int) bool { resRefs := *val.Referrers() for _, resRef := range resRefs { 
switch resRef := resRef.(type) { - case *ssa.Store: // Call in Closure function + case *ssa.Store: // Call in Closure function / Response is global variable + if _, ok := resRef.Addr.(*ssa.Global); ok { + // Referrers for globals are always nil, so skip. + return false + } + if len(*resRef.Addr.Referrers()) == 0 { return true } @@ -144,11 +150,26 @@ func (r *runner) isopen(b *ssa.BasicBlock, i int) bool { } } - case *ssa.Call: // Indirect function call - if f, ok := resRef.Call.Value.(*ssa.Function); ok { + case *ssa.Call, *ssa.Defer: // Indirect function call + // Hacky way to extract CommonCall + var call ssa.CallCommon + switch rr := resRef.(type) { + case *ssa.Call: + call = rr.Call + case *ssa.Defer: + call = rr.Call + } + + if f, ok := call.Value.(*ssa.Function); ok { for _, b := range f.Blocks { - for i := range b.Instrs { - return r.isopen(b, i) + for i, bi := range b.Instrs { + if r.isCloseCall(bi) { + return false + } + + if r.isopen(b, i) { + return true + } } } } @@ -202,6 +223,10 @@ func (r *runner) getResVal(instr ssa.Instruction) (ssa.Value, bool) { if instr.Type().String() == r.resTyp.String() { return instr, true } + case *ssa.Store: + if instr.Val.Type().String() == r.resTyp.String() { + return instr.Val, true + } } return nil, false } diff --git a/vendor/github.com/timonwong/loggercheck/.codecov.yml b/vendor/github.com/timonwong/loggercheck/.codecov.yml new file mode 100644 index 0000000000..ef90457cac --- /dev/null +++ b/vendor/github.com/timonwong/loggercheck/.codecov.yml @@ -0,0 +1,16 @@ +coverage: + range: 70..90 # green if 90+, red if 70- + status: + patch: + # coverage status for pull request diff + default: + threshold: 1% # allow a little drop + project: + # coverage status for whole project + default: + target: auto # use coverage of base commit as target + threshold: 1% # allow a little drop + +ignore: + - "plugin/**" + - "cmd/**" diff --git a/vendor/github.com/timonwong/loggercheck/.gitignore b/vendor/github.com/timonwong/loggercheck/.gitignore new file mode 100644 index 0000000000..33df0df91c --- /dev/null +++ b/vendor/github.com/timonwong/loggercheck/.gitignore @@ -0,0 +1,9 @@ +.vscode/ +.idea/ + +bin/ +vendor/ + +dist/ + +cover.out diff --git a/vendor/github.com/timonwong/loggercheck/.golangci.yml b/vendor/github.com/timonwong/loggercheck/.golangci.yml new file mode 100644 index 0000000000..2873278937 --- /dev/null +++ b/vendor/github.com/timonwong/loggercheck/.golangci.yml @@ -0,0 +1,94 @@ +linters-settings: + dupl: + threshold: 100 + funlen: + lines: 100 + statements: 50 + goconst: + min-len: 2 + min-occurrences: 3 + gocritic: + enabled-tags: + - diagnostic + - experimental + - opinionated + - performance + - style + disabled-checks: + - whyNoLint + gocyclo: + min-complexity: 15 + goimports: + local-prefixes: github.com/timonwong/loggercheck + gomnd: + # don't include the "operation" and "assign" + checks: + - argument + - case + - condition + - return + ignored-numbers: + - '0' + - '1' + - '2' + - '3' + ignored-functions: + - strings.SplitN + - strconv.ParseInt + govet: + check-shadowing: true + lll: + line-length: 140 + misspell: + locale: US + nolintlint: + allow-unused: false # report any unused nolint directives + require-explanation: false # don't require an explanation for nolint directives + require-specific: false # don't require nolint directives to be specific about which linter is being skipped +linters: + disable-all: true + enable: + - bodyclose + - dogsled + - dupl + - errcheck + - exportloopref + - funlen + - gochecknoinits + - goconst + - 
gocritic + - gocyclo + - gofumpt + - goimports + - gomnd + - goprintffuncname + - gosec + - gosimple + - govet + - ineffassign + - lll + - misspell + - nakedret + - noctx + - nolintlint + - revive + - staticcheck + - stylecheck + - typecheck + - unconvert + - unparam + - unused + - whitespace + +issues: + # Excluding configuration per-path, per-linter, per-text and per-source + exclude-rules: + - path: _test\.go + linters: + - gomnd + +run: + timeout: 5m + go: '1.17' + skip-dirs: + - testdata \ No newline at end of file diff --git a/vendor/github.com/timonwong/loggercheck/.goreleaser.yml b/vendor/github.com/timonwong/loggercheck/.goreleaser.yml new file mode 100644 index 0000000000..55ffe7ea03 --- /dev/null +++ b/vendor/github.com/timonwong/loggercheck/.goreleaser.yml @@ -0,0 +1,59 @@ +--- +project_name: loggercheck + +release: + github: + owner: timonwong + name: loggercheck + +builds: + - binary: loggercheck + goos: + - darwin + - windows + - linux + goarch: + - '386' + - amd64 + - arm + - arm64 + goarm: + - '7' + env: + - CGO_ENABLED=0 + ignore: + - goos: darwin + goarch: '386' + main: ./cmd/loggercheck/ + flags: + - -trimpath + ldflags: -s -w + +archives: + - name_template: '{{ .ProjectName }}_{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}' + format_overrides: + - goos: windows + format: zip + files: + - LICENSE + - README.md + +snapshot: + name_template: '{{ incpatch .Version }}-next' + +checksum: + name_template: 'checksums.txt' + +changelog: + sort: asc + filters: + exclude: + - '(?i)^docs?:' + - '(?i)^docs\([^:]+\):' + - '(?i)^docs\[[^:]+\]:' + - '^tests?:' + - '(?i)^dev:' + - '^build\(deps\): bump .* in /docs \(#\d+\)' + - '^build\(deps\): bump .* in /\.github/peril \(#\d+\)' + - Merge pull request + - Merge branch diff --git a/vendor/github.com/timonwong/loggercheck/LICENSE b/vendor/github.com/timonwong/loggercheck/LICENSE new file mode 100644 index 0000000000..fb65310900 --- /dev/null +++ b/vendor/github.com/timonwong/loggercheck/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2022 Timon Wong + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/timonwong/loggercheck/Makefile b/vendor/github.com/timonwong/loggercheck/Makefile new file mode 100644 index 0000000000..37bf872024 --- /dev/null +++ b/vendor/github.com/timonwong/loggercheck/Makefile @@ -0,0 +1,22 @@ +.PHONY: lint +lint: + golangci-lint run ./... 
+ +.PHONY: test-deps +test-deps: + cd testdata/src/a && go mod vendor + +.PHONY: test +test: test-deps + go test -v -covermode=atomic -coverprofile=cover.out -coverpkg ./... ./... + +.PHONY: build +build: + go build -o bin/loggercheck ./cmd/loggercheck + +.PHONY: build-plugin +build-plugin: + CGO_ENABLED=1 go build -o bin/loggercheck.so -buildmode=plugin ./plugin + +.PHONY: build-all +build-all: build build-plugin diff --git a/vendor/github.com/timonwong/loggercheck/README.md b/vendor/github.com/timonwong/loggercheck/README.md new file mode 100644 index 0000000000..14aeca3717 --- /dev/null +++ b/vendor/github.com/timonwong/loggercheck/README.md @@ -0,0 +1,103 @@ +# loggercheck + +## Description + +A linter that checks for an odd number of key and value pairs passed to common logger libraries: +- [kitlog](https://github.com/go-kit/log) +- [klog](https://github.com/kubernetes/klog) +- [logr](https://github.com/go-logr/logr) +- [zap](https://github.com/uber-go/zap) + +## Badges + +![Build Status](https://github.com/timonwong/loggercheck/workflows/CI/badge.svg) +[![Coverage](https://img.shields.io/codecov/c/github/timonwong/loggercheck?token=Nutf41gwoG)](https://app.codecov.io/gh/timonwong/loggercheck) +[![License](https://img.shields.io/github/license/timonwong/loggercheck.svg)](/LICENSE) +[![Release](https://img.shields.io/github/release/timonwong/loggercheck.svg)](https://github.com/timonwong/loggercheck/releases/latest) + +## Install + +```shell +go install github.com/timonwong/loggercheck/cmd/loggercheck +``` + +## Usage + +``` +loggercheck: Checks key value pairs for common logger libraries (kitlog,logr,klog,zap). + +Usage: loggercheck [-flag] [package] + + +Flags: + -V print version and exit + -all + no effect (deprecated) + -c int + display offending line with this many lines of context (default -1) + -cpuprofile string + write CPU profile to this file + -debug string + debug flags, any subset of "fpstv" + -disable value + comma-separated list of disabled logger checker (kitlog,klog,logr,zap) (default kitlog) + -fix + apply all suggested fixes + -flags + print analyzer flags in JSON + -json + emit JSON output + -memprofile string + write memory profile to this file + -noprintflike + require printf-like format specifier not present in args + -requirestringkey + require all logging keys to be inlined constant strings + -rulefile string + path to a file contains a list of rules + -source + no effect (deprecated) + -tags string + no effect (deprecated) + -test + indicates whether test files should be analyzed, too (default true) + -trace string + write trace log to this file + -v no effect (deprecated) +``` + +## Example + +```go +package a + +import ( + "context" + "fmt" + + "github.com/go-logr/logr" +) + +func Example() { + log := logr.Discard() + log = log.WithValues("key") + log.Info("message", "key1", "value1", "key2", "value2", "key3") + log.Error(fmt.Errorf("error"), "message", "key1", "value1", "key2") + log.Error(fmt.Errorf("error"), "message", "key1", "value1", "key2", "value2") + + var log2 logr.Logger + log2 = log + log2.Info("message", "key1") + + log3 := logr.FromContextOrDiscard(context.TODO()) + log3.Error(fmt.Errorf("error"), "message", "key1") +} +``` + +``` +a.go:12:23: odd number of arguments passed as key-value pairs for logging +a.go:13:22: odd number of arguments passed as key-value pairs for logging +a.go:14:44: odd number of arguments passed as key-value pairs for logging +a.go:19:23: odd number of arguments passed as key-value pairs for logging +a.go:22:45: odd number of
arguments passed as key-value pairs for logging +``` \ No newline at end of file diff --git a/vendor/github.com/timonwong/loggercheck/internal/bytebufferpool/pool.go b/vendor/github.com/timonwong/loggercheck/internal/bytebufferpool/pool.go new file mode 100644 index 0000000000..9d88d21c49 --- /dev/null +++ b/vendor/github.com/timonwong/loggercheck/internal/bytebufferpool/pool.go @@ -0,0 +1,22 @@ +package bytebufferpool + +import ( + "bytes" + "sync" +) + +var pool = sync.Pool{ + New: func() interface{} { + return new(bytes.Buffer) + }, +} + +func Get() *bytes.Buffer { + buf := pool.Get().(*bytes.Buffer) + buf.Reset() + return buf +} + +func Put(buf *bytes.Buffer) { + pool.Put(buf) +} diff --git a/vendor/github.com/timonwong/loggercheck/internal/checkers/checker.go b/vendor/github.com/timonwong/loggercheck/internal/checkers/checker.go new file mode 100644 index 0000000000..5615636efb --- /dev/null +++ b/vendor/github.com/timonwong/loggercheck/internal/checkers/checker.go @@ -0,0 +1,59 @@ +package checkers + +import ( + "go/ast" + "go/types" + + "golang.org/x/tools/go/analysis" +) + +type Config struct { + RequireStringKey bool + NoPrintfLike bool +} + +type CallContext struct { + Expr *ast.CallExpr + Func *types.Func + Signature *types.Signature +} + +type Checker interface { + FilterKeyAndValues(pass *analysis.Pass, keyAndValues []ast.Expr) []ast.Expr + CheckLoggingKey(pass *analysis.Pass, keyAndValues []ast.Expr) + CheckPrintfLikeSpecifier(pass *analysis.Pass, args []ast.Expr) +} + +func ExecuteChecker(c Checker, pass *analysis.Pass, call CallContext, cfg Config) { + params := call.Signature.Params() + nparams := params.Len() // variadic => nonzero + startIndex := nparams - 1 + + lastArg := params.At(nparams - 1) + iface, ok := lastArg.Type().(*types.Slice).Elem().(*types.Interface) + if !ok || !iface.Empty() { + return // final (args) param is not ...interface{} + } + + keyValuesArgs := c.FilterKeyAndValues(pass, call.Expr.Args[startIndex:]) + + if len(keyValuesArgs)%2 != 0 { + firstArg := keyValuesArgs[0] + lastArg := keyValuesArgs[len(keyValuesArgs)-1] + pass.Report(analysis.Diagnostic{ + Pos: firstArg.Pos(), + End: lastArg.End(), + Category: DiagnosticCategory, + Message: "odd number of arguments passed as key-value pairs for logging", + }) + } + + if cfg.RequireStringKey { + c.CheckLoggingKey(pass, keyValuesArgs) + } + + if cfg.NoPrintfLike { + // Check all args + c.CheckPrintfLikeSpecifier(pass, call.Expr.Args) + } +} diff --git a/vendor/github.com/timonwong/loggercheck/internal/checkers/common.go b/vendor/github.com/timonwong/loggercheck/internal/checkers/common.go new file mode 100644 index 0000000000..42cbd01937 --- /dev/null +++ b/vendor/github.com/timonwong/loggercheck/internal/checkers/common.go @@ -0,0 +1,49 @@ +package checkers + +import ( + "go/ast" + "go/constant" + "go/printer" + "go/token" + "go/types" + "unicode/utf8" + + "golang.org/x/tools/go/analysis" + + "github.com/timonwong/loggercheck/internal/bytebufferpool" +) + +const ( + DiagnosticCategory = "logging" +) + +// extractValueFromStringArg returns true if the argument is a string type (literal or constant). 
+func extractValueFromStringArg(pass *analysis.Pass, arg ast.Expr) (value string, ok bool) { + if typeAndValue, ok := pass.TypesInfo.Types[arg]; ok { + if typ, ok := typeAndValue.Type.(*types.Basic); ok && typ.Kind() == types.String && typeAndValue.Value != nil { + return constant.StringVal(typeAndValue.Value), true + } + } + + return "", false +} + +func renderNodeEllipsis(fset *token.FileSet, v interface{}) string { + const maxLen = 20 + + buf := bytebufferpool.Get() + defer bytebufferpool.Put(buf) + + _ = printer.Fprint(buf, fset, v) + s := buf.String() + if utf8.RuneCountInString(s) > maxLen { + // Copied from go/constant/value.go + i := 0 + for n := 0; n < maxLen-3; n++ { + _, size := utf8.DecodeRuneInString(s[i:]) + i += size + } + s = s[:i] + "..." + } + return s +} diff --git a/vendor/github.com/timonwong/loggercheck/internal/checkers/general.go b/vendor/github.com/timonwong/loggercheck/internal/checkers/general.go new file mode 100644 index 0000000000..6512cce30d --- /dev/null +++ b/vendor/github.com/timonwong/loggercheck/internal/checkers/general.go @@ -0,0 +1,68 @@ +package checkers + +import ( + "fmt" + "go/ast" + + "golang.org/x/tools/go/analysis" + + "github.com/timonwong/loggercheck/internal/checkers/printf" + "github.com/timonwong/loggercheck/internal/stringutil" +) + +type General struct{} + +func (g General) FilterKeyAndValues(_ *analysis.Pass, keyAndValues []ast.Expr) []ast.Expr { + return keyAndValues +} + +func (g General) CheckLoggingKey(pass *analysis.Pass, keyAndValues []ast.Expr) { + for i := 0; i < len(keyAndValues); i += 2 { + arg := keyAndValues[i] + if value, ok := extractValueFromStringArg(pass, arg); ok { + if stringutil.IsASCII(value) { + continue + } + + pass.Report(analysis.Diagnostic{ + Pos: arg.Pos(), + End: arg.End(), + Category: DiagnosticCategory, + Message: fmt.Sprintf( + "logging keys are expected to be alphanumeric strings, please remove any non-latin characters from %q", + value), + }) + } else { + pass.Report(analysis.Diagnostic{ + Pos: arg.Pos(), + End: arg.End(), + Category: DiagnosticCategory, + Message: fmt.Sprintf( + "logging keys are expected to be inlined constant strings, please replace %q provided with string", + renderNodeEllipsis(pass.Fset, arg)), + }) + } + } +} + +func (g General) CheckPrintfLikeSpecifier(pass *analysis.Pass, args []ast.Expr) { + for _, arg := range args { + format, ok := extractValueFromStringArg(pass, arg) + if !ok { + continue + } + + if specifier, ok := printf.IsPrintfLike(format); ok { + pass.Report(analysis.Diagnostic{ + Pos: arg.Pos(), + End: arg.End(), + Category: DiagnosticCategory, + Message: fmt.Sprintf("logging message should not use format specifier %q", specifier), + }) + + return // One error diagnostic is enough + } + } +} + +var _ Checker = (*General)(nil) diff --git a/vendor/github.com/timonwong/loggercheck/internal/checkers/printf/printf.go b/vendor/github.com/timonwong/loggercheck/internal/checkers/printf/printf.go new file mode 100644 index 0000000000..b38f46f201 --- /dev/null +++ b/vendor/github.com/timonwong/loggercheck/internal/checkers/printf/printf.go @@ -0,0 +1,252 @@ +package printf + +import ( + "strconv" + "strings" + "unicode/utf8" +) + +// Copied from golang.org/x/tools +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +type printVerb struct { + verb rune // User may provide verb through Formatter; could be a rune. 
+ flags string // known flags are all ASCII +} + +// Common flag sets for printf verbs. +const ( + noFlag = "" + numFlag = " -+.0" + sharpNumFlag = " -+.0#" + allFlags = " -+.0#" +) + +// printVerbs identifies which flags are known to printf for each verb. +var printVerbs = []printVerb{ + // '-' is a width modifier, always valid. + // '.' is a precision for float, max width for strings. + // '+' is required sign for numbers, Go format for %v. + // '#' is alternate format for several verbs. + // ' ' is spacer for numbers + {'%', noFlag}, + {'b', sharpNumFlag}, + {'c', "-"}, + {'d', numFlag}, + {'e', sharpNumFlag}, + {'E', sharpNumFlag}, + {'f', sharpNumFlag}, + {'F', sharpNumFlag}, + {'g', sharpNumFlag}, + {'G', sharpNumFlag}, + {'o', sharpNumFlag}, + {'O', sharpNumFlag}, + {'p', "-#"}, + {'q', " -+.0#"}, + {'s', " -+.0"}, + {'t', "-"}, + {'T', "-"}, + {'U', "-#"}, + {'v', allFlags}, + {'w', allFlags}, + {'x', sharpNumFlag}, + {'X', sharpNumFlag}, +} + +// formatState holds the parsed representation of a printf directive such as "%3.*[4]d". +// It is constructed by parsePrintfVerb. +type formatState struct { + verb rune // the format verb: 'd' for "%d" + format string // the full format directive from % through verb, "%.3d". + flags []byte // the list of # + etc. + // Used only during parse. + hasIndex bool // Whether the argument is indexed. + indexPending bool // Whether we have an indexed argument that has not resolved. + nbytes int // number of bytes of the format string consumed. +} + +// parseFlags accepts any printf flags. +func (s *formatState) parseFlags() { + for s.nbytes < len(s.format) { + switch c := s.format[s.nbytes]; c { + case '#', '0', '+', '-', ' ': + s.flags = append(s.flags, c) + s.nbytes++ + default: + return + } + } +} + +// scanNum advances through a decimal number if present. +func (s *formatState) scanNum() { + for ; s.nbytes < len(s.format); s.nbytes++ { + c := s.format[s.nbytes] + if c < '0' || '9' < c { + return + } + } +} + +func stringIndexAt(s, substr string, start int) int { + idx := strings.Index(s[start:], substr) + if idx < 0 { + return idx + } + return idx + start +} + +// parseIndex scans an index expression. It returns false if there is a syntax error. +func (s *formatState) parseIndex() bool { + if s.nbytes == len(s.format) || s.format[s.nbytes] != '[' { + return true + } + // Argument index present. + s.nbytes++ // skip '[' + start := s.nbytes + s.scanNum() + ok := true + if s.nbytes == len(s.format) || s.nbytes == start || s.format[s.nbytes] != ']' { + ok = false + s.nbytes = stringIndexAt(s.format, "]", start) + if s.nbytes < 0 { + return false + } + } + arg32, err := strconv.ParseInt(s.format[start:s.nbytes], 10, 32) + if err != nil || !ok || arg32 <= 0 { + return false + } + s.nbytes++ // skip ']' + s.hasIndex = true + s.indexPending = true + return true +} + +// parseNum scans a width or precision (or *). +func (s *formatState) parseNum() { + if s.nbytes < len(s.format) && s.format[s.nbytes] == '*' { + if s.indexPending { // Absorb it. + s.indexPending = false + } + s.nbytes++ + } else { + s.scanNum() + } +} + +// parsePrecision scans for a precision. It returns false if there's a bad index expression. +func (s *formatState) parsePrecision() bool { + // If there's a period, there may be a precision. + if s.nbytes < len(s.format) && s.format[s.nbytes] == '.' { + s.flags = append(s.flags, '.') // Treat precision as a flag. 
+ s.nbytes++ + if !s.parseIndex() { + return false + } + s.parseNum() + } + return true +} + +// parsePrintfVerb looks the formatting directive that begins the format string +// and returns a formatState that encodes what the directive wants, without looking +// at the actual arguments present in the call. The result is nil if there is an error. +func parsePrintfVerb(format string) *formatState { + state := &formatState{ + format: format, + flags: make([]byte, 0, 5), //nolint:gomnd + nbytes: 1, // There's guaranteed to be a percent sign. + } + + // There may be flags. + state.parseFlags() + // There may be an index. + if !state.parseIndex() { + return nil + } + // There may be a width. + state.parseNum() + // There may be a precision. + if !state.parsePrecision() { + return nil + } + // Now a verb, possibly prefixed by an index (which we may already have). + if !state.indexPending && !state.parseIndex() { + return nil + } + if state.nbytes == len(state.format) { + // missing verb at end of string + return nil + } + verb, w := utf8.DecodeRuneInString(state.format[state.nbytes:]) + state.verb = verb + state.nbytes += w + state.format = state.format[:state.nbytes] + return state +} + +func containsAll(s string, pattern []byte) bool { + for _, c := range pattern { + if !strings.ContainsRune(s, rune(c)) { + return false + } + } + return true +} + +func isPrintfArg(state *formatState) bool { + var v printVerb + found := false + // Linear scan is fast enough for a small list. + for _, v = range printVerbs { + if v.verb == state.verb { + found = true + break + } + } + + if !found { + // unknown verb, just skip + return false + } + + if !containsAll(v.flags, state.flags) { + // unrecognized format flag, just skip + return false + } + + return true +} + +func IsPrintfLike(format string) (firstSpecifier string, ok bool) { + if !strings.Contains(format, "%") { + return "", false + } + + for i, w := 0, 0; i < len(format); i += w { + w = 1 + if format[i] != '%' { + continue + } + + state := parsePrintfVerb(format[i:]) + if state == nil { + return "", false + } + + w = len(state.format) + if !isPrintfArg(state) { + return "", false + } + + if !ok { + firstSpecifier = state.format + ok = true + } + } + + return +} diff --git a/vendor/github.com/timonwong/loggercheck/internal/checkers/zap.go b/vendor/github.com/timonwong/loggercheck/internal/checkers/zap.go new file mode 100644 index 0000000000..2356f83482 --- /dev/null +++ b/vendor/github.com/timonwong/loggercheck/internal/checkers/zap.go @@ -0,0 +1,42 @@ +package checkers + +import ( + "go/ast" + "go/types" + + "golang.org/x/tools/go/analysis" +) + +type Zap struct { + General +} + +func (z Zap) FilterKeyAndValues(pass *analysis.Pass, keyAndValues []ast.Expr) []ast.Expr { + // Check the argument count + filtered := make([]ast.Expr, 0, len(keyAndValues)) + for _, arg := range keyAndValues { + // Skip any zapcore.Field we found + switch arg := arg.(type) { + case *ast.CallExpr, *ast.Ident: + typ := pass.TypesInfo.TypeOf(arg) + switch typ := typ.(type) { + case *types.Named: + obj := typ.Obj() + // This is a strongly-typed field. Consume it and move on. 
+ // Actually it's go.uber.org/zap/zapcore.Field, however for simplicity + // we don't check the import path + if obj != nil && obj.Name() == "Field" { + continue + } + default: + // pass + } + } + + filtered = append(filtered, arg) + } + + return filtered +} + +var _ Checker = (*Zap)(nil) diff --git a/vendor/github.com/timonwong/loggercheck/internal/rules/rules.go b/vendor/github.com/timonwong/loggercheck/internal/rules/rules.go new file mode 100644 index 0000000000..27d6ebb274 --- /dev/null +++ b/vendor/github.com/timonwong/loggercheck/internal/rules/rules.go @@ -0,0 +1,201 @@ +package rules + +import ( + "bufio" + "errors" + "fmt" + "go/types" + "io" + "strings" + + "github.com/timonwong/loggercheck/internal/bytebufferpool" +) + +var ErrInvalidRule = errors.New("invalid rule format") + +const CustomRulesetName = "custom" + +type Ruleset struct { + Name string + PackageImport string + Rules []FuncRule + + ruleIndicesByFuncName map[string][]int +} + +func (rs *Ruleset) Match(fn *types.Func) bool { + // PackageImport is already checked (by indices), skip checking it here + sig := fn.Type().(*types.Signature) // it's safe since we already checked + + // Fail fast if the function name is not in the rule list. + indices, ok := rs.ruleIndicesByFuncName[fn.Name()] + if !ok { + return false + } + + for _, idx := range indices { + rule := &rs.Rules[idx] + if matchRule(rule, sig) { + return true + } + } + + return false +} + +func receiverTypeOf(recvType types.Type) string { + buf := bytebufferpool.Get() + defer bytebufferpool.Put(buf) + + var recvNamed *types.Named + switch recvType := recvType.(type) { + case *types.Pointer: + buf.WriteByte('*') + if elem, ok := recvType.Elem().(*types.Named); ok { + recvNamed = elem + } + case *types.Named: + recvNamed = recvType + } + + if recvNamed == nil { + // not supported type + return "" + } + + buf.WriteString(recvNamed.Obj().Name()) + typeParams := recvNamed.TypeParams() + if typeParamsLen := typeParams.Len(); typeParamsLen > 0 { + buf.WriteByte('[') + for i := 0; i < typeParamsLen; i++ { + if i > 0 { + // comma as separator + buf.WriteByte(',') + } + p := typeParams.At(i) + buf.WriteString(p.Obj().Name()) + } + buf.WriteByte(']') + } + + return buf.String() +} + +func matchRule(p *FuncRule, sig *types.Signature) bool { + // we do not check package import here since it's already checked in Match() + recv := sig.Recv() + isReceiver := recv != nil + if isReceiver != p.IsReceiver { + return false + } + + if isReceiver { + recvType := recv.Type() + receiverType := receiverTypeOf(recvType) + if receiverType != p.ReceiverType { + return false + } + } + + return true +} + +type FuncRule struct { // package import should be accessed from Rulset + ReceiverType string + FuncName string + IsReceiver bool +} + +func ParseFuncRule(rule string) (packageImport string, pat FuncRule, err error) { + lastDot := strings.LastIndexFunc(rule, func(r rune) bool { + return r == '.' 
|| r == '/' + }) + if lastDot == -1 || rule[lastDot] == '/' { + return "", pat, ErrInvalidRule + } + + importOrReceiver := rule[:lastDot] + pat.FuncName = rule[lastDot+1:] + + if strings.HasPrefix(rule, "(") { // package + if !strings.HasSuffix(importOrReceiver, ")") { + return "", FuncRule{}, ErrInvalidRule + } + + var isPointerReceiver bool + pat.IsReceiver = true + receiver := importOrReceiver[1 : len(importOrReceiver)-1] + if strings.HasPrefix(receiver, "*") { + isPointerReceiver = true + receiver = receiver[1:] + } + + typeDotIdx := strings.LastIndexFunc(receiver, func(r rune) bool { + return r == '.' || r == '/' + }) + if typeDotIdx == -1 || receiver[typeDotIdx] == '/' { + return "", FuncRule{}, ErrInvalidRule + } + receiverType := receiver[typeDotIdx+1:] + if isPointerReceiver { + receiverType = "*" + receiverType + } + pat.ReceiverType = receiverType + packageImport = receiver[:typeDotIdx] + } else { + packageImport = importOrReceiver + } + + return packageImport, pat, nil +} + +func ParseRules(lines []string) (result []Ruleset, err error) { + rulesByImport := make(map[string][]FuncRule) + for i, line := range lines { + if line == "" { + continue + } + + if strings.HasPrefix(line, "#") { // comments + continue + } + + packageImport, pat, err := ParseFuncRule(line) + if err != nil { + return nil, fmt.Errorf("error parse rule at line %d: %w", i+1, err) + } + rulesByImport[packageImport] = append(rulesByImport[packageImport], pat) + } + + for packageImport, rules := range rulesByImport { + ruleIndicesByFuncName := make(map[string][]int, len(rules)) + for idx, rule := range rules { + fnName := rule.FuncName + ruleIndicesByFuncName[fnName] = append(ruleIndicesByFuncName[fnName], idx) + } + + result = append(result, Ruleset{ + Name: CustomRulesetName, // NOTE(timonwong) Always "custom" for custom rule + PackageImport: packageImport, + Rules: rules, + ruleIndicesByFuncName: ruleIndicesByFuncName, + }) + } + return result, nil +} + +func ParseRuleFile(r io.Reader) (result []Ruleset, err error) { + // Rule files are relatively small, so read it into string slice first. + var lines []string + + scanner := bufio.NewScanner(r) + for scanner.Scan() { + line := strings.TrimSpace(scanner.Text()) + lines = append(lines, line) + } + if err := scanner.Err(); err != nil { + return nil, err + } + + return ParseRules(lines) +} diff --git a/vendor/github.com/timonwong/loggercheck/internal/sets/string.go b/vendor/github.com/timonwong/loggercheck/internal/sets/string.go new file mode 100644 index 0000000000..daf8d57fce --- /dev/null +++ b/vendor/github.com/timonwong/loggercheck/internal/sets/string.go @@ -0,0 +1,59 @@ +package sets + +import ( + "sort" + "strings" +) + +type Empty struct{} + +type StringSet map[string]Empty + +func NewString(items ...string) StringSet { + s := make(StringSet) + s.Insert(items...) + return s +} + +func (s StringSet) Insert(items ...string) { + for _, item := range items { + s[item] = Empty{} + } +} + +func (s StringSet) Has(item string) bool { + _, contained := s[item] + return contained +} + +func (s StringSet) List() []string { + if len(s) == 0 { + return nil + } + + res := make([]string, 0, len(s)) + for key := range s { + res = append(res, key) + } + sort.Strings(res) + return res +} + +// Set implements flag.Value interface. +func (s *StringSet) Set(v string) error { + v = strings.TrimSpace(v) + if v == "" { + *s = nil + return nil + } + + parts := strings.Split(v, ",") + set := NewString(parts...) 
+ *s = set + return nil +} + +// String implements flag.Value interface +func (s StringSet) String() string { + return strings.Join(s.List(), ",") +} diff --git a/vendor/github.com/timonwong/loggercheck/internal/stringutil/is.go b/vendor/github.com/timonwong/loggercheck/internal/stringutil/is.go new file mode 100644 index 0000000000..a36b742fc4 --- /dev/null +++ b/vendor/github.com/timonwong/loggercheck/internal/stringutil/is.go @@ -0,0 +1,15 @@ +package stringutil + +import "unicode/utf8" + +// IsASCII returns true if string are ASCII. +func IsASCII(s string) bool { + for _, r := range s { + if r >= utf8.RuneSelf { + // Not ASCII. + return false + } + } + + return true +} diff --git a/vendor/github.com/timonwong/loggercheck/loggercheck.go b/vendor/github.com/timonwong/loggercheck/loggercheck.go new file mode 100644 index 0000000000..8bd10aee80 --- /dev/null +++ b/vendor/github.com/timonwong/loggercheck/loggercheck.go @@ -0,0 +1,196 @@ +package loggercheck + +import ( + "flag" + "fmt" + "go/ast" + "go/types" + "os" + "strings" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" + "golang.org/x/tools/go/types/typeutil" + + "github.com/timonwong/loggercheck/internal/checkers" + "github.com/timonwong/loggercheck/internal/rules" + "github.com/timonwong/loggercheck/internal/sets" +) + +const Doc = `Checks key value pairs for common logger libraries (kitlog,klog,logr,zap).` + +func NewAnalyzer(opts ...Option) *analysis.Analyzer { + l := newLoggerCheck(opts...) + a := &analysis.Analyzer{ + Name: "loggercheck", + Doc: Doc, + Flags: *l.fs, + Run: l.run, + Requires: []*analysis.Analyzer{inspect.Analyzer}, + } + return a +} + +type loggercheck struct { + fs *flag.FlagSet + + disable sets.StringSet // flag -disable + ruleFile string // flag -rulefile + requireStringKey bool // flag -requirestringkey + noPrintfLike bool // flag -noprintflike + + rules []string // used for external integration, for example golangci-lint + rulesetList []rules.Ruleset // populate at runtime + rulesetIndicesByImport map[string][]int // ruleset index, populate at runtime +} + +func newLoggerCheck(opts ...Option) *loggercheck { + fs := flag.NewFlagSet("loggercheck", flag.ExitOnError) + l := &loggercheck{ + fs: fs, + disable: sets.NewString("kitlog"), + rulesetList: append([]rules.Ruleset{}, staticRuleList...), // ensure we make a clone of static rules first + } + + fs.StringVar(&l.ruleFile, "rulefile", "", "path to a file contains a list of rules") + fs.Var(&l.disable, "disable", "comma-separated list of disabled logger checker (kitlog,klog,logr,zap)") + fs.BoolVar(&l.requireStringKey, "requirestringkey", false, "require all logging keys to be inlined constant strings") + fs.BoolVar(&l.noPrintfLike, "noprintflike", false, "require printf-like format specifier not present in args") + + for _, opt := range opts { + opt(l) + } + + return l +} + +func (l *loggercheck) isCheckerDisabled(name string) bool { + return l.disable.Has(name) +} + +// vendorLessPath returns the devendorized version of the import path ipath. +// For example: "a/vendor/github.com/go-logr/logr" will become "github.com/go-logr/logr". 
+func vendorLessPath(ipath string) string { + if i := strings.LastIndex(ipath, "/vendor/"); i >= 0 { + return ipath[i+len("/vendor/"):] + } + return ipath +} + +func (l *loggercheck) getCheckerForFunc(fn *types.Func) checkers.Checker { + pkg := fn.Pkg() + if pkg == nil { + return nil + } + + pkgPath := vendorLessPath(pkg.Path()) + indices := l.rulesetIndicesByImport[pkgPath] + + for _, idx := range indices { + rs := &l.rulesetList[idx] + if l.isCheckerDisabled(rs.Name) { + // Skip ignored logger checker. + continue + } + + if !rs.Match(fn) { + continue + } + + checker := checkerByRulesetName[rs.Name] + if checker == nil { + return checkers.General{} + } + return checker + } + + return nil +} + +func (l *loggercheck) checkLoggerArguments(pass *analysis.Pass, call *ast.CallExpr) { + fn, _ := typeutil.Callee(pass.TypesInfo, call).(*types.Func) + if fn == nil { + return // function pointer is not supported + } + + sig, ok := fn.Type().(*types.Signature) + if !ok || !sig.Variadic() { + return // not variadic + } + + // ellipsis args is hard, just skip + if call.Ellipsis.IsValid() { + return + } + + checker := l.getCheckerForFunc(fn) + if checker == nil { + return + } + + checkers.ExecuteChecker(checker, pass, checkers.CallContext{ + Expr: call, + Func: fn, + Signature: sig, + }, checkers.Config{ + RequireStringKey: l.requireStringKey, + NoPrintfLike: l.noPrintfLike, + }) +} + +func (l *loggercheck) processConfig() error { + if l.ruleFile != "" { // flags takes precedence over configs + f, err := os.Open(l.ruleFile) + if err != nil { + return fmt.Errorf("failed to open rule file: %w", err) + } + defer f.Close() + + custom, err := rules.ParseRuleFile(f) + if err != nil { + return fmt.Errorf("failed to parse rule file: %w", err) + } + l.rulesetList = append(l.rulesetList, custom...) + } else if len(l.rules) > 0 { + custom, err := rules.ParseRules(l.rules) + if err != nil { + return fmt.Errorf("failed to parse rules: %w", err) + } + l.rulesetList = append(l.rulesetList, custom...) + } + + // Build index + indices := make(map[string][]int) + for i, rs := range l.rulesetList { + indices[rs.PackageImport] = append(indices[rs.PackageImport], i) + } + l.rulesetIndicesByImport = indices + + return nil +} + +func (l *loggercheck) run(pass *analysis.Pass) (interface{}, error) { + err := l.processConfig() + if err != nil { + return nil, err + } + + insp := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + nodeFilter := []ast.Node{ + (*ast.CallExpr)(nil), + } + insp.Preorder(nodeFilter, func(node ast.Node) { + call := node.(*ast.CallExpr) + + typ := pass.TypesInfo.Types[call.Fun].Type + if typ == nil { + // Skip checking functions with unknown type. + return + } + + l.checkLoggerArguments(pass, call) + }) + + return nil, nil +} diff --git a/vendor/github.com/timonwong/loggercheck/options.go b/vendor/github.com/timonwong/loggercheck/options.go new file mode 100644 index 0000000000..6b5f00af1e --- /dev/null +++ b/vendor/github.com/timonwong/loggercheck/options.go @@ -0,0 +1,31 @@ +package loggercheck + +import ( + "github.com/timonwong/loggercheck/internal/sets" +) + +type Option func(*loggercheck) + +func WithDisable(disable []string) Option { + return func(l *loggercheck) { + l.disable = sets.NewString(disable...) 
+ } +} + +func WithRules(customRules []string) Option { + return func(l *loggercheck) { + l.rules = customRules + } +} + +func WithRequireStringKey(requireStringKey bool) Option { + return func(l *loggercheck) { + l.requireStringKey = requireStringKey + } +} + +func WithNoPrintfLike(noPrintfLike bool) Option { + return func(l *loggercheck) { + l.noPrintfLike = noPrintfLike + } +} diff --git a/vendor/github.com/timonwong/loggercheck/staticrules.go b/vendor/github.com/timonwong/loggercheck/staticrules.go new file mode 100644 index 0000000000..f955b34341 --- /dev/null +++ b/vendor/github.com/timonwong/loggercheck/staticrules.go @@ -0,0 +1,68 @@ +package loggercheck + +import ( + "errors" + "fmt" + + "github.com/timonwong/loggercheck/internal/checkers" + "github.com/timonwong/loggercheck/internal/rules" +) + +var ( + staticRuleList = []rules.Ruleset{ + mustNewStaticRuleSet("logr", []string{ + "(github.com/go-logr/logr.Logger).Error", + "(github.com/go-logr/logr.Logger).Info", + "(github.com/go-logr/logr.Logger).WithValues", + }), + mustNewStaticRuleSet("klog", []string{ + "k8s.io/klog/v2.InfoS", + "k8s.io/klog/v2.InfoSDepth", + "k8s.io/klog/v2.ErrorS", + "(k8s.io/klog/v2.Verbose).InfoS", + "(k8s.io/klog/v2.Verbose).InfoSDepth", + "(k8s.io/klog/v2.Verbose).ErrorS", + }), + mustNewStaticRuleSet("zap", []string{ + "(*go.uber.org/zap.SugaredLogger).With", + "(*go.uber.org/zap.SugaredLogger).Debugw", + "(*go.uber.org/zap.SugaredLogger).Infow", + "(*go.uber.org/zap.SugaredLogger).Warnw", + "(*go.uber.org/zap.SugaredLogger).Errorw", + "(*go.uber.org/zap.SugaredLogger).DPanicw", + "(*go.uber.org/zap.SugaredLogger).Panicw", + "(*go.uber.org/zap.SugaredLogger).Fatalw", + }), + mustNewStaticRuleSet("kitlog", []string{ + "github.com/go-kit/log.With", + "github.com/go-kit/log.WithPrefix", + "github.com/go-kit/log.WithSuffix", + "(github.com/go-kit/log.Logger).Log", + }), + } + checkerByRulesetName = map[string]checkers.Checker{ + // by default, checkers.General will be used. + "zap": checkers.Zap{}, + } +) + +// mustNewStaticRuleSet only called at init, catch errors during development. +// In production it will not panic. +func mustNewStaticRuleSet(name string, lines []string) rules.Ruleset { + if len(lines) == 0 { + panic(errors.New("no rules provided")) + } + + rulesetList, err := rules.ParseRules(lines) + if err != nil { + panic(err) + } + + if len(rulesetList) != 1 { + panic(fmt.Errorf("expected 1 ruleset, got %d", len(rulesetList))) + } + + ruleset := rulesetList[0] + ruleset.Name = name + return ruleset +} diff --git a/vendor/github.com/tomarrell/wrapcheck/v2/wrapcheck/wrapcheck.go b/vendor/github.com/tomarrell/wrapcheck/v2/wrapcheck/wrapcheck.go index 3b445e2950..6da17bd867 100644 --- a/vendor/github.com/tomarrell/wrapcheck/v2/wrapcheck/wrapcheck.go +++ b/vendor/github.com/tomarrell/wrapcheck/v2/wrapcheck/wrapcheck.go @@ -1,29 +1,28 @@ package wrapcheck import ( + "fmt" "go/ast" "go/token" "go/types" - "log" - "os" + "regexp" "strings" "github.com/gobwas/glob" "golang.org/x/tools/go/analysis" ) -var ( - DefaultIgnoreSigs = []string{ - ".Errorf(", - "errors.New(", - "errors.Unwrap(", - ".Wrap(", - ".Wrapf(", - ".WithMessage(", - ".WithMessagef(", - ".WithStack(", - } -) +var DefaultIgnoreSigs = []string{ + ".Errorf(", + "errors.New(", + "errors.Unwrap(", + "errors.Join(", + ".Wrap(", + ".Wrapf(", + ".WithMessage(", + ".WithMessagef(", + ".WithStack(", +} // WrapcheckConfig is the set of configuration values which configure the // behaviour of the linter. 
@@ -33,7 +32,7 @@ type WrapcheckConfig struct { // allows you to specify functions that wrapcheck will not report as // unwrapped. // - // For example, an ingoredSig of `[]string{"errors.New("}` will ignore errors + // For example, an ignoreSig of `[]string{"errors.New("}` will ignore errors // returned from the stdlib package error's function: // // `func errors.New(message string) error` @@ -45,6 +44,20 @@ type WrapcheckConfig struct { // list to your config. IgnoreSigs []string `mapstructure:"ignoreSigs" yaml:"ignoreSigs"` + // IgnoreSigRegexps defines a list of regular expressions which if matched + // to the signature of the function call returning the error, will be ignored. This + // allows you to specify functions that wrapcheck will not report as + // unwrapped. + // + // For example, an ignoreSigRegexp of `[]string{"\.New.*Err\("}`` will ignore errors + // returned from any signature whose method name starts with "New" and ends with "Err" + // due to the signature matching the regular expression `\.New.*Err\(`. + // + // Note that this is similar to the ignoreSigs configuration, but provides + // slightly more flexibility in defining rules by which signatures will be + // ignored. + IgnoreSigRegexps []string `mapstructure:"ignoreSigRegexps" yaml:"ignoreSigRegexps"` + // IgnorePackageGlobs defines a list of globs which, if matching the package // of the function returning the error, will ignore the error when doing // wrapcheck analysis. @@ -57,12 +70,23 @@ type WrapcheckConfig struct { // ignorePackageGlobs: // - encoding/* IgnorePackageGlobs []string `mapstructure:"ignorePackageGlobs" yaml:"ignorePackageGlobs"` + + // IgnoreInterfaceRegexps defines a list of regular expressions which, if matched + // to a underlying interface name, will ignore unwrapped errors returned from a + // function whose call is defined on the given interface. + // + // For example, an ignoreInterfaceRegexps of `[]string{"Transac(tor|tion)"}` will ignore errors + // returned from any function whose call is defined on a interface named 'Transactor' + // or 'Transaction' due to the name matching the regular expression `Transac(tor|tion)`. 
+ IgnoreInterfaceRegexps []string `mapstructure:"ignoreInterfaceRegexps" yaml:"ignoreInterfaceRegexps"` } func NewDefaultConfig() WrapcheckConfig { return WrapcheckConfig{ - IgnoreSigs: DefaultIgnoreSigs, - IgnorePackageGlobs: []string{}, + IgnoreSigs: DefaultIgnoreSigs, + IgnoreSigRegexps: []string{}, + IgnorePackageGlobs: []string{}, + IgnoreInterfaceRegexps: []string{}, } } @@ -75,13 +99,29 @@ func NewAnalyzer(cfg WrapcheckConfig) *analysis.Analyzer { } func run(cfg WrapcheckConfig) func(*analysis.Pass) (interface{}, error) { + // Precompile the regexps, report the error + var ( + ignoreSigRegexp []*regexp.Regexp + ignoreInterfaceRegexps []*regexp.Regexp + ignorePackageGlobs []glob.Glob + err error + ) + + ignoreSigRegexp, err = compileRegexps(cfg.IgnoreSigRegexps) + if err == nil { + ignoreInterfaceRegexps, err = compileRegexps(cfg.IgnoreInterfaceRegexps) + } + if err == nil { + ignorePackageGlobs, err = compileGlobs(cfg.IgnorePackageGlobs) + } + return func(pass *analysis.Pass) (interface{}, error) { + if err != nil { + return nil, err + } + for _, file := range pass.Files { ast.Inspect(file, func(n ast.Node) bool { - if _, ok := n.(*ast.AssignStmt); ok { - return true - } - ret, ok := n.(*ast.ReturnStmt) if !ok { return true @@ -100,8 +140,9 @@ func run(cfg WrapcheckConfig) func(*analysis.Pass) (interface{}, error) { // If the return type of the function is a single error. This will not // match an error within multiple return values, for that, the below // tuple check is required. + if isError(pass.TypesInfo.TypeOf(expr)) { - reportUnwrapped(pass, retFn, retFn.Pos(), cfg) + reportUnwrapped(pass, retFn, retFn.Pos(), cfg, ignoreSigRegexp, ignoreInterfaceRegexps, ignorePackageGlobs) return true } @@ -119,7 +160,7 @@ func run(cfg WrapcheckConfig) func(*analysis.Pass) (interface{}, error) { return true } if isError(v.Type()) { - reportUnwrapped(pass, retFn, expr.Pos(), cfg) + reportUnwrapped(pass, retFn, expr.Pos(), cfg, ignoreSigRegexp, ignoreInterfaceRegexps, ignorePackageGlobs) return true } } @@ -134,9 +175,7 @@ func run(cfg WrapcheckConfig) func(*analysis.Pass) (interface{}, error) { return true } - var ( - call *ast.CallExpr - ) + var call *ast.CallExpr // Attempt to find the most recent short assign if shortAss := prevErrAssign(pass, file, ident); shortAss != nil { @@ -183,7 +222,7 @@ func run(cfg WrapcheckConfig) func(*analysis.Pass) (interface{}, error) { return true } - reportUnwrapped(pass, call, ident.NamePos, cfg) + reportUnwrapped(pass, call, ident.NamePos, cfg, ignoreSigRegexp, ignoreInterfaceRegexps, ignorePackageGlobs) } return true @@ -196,7 +235,16 @@ func run(cfg WrapcheckConfig) func(*analysis.Pass) (interface{}, error) { // Report unwrapped takes a call expression and an identifier and reports // if the call is unwrapped. -func reportUnwrapped(pass *analysis.Pass, call *ast.CallExpr, tokenPos token.Pos, cfg WrapcheckConfig) { +func reportUnwrapped( + pass *analysis.Pass, + call *ast.CallExpr, + tokenPos token.Pos, + cfg WrapcheckConfig, + regexpsSig []*regexp.Regexp, + regexpsInter []*regexp.Regexp, + pkgGlobs []glob.Glob, +) { + sel, ok := call.Fun.(*ast.SelectorExpr) if !ok { return @@ -206,19 +254,26 @@ func reportUnwrapped(pass *analysis.Pass, call *ast.CallExpr, tokenPos token.Pos fnSig := pass.TypesInfo.ObjectOf(sel.Sel).String() if contains(cfg.IgnoreSigs, fnSig) { return + } else if containsMatch(regexpsSig, fnSig) { + return } // Check if the underlying type of the "x" in x.y.z is an interface, as - // errors returned from interface types should be wrapped. 
+ // errors returned from interface types should be wrapped, unless ignored + // as per `ignoreInterfaceRegexps` if isInterface(pass, sel) { - pass.Reportf(tokenPos, "error returned from interface method should be wrapped: sig: %s", fnSig) - return + pkgPath := pass.TypesInfo.ObjectOf(sel.Sel).Pkg().Path() + name := types.TypeString(pass.TypesInfo.TypeOf(sel.X), func(p *types.Package) string { return p.Name() }) + if !containsMatch(regexpsInter, name) && !containsMatchGlob(pkgGlobs, pkgPath) { + pass.Reportf(tokenPos, "error returned from interface method should be wrapped: sig: %s", fnSig) + return + } } // Check whether the function being called comes from another package, // as functions called across package boundaries which returns errors // should be wrapped - if isFromOtherPkg(pass, sel, cfg) { + if isFromOtherPkg(pass, sel, pkgGlobs) { pass.Reportf(tokenPos, "error returned from external package is unwrapped: sig: %s", fnSig) return } @@ -231,20 +286,14 @@ func isInterface(pass *analysis.Pass, sel *ast.SelectorExpr) bool { return ok } -func isFromOtherPkg(pass *analysis.Pass, sel *ast.SelectorExpr, config WrapcheckConfig) bool { +// isFromotherPkg returns whether the function is defined in the package +// currently under analysis or is considered external. It will ignore packages +// defined in config.IgnorePackageGlobs. +func isFromOtherPkg(pass *analysis.Pass, sel *ast.SelectorExpr, pkgGlobs []glob.Glob) bool { // The package of the function that we are calling which returns the error fn := pass.TypesInfo.ObjectOf(sel.Sel) - - for _, globString := range config.IgnorePackageGlobs { - g, err := glob.Compile(globString) - if err != nil { - log.Printf("unable to parse glob: %s\n", globString) - os.Exit(1) - } - - if g.Match(fn.Pkg().Path()) { - return false - } + if containsMatchGlob(pkgGlobs, fn.Pkg().Path()) { + return false } // If it's not a package name, then we should check the selector to make sure @@ -264,7 +313,7 @@ func isFromOtherPkg(pass *analysis.Pass, sel *ast.SelectorExpr, config Wrapcheck // `=`. This does not include `var` statements. This function will return nil if // the only declaration is a `var` (aka ValueSpec) declaration. func prevErrAssign(pass *analysis.Pass, file *ast.File, returnIdent *ast.Ident) *ast.AssignStmt { - // A slice containing all the assignments which contain an identifer + // A slice containing all the assignments which contain an identifier // referring to the source declaration of the error. This is to catch // cases where err is defined once, and then reassigned multiple times // within the same block. 
In these cases, we should check the method of @@ -278,6 +327,7 @@ func prevErrAssign(pass *analysis.Pass, file *ast.File, returnIdent *ast.Ident) if !isError(pass.TypesInfo.TypeOf(expr)) { continue } + if assIdent, ok := expr.(*ast.Ident); ok { if assIdent.Obj == nil || returnIdent.Obj == nil { // If we can't find the Obj for one of the identifiers, just skip @@ -301,6 +351,7 @@ func prevErrAssign(pass *analysis.Pass, file *ast.File, returnIdent *ast.Ident) if ass.Pos() > returnIdent.Pos() { break } + mostRecentAssign = ass } @@ -317,6 +368,26 @@ func contains(slice []string, el string) bool { return false } +func containsMatch(regexps []*regexp.Regexp, el string) bool { + for _, re := range regexps { + if re.MatchString(el) { + return true + } + } + + return false +} + +func containsMatchGlob(globs []glob.Glob, el string) bool { + for _, g := range globs { + if g.Match(el) { + return true + } + } + + return false +} + // isError returns whether or not the provided type interface is an error func isError(typ types.Type) bool { if typ == nil { @@ -335,3 +406,34 @@ func isUnresolved(file *ast.File, ident *ast.Ident) bool { return false } + +// compileRegexps compiles a set of regular expressions returning them for use, +// or the first encountered error due to an invalid expression. +func compileRegexps(regexps []string) ([]*regexp.Regexp, error) { + compiledRegexps := make([]*regexp.Regexp, len(regexps)) + for idx, reg := range regexps { + re, err := regexp.Compile(reg) + if err != nil { + return nil, fmt.Errorf("unable to compile regexp %s: %v\n", reg, err) + } + + compiledRegexps[idx] = re + } + + return compiledRegexps, nil +} + +// compileGlobs compiles a set of globs, returning them for use, +// or the first encountered error due to an invalid expression. +func compileGlobs(globs []string) ([]glob.Glob, error) { + compiledGlobs := make([]glob.Glob, len(globs)) + for idx, globString := range globs { + glob, err := glob.Compile(globString) + if err != nil { + return nil, fmt.Errorf("unable to compile globs %s: %v\n", glob, err) + } + + compiledGlobs[idx] = glob + } + return compiledGlobs, nil +} diff --git a/vendor/github.com/ultraware/funlen/README.md b/vendor/github.com/ultraware/funlen/README.md index aaf348521c..af2187694e 100644 --- a/vendor/github.com/ultraware/funlen/README.md +++ b/vendor/github.com/ultraware/funlen/README.md @@ -1,9 +1,47 @@ # Funlen linter -Funlen is a linter that checks for long functions. It can checks both on the number of lines and the number of statements. +Funlen is a linter that checks for long functions. It can check both on the number of lines and the number of statements. The default limits are 60 lines and 40 statements. You can configure these. -## Installation guide +## Description + +The intent for the funlen linter is to fit a function within one screen. If you need to scroll through a long function, tracing variables back to their definition or even just finding matching brackets can become difficult. + +Besides checking lines there's also a separate check for the number of statements, which gives a clearer idea of how much is actually being done in a function. + +The default values are used internally, but might need to be adjusted for your specific environment. + +## Installation Funlen is included in [golangci-lint](https://github.com/golangci/golangci-lint/). Install it and enable funlen. + +# Exclude for tests + +golangci-lint offers a way to exclude linters in certain cases.
More info can be found here: https://golangci-lint.run/usage/configuration/#issues-configuration. + +## Disable funlen for \_test.go files + +You can utilize the issues configuration in `.golangci.yml` to exclude the funlen linter for all test files: + +```yaml +issues: + exclude-rules: + # disable funlen for all _test.go files + - path: _test.go + linters: + - funlen +``` + +## Disable funlen only for Test funcs + +If you want to keep funlen enabled for example in helper functions in test files but disable it specifically for Test funcs, you can use the following configuration: + +```yaml +issues: + exclude-rules: + # disable funlen for test funcs + - source: "^func Test" + linters: + - funlen +``` diff --git a/vendor/github.com/ultraware/funlen/main.go b/vendor/github.com/ultraware/funlen/main.go index 2ba3530027..b68ddb926f 100644 --- a/vendor/github.com/ultraware/funlen/main.go +++ b/vendor/github.com/ultraware/funlen/main.go @@ -13,7 +13,7 @@ const ( ) // Run runs this linter on the provided code -func Run(file *ast.File, fset *token.FileSet, lineLimit, stmtLimit int) []Message { +func Run(file *ast.File, fset *token.FileSet, lineLimit int, stmtLimit int, ignoreComments bool) []Message { if lineLimit == 0 { lineLimit = defaultLineLimit } @@ -21,6 +21,8 @@ func Run(file *ast.File, fset *token.FileSet, lineLimit, stmtLimit int) []Messag stmtLimit = defaultStmtLimit } + cmap := ast.NewCommentMap(fset, file, file.Comments) + var msgs []Message for _, f := range file.Decls { decl, ok := f.(*ast.FuncDecl) @@ -36,7 +38,7 @@ func Run(file *ast.File, fset *token.FileSet, lineLimit, stmtLimit int) []Messag } if lineLimit > 0 { - if lines := getLines(fset, decl); lines > lineLimit { + if lines := getLines(fset, decl, cmap.Filter(decl), ignoreComments); lines > lineLimit { msgs = append(msgs, makeLineMessage(fset, decl.Name, lines, lineLimit)) } } @@ -65,8 +67,26 @@ func makeStmtMessage(fset *token.FileSet, funcInfo *ast.Ident, stmts, stmtLimit } } -func getLines(fset *token.FileSet, f *ast.FuncDecl) int { // nolint: interfacer - return fset.Position(f.End()).Line - fset.Position(f.Pos()).Line - 1 +func getLines(fset *token.FileSet, f *ast.FuncDecl, cmap ast.CommentMap, ignoreComments bool) int { // nolint: interfacer + var lineCount int + var commentCount int + + lineCount = fset.Position(f.End()).Line - fset.Position(f.Pos()).Line - 1 + + if !ignoreComments { + return lineCount + } + + for _, c := range cmap.Comments() { + // If the CommenGroup's lines are inside the function + // count how many comments are in the CommentGroup + if (fset.Position(c.Pos()).Line > fset.Position(f.Pos()).Line) && + (fset.Position(c.End()).Line < fset.Position(f.End()).Line) { + commentCount += len(c.List) + } + } + + return lineCount - commentCount } func parseStmts(s []ast.Stmt) (total int) { diff --git a/vendor/github.com/ultraware/whitespace/README.md b/vendor/github.com/ultraware/whitespace/README.md index 2a88f13388..f99ecce36c 100644 --- a/vendor/github.com/ultraware/whitespace/README.md +++ b/vendor/github.com/ultraware/whitespace/README.md @@ -4,4 +4,6 @@ Whitespace is a linter that checks for unnecessary newlines at the start and end ## Installation guide -Whitespace is included in [golangci-lint](https://github.com/golangci/golangci-lint/). Install it and enable whitespace. +To install as a standalone linter, run `go install github.com/ultraware/whitespace`. + +Whitespace is also included in [golangci-lint](https://github.com/golangci/golangci-lint/). Install it and enable whitespace. 
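For example, enabling the linter and its two opt-in checks from golangci-lint might look like the `.golangci.yml` sketch below. The `multi-if` and `multi-func` keys mirror the `-multi-if`/`-multi-func` flags registered in `whitespace.go` further down; whether golangci-lint exposes them under exactly these setting names is an assumption.

```yaml
linters:
  enable:
    - whitespace

linters-settings:
  whitespace:
    # check that multi-line if conditions are followed by a newline
    multi-if: true
    # check that multi-line function signatures are followed by a newline
    multi-func: true
```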
diff --git a/vendor/github.com/ultraware/whitespace/main.go b/vendor/github.com/ultraware/whitespace/main.go deleted file mode 100644 index d178ea2939..0000000000 --- a/vendor/github.com/ultraware/whitespace/main.go +++ /dev/null @@ -1,162 +0,0 @@ -package whitespace - -import ( - "go/ast" - "go/token" -) - -// Message contains a message -type Message struct { - Pos token.Position - Type MessageType - Message string -} - -// MessageType describes what should happen to fix the warning -type MessageType uint8 - -// List of MessageTypes -const ( - MessageTypeLeading MessageType = iota + 1 - MessageTypeTrailing - MessageTypeAddAfter -) - -// Settings contains settings for edge-cases -type Settings struct { - MultiIf bool - MultiFunc bool -} - -// Run runs this linter on the provided code -func Run(file *ast.File, fset *token.FileSet, settings Settings) []Message { - var messages []Message - - for _, f := range file.Decls { - decl, ok := f.(*ast.FuncDecl) - if !ok || decl.Body == nil { // decl.Body can be nil for e.g. cgo - continue - } - - vis := visitor{file.Comments, fset, nil, make(map[*ast.BlockStmt]bool), settings} - ast.Walk(&vis, decl) - - messages = append(messages, vis.messages...) - } - - return messages -} - -type visitor struct { - comments []*ast.CommentGroup - fset *token.FileSet - messages []Message - wantNewline map[*ast.BlockStmt]bool - settings Settings -} - -func (v *visitor) Visit(node ast.Node) ast.Visitor { - if node == nil { - return v - } - - if stmt, ok := node.(*ast.IfStmt); ok && v.settings.MultiIf { - checkMultiLine(v, stmt.Body, stmt.Cond) - } - - if stmt, ok := node.(*ast.FuncLit); ok && v.settings.MultiFunc { - checkMultiLine(v, stmt.Body, stmt.Type) - } - - if stmt, ok := node.(*ast.FuncDecl); ok && v.settings.MultiFunc { - checkMultiLine(v, stmt.Body, stmt.Type) - } - - if stmt, ok := node.(*ast.BlockStmt); ok { - wantNewline := v.wantNewline[stmt] - - comments := v.comments - if wantNewline { - comments = nil // Comments also count as a newline if we want a newline - } - first, last := firstAndLast(comments, v.fset, stmt.Pos(), stmt.End(), stmt.List) - - startMsg := checkStart(v.fset, stmt.Lbrace, first) - - if wantNewline && startMsg == nil { - v.messages = append(v.messages, Message{v.fset.Position(stmt.Pos()), MessageTypeAddAfter, `multi-line statement should be followed by a newline`}) - } else if !wantNewline && startMsg != nil { - v.messages = append(v.messages, *startMsg) - } - - if msg := checkEnd(v.fset, stmt.Rbrace, last); msg != nil { - v.messages = append(v.messages, *msg) - } - } - - return v -} - -func checkMultiLine(v *visitor, body *ast.BlockStmt, stmtStart ast.Node) { - start, end := posLine(v.fset, stmtStart.Pos()), posLine(v.fset, stmtStart.End()) - - if end > start { // Check only multi line conditions - v.wantNewline[body] = true - } -} - -func posLine(fset *token.FileSet, pos token.Pos) int { - return fset.Position(pos).Line -} - -func firstAndLast(comments []*ast.CommentGroup, fset *token.FileSet, start, end token.Pos, stmts []ast.Stmt) (ast.Node, ast.Node) { - if len(stmts) == 0 { - return nil, nil - } - - first, last := ast.Node(stmts[0]), ast.Node(stmts[len(stmts)-1]) - - for _, c := range comments { - if posLine(fset, c.Pos()) == posLine(fset, start) || posLine(fset, c.End()) == posLine(fset, end) { - continue - } - - if c.Pos() < start || c.End() > end { - continue - } - if c.Pos() < first.Pos() { - first = c - } - if c.End() > last.End() { - last = c - } - } - - return first, last -} - -func checkStart(fset *token.FileSet, start 
token.Pos, first ast.Node) *Message { - if first == nil { - return nil - } - - if posLine(fset, start)+1 < posLine(fset, first.Pos()) { - pos := fset.Position(start) - return &Message{pos, MessageTypeLeading, `unnecessary leading newline`} - } - - return nil -} - -func checkEnd(fset *token.FileSet, end token.Pos, last ast.Node) *Message { - if last == nil { - return nil - } - - if posLine(fset, end)-1 > posLine(fset, last.End()) { - pos := fset.Position(end) - return &Message{pos, MessageTypeTrailing, `unnecessary trailing newline`} - } - - return nil -} diff --git a/vendor/github.com/ultraware/whitespace/whitespace.go b/vendor/github.com/ultraware/whitespace/whitespace.go new file mode 100644 index 0000000000..350e9b7e4e --- /dev/null +++ b/vendor/github.com/ultraware/whitespace/whitespace.go @@ -0,0 +1,307 @@ +package whitespace + +import ( + "flag" + "go/ast" + "go/token" + "strings" + + "golang.org/x/tools/go/analysis" +) + +// MessageType describes what should happen to fix the warning. +type MessageType uint8 + +// List of MessageTypes. +const ( + MessageTypeRemove MessageType = iota + 1 + MessageTypeAdd +) + +// RunningMode describes the mode the linter is run in. This can be either +// native or golangci-lint. +type RunningMode uint8 + +const ( + RunningModeNative RunningMode = iota + RunningModeGolangCI +) + +// Message contains a message and diagnostic information. +type Message struct { + // Diagnostic is what position the diagnostic should be put at. This isn't + // always the same as the fix start, f.ex. when we fix trailing newlines we + // put the diagnostic at the right bracket but we fix between the end of the + // last statement and the bracket. + Diagnostic token.Pos + + // FixStart is the span start of the fix. + FixStart token.Pos + + // FixEnd is the span end of the fix. + FixEnd token.Pos + + // LineNumbers represent the actual line numbers in the file. This is set + // when finding the diagnostic to make it easier to suggest fixes in + // golangci-lint. + LineNumbers []int + + // MessageType represents the type of message it is. + MessageType MessageType + + // Message is the diagnostic to show. + Message string +} + +// Settings contains settings for edge-cases. +type Settings struct { + Mode RunningMode + MultiIf bool + MultiFunc bool +} + +// NewAnalyzer creates a new whitespace analyzer. +func NewAnalyzer(settings *Settings) *analysis.Analyzer { + if settings == nil { + settings = &Settings{} + } + + return &analysis.Analyzer{ + Name: "whitespace", + Doc: "Whitespace is a linter that checks for unnecessary newlines at the start and end of functions, if, for, etc.", + Flags: flags(settings), + Run: func(p *analysis.Pass) (any, error) { + Run(p, settings) + return nil, nil + }, + RunDespiteErrors: true, + } +} + +func flags(settings *Settings) flag.FlagSet { + flags := flag.NewFlagSet("", flag.ExitOnError) + flags.BoolVar(&settings.MultiIf, "multi-if", settings.MultiIf, "Check that multi line if-statements have a leading newline") + flags.BoolVar(&settings.MultiFunc, "multi-func", settings.MultiFunc, "Check that multi line functions have a leading newline") + + return *flags +} + +func Run(pass *analysis.Pass, settings *Settings) []Message { + messages := []Message{} + + for _, file := range pass.Files { + filename := pass.Fset.Position(file.Pos()).Filename + if !strings.HasSuffix(filename, ".go") { + continue + } + + fileMessages := runFile(file, pass.Fset, *settings) + + if settings.Mode == RunningModeGolangCI { + messages = append(messages, fileMessages...) 
+ continue + } + + for _, message := range fileMessages { + pass.Report(analysis.Diagnostic{ + Pos: message.Diagnostic, + Category: "whitespace", + Message: message.Message, + SuggestedFixes: []analysis.SuggestedFix{ + { + TextEdits: []analysis.TextEdit{ + { + Pos: message.FixStart, + End: message.FixEnd, + NewText: []byte("\n"), + }, + }, + }, + }, + }) + } + } + + return messages +} + +func runFile(file *ast.File, fset *token.FileSet, settings Settings) []Message { + var messages []Message + + for _, f := range file.Decls { + decl, ok := f.(*ast.FuncDecl) + if !ok || decl.Body == nil { // decl.Body can be nil for e.g. cgo + continue + } + + vis := visitor{file.Comments, fset, nil, make(map[*ast.BlockStmt]bool), settings} + ast.Walk(&vis, decl) + + messages = append(messages, vis.messages...) + } + + return messages +} + +type visitor struct { + comments []*ast.CommentGroup + fset *token.FileSet + messages []Message + wantNewline map[*ast.BlockStmt]bool + settings Settings +} + +func (v *visitor) Visit(node ast.Node) ast.Visitor { + if node == nil { + return v + } + + if stmt, ok := node.(*ast.IfStmt); ok && v.settings.MultiIf { + checkMultiLine(v, stmt.Body, stmt.Cond) + } + + if stmt, ok := node.(*ast.FuncLit); ok && v.settings.MultiFunc { + checkMultiLine(v, stmt.Body, stmt.Type) + } + + if stmt, ok := node.(*ast.FuncDecl); ok && v.settings.MultiFunc { + checkMultiLine(v, stmt.Body, stmt.Type) + } + + if stmt, ok := node.(*ast.BlockStmt); ok { + wantNewline := v.wantNewline[stmt] + + comments := v.comments + if wantNewline { + comments = nil // Comments also count as a newline if we want a newline + } + + opening, first, last := firstAndLast(comments, v.fset, stmt) + startMsg := checkStart(v.fset, opening, first) + + if wantNewline && startMsg == nil && len(stmt.List) >= 1 { + v.messages = append(v.messages, Message{ + Diagnostic: opening, + FixStart: stmt.List[0].Pos(), + FixEnd: stmt.List[0].Pos(), + LineNumbers: []int{v.fset.PositionFor(stmt.List[0].Pos(), false).Line}, + MessageType: MessageTypeAdd, + Message: "multi-line statement should be followed by a newline", + }) + } else if !wantNewline && startMsg != nil { + v.messages = append(v.messages, *startMsg) + } + + if msg := checkEnd(v.fset, stmt.Rbrace, last); msg != nil { + v.messages = append(v.messages, *msg) + } + } + + return v +} + +func checkMultiLine(v *visitor, body *ast.BlockStmt, stmtStart ast.Node) { + start, end := posLine(v.fset, stmtStart.Pos()), posLine(v.fset, stmtStart.End()) + + if end > start { // Check only multi line conditions + v.wantNewline[body] = true + } +} + +func posLine(fset *token.FileSet, pos token.Pos) int { + return fset.PositionFor(pos, false).Line +} + +func firstAndLast(comments []*ast.CommentGroup, fset *token.FileSet, stmt *ast.BlockStmt) (token.Pos, ast.Node, ast.Node) { + openingPos := stmt.Lbrace + 1 + + if len(stmt.List) == 0 { + return openingPos, nil, nil + } + + first, last := ast.Node(stmt.List[0]), ast.Node(stmt.List[len(stmt.List)-1]) + + for _, c := range comments { + // If the comment is on the same line as the opening pos (initially the + // left bracket) but it starts after the pos the comment must be after + // the bracket and where that comment ends should be considered where + // the fix should start. + if posLine(fset, c.Pos()) == posLine(fset, openingPos) && c.Pos() > openingPos { + if posLine(fset, c.End()) != posLine(fset, openingPos) { + // This is a multiline comment that spans from the `LBrace` line + // to a line further down. This should always be seen as ok! 
+ first = c + } else { + openingPos = c.End() + } + } + + if posLine(fset, c.Pos()) == posLine(fset, stmt.Pos()) || posLine(fset, c.End()) == posLine(fset, stmt.End()) { + continue + } + + if c.Pos() < stmt.Pos() || c.End() > stmt.End() { + continue + } + + if c.Pos() < first.Pos() { + first = c + } + + if c.End() > last.End() { + last = c + } + } + + return openingPos, first, last +} + +func checkStart(fset *token.FileSet, start token.Pos, first ast.Node) *Message { + if first == nil { + return nil + } + + if posLine(fset, start)+1 < posLine(fset, first.Pos()) { + return &Message{ + Diagnostic: start, + FixStart: start, + FixEnd: first.Pos(), + LineNumbers: linesBetween(fset, start, first.Pos()), + MessageType: MessageTypeRemove, + Message: "unnecessary leading newline", + } + } + + return nil +} + +func checkEnd(fset *token.FileSet, end token.Pos, last ast.Node) *Message { + if last == nil { + return nil + } + + if posLine(fset, end)-1 > posLine(fset, last.End()) { + return &Message{ + Diagnostic: end, + FixStart: last.End(), + FixEnd: end, + LineNumbers: linesBetween(fset, last.End(), end), + MessageType: MessageTypeRemove, + Message: "unnecessary trailing newline", + } + } + + return nil +} + +func linesBetween(fset *token.FileSet, a, b token.Pos) []int { + lines := []int{} + aPosition := fset.PositionFor(a, false) + bPosition := fset.PositionFor(b, false) + + for i := aPosition.Line + 1; i < bPosition.Line; i++ { + lines = append(lines, i) + } + + return lines +} diff --git a/vendor/github.com/uudashr/gocognit/README.md index 1e028c7897..57f31cf740 100644 --- a/vendor/github.com/uudashr/gocognit/README.md +++ b/vendor/github.com/uudashr/gocognit/README.md @@ -1,6 +1,6 @@ [![GoDoc](https://godoc.org/github.com/uudashr/gocognit?status.svg)](https://godoc.org/github.com/uudashr/gocognit) # Gocognit -Gocognit calculates cognitive complexities of functions in Go source code. A measurement of how hard does the code is intuitively to understand. +Gocognit calculates cognitive complexities of functions (and methods) in Go source code. A measurement of how hard does the code is intuitively to understand. ## Understanding the complexity @@ -37,10 +37,10 @@ func GetWords(number int) string { As you see above codes are the same, but the second code are easier to understand, that is why the cognitive complexity score are lower compare to the first one. -## Comparison with cyclometic complexity +## Comparison with cyclomatic complexity ### Example 1 -#### Cyclometic complexity +#### Cyclomatic complexity ```go func GetWords(number int) string { // +1 switch number { @@ -160,16 +160,40 @@ $ go get github.com/uudashr/gocognit/cmd/gocognit ``` $ gocognit Calculate cognitive complexities of Go functions. + Usage: - gocognit [flags] <Go file or directory> ... + + gocognit [<flag> ...] <Go file or directory> ...
+ Flags: - -over N show functions with complexity > N only and - return exit code 1 if the set is non-empty - -top N show the top N most complex functions only - -avg show the average complexity over all functions, - not depending on whether -over or -top are set -The output fields for each line are: - <complexity> <package> <function> <file:line:column> + + -over N show functions with complexity > N only + and return exit code 1 if the output is non-empty + -top N show the top N most complex functions only + -avg show the average complexity over all functions, + not depending on whether -over or -top are set + -json encode the output as JSON + -f format string the format to use + (default "{{.PkgName}}.{{.FuncName}}:{{.Complexity}}:{{.Pos}}") + +The (default) output fields for each line are: + + <complexity> <package> <function> <file:line:column> + +or equal to + + {{.Complexity}} {{.PkgName}} {{.FuncName}} {{.Pos}} + +The struct being passed to the template is: + + type Stat struct { + PkgName string + FuncName string + Complexity int + Pos token.Position + } ``` Examples: @@ -180,6 +204,7 @@ $ gocognit main.go $ gocognit -top 10 src/ $ gocognit -over 25 docker $ gocognit -avg . +$ gocognit -ignore "_test|testdata" . ``` The output fields for each line are: @@ -187,6 +212,15 @@ The output fields for each line are: ``` +## Ignore individual functions +Ignore individual functions by specifying `gocognit:ignore` directive. +```go +//gocognit:ignore +func IgnoreMe() { + // ... +} +``` + ## Related project - [Gocyclo](https://github.com/fzipp/gocyclo) where the code are based on. - [Cognitive Complexity: A new way of measuring understandability](https://www.sonarsource.com/docs/CognitiveComplexity.pdf) white paper by G. Ann Campbell. \ No newline at end of file diff --git a/vendor/github.com/uudashr/gocognit/gocognit.go index 1d539ee780..2bba2eb4f0 100644 --- a/vendor/github.com/uudashr/gocognit/gocognit.go +++ b/vendor/github.com/uudashr/gocognit/gocognit.go @@ -26,6 +26,11 @@ func (s Stat) String() string { func ComplexityStats(f *ast.File, fset *token.FileSet, stats []Stat) []Stat { for _, decl := range f.Decls { if fn, ok := decl.(*ast.FuncDecl); ok { + d := parseDirective(fn.Doc) + if d.Ignore { + continue + } + stats = append(stats, Stat{ PkgName: f.Name.Name, FuncName: funcName(fn), @@ -37,6 +42,24 @@ func ComplexityStats(f *ast.File, fset *token.FileSet, stats []Stat) []Stat { return stats } +type directive struct { + Ignore bool +} + +func parseDirective(doc *ast.CommentGroup) directive { + if doc == nil { + return directive{} + } + + for _, c := range doc.List { + if c.Text == "//gocognit:ignore" { + return directive{Ignore: true} + } + } + + return directive{} +} + // funcName returns the name representation of a function or method: // "(Type).Name" for methods or simply "Name" for functions.
func funcName(fn *ast.FuncDecl) string { @@ -272,7 +295,7 @@ func (v *complexityVisitor) visitBranchStmt(n *ast.BranchStmt) ast.Visitor { } func (v *complexityVisitor) visitBinaryExpr(n *ast.BinaryExpr) ast.Visitor { - if (n.Op == token.LAND || n.Op == token.LOR) && !v.isCalculated(n) { + if isBinaryLogicalOp(n.Op) && !v.isCalculated(n) { ops := v.collectBinaryOps(n) var lastOp token.Token @@ -299,15 +322,10 @@ func (v *complexityVisitor) visitCallExpr(n *ast.CallExpr) ast.Visitor { func (v *complexityVisitor) collectBinaryOps(exp ast.Expr) []token.Token { v.markCalculated(exp) - switch exp := exp.(type) { - case *ast.BinaryExpr: + if exp, ok := exp.(*ast.BinaryExpr); ok { return mergeBinaryOps(v.collectBinaryOps(exp.X), exp.Op, v.collectBinaryOps(exp.Y)) - case *ast.ParenExpr: - // interest only on what inside paranthese - return v.collectBinaryOps(exp.X) - default: - return []token.Token{} } + return nil } func (v *complexityVisitor) incIfComplexity(n *ast.IfStmt) { @@ -320,16 +338,18 @@ func (v *complexityVisitor) incIfComplexity(n *ast.IfStmt) { func mergeBinaryOps(x []token.Token, op token.Token, y []token.Token) []token.Token { var out []token.Token - if len(x) != 0 { - out = append(out, x...) - } - out = append(out, op) - if len(y) != 0 { - out = append(out, y...) + out = append(out, x...) + if isBinaryLogicalOp(op) { + out = append(out, op) } + out = append(out, y...) return out } +func isBinaryLogicalOp(op token.Token) bool { + return op == token.LAND || op == token.LOR +} + const Doc = `Find complex function using cognitive complexity calculation. The gocognit analysis reports functions or methods which the complexity is over @@ -359,13 +379,19 @@ func run(pass *analysis.Pass) (interface{}, error) { (*ast.FuncDecl)(nil), } inspect.Preorder(nodeFilter, func(n ast.Node) { - fnDecl := n.(*ast.FuncDecl) + funcDecl := n.(*ast.FuncDecl) + + d := parseDirective(funcDecl.Doc) + if d.Ignore { + return + } + + fnName := funcName(funcDecl) - fnName := funcName(fnDecl) - fnComplexity := Complexity(fnDecl) + fnComplexity := Complexity(funcDecl) if fnComplexity > over { - pass.Reportf(fnDecl.Pos(), "cognitive complexity %d of func %s is high (> %d)", fnComplexity, fnName, over) + pass.Reportf(funcDecl.Pos(), "cognitive complexity %d of func %s is high (> %d)", fnComplexity, fnName, over) } }) diff --git a/vendor/github.com/xen0n/gosmopolitan/.editorconfig b/vendor/github.com/xen0n/gosmopolitan/.editorconfig new file mode 100644 index 0000000000..0c0f7e7e28 --- /dev/null +++ b/vendor/github.com/xen0n/gosmopolitan/.editorconfig @@ -0,0 +1,23 @@ +# EditorConfig is awesome: http://EditorConfig.org + +root = true + +[*] +indent_style = space +trim_trailing_whitespace = true +end_of_line = lf +insert_final_newline = true +charset = utf-8 + +[{*.sh,*.md}] +indent_size = 4 + +[{*.yaml,*.yml}] +indent_size = 2 + +# hard tabs for Go and Makefile per best practice of file format +[*.go] +indent_style = tab + +[Makefile] +indent_style = tab diff --git a/vendor/github.com/xen0n/gosmopolitan/.gitignore b/vendor/github.com/xen0n/gosmopolitan/.gitignore new file mode 100644 index 0000000000..1a1abaa202 --- /dev/null +++ b/vendor/github.com/xen0n/gosmopolitan/.gitignore @@ -0,0 +1,188 @@ +# ignore the local build artifact +/gosmopolitan + +# and test artifacts +/coverage.txt + +# the following are auto-generated + +# Created by https://www.toptal.com/developers/gitignore/api/go,visualstudiocode,vim,goland +# Edit at https://www.toptal.com/developers/gitignore?templates=go,visualstudiocode,vim,goland + +### Go 
### +# If you prefer the allow list template instead of the deny list, see community template: +# https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore +# +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +# vendor/ + +# Go workspace file +go.work + +### GoLand ### +# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and Rider +# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 + +# User-specific stuff +.idea/**/workspace.xml +.idea/**/tasks.xml +.idea/**/usage.statistics.xml +.idea/**/dictionaries +.idea/**/shelf + +# AWS User-specific +.idea/**/aws.xml + +# Generated files +.idea/**/contentModel.xml + +# Sensitive or high-churn files +.idea/**/dataSources/ +.idea/**/dataSources.ids +.idea/**/dataSources.local.xml +.idea/**/sqlDataSources.xml +.idea/**/dynamic.xml +.idea/**/uiDesigner.xml +.idea/**/dbnavigator.xml + +# Gradle +.idea/**/gradle.xml +.idea/**/libraries + +# Gradle and Maven with auto-import +# When using Gradle or Maven with auto-import, you should exclude module files, +# since they will be recreated, and may cause churn. Uncomment if using +# auto-import. +# .idea/artifacts +# .idea/compiler.xml +# .idea/jarRepositories.xml +# .idea/modules.xml +# .idea/*.iml +# .idea/modules +# *.iml +# *.ipr + +# CMake +cmake-build-*/ + +# Mongo Explorer plugin +.idea/**/mongoSettings.xml + +# File-based project format +*.iws + +# IntelliJ +out/ + +# mpeltonen/sbt-idea plugin +.idea_modules/ + +# JIRA plugin +atlassian-ide-plugin.xml + +# Cursive Clojure plugin +.idea/replstate.xml + +# SonarLint plugin +.idea/sonarlint/ + +# Crashlytics plugin (for Android Studio and IntelliJ) +com_crashlytics_export_strings.xml +crashlytics.properties +crashlytics-build.properties +fabric.properties + +# Editor-based Rest Client +.idea/httpRequests + +# Android studio 3.1+ serialized cache file +.idea/caches/build_file_checksums.ser + +### GoLand Patch ### +# Comment Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-215987721 + +# *.iml +# modules.xml +# .idea/misc.xml +# *.ipr + +# Sonarlint plugin +# https://plugins.jetbrains.com/plugin/7973-sonarlint +.idea/**/sonarlint/ + +# SonarQube Plugin +# https://plugins.jetbrains.com/plugin/7238-sonarqube-community-plugin +.idea/**/sonarIssues.xml + +# Markdown Navigator plugin +# https://plugins.jetbrains.com/plugin/7896-markdown-navigator-enhanced +.idea/**/markdown-navigator.xml +.idea/**/markdown-navigator-enh.xml +.idea/**/markdown-navigator/ + +# Cache file creation bug +# See https://youtrack.jetbrains.com/issue/JBR-2257 +.idea/$CACHE_FILE$ + +# CodeStream plugin +# https://plugins.jetbrains.com/plugin/12206-codestream +.idea/codestream.xml + +# Azure Toolkit for IntelliJ plugin +# https://plugins.jetbrains.com/plugin/8053-azure-toolkit-for-intellij +.idea/**/azureSettings.xml + +### Vim ### +# Swap +[._]*.s[a-v][a-z] +!*.svg # comment out if you don't need vector files +[._]*.sw[a-p] +[._]s[a-rt-v][a-z] +[._]ss[a-gi-z] +[._]sw[a-p] + +# Session +Session.vim +Sessionx.vim + +# Temporary +.netrwhist +*~ +# Auto-generated tag files +tags +# Persistent undo +[._]*.un~ + +### VisualStudioCode ### +.vscode/* +!.vscode/settings.json +!.vscode/tasks.json +!.vscode/launch.json +!.vscode/extensions.json 
+!.vscode/*.code-snippets + +# Local History for Visual Studio Code +.history/ + +# Built Visual Studio Code Extensions +*.vsix + +### VisualStudioCode Patch ### +# Ignore all local history of files +.history +.ionide + +# End of https://www.toptal.com/developers/gitignore/api/go,visualstudiocode,vim,goland diff --git a/vendor/github.com/xen0n/gosmopolitan/.golangci.yml b/vendor/github.com/xen0n/gosmopolitan/.golangci.yml new file mode 100644 index 0000000000..0bce12501a --- /dev/null +++ b/vendor/github.com/xen0n/gosmopolitan/.golangci.yml @@ -0,0 +1,31 @@ +run: + go: '1.19' + modules-download-mode: readonly + +linters: + enable: + - goheader + - goimports + - gosec + - gosimple + - lll + - nakedret + - revive + - stylecheck + - unused + +linters-settings: + goheader: + template: |- + SPDX-License-Identifier: GPL-3.0-or-later + goimports: + local-prefixes: github.com/xen0n/gosmopolitan + gosimple: + go: '1.19' + lll: + line-length: 120 + tab-width: 4 + nakedret: + max-func-lines: 1 + stylecheck: + go: '1.19' diff --git a/vendor/github.com/xen0n/gosmopolitan/LICENSE b/vendor/github.com/xen0n/gosmopolitan/LICENSE new file mode 100644 index 0000000000..94a9ed024d --- /dev/null +++ b/vendor/github.com/xen0n/gosmopolitan/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. 
For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. 
+ + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. 
+ + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. 
+ + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. 
+ + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. 
If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). 
To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. 
+ + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. 
+ + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (C) + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. diff --git a/vendor/github.com/xen0n/gosmopolitan/README.md b/vendor/github.com/xen0n/gosmopolitan/README.md new file mode 100644 index 0000000000..86a5e64e00 --- /dev/null +++ b/vendor/github.com/xen0n/gosmopolitan/README.md @@ -0,0 +1,84 @@ +# gosmopolitan + +![GitHub Workflow Status (main branch)](https://img.shields.io/github/actions/workflow/status/xen0n/gosmopolitan/go.yml?branch=main) +![Codecov](https://img.shields.io/codecov/c/gh/xen0n/gosmopolitan) +![GitHub license info](https://img.shields.io/github/license/xen0n/gosmopolitan) +![GitHub go.mod Go version](https://img.shields.io/github/go-mod/go-version/xen0n/gosmopolitan) +[![Go Report Card](https://goreportcard.com/badge/github.com/xen0n/gosmopolitan)](https://goreportcard.com/report/github.com/xen0n/gosmopolitan) +[![Go Reference](https://pkg.go.dev/badge/github.com/xen0n/gosmopolitan.svg)](https://pkg.go.dev/github.com/xen0n/gosmopolitan) + +[简体中文](./README.zh-Hans.md) + +`gosmopolitan` checks your Go codebase for code smells that may prove to be +hindrance to internationalization ("i18n") and/or localization ("l10n"). 
+ +The name is a wordplay on "cosmopolitan". + +## Checks + +Currently `gosmopolitan` checks for the following anti-patterns: + +* Occurrences of string literals containing characters from certain writing + systems. + + Existence of such strings often means the relevant logic is hard to + internationalize, or at least, require special care when doing i18n/l10n. + +* Usages of `time.Local`. + + An internationalized app or library should almost never process time and + date values in the timezone in which it is running; instead one should use + the respective user preference, or the timezone as dictated by the domain + logic. + +Note that local times are produced in a lot more ways than via direct casts to +`time.Local` alone, such as: + +* `time.LoadLocation("Local")` +* received from a `time.Ticker` +* functions explicitly documented to return local times + * `time.Now()` + * `time.Unix()` + * `time.UnixMilli()` + * `time.UnixMicro()` + +Proper identification of these use cases require a fairly complete dataflow +analysis pass, which is not implemented currently. In addition, right now you +have to pay close attention to externally-provided time values (such as from +your framework like Gin or gRPC) as they are not properly tracked either. + +## Caveats + +Note that the checks implemented here are only suitable for codebases with the +following characteristics, and may not suit your particular project's needs: + +* Originally developed for an audience using non-Latin writing system(s), +* Returns bare strings intended for humans containing such non-Latin characters, and +* May occasionally (or frequently) refer to the system timezone, but is + architecturally forbidden/discouraged to just treat the system timezone as + the reference timezone. + +For example, the lints may prove valuable if you're revamping a web service +originally targetting the Chinese market (hence producing strings with Chinese +characters all over the place) to be more i18n-aware. Conversely, if you want +to identify some of the i18n-naïve places in an English-only app, the linter +will output nothing. + +## golangci-lint integration + +`gosmopolitan` support [has been merged][gcl-pr] into [`golangci-lint`][gcl-home], +and will be usable out-of-the-box in golangci-lint v1.53.0 or later. + +Due to the opinionated coding style this linter advocates and checks for, if +you have `enable-all: true` in your `golangci.yml` and your project deals a +lot with Chinese text and/or `time.Local`, then you'll get flooded with lints +when you upgrade to golangci-lint v1.53.0. Just disable this linter (and +better yet, move away from `enable-all: true`) if the style does not suit your +specific use case. + +[gcl-pr]: https://github.com/golangci/golangci-lint/pull/3458 +[gcl-home]: https://golangci-lint.run + +## License + +`gosmopolitan` is licensed under the GPL license, version 3 or later. 
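To make the checks described in this README concrete, here is a minimal, self-contained sketch (package name, strings and comments are purely illustrative, not part of the vendored module) that the analyzer would report under its default flags:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Reported: string literal containing runes from the Han script,
	// the default value of the -watchforscripts flag.
	fmt.Println("你好,世界")

	// Reported: usage of time.Local (suppressed only with -allowtimelocal).
	fmt.Println(time.Now().In(time.Local))
}
```

Note that the `time.Now()` call itself is not reported, matching the README's caveat that local times produced indirectly are not tracked; only the `time.Local` identifier is.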
diff --git a/vendor/github.com/xen0n/gosmopolitan/README.zh-Hans.md b/vendor/github.com/xen0n/gosmopolitan/README.zh-Hans.md new file mode 100644 index 0000000000..682d10880e --- /dev/null +++ b/vendor/github.com/xen0n/gosmopolitan/README.zh-Hans.md @@ -0,0 +1,69 @@ +# gosmopolitan + +![GitHub Workflow Status (main branch)](https://img.shields.io/github/actions/workflow/status/xen0n/gosmopolitan/go.yml?branch=main) +![Codecov](https://img.shields.io/codecov/c/gh/xen0n/gosmopolitan) +![GitHub license info](https://img.shields.io/github/license/xen0n/gosmopolitan) +![GitHub go.mod Go version](https://img.shields.io/github/go-mod/go-version/xen0n/gosmopolitan) +[![Go Report Card](https://goreportcard.com/badge/github.com/xen0n/gosmopolitan)](https://goreportcard.com/report/github.com/xen0n/gosmopolitan) +[![Go Reference](https://pkg.go.dev/badge/github.com/xen0n/gosmopolitan.svg)](https://pkg.go.dev/github.com/xen0n/gosmopolitan) + +[English](./README.md) + +用 `gosmopolitan` 检查你的 Go 代码库里有没有国际化(“i18n“)或者本地化(”l10n“)的阻碍。 + +项目名字来自“cosmopolitan”的文字游戏。 + +## 检查 + +`gosmopolitan` 目前会检查以下的反模式(anti-patterns): + +* 含有来自特定书写系统字符的字符串字面量(string literals)。 + + 项目中存在这种字符串,通常意味着相关的逻辑不便于国际化,或者至少在国际化/本地化适配过程中会涉及特殊对待。 + +* `time.Local` 的使用。 + + 支持国际化的应用或程序库,几乎永远不应以程序当前运行环境的时区来处理时间、日期数据。 + 相反,在这种场景下,开发者应该使用相应的用户偏好,或者按照领域逻辑确定应该使用的时区。 + +注意:除了直接向 `time.Local` 转换之外,还有很多其他写法会产生本地时区的时刻,例如: + +* `time.LoadLocation("Local")` +* 从 `time.Ticker` 收到的值 +* 文档中明确了会返回本地时刻的函数 + * `time.Now()` + * `time.Unix()` + * `time.UnixMilli()` + * `time.UnixMicro()` + +为了正确识别这些使用场景,需要有一个相当完善的数据流分析 pass,目前还没实现。 +此外,当前您还需要自行密切注意从外部传入的时刻值(例如从您使用的 Gin 或 gRPC +之类框架传来的那些),因为这些值当前也没有被正确跟踪。 + +## 注意事项 + +请注意,本库中实现的检查仅适用于具有以下性质的代码库,因此可能不适用于您的具体场景: + +* 项目原先是为使用非拉丁字母书写系统的受众群体开发的, +* 项目会返回包含这些非拉丁字母字符的裸的字符串(即,未经处理或变换的), +* 项目可能偶尔(或者经常)引用程序当前运行环境的系统时区,但项目架构上禁止或不建议把系统时区直接作为业务参考时区使用。 + +举个例子:如果您在翻新一个本来面向中国用户群体(因此到处都在产生含有汉字的字符串)的 +web 服务,以使其更加国际化,这里的 lints 可能会很有价值。 +反之,如果您想在一个仅支持英语的应用里,寻找其中不利于国际化的那部分写法,本 +linter 则什么都不会输出。 + +## 与 golangci-lint 集成 + +`gosmopolitan` 支持[已经被合并][gcl-pr]入 [`golangci-lint`][gcl-home] 上游,在 golangci-lint v1.53.0 及以后的版本可以开箱即用。 + +[gcl-pr]: https://github.com/golangci/golangci-lint/pull/3458 +[gcl-home]: https://golangci-lint.run + +由于本 linter 倡导和检查的代码风格带有鲜明立场,如果您在 `golangci.yml` 开了 +`enable-all: true` 并且您的项目处理很多中文文本或者 `time.Local`,那么您一旦升级到 +golangci-lint v1.53.0 就将被 lints 淹没。如果这种代码风格不适合您的具体使用场景,直接禁用本 linter(或者更彻底一些,不要 `enable-all: true` 了)就好。 + +## 许可证 + +`gosmopolitan` 以 GPL v3 或更新的版本许可使用。 diff --git a/vendor/github.com/xen0n/gosmopolitan/lib.go b/vendor/github.com/xen0n/gosmopolitan/lib.go new file mode 100644 index 0000000000..67b1151c71 --- /dev/null +++ b/vendor/github.com/xen0n/gosmopolitan/lib.go @@ -0,0 +1,385 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package gosmopolitan + +import ( + "fmt" + "go/ast" + "go/token" + "go/types" + "regexp" + "strings" + "unicode" + + "golang.org/x/text/runes" + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" +) + +const analyzerName = "gosmopolitan" +const analyzerDoc = "Report certain i18n/l10n anti-patterns in your Go codebase" + +type AnalyzerConfig struct { + // LookAtTests is flag controlling whether the lints are going to look at + // test files, despite other config knobs of the Go analysis tooling + // framework telling us otherwise. 
+ // + // By default gosmopolitan does not look at test files, because i18n-aware + // apps most probably have many unmarked strings in test cases, and names + // and descriptions *of* test cases are probably in the program's original + // natural language too. + LookAtTests bool + // EscapeHatches is optionally a list of fully qualified names, in the + // `(full/pkg/path).name` form, to act as "i18n escape hatches". Inside + // call-like expressions to those names, the string literal script check + // is ignored. + // + // With this functionality in place, you can use type aliases like + // `type R = string` as markers, or have explicitly i18n-aware functions + // exempt from the checks. + EscapeHatches []string + // WatchForScripts is optionally a list of Unicode script names to watch + // for any usage in string literals. The range of supported scripts is + // determined by the [unicode.Scripts] map and values are case-sensitive. + WatchForScripts []string + // AllowTimeLocal is flag controlling whether usages of [time.Local] are + // allowed (i.e. not reported). + AllowTimeLocal bool +} + +func NewAnalyzer() *analysis.Analyzer { + var lookAtTests bool + var escapeHatchesStr string + var watchForScriptsStr string + var allowTimeLocal bool + + a := &analysis.Analyzer{ + Name: analyzerName, + Doc: analyzerDoc, + Requires: []*analysis.Analyzer{ + inspect.Analyzer, + }, + Run: func(p *analysis.Pass) (any, error) { + cfg := AnalyzerConfig{ + LookAtTests: lookAtTests, + EscapeHatches: strings.Split(escapeHatchesStr, ","), + WatchForScripts: strings.Split(watchForScriptsStr, ","), + AllowTimeLocal: allowTimeLocal, + } + pctx := processCtx{cfg: &cfg, p: p} + return pctx.run() + }, + RunDespiteErrors: false, + } + + a.Flags.BoolVar(&lookAtTests, + "lookattests", + false, + "also check the test files", + ) + a.Flags.StringVar( + &escapeHatchesStr, + "escapehatches", + "", + "comma-separated list of fully qualified names to act as i18n escape hatches", + ) + a.Flags.StringVar( + &watchForScriptsStr, + "watchforscripts", + "Han", + "comma-separated list of Unicode scripts to watch out for occurrence in string literals", + ) + a.Flags.BoolVar(&allowTimeLocal, + "allowtimelocal", + false, + "allow time.Local usages", + ) + + return a +} + +func NewAnalyzerWithConfig(cfg *AnalyzerConfig) *analysis.Analyzer { + return &analysis.Analyzer{ + Name: analyzerName, + Doc: analyzerDoc, + Requires: []*analysis.Analyzer{ + inspect.Analyzer, + }, + Run: func(p *analysis.Pass) (any, error) { + pctx := processCtx{cfg: cfg, p: p} + return pctx.run() + }, + RunDespiteErrors: false, + } +} + +var DefaultAnalyzer = NewAnalyzer() + +func validateUnicodeScriptName(name string) error { + if _, ok := unicode.Scripts[name]; !ok { + return fmt.Errorf("invalid Unicode script name: %s", name) + } + return nil +} + +// example input: ["Han", "Arabic"] +// example output: `\p{Han}|\p{Arabic}` +// assumes len(scriptNames) > 0 +func makeUnicodeScriptMatcherRegexpString(scriptNames []string) string { + var sb strings.Builder + for i, s := range scriptNames { + if i > 0 { + sb.WriteRune('|') + } + sb.WriteString(`\p{`) + sb.WriteString(s) + sb.WriteRune('}') + } + return sb.String() +} + +func makeUnicodeScriptMatcherRegexp(scriptNames []string) (*regexp.Regexp, error) { + return regexp.Compile(makeUnicodeScriptMatcherRegexpString(scriptNames)) +} + +type processCtx struct { + cfg *AnalyzerConfig + p *analysis.Pass +} + +func mapSlice[T any, U any](x []T, fn func(T) U) []U { + if x == nil { + return nil + } + y := make([]U, len(x)) 
+ for i, v := range x { + y[i] = fn(v) + } + return y +} + +func sliceToSet[T comparable](x []T) map[T]struct{} { + // lo.SliceToMap(x, func(k T) (T, struct{}) { return k, struct{}{} }) + y := make(map[T]struct{}, len(x)) + for _, k := range x { + y[k] = struct{}{} + } + return y +} + +func getFullyQualifiedName(x types.Object) string { + pkg := x.Pkg() + if pkg == nil { + return x.Name() + } + return fmt.Sprintf("%s.%s", pkg.Path(), x.Name()) +} + +// if input is in the "(%s).%s" form, remove the parens, else return the +// unchanged input +// +// this is for maintaining compatibility with the previous FQN notation that +// was born out of my confusion (the previous notation, while commonly seen, +// seems to be only for methods or pointer receiver types; the parens-less +// form is in fact unambiguous, because Go identifiers can't contain periods.) +func unquoteInputFQN(x string) string { + if len(x) == 0 || x[0] != '(' { + return x + } + + before, after, found := strings.Cut(x[1:], ")") + if !found { + // malformed input: string in "(xxxxx" form with unclosed parens! + // in this case, only removing the opening parens might be better than + // doing nothing after all + return x[1:] + } + + // at this point, + // input: "(foo).bar" + // before: "foo" + // after: ".bar" + return before + after +} + +func (c *processCtx) run() (any, error) { + escapeHatchesSet := sliceToSet(mapSlice(c.cfg.EscapeHatches, unquoteInputFQN)) + + if len(c.cfg.WatchForScripts) == 0 { + c.cfg.WatchForScripts = []string{"Han"} + } + + for _, s := range c.cfg.WatchForScripts { + if err := validateUnicodeScriptName(s); err != nil { + return nil, err + } + } + + charRE, err := makeUnicodeScriptMatcherRegexp(c.cfg.WatchForScripts) + if err != nil { + return nil, err + } + + usq := newUnicodeScriptQuerier(c.cfg.WatchForScripts) + + insp := c.p.ResultOf[inspect.Analyzer].(*inspector.Inspector) + + // support ignoring the test files, because test files could be full of + // i18n and l10n fixtures, and we want to focus on the actual run-time + // logic + // + // TODO: is there a way to both ignore test files earlier, and make use of + // inspect.Analyzer's cached results? currently Inspector doesn't provide + // a way to selectively travese some files' AST but not others. 
+ isBelongingToTestFiles := func(n ast.Node) bool { + return strings.HasSuffix(c.p.Fset.File(n.Pos()).Name(), "_test.go") + } + + shouldSkipTheContainingFile := func(n ast.Node) bool { + if c.cfg.LookAtTests { + return false + } + return isBelongingToTestFiles(n) + } + + insp.Nodes(nil, func(n ast.Node, push bool) bool { + // we only need to look at each node once + if !push { + return false + } + + if shouldSkipTheContainingFile(n) { + return false + } + + // skip blocks that can contain string literals but are not otherwise + // interesting for us + switch n.(type) { + case *ast.ImportSpec, *ast.TypeSpec: + // import blocks, type declarations + return false + } + + // and don't look inside escape hatches + referentFQN := c.getFullyQualifiedNameOfReferent(n) + if referentFQN != "" { + _, isEscapeHatch := escapeHatchesSet[referentFQN] + // if isEscapeHatch: don't recurse (false) + return !isEscapeHatch + } + + // check only string literals + lit, ok := n.(*ast.BasicLit) + if !ok { + return true + } + if lit.Kind != token.STRING { + return true + } + + // report string literals containing characters of given script (in + // the sense of "writing system") + if charRE.MatchString(lit.Value) { + match := charRE.FindIndex([]byte(lit.Value)) + matchCh := []byte(lit.Value)[match[0]:match[1]] + scriptName := usq.queryScriptForRuneBytes(matchCh) + + c.p.Report(analysis.Diagnostic{ + Pos: lit.Pos() + token.Pos(match[0]), + End: lit.Pos() + token.Pos(match[1]), + Message: fmt.Sprintf("string literal contains rune in %s script", scriptName), + }) + } + + return true + }) + + if !c.cfg.AllowTimeLocal { + // check time.Local usages + insp.Nodes([]ast.Node{(*ast.Ident)(nil)}, func(n ast.Node, push bool) bool { + // we only need to look at each node once + if !push { + return false + } + + if shouldSkipTheContainingFile(n) { + return false + } + + ident := n.(*ast.Ident) + + d := c.p.TypesInfo.ObjectOf(ident) + if d == nil || d.Pkg() == nil { + return true + } + + if d.Pkg().Path() == "time" && d.Name() == "Local" { + c.p.Report(analysis.Diagnostic{ + Pos: n.Pos(), + End: n.End(), + Message: "usage of time.Local", + }) + } + + return true + }) + } + + return nil, nil +} + +func (c *processCtx) getFullyQualifiedNameOfReferent(n ast.Node) string { + var ident *ast.Ident + switch e := n.(type) { + case *ast.CallExpr: + ident = getIdentOfTypeOfExpr(e.Fun) + + case *ast.CompositeLit: + ident = getIdentOfTypeOfExpr(e.Type) + + default: + return "" + } + + referent := c.p.TypesInfo.Uses[ident] + if referent == nil { + return "" + } + + return getFullyQualifiedName(referent) +} + +func getIdentOfTypeOfExpr(e ast.Expr) *ast.Ident { + switch x := e.(type) { + case *ast.Ident: + return x + case *ast.SelectorExpr: + return x.Sel + } + return nil +} + +type unicodeScriptQuerier struct { + sets map[string]runes.Set +} + +func newUnicodeScriptQuerier(scriptNames []string) *unicodeScriptQuerier { + sets := make(map[string]runes.Set, len(scriptNames)) + for _, s := range scriptNames { + sets[s] = runes.In(unicode.Scripts[s]) + } + return &unicodeScriptQuerier{ + sets: sets, + } +} + +func (x *unicodeScriptQuerier) queryScriptForRuneBytes(b []byte) string { + r := []rune(string(b))[0] + for s, set := range x.sets { + if set.Contains(r) { + return s + } + } + return "" +} diff --git a/vendor/github.com/yagipy/maintidx/.gitignore b/vendor/github.com/yagipy/maintidx/.gitignore new file mode 100644 index 0000000000..a676215fa9 --- /dev/null +++ b/vendor/github.com/yagipy/maintidx/.gitignore @@ -0,0 +1,2 @@ +.idea +bin diff 
--git a/vendor/github.com/yagipy/maintidx/LICENSE b/vendor/github.com/yagipy/maintidx/LICENSE new file mode 100644 index 0000000000..b94c2ede81 --- /dev/null +++ b/vendor/github.com/yagipy/maintidx/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2021 Hiroyuki Yagihashi + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/yagipy/maintidx/Makefile b/vendor/github.com/yagipy/maintidx/Makefile new file mode 100644 index 0000000000..14b8fc9797 --- /dev/null +++ b/vendor/github.com/yagipy/maintidx/Makefile @@ -0,0 +1,2 @@ +build: + go build -o bin/maintidx ./cmd/maintidx diff --git a/vendor/github.com/yagipy/maintidx/README.md b/vendor/github.com/yagipy/maintidx/README.md new file mode 100644 index 0000000000..8d5e26df08 --- /dev/null +++ b/vendor/github.com/yagipy/maintidx/README.md @@ -0,0 +1,45 @@ +# maintidx +`maintidx` measures the maintainability index of each function. +https://docs.microsoft.com/en-us/visualstudio/code-quality/code-metrics-maintainability-index-range-and-meaning + +## Installation +### Go version < 1.16 +```shell +go get -u github.com/yagipy/maintidx/cmd/maintidx +``` + +### Go version 1.16+ +```shell +go install github.com/yagipy/maintidx/cmd/maintidx +``` + +## Usage +### standalone +```shell +maintidx ./... +``` + +### with go run +No installation required +```shell +go run github.com/yagipy/maintidx/cmd/maintidx ./... +``` + +### with go vet +```shell +go vet -vettool=`which maintidx` ./... +``` + +## Flag +```shell +Flags: + -under int + show functions with maintainability index < N only. (default 20) +``` + +## TODO +- [ ] Setup execute env on container +- [ ] Impl cyc.Cyc.Calc() +- [ ] Move maintidx.Visitor.PrintHalstVol to halstval package +- [ ] Consider the necessity of halstvol.incrIfAllTrue +- [ ] Test under pkg file diff --git a/vendor/github.com/yagipy/maintidx/maintidx.go b/vendor/github.com/yagipy/maintidx/maintidx.go new file mode 100644 index 0000000000..31ad9ca0c2 --- /dev/null +++ b/vendor/github.com/yagipy/maintidx/maintidx.go @@ -0,0 +1,63 @@ +package maintidx + +import ( + "go/ast" + "go/token" + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" +) + +const doc = "maintidx measures the maintainability index of each function." 
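+// For orientation, a summary of what this analyzer reports (the formula is the
+// one implemented by Visitor.calc in visitor.go, normalized to the 0-100 range):
+//
+//	MI = max(0, (171 - 5.2*ln(HalsteadVolume) - 0.23*CyclomaticComplexity - 16.2*ln(LinesOfCode)) * 100 / 171)
+//
+// A diagnostic is emitted for every function whose MI falls below the -under
+// flag (20 by default).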
+ +var Analyzer = &analysis.Analyzer{ + Name: "maintidx", + Doc: doc, + Run: run, + Requires: []*analysis.Analyzer{ + inspect.Analyzer, + }, +} + +var under int + +func init() { + Analyzer.Flags.IntVar(&under, "under", 20, "show functions with maintainability index < N only.") +} + +func run(pass *analysis.Pass) (interface{}, error) { + i := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + + nodeFilter := []ast.Node{ + (*ast.FuncDecl)(nil), + } + + i.Preorder(nodeFilter, func(n ast.Node) { + switch n := n.(type) { + case *ast.FuncDecl: + v := analyze(n) + + v.Coef.Cyc.Calc() + v.Coef.HalstVol.Calc() + v.calc(loc(pass.Fset, n)) + if v.MaintIdx < under { + pass.Reportf(n.Pos(), "Function name: %v, Cyclomatic Complexity: %v, Halstead Volume: %0.2f, Maintainability Index: %v", n.Name, v.Coef.Cyc.Val, v.Coef.HalstVol.Val, v.MaintIdx) + } + } + }) + + return nil, nil +} + +func analyze(n ast.Node) Visitor { + v := NewVisitor() + ast.Walk(v, n) + return *v +} + +func loc(fs *token.FileSet, n *ast.FuncDecl) int { + f := fs.File(n.Pos()) + startLine := f.Line(n.Pos()) + endLine := f.Line(n.End()) + return endLine - startLine + 1 +} diff --git a/vendor/github.com/yagipy/maintidx/pkg/cyc/cyc.go b/vendor/github.com/yagipy/maintidx/pkg/cyc/cyc.go new file mode 100644 index 0000000000..9ea009106b --- /dev/null +++ b/vendor/github.com/yagipy/maintidx/pkg/cyc/cyc.go @@ -0,0 +1,36 @@ +package cyc + +import ( + "go/ast" + "go/token" +) + +type Cyc struct { + Val int + Coef Coef +} + +type Coef struct{} + +func (c *Cyc) Analyze(n ast.Node) { + switch n := n.(type) { + case *ast.IfStmt, *ast.ForStmt, *ast.RangeStmt: + c.Val++ + case *ast.CaseClause: + if n.List != nil { + c.Val++ + } + case *ast.CommClause: + if n.Comm != nil { + c.Val++ + } + case *ast.BinaryExpr: + if n.Op == token.LAND || n.Op == token.LOR { + c.Val++ + } + } +} + +// TODO: Implement +func (c *Cyc) Calc() { +} diff --git a/vendor/github.com/yagipy/maintidx/pkg/halstvol/halstvol.go b/vendor/github.com/yagipy/maintidx/pkg/halstvol/halstvol.go new file mode 100644 index 0000000000..f0212759b1 --- /dev/null +++ b/vendor/github.com/yagipy/maintidx/pkg/halstvol/halstvol.go @@ -0,0 +1,71 @@ +package halstvol + +import ( + "go/ast" + "math" +) + +type HalstVol struct { + Val float64 + Coef Coef +} + +type Coef struct { + Opt map[string]int + Opd map[string]int +} + +func (v *HalstVol) Analyze(n ast.Node) { + switch n := n.(type) { + case *ast.FuncDecl, *ast.GenDecl: + v.handleDecl(n) + case *ast.ParenExpr, *ast.IndexExpr, *ast.SliceExpr, *ast.TypeAssertExpr, *ast.CallExpr, *ast.StarExpr, *ast.UnaryExpr, *ast.BinaryExpr, *ast.KeyValueExpr: + v.handleExpr(n) + case *ast.BasicLit, *ast.CompositeLit: + v.handleLit(n) + case *ast.Ident: + v.handleIdent(n) + case *ast.Ellipsis: + incrIfAllTrue(v.Coef.Opt, "...", []bool{n.Ellipsis.IsValid()}) + case *ast.FuncType: + incrIfAllTrue(v.Coef.Opt, "func", []bool{n.Func.IsValid()}) + v.Coef.Opt["()"]++ + case *ast.ChanType: + incrIfAllTrue(v.Coef.Opt, "chan", []bool{n.Begin.IsValid()}) + incrIfAllTrue(v.Coef.Opt, "<-", []bool{n.Arrow.IsValid()}) + case *ast.SendStmt, *ast.IncDecStmt, *ast.AssignStmt, *ast.GoStmt, *ast.DeferStmt, *ast.ReturnStmt, *ast.BranchStmt, *ast.BlockStmt, *ast.IfStmt, *ast.SwitchStmt, *ast.SelectStmt, *ast.ForStmt, *ast.RangeStmt: + v.handleStmt(n) + case *ast.CaseClause: + v.handleCaseClause(n) + } +} + +func (v *HalstVol) Calc() { + distOpt := len(v.Coef.Opt) + distOpd := len(v.Coef.Opd) + + var sumOpt, sumOpd int + + for _, val := range v.Coef.Opt { + sumOpt += val + } + + for 
_, val := range v.Coef.Opd { + sumOpd += val + } + + vocab := distOpt + distOpd + length := sumOpt + sumOpd + + v.Val = float64(length) * math.Log2(float64(vocab)) +} + +// TODO: Consider the necessity +func incrIfAllTrue(coef map[string]int, sym string, cond []bool) { + for _, ok := range cond { + if !ok { + return + } + } + coef[sym]++ +} diff --git a/vendor/github.com/yagipy/maintidx/pkg/halstvol/handle.go b/vendor/github.com/yagipy/maintidx/pkg/halstvol/handle.go new file mode 100644 index 0000000000..9f5e33500d --- /dev/null +++ b/vendor/github.com/yagipy/maintidx/pkg/halstvol/handle.go @@ -0,0 +1,151 @@ +package halstvol + +import "go/ast" + +func (v *HalstVol) handleDecl(decl ast.Node) { + switch n := decl.(type) { + case *ast.FuncDecl: + if n.Recv == nil { + // In the case of receiver functions, the function name is incremented in *ast.Ident + v.Coef.Opt[n.Name.Name]++ + } else { + v.Coef.Opt["()"]++ + } + case *ast.GenDecl: + if n.Lparen.IsValid() && n.Rparen.IsValid() { + v.Coef.Opt["()"]++ + } + + if n.Tok.IsOperator() { + v.Coef.Opt[n.Tok.String()]++ + } else { + v.Coef.Opd[n.Tok.String()]++ + } + } +} + +func (v *HalstVol) handleIdent(ident *ast.Ident) { + if ident.Obj == nil { + v.Coef.Opt[ident.Name]++ + } else { + if ident.Obj.Kind.String() != "func" { + v.Coef.Opd[ident.Name]++ + } + } +} + +func (v *HalstVol) handleLit(lit ast.Node) { + switch n := lit.(type) { + case *ast.BasicLit: + if n.Kind.IsLiteral() { + v.Coef.Opd[n.Value]++ + } else { + v.Coef.Opt[n.Value]++ + } + case *ast.CompositeLit: + incrIfAllTrue(v.Coef.Opt, "{}", []bool{n.Lbrace.IsValid(), n.Rbrace.IsValid()}) + } +} + +func (v *HalstVol) handleExpr(expr ast.Node) { + switch n := expr.(type) { + case *ast.ParenExpr: + incrIfAllTrue(v.Coef.Opt, "()", []bool{n.Lparen.IsValid(), n.Rparen.IsValid()}) + case *ast.IndexExpr: + incrIfAllTrue(v.Coef.Opt, "{}", []bool{n.Lbrack.IsValid(), n.Rbrack.IsValid()}) + case *ast.SliceExpr: + incrIfAllTrue(v.Coef.Opt, "[]", []bool{n.Lbrack.IsValid(), n.Rbrack.IsValid()}) + case *ast.TypeAssertExpr: + incrIfAllTrue(v.Coef.Opt, "()", []bool{n.Lparen.IsValid(), n.Rparen.IsValid()}) + case *ast.CallExpr: + incrIfAllTrue(v.Coef.Opt, "()", []bool{n.Lparen.IsValid(), n.Rparen.IsValid()}) + incrIfAllTrue(v.Coef.Opt, "...", []bool{n.Ellipsis != 0}) + case *ast.StarExpr: + incrIfAllTrue(v.Coef.Opt, "*", []bool{n.Star.IsValid()}) + case *ast.UnaryExpr: + if n.Op.IsOperator() { + v.Coef.Opt[n.Op.String()]++ + } else { + v.Coef.Opd[n.Op.String()]++ + } + case *ast.BinaryExpr: + v.Coef.Opt[n.Op.String()]++ + case *ast.KeyValueExpr: + incrIfAllTrue(v.Coef.Opt, ":", []bool{n.Colon.IsValid()}) + } +} + +func (v *HalstVol) handleStmt(stmt ast.Node) { + switch n := stmt.(type) { + case *ast.SendStmt: + incrIfAllTrue(v.Coef.Opt, "<-", []bool{n.Arrow.IsValid()}) + case *ast.IncDecStmt: + incrIfAllTrue(v.Coef.Opt, n.Tok.String(), []bool{n.Tok.IsOperator()}) + case *ast.AssignStmt: + if n.Tok.IsOperator() { + v.Coef.Opt[n.Tok.String()]++ + } + case *ast.GoStmt: + if n.Go.IsValid() { + v.Coef.Opt["go"]++ + } + case *ast.DeferStmt: + if n.Defer.IsValid() { + v.Coef.Opt["defer"]++ + } + case *ast.ReturnStmt: + if n.Return.IsValid() { + v.Coef.Opt["return"]++ + } + case *ast.BranchStmt: + if n.Tok.IsOperator() { + v.Coef.Opt[n.Tok.String()]++ + } else { + v.Coef.Opd[n.Tok.String()]++ + } + case *ast.BlockStmt: + if n.Lbrace.IsValid() && n.Rbrace.IsValid() { + v.Coef.Opt["{}"]++ + } + case *ast.IfStmt: + if n.If.IsValid() { + v.Coef.Opt["if"]++ + } + if n.Else != nil { + v.Coef.Opt["else"]++ + } + case 
*ast.SwitchStmt: + if n.Switch.IsValid() { + v.Coef.Opt["switch"]++ + } + case *ast.SelectStmt: + if n.Select.IsValid() { + v.Coef.Opt["select"]++ + } + case *ast.ForStmt: + if n.For.IsValid() { + v.Coef.Opt["for"]++ + } + case *ast.RangeStmt: + if n.For.IsValid() { + v.Coef.Opt["for"]++ + } + if n.Key != nil { + if n.Tok.IsOperator() { + v.Coef.Opt[n.Tok.String()]++ + } else { + v.Coef.Opd[n.Tok.String()]++ + } + } + v.Coef.Opt["range"]++ + } +} + +func (v *HalstVol) handleCaseClause(cc *ast.CaseClause) { + if cc.List == nil { + v.Coef.Opt["default"]++ + } + if cc.Colon.IsValid() { + v.Coef.Opt[":"]++ + } +} diff --git a/vendor/github.com/yagipy/maintidx/visitor.go b/vendor/github.com/yagipy/maintidx/visitor.go new file mode 100644 index 0000000000..e6f74c50d7 --- /dev/null +++ b/vendor/github.com/yagipy/maintidx/visitor.go @@ -0,0 +1,77 @@ +package maintidx + +import ( + "github.com/yagipy/maintidx/pkg/cyc" + "github.com/yagipy/maintidx/pkg/halstvol" + "go/ast" + "math" + "sort" +) + +type Visitor struct { + MaintIdx int + Coef Coef +} + +var _ ast.Visitor = &Visitor{} + +type Coef struct { + Cyc cyc.Cyc + HalstVol halstvol.HalstVol +} + +func NewVisitor() *Visitor { + return &Visitor{ + MaintIdx: 0, + Coef: Coef{ + Cyc: cyc.Cyc{ + Val: 1, + Coef: cyc.Coef{}, + }, + HalstVol: halstvol.HalstVol{ + Val: 0.0, + Coef: halstvol.Coef{ + Opt: map[string]int{}, + Opd: map[string]int{}, + }, + }, + }, + } +} + +func (v *Visitor) Visit(n ast.Node) ast.Visitor { + v.Coef.Cyc.Analyze(n) + v.Coef.HalstVol.Analyze(n) + return v +} + +// Calc https://docs.microsoft.com/ja-jp/archive/blogs/codeanalysis/maintainability-index-range-and-meaning +func (v *Visitor) calc(loc int) { + origVal := 171.0 - 5.2*math.Log(v.Coef.HalstVol.Val) - 0.23*float64(v.Coef.Cyc.Val) - 16.2*math.Log(float64(loc)) + normVal := int(math.Max(0.0, origVal*100.0/171.0)) + v.MaintIdx = normVal +} + +// TODO: Move halstvol package +func (v *Visitor) printHalstVol() { + sortedOpt := make([]string, len(v.Coef.HalstVol.Coef.Opt)) + sortedOpd := make([]string, len(v.Coef.HalstVol.Coef.Opd)) + optIndex := 0 + opdIndex := 0 + for key := range v.Coef.HalstVol.Coef.Opt { + sortedOpt[optIndex] = key + optIndex++ + } + for key := range v.Coef.HalstVol.Coef.Opd { + sortedOpd[opdIndex] = key + opdIndex++ + } + sort.Strings(sortedOpt) + sort.Strings(sortedOpd) + for _, val := range sortedOpt { + println("operators", val, v.Coef.HalstVol.Coef.Opt[val]) + } + for _, val := range sortedOpd { + println("operands", val, v.Coef.HalstVol.Coef.Opd[val]) + } +} diff --git a/vendor/github.com/ykadowak/zerologlint/.goreleaser.yaml b/vendor/github.com/ykadowak/zerologlint/.goreleaser.yaml new file mode 100644 index 0000000000..f3af3f2121 --- /dev/null +++ b/vendor/github.com/ykadowak/zerologlint/.goreleaser.yaml @@ -0,0 +1,24 @@ +before: + hooks: + - go mod tidy +builds: + - id: zerologlint + main: ./cmd/zerologlint + binary: zerologlint + env: + - CGO_ENABLED=0 + goos: + - linux + - windows + - darwin +checksum: + name_template: 'checksums.txt' +snapshot: + name_template: "{{ incpatch .Version }}-next" +changelog: + sort: asc + filters: + exclude: + - '^docs:' + - '^test:' + - '^ci:' diff --git a/vendor/github.com/ykadowak/zerologlint/LICENSE b/vendor/github.com/ykadowak/zerologlint/LICENSE new file mode 100644 index 0000000000..92a1e3b318 --- /dev/null +++ b/vendor/github.com/ykadowak/zerologlint/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2023 Yusuke Kadowaki + +Permission is hereby granted, free of charge, to any person obtaining a copy +of 
this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/ykadowak/zerologlint/README.md b/vendor/github.com/ykadowak/zerologlint/README.md new file mode 100644 index 0000000000..b0a5fc0f9f --- /dev/null +++ b/vendor/github.com/ykadowak/zerologlint/README.md @@ -0,0 +1,63 @@ +# zerologlint +![build](https://github.com/ykadowak/zerologlint/actions/workflows/testing.yaml/badge.svg) + +`zerologlint` is a linter for [zerolog](https://github.com/rs/zerolog) that can be run with `go vet` or through [golangci-lint](https://golangci-lint.run/) since `v1.53.0`. +It detects the wrong usage of `zerolog` that a user forgets to dispatch `zerolog.Event` with `Send` or `Msg` like functions, in which case nothing will be logged. For more detailed explanations of the cases it detects, see [Examples](#Example). + +## Install + +```bash +go install github.com/ykadowak/zerologlint/cmd/zerologlint@latest +``` + +## Usage +```bash +go vet -vettool=`which zerologlint` ./... +``` + +or you can also use it with [golangci-lint](https://golangci-lint.run/) since `v1.53.0`. + +## Examples +```go +package main + +import ( + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" +) + +func main() { + // 1. Basic case + log.Info() // "must be dispatched by Msg or Send method" + + // 2. Nested case + log.Info(). // "must be dispatched by Msg or Send method" + Str("foo", "bar"). + Dict("dict", zerolog.Dict(). + Str("bar", "baz"). + Int("n", 1), + ) + + // 3. Reassignment case + logger := log.Info() // "must be dispatched by Msg or Send method" + if err != nil { + logger = log.Error() // "must be dispatched by Msg or Send method" + } + logger.Str("foo", "bar") + + // 4. Deferred case + defer log.Info() // "must be dispatched by Msg or Send method" + + // 5. zerolog.Logger case + logger2 := zerolog.New(os.Stdout) + logger2.Info().Send() + + // 6. 
Dispatch in other function case + event := log.Info() + dispatcher(event) +} + +func dispatcher(e *zerolog.Event) { + e.Send() +} +``` diff --git a/vendor/github.com/ykadowak/zerologlint/zerologlint.go b/vendor/github.com/ykadowak/zerologlint/zerologlint.go new file mode 100644 index 0000000000..8c8fb74fc3 --- /dev/null +++ b/vendor/github.com/ykadowak/zerologlint/zerologlint.go @@ -0,0 +1,261 @@ +package zerologlint + +import ( + "go/token" + "go/types" + "strings" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/buildssa" + "golang.org/x/tools/go/ssa" + + "github.com/gostaticanalysis/comment/passes/commentmap" +) + +var Analyzer = &analysis.Analyzer{ + Name: "zerologlint", + Doc: "Detects the wrong usage of `zerolog` that a user forgets to dispatch with `Send` or `Msg`", + Run: run, + Requires: []*analysis.Analyzer{ + buildssa.Analyzer, + commentmap.Analyzer, + }, +} + +type posser interface { + Pos() token.Pos +} + +// callDefer is an interface just to hold both ssa.Call and ssa.Defer in our set +type callDefer interface { + Common() *ssa.CallCommon + Pos() token.Pos +} + +type linter struct { + // eventSet holds all the ssa block that is a zerolog.Event type instance + // that should be dispatched. + // Everytime the zerolog.Event is dispatched with Msg() or Send(), + // deletes that block from this set. + // At the end, check if the set is empty, or report the not dispatched block. + eventSet map[posser]struct{} + // deleteLater holds the ssa block that should be deleted from eventSet after + // all the inspection is done. + // this is required because `else` ssa block comes after the dispatch of `if`` block. + // e.g., if err != nil { log.Error() } else { log.Info() } log.Send() + // deleteLater takes care of the log.Info() block. + deleteLater map[posser]struct{} + recLimit uint +} + +func run(pass *analysis.Pass) (interface{}, error) { + srcFuncs := pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA).SrcFuncs + + l := &linter{ + eventSet: make(map[posser]struct{}), + deleteLater: make(map[posser]struct{}), + recLimit: 100, + } + + for _, sf := range srcFuncs { + for _, b := range sf.Blocks { + for _, instr := range b.Instrs { + if c, ok := instr.(*ssa.Call); ok { + l.inspect(c) + } else if c, ok := instr.(*ssa.Defer); ok { + l.inspect(c) + } + } + } + } + + // apply deleteLater to envetSet for else branches of if-else cases + + for k := range l.deleteLater { + delete(l.eventSet, k) + } + + // At the end, if the set is clear -> ok. + // Otherwise, there must be a left zerolog.Event var that weren't dispatched. So report it. + for k := range l.eventSet { + pass.Reportf(k.Pos(), "must be dispatched by Msg or Send method") + } + + return nil, nil +} + +func (l *linter) inspect(cd callDefer) { + c := cd.Common() + + // check if it's in github.com/rs/zerolog/log since there's some + // functions in github.com/rs/zerolog that returns zerolog.Event + // which should not be included. However, zerolog.Logger receiver is an exception. + if isInLogPkg(*c) || isLoggerRecv(*c) { + if isZerologEvent(c.Value) { + // this ssa block should be dispatched afterwards at some point + l.eventSet[cd] = struct{}{} + return + } + } + + // if the call does not return zerolog.Event, + // check if the base is zerolog.Event. + // if so, check if the StaticCallee is Send() or Msg(). + // if so, remove the arg[0] from the set. 
+ f := c.StaticCallee() + if f == nil { + return + } + if !isDispatchMethod(f) { + shouldReturn := true + for _, p := range f.Params { + if isZerologEvent(p) { + // check if this zerolog.Event as a parameter is dispatched in the function + // TODO: technically, it can be dispatched in another function that is called in this function, and + // this algorithm cannot track that. But I'm tired of thinking about that for now. + for _, b := range f.Blocks { + for _, instr := range b.Instrs { + switch v := instr.(type) { + case *ssa.Call: + if inspectDispatchInFunction(v.Common()) { + shouldReturn = false + break + } + case *ssa.Defer: + if inspectDispatchInFunction(v.Common()) { + shouldReturn = false + break + } + } + } + } + } + } + if shouldReturn { + return + } + } + for _, arg := range c.Args { + if isZerologEvent(arg) { + // if there's branch, track both ways + // this is for the case like: + // logger := log.Info() + // if err != nil { + // logger = log.Error() + // } + // logger.Send() + // + // Similar case like below goes to the same root but that doesn't + // have any side effect. + // logger := log.Info() + // if err != nil { + // logger = logger.Str("a", "b") + // } + // logger.Send() + if phi, ok := arg.(*ssa.Phi); ok { + for _, edge := range phi.Edges { + l.dfsEdge(edge, make(map[ssa.Value]struct{}), 0) + } + } else { + val := getRootSsaValue(arg) + delete(l.eventSet, val) + } + } + } +} + +func (l *linter) dfsEdge(v ssa.Value, visit map[ssa.Value]struct{}, cnt uint) { + // only for safety + if cnt > l.recLimit { + return + } + cnt++ + + if _, ok := visit[v]; ok { + return + } + visit[v] = struct{}{} + + val := getRootSsaValue(v) + phi, ok := val.(*ssa.Phi) + if !ok { + l.deleteLater[val] = struct{}{} + return + } + for _, edge := range phi.Edges { + l.dfsEdge(edge, visit, cnt) + } +} + +func inspectDispatchInFunction(cc *ssa.CallCommon) bool { + if isDispatchMethod(cc.StaticCallee()) { + for _, arg := range cc.Args { + if isZerologEvent(arg) { + return true + } + } + } + return false +} + +func isInLogPkg(c ssa.CallCommon) bool { + switch v := c.Value.(type) { + case ssa.Member: + p := v.Package() + if p == nil { + return false + } + return strings.HasSuffix(p.Pkg.Path(), "github.com/rs/zerolog/log") + } + return false +} + +func isLoggerRecv(c ssa.CallCommon) bool { + switch f := c.Value.(type) { + case *ssa.Function: + if recv := f.Signature.Recv(); recv != nil { + return strings.HasSuffix(types.TypeString(recv.Type(), nil), "zerolog.Logger") + } + } + return false +} + +func isZerologEvent(v ssa.Value) bool { + ts := v.Type().String() + return strings.HasSuffix(ts, "github.com/rs/zerolog.Event") +} + +func isDispatchMethod(f *ssa.Function) bool { + if f == nil { + return false + } + m := f.Name() + if m == "Send" || m == "Msg" || m == "Msgf" || m == "MsgFunc" { + return true + } + return false +} + +func getRootSsaValue(v ssa.Value) ssa.Value { + if c, ok := v.(*ssa.Call); ok { + v := c.Value() + + // When there is no receiver, that's the block of zerolog.Event + // eg. Error() method in log.Error().Str("foo", "bar").Send() + if len(v.Call.Args) == 0 { + return v + } + + // Even when there is a receiver, if it's a zerolog.Logger instance, return this block + // eg. Info() method in zerolog.New(os.Stdout).Info() + root := v.Call.Args[0] + if !isZerologEvent(root) { + return v + } + + // Ok to just return the receiver because all the method in this + // chain is zerolog.Event at this point. 
+ return getRootSsaValue(root) + } + return v +} diff --git a/vendor/gitlab.com/bosi/decorder/.gitignore b/vendor/gitlab.com/bosi/decorder/.gitignore new file mode 100644 index 0000000000..48baa654b0 --- /dev/null +++ b/vendor/gitlab.com/bosi/decorder/.gitignore @@ -0,0 +1,7 @@ +/.idea +/.env +/.env.example +/decorder +/LICENSES-3RD-PARTY +/ytt +/yq \ No newline at end of file diff --git a/vendor/gitlab.com/bosi/decorder/.gitlab-ci.params.yml b/vendor/gitlab.com/bosi/decorder/.gitlab-ci.params.yml new file mode 100644 index 0000000000..fe6b852887 --- /dev/null +++ b/vendor/gitlab.com/bosi/decorder/.gitlab-ci.params.yml @@ -0,0 +1,15 @@ +#@data/values +--- + +app: + name: decorder + +code_quality: + enable_tests: true + enable_static_code_analyses: true + enable_license_check: true + +deployment: + enable_rc_handling: false + use_gitlab_container_registry: false + enable_image_build_and_deploy: false diff --git a/vendor/gitlab.com/bosi/decorder/.gitlab-ci.yml b/vendor/gitlab.com/bosi/decorder/.gitlab-ci.yml new file mode 100644 index 0000000000..73e1273b34 --- /dev/null +++ b/vendor/gitlab.com/bosi/decorder/.gitlab-ci.yml @@ -0,0 +1,63 @@ +############################### +# This file is auto-generated # +############################### + +variables: + APP_NAME: decorder + +stages: + - test + - build + - release + +test: + stage: test + image: golang:1.21.0@sha256:b490ae1f0ece153648dd3c5d25be59a63f966b5f9e1311245c947de4506981aa + before_script: + - set -eu + - if [[ -f .env.pipeline ]];then cp .env.pipeline .env;fi + - mkdir -p ~/.ssh + - touch ~/.ssh/known_hosts + - ssh-keyscan gitlab.com > ~/.ssh/known_hosts + retry: 2 + script: + - '### run tests ###' + - make test + - make test-cover + +lint:source-code: + stage: test + image: golangci/golangci-lint:v1.54.2-alpine@sha256:e950721f6ae622dcc041f57cc0b61c3a78d4bbfc588facfc8b0166901a9f4848 + script: + - apk add make bash + - make settings + - '### run linter ###' + - golangci-lint run ./... + +license-check: + stage: test + image: golang:1.21.0@sha256:b490ae1f0ece153648dd3c5d25be59a63f966b5f9e1311245c947de4506981aa + before_script: + - set -eu + - if [[ -f .env.pipeline ]];then cp .env.pipeline .env;fi + - mkdir -p ~/.ssh + - touch ~/.ssh/known_hosts + - ssh-keyscan gitlab.com > ~/.ssh/known_hosts + script: + - '### run license-check ###' + - make check-licenses + artifacts: + paths: + - LICENSES-3RD-PARTY + expire_in: 7 days + +pages: + stage: release + image: golang:1.21.0@sha256:b490ae1f0ece153648dd3c5d25be59a63f966b5f9e1311245c947de4506981aa + only: + - tags + script: + - make gitlab-pages + artifacts: + paths: + - public/ diff --git a/vendor/gitlab.com/bosi/decorder/LICENSE.md b/vendor/gitlab.com/bosi/decorder/LICENSE.md new file mode 100644 index 0000000000..d46c30e18e --- /dev/null +++ b/vendor/gitlab.com/bosi/decorder/LICENSE.md @@ -0,0 +1,16 @@ +MIT License + +Copyright (c) 2021 Florian Bosdorff + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the Software without restriction, including without limitation the +rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit +persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the +Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE +WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/vendor/gitlab.com/bosi/decorder/Makefile b/vendor/gitlab.com/bosi/decorder/Makefile new file mode 100644 index 0000000000..8d4c05690f --- /dev/null +++ b/vendor/gitlab.com/bosi/decorder/Makefile @@ -0,0 +1,7 @@ +include project-templates/base.mk + +project-templates/base.mk: + @cp -ar ~/.dotfiles/projects/golang ./project-templates + +.env: + touch .env \ No newline at end of file diff --git a/vendor/gitlab.com/bosi/decorder/README.md b/vendor/gitlab.com/bosi/decorder/README.md new file mode 100644 index 0000000000..5947e5ca2c --- /dev/null +++ b/vendor/gitlab.com/bosi/decorder/README.md @@ -0,0 +1,50 @@ +# Decorder + +A declaration order linter for Go. In case of this tool declarations are `type`, `const`, `var` and `func`. + +## Rules + +This linter applies multiple rules where each can be disabled via cli parameter. + +| rule | description | cli-options | +|--------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| declaration order | Enforces the order of global declarations (e.g. all global constants are always defined before variables).
You can also define a subset of declarations if you don't want to enforce the order of all of them. | * disable all checks: `-disable-dec-order-check`
* disable type checks: `-disable-type-dec-order-check`
* disable const checks: `-disable-const-dec-order-check`
* disable var checks: `-disable-var-dec-order-check`
* custom order: `-dec-order var,const,func,type` | +| declaration number | Enforces that the statements const, var and type are only used once per file. You have to use parentheses
to declare e.g multiple global types inside a file. | disable check: `-disable-dec-num-check` | +| init func first | Enforces the init func to be the first function in file. | disable check: `-disable-init-func-first-check` | + +You may find the implementation of the rules inside `analyzer.go`. + +Underscore var declarations can be ignored via `-ignore-underscore-vars`. + +## Installation + +```shell +go install gitlab.com/bosi/decorder/cmd/decorder +``` + +You can use the linter via golangci-lint as well: https://golangci-lint.run/usage/linters/#decorder. + +## Usage + +```shell +# with default options +decorder ./... + +# custom declaration order +decorder -dec-order var,const,func,type ./... + +# disable declaration order check +decorder -disable-dec-order-check ./... + +# disable check for multiple declarations statements +decorder -disable-dec-num-check ./... + +# disable check for multiple declarations (var only) statements +decorder -disable-var-dec-num-check ./... + +# disable check that init func is always first function +decorder -disable-init-func-first-check ./... + +# ignore underscore variables for all checks +decorder -ignore-underscore-vars ./... +``` \ No newline at end of file diff --git a/vendor/gitlab.com/bosi/decorder/analyzer.go b/vendor/gitlab.com/bosi/decorder/analyzer.go new file mode 100644 index 0000000000..08f82ccc11 --- /dev/null +++ b/vendor/gitlab.com/bosi/decorder/analyzer.go @@ -0,0 +1,255 @@ +package decorder + +import ( + "fmt" + "go/ast" + "go/token" + "strings" + "sync" + + "golang.org/x/tools/go/analysis" +) + +type ( + decNumChecker struct { + tokenMap map[string]token.Token + tokenCounts map[token.Token]int + decOrder []string + funcPoss []funcPos + } + + options struct { + decOrder string + ignoreUnderscoreVars bool + disableDecNumCheck bool + disableTypeDecNumCheck bool + disableConstDecNumCheck bool + disableVarDecNumCheck bool + disableDecOrderCheck bool + disableInitFuncFirstCheck bool + } + + funcPos struct { + start token.Pos + end token.Pos + } +) + +const ( + Name = "decorder" + + FlagDo = "dec-order" + FlagIuv = "ignore-underscore-vars" + FlagDdnc = "disable-dec-num-check" + FlagDtdnc = "disable-type-dec-num-check" + FlagDcdnc = "disable-const-dec-num-check" + FlagDvdnc = "disable-var-dec-num-check" + FlagDdoc = "disable-dec-order-check" + FlagDiffc = "disable-init-func-first-check" + + defaultDecOrder = "type,const,var,func" +) + +var ( + Analyzer = &analysis.Analyzer{ + Name: Name, + Doc: "check declaration order and count of types, constants, variables and functions", + Run: run, + } + + opts = options{} + + tokens = []token.Token{token.TYPE, token.CONST, token.VAR, token.FUNC} + + decNumConf = map[token.Token]bool{ + token.TYPE: false, + token.CONST: false, + token.VAR: false, + } + decLock sync.Mutex +) + +//nolint:lll +func init() { + Analyzer.Flags.StringVar(&opts.decOrder, FlagDo, defaultDecOrder, "define the required order of types, constants, variables and functions declarations inside a file") + Analyzer.Flags.BoolVar(&opts.ignoreUnderscoreVars, FlagIuv, false, "option to ignore underscore vars for dec order and dec num check") + Analyzer.Flags.BoolVar(&opts.disableDecNumCheck, FlagDdnc, false, "option to disable (all) checks for number of declarations inside file") + Analyzer.Flags.BoolVar(&opts.disableTypeDecNumCheck, FlagDtdnc, false, "option to disable check for number of type declarations inside file") + Analyzer.Flags.BoolVar(&opts.disableConstDecNumCheck, FlagDcdnc, false, "option to disable check for number of const 
declarations inside file") + Analyzer.Flags.BoolVar(&opts.disableVarDecNumCheck, FlagDvdnc, false, "option to disable check for number of var declarations inside file") + Analyzer.Flags.BoolVar(&opts.disableDecOrderCheck, FlagDdoc, false, "option to disable check for order of declarations inside file") + Analyzer.Flags.BoolVar(&opts.disableInitFuncFirstCheck, FlagDiffc, false, "option to disable check that init function is always first function in file") +} + +func initDec() { + decLock.Lock() + decNumConf[token.TYPE] = opts.disableTypeDecNumCheck + decNumConf[token.CONST] = opts.disableConstDecNumCheck + decNumConf[token.VAR] = opts.disableVarDecNumCheck + decLock.Unlock() +} + +func run(pass *analysis.Pass) (interface{}, error) { + initDec() + + for _, f := range pass.Files { + ast.Inspect(f, runDeclNumAndDecOrderCheck(pass)) + + if !opts.disableInitFuncFirstCheck { + ast.Inspect(f, runInitFuncFirstCheck(pass)) + } + } + + return nil, nil +} + +func runInitFuncFirstCheck(pass *analysis.Pass) func(ast.Node) bool { + nonInitFound := false + + return func(n ast.Node) bool { + dec, ok := n.(*ast.FuncDecl) + if !ok { + return true + } + + if dec.Name.Name == "init" && dec.Recv == nil { + if nonInitFound { + pass.Reportf(dec.Pos(), "init func must be the first function in file") + } + } else { + nonInitFound = true + } + + return true + } +} + +func runDeclNumAndDecOrderCheck(pass *analysis.Pass) func(ast.Node) bool { + dnc := newDecNumChecker() + + if opts.disableDecNumCheck && opts.disableDecOrderCheck { + return func(n ast.Node) bool { + return true + } + } + + return func(n ast.Node) bool { + fd, ok := n.(*ast.FuncDecl) + if ok { + return dnc.handleFuncDec(fd, pass) + } + + gd, ok := n.(*ast.GenDecl) + if !ok { + return true + } + + if dnc.isInsideFunction(gd) { + return true + } + + dnc.handleGenDecl(gd, pass) + + if !opts.disableDecOrderCheck { + dnc.handleDecOrderCheck(gd, pass) + } + + return true + } +} + +func newDecNumChecker() decNumChecker { + dnc := decNumChecker{ + tokenMap: map[string]token.Token{}, + tokenCounts: map[token.Token]int{}, + decOrder: []string{}, + funcPoss: []funcPos{}, + } + + for _, t := range tokens { + dnc.tokenCounts[t] = 0 + dnc.tokenMap[t.String()] = t + } + + for _, do := range strings.Split(opts.decOrder, ",") { + dnc.decOrder = append(dnc.decOrder, strings.TrimSpace(do)) + } + + return dnc +} + +func (dnc decNumChecker) isToLate(t token.Token) (string, bool) { + for i, do := range dnc.decOrder { + if do == t.String() { + for j := i + 1; j < len(dnc.decOrder); j++ { + if dnc.tokenCounts[dnc.tokenMap[dnc.decOrder[j]]] > 0 { + return dnc.decOrder[j], false + } + } + return "", true + } + } + + return "", true +} + +func (dnc *decNumChecker) handleGenDecl(gd *ast.GenDecl, pass *analysis.Pass) { + for _, t := range tokens { + if gd.Tok == t { + if opts.ignoreUnderscoreVars && declName(gd) == "_" { + continue + } + + dnc.tokenCounts[t]++ + + if !opts.disableDecNumCheck && !decNumConf[t] && dnc.tokenCounts[t] > 1 { + pass.Reportf(gd.Pos(), "multiple \"%s\" declarations are not allowed; use parentheses instead", t.String()) + } + } + } +} + +func declName(gd *ast.GenDecl) string { + for _, spec := range gd.Specs { + s, ok := spec.(*ast.ValueSpec) + if ok && len(s.Names) > 0 && s.Names[0] != nil { + return s.Names[0].Name + } + } + return "" +} + +func (dnc decNumChecker) handleDecOrderCheck(gd *ast.GenDecl, pass *analysis.Pass) { + l, c := dnc.isToLate(gd.Tok) + if !c { + pass.Reportf(gd.Pos(), fmtWrongOrderMsg(gd.Tok.String(), l)) + } +} + +func (dnc 
decNumChecker) isInsideFunction(dn *ast.GenDecl) bool { + for _, poss := range dnc.funcPoss { + if poss.start < dn.Pos() && poss.end > dn.Pos() { + return true + } + } + return false +} + +func (dnc *decNumChecker) handleFuncDec(fd *ast.FuncDecl, pass *analysis.Pass) bool { + dnc.funcPoss = append(dnc.funcPoss, funcPos{start: fd.Pos(), end: fd.End()}) + + dnc.tokenCounts[token.FUNC]++ + + if !opts.disableDecOrderCheck { + l, c := dnc.isToLate(token.FUNC) + if !c { + pass.Reportf(fd.Pos(), fmtWrongOrderMsg(token.FUNC.String(), l)) + } + } + + return true +} + +func fmtWrongOrderMsg(target string, notAfter string) string { + return fmt.Sprintf("%s must not be placed after %s (desired order: %s)", target, notAfter, opts.decOrder) +} diff --git a/vendor/gitlab.com/bosi/decorder/renovate.json b/vendor/gitlab.com/bosi/decorder/renovate.json new file mode 100644 index 0000000000..60041578ce --- /dev/null +++ b/vendor/gitlab.com/bosi/decorder/renovate.json @@ -0,0 +1,7 @@ +{ + "$schema": "https://docs.renovatebot.com/renovate-schema.json", + "extends": [ + "gitlab>bosi/renovate-configs//configs/golang", + "gitlab>bosi/renovate-configs//configs/automerge" + ] +} diff --git a/vendor/go-simpler.org/musttag/.golangci.yml b/vendor/go-simpler.org/musttag/.golangci.yml new file mode 100644 index 0000000000..641471cc44 --- /dev/null +++ b/vendor/go-simpler.org/musttag/.golangci.yml @@ -0,0 +1,23 @@ +linters: + disable-all: true + enable: + # enabled by default: + - errcheck + - gosimple + - govet + - ineffassign + - staticcheck + - typecheck + - unused + # disabled by default: + - gocritic + - gofumpt + +linters-settings: + gocritic: + enabled-tags: + - diagnostic + - style + - performance + - experimental + - opinionated diff --git a/vendor/go-simpler.org/musttag/.goreleaser.yml b/vendor/go-simpler.org/musttag/.goreleaser.yml new file mode 100644 index 0000000000..dd75ac945b --- /dev/null +++ b/vendor/go-simpler.org/musttag/.goreleaser.yml @@ -0,0 +1,18 @@ +builds: + - main: ./cmd/musttag + env: + - CGO_ENABLED=0 + flags: + - -trimpath + ldflags: + - -s -w -X main.version={{ .Version }} + targets: + - darwin_amd64 + - darwin_arm64 + - linux_amd64 + - windows_amd64 + +archives: + - format_overrides: + - goos: windows + format: zip diff --git a/vendor/go-simpler.org/musttag/LICENSE b/vendor/go-simpler.org/musttag/LICENSE new file mode 100644 index 0000000000..a612ad9813 --- /dev/null +++ b/vendor/go-simpler.org/musttag/LICENSE @@ -0,0 +1,373 @@ +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. 
+ +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. 
+Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. 
Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. 
Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. * +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. 
+ +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. diff --git a/vendor/go-simpler.org/musttag/README.md b/vendor/go-simpler.org/musttag/README.md new file mode 100644 index 0000000000..54c2744ccf --- /dev/null +++ b/vendor/go-simpler.org/musttag/README.md @@ -0,0 +1,102 @@ +# musttag + +[![checks](https://github.com/go-simpler/musttag/actions/workflows/checks.yml/badge.svg)](https://github.com/go-simpler/musttag/actions/workflows/checks.yml) +[![pkg.go.dev](https://pkg.go.dev/badge/go-simpler.org/musttag.svg)](https://pkg.go.dev/go-simpler.org/musttag) +[![goreportcard](https://goreportcard.com/badge/go-simpler.org/musttag)](https://goreportcard.com/report/go-simpler.org/musttag) +[![codecov](https://codecov.io/gh/go-simpler/musttag/branch/main/graph/badge.svg)](https://codecov.io/gh/go-simpler/musttag) + +A Go linter that enforces field tags in (un)marshaled structs. + +## 📌 About + +`musttag` checks that exported fields of a struct passed to a `Marshal`-like function are annotated with the relevant tag: + +```go +// BAD: +var user struct { + Name string +} +data, err := json.Marshal(user) + +// GOOD: +var user struct { + Name string `json:"name"` +} +data, err := json.Marshal(user) +``` + +The rational from [Uber Style Guide][1]: + +> The serialized form of the structure is a contract between different systems. +> Changes to the structure of the serialized form, including field names, break this contract. +> Specifying field names inside tags makes the contract explicit, +> and it guards against accidentally breaking the contract by refactoring or renaming fields. + +## 🚀 Features + +The following packages are supported out of the box: + +* [encoding/json][2] +* [encoding/xml][3] +* [gopkg.in/yaml.v3][4] +* [github.com/BurntSushi/toml][5] +* [github.com/mitchellh/mapstructure][6] +* [github.com/jmoiron/sqlx][7] + +In addition, any [custom package](#custom-packages) can be added to the list. + +## 📦 Install + +`musttag` is integrated into [`golangci-lint`][8], and this is the recommended way to use it. 
+ +To enable the linter, add the following lines to `.golangci.yml`: + +```yaml +linters: + enable: + - musttag +``` + +Alternatively, you can download a prebuilt binary from the [Releases][9] page to use `musttag` standalone. + +## 📋 Usage + +Run `golangci-lint` with `musttag` enabled. +See the list of [available options][10] to configure the linter. + +When using `musttag` standalone, pass the options as flags. + +### Custom packages + +To report a custom function, you need to add its description to `.golangci.yml`. +The following is an example of adding support for [`hclsimple.Decode`][11]: + +```yaml +linters-settings: + musttag: + functions: + # The full name of the function, including the package. + - name: github.com/hashicorp/hcl/v2/hclsimple.Decode + # The struct tag whose presence should be ensured. + tag: hcl + # The position of the argument to check. + arg-pos: 2 +``` + +The same can be done via the `-fn=` flag when using `musttag` standalone: + +```shell +musttag -fn="github.com/hashicorp/hcl/v2/hclsimple.DecodeFile:hcl:2" ./... +``` + +[1]: https://github.com/uber-go/guide/blob/master/style.md#use-field-tags-in-marshaled-structs +[2]: https://pkg.go.dev/encoding/json +[3]: https://pkg.go.dev/encoding/xml +[4]: https://pkg.go.dev/gopkg.in/yaml.v3 +[5]: https://pkg.go.dev/github.com/BurntSushi/toml +[6]: https://pkg.go.dev/github.com/mitchellh/mapstructure +[7]: https://pkg.go.dev/github.com/jmoiron/sqlx +[8]: https://golangci-lint.run +[9]: https://github.com/go-simpler/musttag/releases +[10]: https://golangci-lint.run/usage/linters/#musttag +[11]: https://pkg.go.dev/github.com/hashicorp/hcl/v2/hclsimple#Decode diff --git a/vendor/go-simpler.org/musttag/builtins.go b/vendor/go-simpler.org/musttag/builtins.go new file mode 100644 index 0000000000..3305513f8f --- /dev/null +++ b/vendor/go-simpler.org/musttag/builtins.go @@ -0,0 +1,133 @@ +package musttag + +// builtins is a set of functions supported out of the box.
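+// Each entry follows the Func convention used below: Name is the fully
+// qualified function name (methods are written as "(*package/path.Type).Method"),
+// Tag is the struct tag whose presence is required, and ArgPos is the
+// zero-based index of the (un)marshaled argument, e.g. 0 for json.Marshal(v)
+// and 1 for json.Unmarshal(data, v).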
+var builtins = []Func{ + // https://pkg.go.dev/encoding/json + { + Name: "encoding/json.Marshal", Tag: "json", ArgPos: 0, + ifaceWhitelist: []string{"encoding/json.Marshaler", "encoding.TextMarshaler"}, + }, + { + Name: "encoding/json.MarshalIndent", Tag: "json", ArgPos: 0, + ifaceWhitelist: []string{"encoding/json.Marshaler", "encoding.TextMarshaler"}, + }, + { + Name: "encoding/json.Unmarshal", Tag: "json", ArgPos: 1, + ifaceWhitelist: []string{"encoding/json.Unmarshaler", "encoding.TextUnmarshaler"}, + }, + { + Name: "(*encoding/json.Encoder).Encode", Tag: "json", ArgPos: 0, + ifaceWhitelist: []string{"encoding/json.Marshaler", "encoding.TextMarshaler"}, + }, + { + Name: "(*encoding/json.Decoder).Decode", Tag: "json", ArgPos: 0, + ifaceWhitelist: []string{"encoding/json.Unmarshaler", "encoding.TextUnmarshaler"}, + }, + + // https://pkg.go.dev/encoding/xml + { + Name: "encoding/xml.Marshal", Tag: "xml", ArgPos: 0, + ifaceWhitelist: []string{"encoding/xml.Marshaler", "encoding.TextMarshaler"}, + }, + { + Name: "encoding/xml.MarshalIndent", Tag: "xml", ArgPos: 0, + ifaceWhitelist: []string{"encoding/xml.Marshaler", "encoding.TextMarshaler"}, + }, + { + Name: "encoding/xml.Unmarshal", Tag: "xml", ArgPos: 1, + ifaceWhitelist: []string{"encoding/xml.Unmarshaler", "encoding.TextUnmarshaler"}, + }, + { + Name: "(*encoding/xml.Encoder).Encode", Tag: "xml", ArgPos: 0, + ifaceWhitelist: []string{"encoding/xml.Marshaler", "encoding.TextMarshaler"}, + }, + { + Name: "(*encoding/xml.Decoder).Decode", Tag: "xml", ArgPos: 0, + ifaceWhitelist: []string{"encoding/xml.Unmarshaler", "encoding.TextUnmarshaler"}, + }, + { + Name: "(*encoding/xml.Encoder).EncodeElement", Tag: "xml", ArgPos: 0, + ifaceWhitelist: []string{"encoding/xml.Marshaler", "encoding.TextMarshaler"}, + }, + { + Name: "(*encoding/xml.Decoder).DecodeElement", Tag: "xml", ArgPos: 0, + ifaceWhitelist: []string{"encoding/xml.Unmarshaler", "encoding.TextUnmarshaler"}, + }, + + // https://pkg.go.dev/gopkg.in/yaml.v3 + { + Name: "gopkg.in/yaml.v3.Marshal", Tag: "yaml", ArgPos: 0, + ifaceWhitelist: []string{"gopkg.in/yaml.v3.Marshaler"}, + }, + { + Name: "gopkg.in/yaml.v3.Unmarshal", Tag: "yaml", ArgPos: 1, + ifaceWhitelist: []string{"gopkg.in/yaml.v3.Unmarshaler"}, + }, + { + Name: "(*gopkg.in/yaml.v3.Encoder).Encode", Tag: "yaml", ArgPos: 0, + ifaceWhitelist: []string{"gopkg.in/yaml.v3.Marshaler"}, + }, + { + Name: "(*gopkg.in/yaml.v3.Decoder).Decode", Tag: "yaml", ArgPos: 0, + ifaceWhitelist: []string{"gopkg.in/yaml.v3.Unmarshaler"}, + }, + + // https://pkg.go.dev/github.com/BurntSushi/toml + { + Name: "github.com/BurntSushi/toml.Unmarshal", Tag: "toml", ArgPos: 1, + ifaceWhitelist: []string{"github.com/BurntSushi/toml.Unmarshaler", "encoding.TextUnmarshaler"}, + }, + { + Name: "github.com/BurntSushi/toml.Decode", Tag: "toml", ArgPos: 1, + ifaceWhitelist: []string{"github.com/BurntSushi/toml.Unmarshaler", "encoding.TextUnmarshaler"}, + }, + { + Name: "github.com/BurntSushi/toml.DecodeFS", Tag: "toml", ArgPos: 2, + ifaceWhitelist: []string{"github.com/BurntSushi/toml.Unmarshaler", "encoding.TextUnmarshaler"}, + }, + { + Name: "github.com/BurntSushi/toml.DecodeFile", Tag: "toml", ArgPos: 1, + ifaceWhitelist: []string{"github.com/BurntSushi/toml.Unmarshaler", "encoding.TextUnmarshaler"}, + }, + { + Name: "(*github.com/BurntSushi/toml.Encoder).Encode", Tag: "toml", ArgPos: 0, + ifaceWhitelist: []string{"encoding.TextMarshaler"}, + }, + { + Name: "(*github.com/BurntSushi/toml.Decoder).Decode", Tag: "toml", ArgPos: 0, + ifaceWhitelist: 
[]string{"github.com/BurntSushi/toml.Unmarshaler", "encoding.TextUnmarshaler"}, + }, + + // https://pkg.go.dev/github.com/mitchellh/mapstructure + {Name: "github.com/mitchellh/mapstructure.Decode", Tag: "mapstructure", ArgPos: 1}, + {Name: "github.com/mitchellh/mapstructure.DecodeMetadata", Tag: "mapstructure", ArgPos: 1}, + {Name: "github.com/mitchellh/mapstructure.WeakDecode", Tag: "mapstructure", ArgPos: 1}, + {Name: "github.com/mitchellh/mapstructure.WeakDecodeMetadata", Tag: "mapstructure", ArgPos: 1}, + + // https://pkg.go.dev/github.com/jmoiron/sqlx + {Name: "github.com/jmoiron/sqlx.Get", Tag: "db", ArgPos: 1}, + {Name: "github.com/jmoiron/sqlx.GetContext", Tag: "db", ArgPos: 2}, + {Name: "github.com/jmoiron/sqlx.Select", Tag: "db", ArgPos: 1}, + {Name: "github.com/jmoiron/sqlx.SelectContext", Tag: "db", ArgPos: 2}, + {Name: "github.com/jmoiron/sqlx.StructScan", Tag: "db", ArgPos: 1}, + {Name: "(*github.com/jmoiron/sqlx.Conn).GetContext", Tag: "db", ArgPos: 1}, + {Name: "(*github.com/jmoiron/sqlx.Conn).SelectContext", Tag: "db", ArgPos: 1}, + {Name: "(*github.com/jmoiron/sqlx.DB).Get", Tag: "db", ArgPos: 0}, + {Name: "(*github.com/jmoiron/sqlx.DB).GetContext", Tag: "db", ArgPos: 1}, + {Name: "(*github.com/jmoiron/sqlx.DB).Select", Tag: "db", ArgPos: 0}, + {Name: "(*github.com/jmoiron/sqlx.DB).SelectContext", Tag: "db", ArgPos: 1}, + {Name: "(*github.com/jmoiron/sqlx.NamedStmt).Get", Tag: "db", ArgPos: 0}, + {Name: "(*github.com/jmoiron/sqlx.NamedStmt).GetContext", Tag: "db", ArgPos: 1}, + {Name: "(*github.com/jmoiron/sqlx.NamedStmt).Select", Tag: "db", ArgPos: 0}, + {Name: "(*github.com/jmoiron/sqlx.NamedStmt).SelectContext", Tag: "db", ArgPos: 1}, + {Name: "(*github.com/jmoiron/sqlx.Row).StructScan", Tag: "db", ArgPos: 0}, + {Name: "(*github.com/jmoiron/sqlx.Rows).StructScan", Tag: "db", ArgPos: 0}, + {Name: "(*github.com/jmoiron/sqlx.Stmt).Get", Tag: "db", ArgPos: 0}, + {Name: "(*github.com/jmoiron/sqlx.Stmt).GetContext", Tag: "db", ArgPos: 1}, + {Name: "(*github.com/jmoiron/sqlx.Stmt).Select", Tag: "db", ArgPos: 0}, + {Name: "(*github.com/jmoiron/sqlx.Stmt).SelectContext", Tag: "db", ArgPos: 1}, + {Name: "(*github.com/jmoiron/sqlx.Tx).Get", Tag: "db", ArgPos: 0}, + {Name: "(*github.com/jmoiron/sqlx.Tx).GetContext", Tag: "db", ArgPos: 1}, + {Name: "(*github.com/jmoiron/sqlx.Tx).Select", Tag: "db", ArgPos: 0}, + {Name: "(*github.com/jmoiron/sqlx.Tx).SelectContext", Tag: "db", ArgPos: 1}, +} diff --git a/vendor/go-simpler.org/musttag/musttag.go b/vendor/go-simpler.org/musttag/musttag.go new file mode 100644 index 0000000000..b7e41f3d36 --- /dev/null +++ b/vendor/go-simpler.org/musttag/musttag.go @@ -0,0 +1,262 @@ +// Package musttag implements the musttag analyzer. +package musttag + +import ( + "flag" + "fmt" + "go/ast" + "go/types" + "reflect" + "strconv" + "strings" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" + "golang.org/x/tools/go/types/typeutil" +) + +// Func describes a function call to look for, e.g. [json.Marshal]. +type Func struct { + Name string // The full name of the function, including the package. + Tag string // The struct tag whose presence should be ensured. + ArgPos int // The position of the argument to check. + + // a list of interface names (including the package); + // if at least one is implemented by the argument, no check is performed. + ifaceWhitelist []string +} + +// New creates a new musttag analyzer. +// To report a custom function, provide its description as [Func]. 
+func New(funcs ...Func) *analysis.Analyzer { + var flagFuncs []Func + return &analysis.Analyzer{ + Name: "musttag", + Doc: "enforce field tags in (un)marshaled structs", + Flags: flags(&flagFuncs), + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Run: func(pass *analysis.Pass) (any, error) { + l := len(builtins) + len(funcs) + len(flagFuncs) + allFuncs := make(map[string]Func, l) + + merge := func(slice []Func) { + for _, fn := range slice { + allFuncs[fn.Name] = fn + } + } + merge(builtins) + merge(funcs) + merge(flagFuncs) + + mainModule, err := getMainModule() + if err != nil { + return nil, err + } + + return run(pass, mainModule, allFuncs) + }, + } +} + +func flags(funcs *[]Func) flag.FlagSet { + fs := flag.NewFlagSet("musttag", flag.ContinueOnError) + fs.Func("fn", "report a custom function (name:tag:arg-pos)", func(s string) error { + parts := strings.Split(s, ":") + if len(parts) != 3 || parts[0] == "" || parts[1] == "" { + return strconv.ErrSyntax + } + pos, err := strconv.Atoi(parts[2]) + if err != nil { + return err + } + *funcs = append(*funcs, Func{ + Name: parts[0], + Tag: parts[1], + ArgPos: pos, + }) + return nil + }) + return *fs +} + +func run(pass *analysis.Pass, mainModule string, funcs map[string]Func) (_ any, err error) { + visit := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + filter := []ast.Node{(*ast.CallExpr)(nil)} + + visit.Preorder(filter, func(node ast.Node) { + if err != nil { + return // there is already an error. + } + + call, ok := node.(*ast.CallExpr) + if !ok { + return // not a function call. + } + + callee := typeutil.StaticCallee(pass.TypesInfo, call) + if callee == nil { + return // not a static call. + } + + fn, ok := funcs[cutVendor(callee.FullName())] + if !ok { + return // unsupported function. + } + + if len(call.Args) <= fn.ArgPos { + err = fmt.Errorf("musttag: Func.ArgPos cannot be %d: %s accepts only %d argument(s)", fn.ArgPos, fn.Name, len(call.Args)) + return + } + + arg := call.Args[fn.ArgPos] + if ident, ok := arg.(*ast.Ident); ok && ident.Obj == nil { + return // e.g. json.Marshal(nil) + } + + typ := pass.TypesInfo.TypeOf(arg) + if typ == nil { + return // no type info found. + } + + // TODO: check nested structs too. + if implementsInterface(typ, fn.ifaceWhitelist, pass.Pkg.Imports()) { + return // the type implements a Marshaler interface; see issue #64. + } + + checker := checker{ + mainModule: mainModule, + seenTypes: make(map[string]struct{}), + } + + styp, ok := checker.parseStruct(typ) + if !ok { + return // not a struct. + } + + if valid := checker.checkStruct(styp, fn.Tag); valid { + return // nothing to report. + } + + pass.Reportf(arg.Pos(), "the given struct should be annotated with the `%s` tag", fn.Tag) + }) + + return nil, err +} + +type checker struct { + mainModule string + seenTypes map[string]struct{} +} + +func (c *checker) parseStruct(typ types.Type) (*types.Struct, bool) { + for { + // unwrap pointers (if any) first. + ptr, ok := typ.(*types.Pointer) + if !ok { + break + } + typ = ptr.Elem() + } + + switch typ := typ.(type) { + case *types.Named: // a struct of the named type. + pkg := typ.Obj().Pkg() + if pkg == nil { + return nil, false + } + if !strings.HasPrefix(pkg.Path(), c.mainModule) { + return nil, false + } + styp, ok := typ.Underlying().(*types.Struct) + if !ok { + return nil, false + } + return styp, true + + case *types.Struct: // an anonymous struct. 
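+		// Unlike the named-type case above, an anonymous struct carries no package,
+		// so the main-module check does not apply and the struct is always returned.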
+ return typ, true + + default: + return nil, false + } +} + +func (c *checker) checkStruct(styp *types.Struct, tag string) (valid bool) { + c.seenTypes[styp.String()] = struct{}{} + + for i := 0; i < styp.NumFields(); i++ { + field := styp.Field(i) + if !field.Exported() { + continue + } + + if _, ok := reflect.StructTag(styp.Tag(i)).Lookup(tag); !ok { + // tag is not required for embedded types; see issue #12. + if !field.Embedded() { + return false + } + } + + nested, ok := c.parseStruct(field.Type()) + if !ok { + continue + } + if _, ok := c.seenTypes[nested.String()]; ok { + continue + } + if valid := c.checkStruct(nested, tag); !valid { + return false + } + } + + return true +} + +func implementsInterface(typ types.Type, ifaces []string, imports []*types.Package) bool { + findScope := func(pkgName string) (*types.Scope, bool) { + // fast path: check direct imports (e.g. looking for "encoding/json.Marshaler"). + for _, direct := range imports { + if pkgName == cutVendor(direct.Path()) { + return direct.Scope(), true + } + } + // slow path: check indirect imports (e.g. looking for "encoding.TextMarshaler"). + // TODO: only check indirect imports from the package (e.g. "encoding/json") of the analyzed function (e.g. "encoding/json.Marshal"). + for _, direct := range imports { + for _, indirect := range direct.Imports() { + if pkgName == cutVendor(indirect.Path()) { + return indirect.Scope(), true + } + } + } + return nil, false + } + + for _, ifacePath := range ifaces { + // "encoding/json.Marshaler" -> "encoding/json" + "Marshaler" + idx := strings.LastIndex(ifacePath, ".") + if idx == -1 { + continue + } + pkgName, ifaceName := ifacePath[:idx], ifacePath[idx+1:] + + scope, ok := findScope(pkgName) + if !ok { + continue + } + obj := scope.Lookup(ifaceName) + if obj == nil { + continue + } + iface, ok := obj.Type().Underlying().(*types.Interface) + if !ok { + continue + } + if types.Implements(typ, iface) { + return true + } + } + + return false +} diff --git a/vendor/go-simpler.org/musttag/utils.go b/vendor/go-simpler.org/musttag/utils.go new file mode 100644 index 0000000000..1a13f96c28 --- /dev/null +++ b/vendor/go-simpler.org/musttag/utils.go @@ -0,0 +1,49 @@ +package musttag + +import ( + "encoding/json" + "fmt" + "os/exec" + "strings" +) + +func getMainModule() (string, error) { + args := [...]string{"go", "mod", "edit", "-json"} + + out, err := exec.Command(args[0], args[1:]...).Output() + if err != nil { + return "", fmt.Errorf("running %q: %w", strings.Join(args[:], " "), err) + } + + var info struct { + Module struct { + Path string `json:"Path"` + } `json:"Module"` + } + if err := json.Unmarshal(out, &info); err != nil { + return "", fmt.Errorf("decoding module info: %w\n%s", err, out) + } + + return info.Module.Path, nil +} + +// based on golang.org/x/tools/imports.VendorlessPath +func cutVendor(path string) string { + var prefix string + + switch { + case strings.HasPrefix(path, "(*"): + prefix, path = "(*", path[len("(*"):] + case strings.HasPrefix(path, "("): + prefix, path = "(", path[len("("):] + } + + if i := strings.LastIndex(path, "/vendor/"); i >= 0 { + return prefix + path[i+len("/vendor/"):] + } + if strings.HasPrefix(path, "vendor/") { + return prefix + path[len("vendor/"):] + } + + return prefix + path +} diff --git a/vendor/go-simpler.org/sloglint/.golangci.yml b/vendor/go-simpler.org/sloglint/.golangci.yml new file mode 100644 index 0000000000..ef926a0562 --- /dev/null +++ b/vendor/go-simpler.org/sloglint/.golangci.yml @@ -0,0 +1,22 @@ +linters: + disable-all: 
true + enable: + # enabled by default: + - errcheck + - gosimple + - govet + - ineffassign + - staticcheck + - unused + # disabled by default: + - gocritic + - gofumpt + +linters-settings: + gocritic: + enabled-tags: + - diagnostic + - style + - performance + - experimental + - opinionated diff --git a/vendor/go-simpler.org/sloglint/.goreleaser.yml b/vendor/go-simpler.org/sloglint/.goreleaser.yml new file mode 100644 index 0000000000..d31ea11d39 --- /dev/null +++ b/vendor/go-simpler.org/sloglint/.goreleaser.yml @@ -0,0 +1,18 @@ +builds: + - main: ./cmd/sloglint + env: + - CGO_ENABLED=0 + flags: + - -trimpath + ldflags: + - -s -w -X main.version={{.Version}} + targets: + - darwin_amd64 + - darwin_arm64 + - linux_amd64 + - windows_amd64 + +archives: + - format_overrides: + - goos: windows + format: zip diff --git a/vendor/go-simpler.org/sloglint/LICENSE b/vendor/go-simpler.org/sloglint/LICENSE new file mode 100644 index 0000000000..a612ad9813 --- /dev/null +++ b/vendor/go-simpler.org/sloglint/LICENSE @@ -0,0 +1,373 @@ +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. 
"Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. +Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. 
You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. 
The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. * +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. 
Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. 
diff --git a/vendor/go-simpler.org/sloglint/README.md b/vendor/go-simpler.org/sloglint/README.md new file mode 100644 index 0000000000..fc4d9debb9 --- /dev/null +++ b/vendor/go-simpler.org/sloglint/README.md @@ -0,0 +1,165 @@ +# sloglint + +[![checks](https://github.com/go-simpler/sloglint/actions/workflows/checks.yml/badge.svg)](https://github.com/go-simpler/sloglint/actions/workflows/checks.yml) +[![pkg.go.dev](https://pkg.go.dev/badge/go-simpler.org/sloglint.svg)](https://pkg.go.dev/go-simpler.org/sloglint) +[![goreportcard](https://goreportcard.com/badge/go-simpler.org/sloglint)](https://goreportcard.com/report/go-simpler.org/sloglint) +[![codecov](https://codecov.io/gh/go-simpler/sloglint/branch/main/graph/badge.svg)](https://codecov.io/gh/go-simpler/sloglint) + +A Go linter that ensures consistent code style when using `log/slog`. + +## 📌 About + +The `log/slog` API allows two different types of arguments: key-value pairs and attributes. +While people may have different opinions about which one is better, most seem to agree on one thing: it should be consistent. +With `sloglint` you can enforce various rules for `log/slog` based on your preferred code style. + +## 🚀 Features + +* Enforce not mixing key-value pairs and attributes (default) +* Enforce using either key-value pairs only or attributes only (optional) +* Enforce using methods that accept a context (optional) +* Enforce using static log messages (optional) +* Enforce using constants instead of raw keys (optional) +* Enforce a single key naming convention (optional) +* Enforce putting arguments on separate lines (optional) + +## 📦 Install + +`sloglint` is integrated into [`golangci-lint`][1], and this is the recommended way to use it. + +To enable the linter, add the following lines to `.golangci.yml`: + +```yaml +linters: + enable: + - sloglint +``` + +Alternatively, you can download a prebuilt binary from the [Releases][2] page to use `sloglint` standalone. + +## 📋 Usage + +Run `golangci-lint` with `sloglint` enabled. +See the list of [available options][3] to configure the linter. + +When using `sloglint` standalone, pass the options as flags of the same name. + +### No mixed arguments + +The `no-mixed-args` option causes `sloglint` to report mixing key-values pairs and attributes within a single function call: + +```go +slog.Info("a user has logged in", "user_id", 42, slog.String("ip_address", "192.0.2.0")) // sloglint: key-value pairs and attributes should not be mixed +``` + +It is enabled by default. + +### Key-value pairs only + +The `kv-only` option causes `sloglint` to report any use of attributes: + +```go +slog.Info("a user has logged in", slog.Int("user_id", 42)) // sloglint: attributes should not be used +``` + +### Attributes only + +In contrast, the `attr-only` option causes `sloglint` to report any use of key-value pairs: + +```go +slog.Info("a user has logged in", "user_id", 42) // sloglint: key-value pairs should not be used +``` + +### Context only + +Some `slog.Handler` implementations make use of the given `context.Context` (e.g. to access context values). +For them to work properly, you need to pass a context to all logger calls. 
+The `context-only` option causes `sloglint` to report the use of methods without a context: + +```go +slog.Info("a user has logged in") // sloglint: methods without a context should not be used +``` + +This report can be fixed by using the equivalent method with the `Context` suffix: + +```go +slog.InfoContext(ctx, "a user has logged in") +``` + +### Static messages + +To get the most out of structured logging, you may want to require log messages to be static. +The `static-msg` option causes `sloglint` to report non-static messages: + +```go +slog.Info(fmt.Sprintf("a user with id %d has logged in", 42)) // sloglint: message should be a string literal or a constant +``` + +The report can be fixed by moving dynamic values to arguments: + +```go +slog.Info("a user has logged in", "user_id", 42) +``` + +### No raw keys + +To prevent typos, you may want to forbid the use of raw keys altogether. +The `no-raw-keys` option causes `sloglint` to report the use of strings as keys +(including `slog.Attr` calls, e.g. `slog.Int("user_id", 42)`): + +```go +slog.Info("a user has logged in", "user_id", 42) // sloglint: raw keys should not be used +``` + +This report can be fixed by using either constants... + +```go +const UserId = "user_id" + +slog.Info("a user has logged in", UserId, 42) +``` + +...or custom `slog.Attr` constructors: + +```go +func UserId(value int) slog.Attr { return slog.Int("user_id", value) } + +slog.Info("a user has logged in", UserId(42)) +``` + +> [!TIP] +> Such helpers can be automatically generated for you by the [`sloggen`][4] tool. Give it a try too! + +### Key naming convention + +To ensure consistency in logs, you may want to enforce a single key naming convention. +The `key-naming-case` option causes `sloglint` to report keys written in a case other than the given one: + +```go +slog.Info("a user has logged in", "user-id", 42) // sloglint: keys should be written in snake_case +``` + +Possible values are `snake`, `kebab`, `camel`, or `pascal`. + +### Arguments on separate lines + +To improve code readability, you may want to put arguments on separate lines, especially when using key-value pairs. +The `args-on-sep-lines` option causes `sloglint` to report 2+ arguments on the same line: + +```go +slog.Info("a user has logged in", "user_id", 42, "ip_address", "192.0.2.0") // sloglint: arguments should be put on separate lines +``` + +This report can be fixed by reformatting the code: + +```go +slog.Info("a user has logged in", + "user_id", 42, + "ip_address", "192.0.2.0", +) +``` + +[1]: https://golangci-lint.run +[2]: https://github.com/go-simpler/sloglint/releases +[3]: https://golangci-lint.run/usage/linters/#sloglint +[4]: https://github.com/go-simpler/sloggen diff --git a/vendor/go-simpler.org/sloglint/sloglint.go b/vendor/go-simpler.org/sloglint/sloglint.go new file mode 100644 index 0000000000..b7f72ce485 --- /dev/null +++ b/vendor/go-simpler.org/sloglint/sloglint.go @@ -0,0 +1,347 @@ +// Package sloglint implements the sloglint analyzer. +package sloglint + +import ( + "errors" + "flag" + "fmt" + "go/ast" + "go/token" + "go/types" + "strconv" + + "github.com/ettle/strcase" + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" + "golang.org/x/tools/go/types/typeutil" +) + +// Options are options for the sloglint analyzer. +type Options struct { + NoMixedArgs bool // Enforce not mixing key-value pairs and attributes (default true). 
+ KVOnly bool // Enforce using key-value pairs only (overrides NoMixedArgs, incompatible with AttrOnly). + AttrOnly bool // Enforce using attributes only (overrides NoMixedArgs, incompatible with KVOnly). + ContextOnly bool // Enforce using methods that accept a context. + StaticMsg bool // Enforce using static log messages. + NoRawKeys bool // Enforce using constants instead of raw keys. + KeyNamingCase string // Enforce a single key naming convention ("snake", "kebab", "camel", or "pascal"). + ArgsOnSepLines bool // Enforce putting arguments on separate lines. +} + +// New creates a new sloglint analyzer. +func New(opts *Options) *analysis.Analyzer { + if opts == nil { + opts = &Options{NoMixedArgs: true} + } + return &analysis.Analyzer{ + Name: "sloglint", + Doc: "ensure consistent code style when using log/slog", + Flags: flags(opts), + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Run: func(pass *analysis.Pass) (any, error) { + if opts.KVOnly && opts.AttrOnly { + return nil, fmt.Errorf("sloglint: Options.KVOnly and Options.AttrOnly: %w", errIncompatible) + } + switch opts.KeyNamingCase { + case "", snakeCase, kebabCase, camelCase, pascalCase: + default: + return nil, fmt.Errorf("sloglint: Options.KeyNamingCase=%s: %w", opts.KeyNamingCase, errInvalidValue) + } + run(pass, opts) + return nil, nil + }, + } +} + +var ( + errIncompatible = errors.New("incompatible options") + errInvalidValue = errors.New("invalid value") +) + +func flags(opts *Options) flag.FlagSet { + fs := flag.NewFlagSet("sloglint", flag.ContinueOnError) + + boolVar := func(value *bool, name, usage string) { + fs.Func(name, usage, func(s string) error { + v, err := strconv.ParseBool(s) + *value = v + return err + }) + } + + boolVar(&opts.NoMixedArgs, "no-mixed-args", "enforce not mixing key-value pairs and attributes (default true)") + boolVar(&opts.KVOnly, "kv-only", "enforce using key-value pairs only (overrides -no-mixed-args, incompatible with -attr-only)") + boolVar(&opts.AttrOnly, "attr-only", "enforce using attributes only (overrides -no-mixed-args, incompatible with -kv-only)") + boolVar(&opts.ContextOnly, "context-only", "enforce using methods that accept a context") + boolVar(&opts.StaticMsg, "static-msg", "enforce using static log messages") + boolVar(&opts.NoRawKeys, "no-raw-keys", "enforce using constants instead of raw keys") + boolVar(&opts.ArgsOnSepLines, "args-on-sep-lines", "enforce putting arguments on separate lines") + + fs.Func("key-naming-case", "enforce a single key naming convention (snake|kebab|camel|pascal)", func(s string) error { + opts.KeyNamingCase = s + return nil + }) + + return *fs +} + +var slogFuncs = map[string]int{ // funcName:argsPos + "log/slog.Log": 3, + "log/slog.Debug": 1, + "log/slog.Info": 1, + "log/slog.Warn": 1, + "log/slog.Error": 1, + "log/slog.DebugContext": 2, + "log/slog.InfoContext": 2, + "log/slog.WarnContext": 2, + "log/slog.ErrorContext": 2, + "(*log/slog.Logger).Log": 3, + "(*log/slog.Logger).Debug": 1, + "(*log/slog.Logger).Info": 1, + "(*log/slog.Logger).Warn": 1, + "(*log/slog.Logger).Error": 1, + "(*log/slog.Logger).DebugContext": 2, + "(*log/slog.Logger).InfoContext": 2, + "(*log/slog.Logger).WarnContext": 2, + "(*log/slog.Logger).ErrorContext": 2, +} + +var attrFuncs = map[string]struct{}{ + "log/slog.String": {}, + "log/slog.Int64": {}, + "log/slog.Int": {}, + "log/slog.Uint64": {}, + "log/slog.Float64": {}, + "log/slog.Bool": {}, + "log/slog.Time": {}, + "log/slog.Duration": {}, + "log/slog.Group": {}, + "log/slog.Any": {}, +} + +const ( + snakeCase 
= "snake" + kebabCase = "kebab" + camelCase = "camel" + pascalCase = "pascal" +) + +func run(pass *analysis.Pass, opts *Options) { + visit := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + filter := []ast.Node{(*ast.CallExpr)(nil)} + + visit.Preorder(filter, func(node ast.Node) { + call := node.(*ast.CallExpr) + + fn := typeutil.StaticCallee(pass.TypesInfo, call) + if fn == nil { + return + } + + argsPos, ok := slogFuncs[fn.FullName()] + if !ok { + return + } + + if opts.ContextOnly { + typ := pass.TypesInfo.TypeOf(call.Args[0]) + if typ != nil && typ.String() != "context.Context" { + pass.Reportf(call.Pos(), "methods without a context should not be used") + } + } + if opts.StaticMsg && !staticMsg(call.Args[argsPos-1]) { + pass.Reportf(call.Pos(), "message should be a string literal or a constant") + } + + // NOTE: we assume that the arguments have already been validated by govet. + args := call.Args[argsPos:] + if len(args) == 0 { + return + } + + var keys []ast.Expr + var attrs []ast.Expr + + for i := 0; i < len(args); i++ { + typ := pass.TypesInfo.TypeOf(args[i]) + if typ == nil { + continue + } + switch typ.String() { + case "string": + keys = append(keys, args[i]) + i++ // skip the value. + case "log/slog.Attr": + attrs = append(attrs, args[i]) + } + } + + switch { + case opts.KVOnly && len(attrs) > 0: + pass.Reportf(call.Pos(), "attributes should not be used") + case opts.AttrOnly && len(attrs) < len(args): + pass.Reportf(call.Pos(), "key-value pairs should not be used") + case opts.NoMixedArgs && 0 < len(attrs) && len(attrs) < len(args): + pass.Reportf(call.Pos(), "key-value pairs and attributes should not be mixed") + } + + if opts.NoRawKeys && rawKeysUsed(pass.TypesInfo, keys, attrs) { + pass.Reportf(call.Pos(), "raw keys should not be used") + } + if opts.ArgsOnSepLines && argsOnSameLine(pass.Fset, call, keys, attrs) { + pass.Reportf(call.Pos(), "arguments should be put on separate lines") + } + + switch { + case opts.KeyNamingCase == snakeCase && badKeyNames(pass.TypesInfo, strcase.ToSnake, keys, attrs): + pass.Reportf(call.Pos(), "keys should be written in snake_case") + case opts.KeyNamingCase == kebabCase && badKeyNames(pass.TypesInfo, strcase.ToKebab, keys, attrs): + pass.Reportf(call.Pos(), "keys should be written in kebab-case") + case opts.KeyNamingCase == camelCase && badKeyNames(pass.TypesInfo, strcase.ToCamel, keys, attrs): + pass.Reportf(call.Pos(), "keys should be written in camelCase") + case opts.KeyNamingCase == pascalCase && badKeyNames(pass.TypesInfo, strcase.ToPascal, keys, attrs): + pass.Reportf(call.Pos(), "keys should be written in PascalCase") + } + }) +} + +func staticMsg(expr ast.Expr) bool { + switch msg := expr.(type) { + case *ast.BasicLit: // e.g. slog.Info("msg") + return msg.Kind == token.STRING + case *ast.Ident: // e.g. const msg = "msg"; slog.Info(msg) + return msg.Obj != nil && msg.Obj.Kind == ast.Con + default: + return false + } +} + +func rawKeysUsed(info *types.Info, keys, attrs []ast.Expr) bool { + isConst := func(expr ast.Expr) bool { + ident, ok := expr.(*ast.Ident) + return ok && ident.Obj != nil && ident.Obj.Kind == ast.Con + } + + for _, key := range keys { + if !isConst(key) { + return true + } + } + + for _, attr := range attrs { + switch attr := attr.(type) { + case *ast.CallExpr: // e.g. 
slog.Int() + fn := typeutil.StaticCallee(info, attr) + if _, ok := attrFuncs[fn.FullName()]; ok && !isConst(attr.Args[0]) { + return true + } + + case *ast.CompositeLit: // slog.Attr{} + isRawKey := func(kv *ast.KeyValueExpr) bool { + return kv.Key.(*ast.Ident).Name == "Key" && !isConst(kv.Value) + } + + switch len(attr.Elts) { + case 1: // slog.Attr{Key: ...} | slog.Attr{Value: ...} + kv := attr.Elts[0].(*ast.KeyValueExpr) + if isRawKey(kv) { + return true + } + case 2: // slog.Attr{..., ...} | slog.Attr{Key: ..., Value: ...} + kv1, ok := attr.Elts[0].(*ast.KeyValueExpr) + if ok { + kv2 := attr.Elts[1].(*ast.KeyValueExpr) + if isRawKey(kv1) || isRawKey(kv2) { + return true + } + } else if !isConst(attr.Elts[0]) { + return true + } + } + } + } + + return false +} + +func badKeyNames(info *types.Info, caseFn func(string) string, keys, attrs []ast.Expr) bool { + for _, key := range keys { + if name, ok := getKeyName(key); ok && name != caseFn(name) { + return true + } + } + + for _, attr := range attrs { + var expr ast.Expr + switch attr := attr.(type) { + case *ast.CallExpr: // e.g. slog.Int() + fn := typeutil.StaticCallee(info, attr) + if _, ok := attrFuncs[fn.FullName()]; ok { + expr = attr.Args[0] + } + case *ast.CompositeLit: // slog.Attr{} + switch len(attr.Elts) { + case 1: // slog.Attr{Key: ...} | slog.Attr{Value: ...} + if kv := attr.Elts[0].(*ast.KeyValueExpr); kv.Key.(*ast.Ident).Name == "Key" { + expr = kv.Value + } + case 2: // slog.Attr{..., ...} | slog.Attr{Key: ..., Value: ...} + expr = attr.Elts[0] + if kv1, ok := attr.Elts[0].(*ast.KeyValueExpr); ok && kv1.Key.(*ast.Ident).Name == "Key" { + expr = kv1.Value + } + if kv2, ok := attr.Elts[1].(*ast.KeyValueExpr); ok && kv2.Key.(*ast.Ident).Name == "Key" { + expr = kv2.Value + } + } + } + if name, ok := getKeyName(expr); ok && name != caseFn(name) { + return true + } + } + + return false +} + +func getKeyName(expr ast.Expr) (string, bool) { + if expr == nil { + return "", false + } + if ident, ok := expr.(*ast.Ident); ok { + if ident.Obj == nil || ident.Obj.Decl == nil || ident.Obj.Kind != ast.Con { + return "", false + } + if spec, ok := ident.Obj.Decl.(*ast.ValueSpec); ok && len(spec.Values) > 0 { + // TODO: support len(spec.Values) > 1; e.g. "const foo, bar = 1, 2" + expr = spec.Values[0] + } + } + if lit, ok := expr.(*ast.BasicLit); ok && lit.Kind == token.STRING { + return lit.Value, true + } + return "", false +} + +func argsOnSameLine(fset *token.FileSet, call ast.Expr, keys, attrs []ast.Expr) bool { + if len(keys)+len(attrs) <= 1 { + return false // special case: slog.Info("msg", "key", "value") is ok. + } + + l := len(keys) + len(attrs) + 1 + args := make([]ast.Expr, 0, l) + args = append(args, call) + args = append(args, keys...) + args = append(args, attrs...) 
+ + lines := make(map[int]struct{}, l) + for _, arg := range args { + line := fset.Position(arg.Pos()).Line + if _, ok := lines[line]; ok { + return true + } + lines[line] = struct{}{} + } + + return false +} diff --git a/vendor/go.uber.org/atomic/.codecov.yml b/vendor/go.uber.org/atomic/.codecov.yml new file mode 100644 index 0000000000..571116cc39 --- /dev/null +++ b/vendor/go.uber.org/atomic/.codecov.yml @@ -0,0 +1,19 @@ +coverage: + range: 80..100 + round: down + precision: 2 + + status: + project: # measuring the overall project coverage + default: # context, you can create multiple ones with custom titles + enabled: yes # must be yes|true to enable this status + target: 100 # specify the target coverage for each commit status + # option: "auto" (must increase from parent commit or pull request base) + # option: "X%" a static target percentage to hit + if_not_found: success # if parent is not found report status as success, error, or failure + if_ci_failed: error # if ci fails report status as success, error, or failure + +# Also update COVER_IGNORE_PKGS in the Makefile. +ignore: + - /internal/gen-atomicint/ + - /internal/gen-valuewrapper/ diff --git a/vendor/go.uber.org/atomic/.gitignore b/vendor/go.uber.org/atomic/.gitignore new file mode 100644 index 0000000000..c3fa253893 --- /dev/null +++ b/vendor/go.uber.org/atomic/.gitignore @@ -0,0 +1,12 @@ +/bin +.DS_Store +/vendor +cover.html +cover.out +lint.log + +# Binaries +*.test + +# Profiling output +*.prof diff --git a/vendor/go.uber.org/atomic/.travis.yml b/vendor/go.uber.org/atomic/.travis.yml new file mode 100644 index 0000000000..13d0a4f254 --- /dev/null +++ b/vendor/go.uber.org/atomic/.travis.yml @@ -0,0 +1,27 @@ +sudo: false +language: go +go_import_path: go.uber.org/atomic + +env: + global: + - GO111MODULE=on + +matrix: + include: + - go: oldstable + - go: stable + env: LINT=1 + +cache: + directories: + - vendor + +before_install: + - go version + +script: + - test -z "$LINT" || make lint + - make cover + +after_success: + - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/go.uber.org/atomic/CHANGELOG.md b/vendor/go.uber.org/atomic/CHANGELOG.md new file mode 100644 index 0000000000..24c0274dc3 --- /dev/null +++ b/vendor/go.uber.org/atomic/CHANGELOG.md @@ -0,0 +1,76 @@ +# Changelog +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [1.7.0] - 2020-09-14 +### Added +- Support JSON serialization and deserialization of primitive atomic types. +- Support Text marshalling and unmarshalling for string atomics. + +### Changed +- Disallow incorrect comparison of atomic values in a non-atomic way. + +### Removed +- Remove dependency on `golang.org/x/{lint, tools}`. + +## [1.6.0] - 2020-02-24 +### Changed +- Drop library dependency on `golang.org/x/{lint, tools}`. + +## [1.5.1] - 2019-11-19 +- Fix bug where `Bool.CAS` and `Bool.Toggle` do work correctly together + causing `CAS` to fail even though the old value matches. + +## [1.5.0] - 2019-10-29 +### Changed +- With Go modules, only the `go.uber.org/atomic` import path is supported now. + If you need to use the old import path, please add a `replace` directive to + your `go.mod`. + +## [1.4.0] - 2019-05-01 +### Added + - Add `atomic.Error` type for atomic operations on `error` values. 
+ +## [1.3.2] - 2018-05-02 +### Added +- Add `atomic.Duration` type for atomic operations on `time.Duration` values. + +## [1.3.1] - 2017-11-14 +### Fixed +- Revert optimization for `atomic.String.Store("")` which caused data races. + +## [1.3.0] - 2017-11-13 +### Added +- Add `atomic.Bool.CAS` for compare-and-swap semantics on bools. + +### Changed +- Optimize `atomic.String.Store("")` by avoiding an allocation. + +## [1.2.0] - 2017-04-12 +### Added +- Shadow `atomic.Value` from `sync/atomic`. + +## [1.1.0] - 2017-03-10 +### Added +- Add atomic `Float64` type. + +### Changed +- Support new `go.uber.org/atomic` import path. + +## [1.0.0] - 2016-07-18 + +- Initial release. + +[1.7.0]: https://github.com/uber-go/atomic/compare/v1.6.0...v1.7.0 +[1.6.0]: https://github.com/uber-go/atomic/compare/v1.5.1...v1.6.0 +[1.5.1]: https://github.com/uber-go/atomic/compare/v1.5.0...v1.5.1 +[1.5.0]: https://github.com/uber-go/atomic/compare/v1.4.0...v1.5.0 +[1.4.0]: https://github.com/uber-go/atomic/compare/v1.3.2...v1.4.0 +[1.3.2]: https://github.com/uber-go/atomic/compare/v1.3.1...v1.3.2 +[1.3.1]: https://github.com/uber-go/atomic/compare/v1.3.0...v1.3.1 +[1.3.0]: https://github.com/uber-go/atomic/compare/v1.2.0...v1.3.0 +[1.2.0]: https://github.com/uber-go/atomic/compare/v1.1.0...v1.2.0 +[1.1.0]: https://github.com/uber-go/atomic/compare/v1.0.0...v1.1.0 +[1.0.0]: https://github.com/uber-go/atomic/releases/tag/v1.0.0 diff --git a/vendor/go.uber.org/atomic/LICENSE.txt b/vendor/go.uber.org/atomic/LICENSE.txt new file mode 100644 index 0000000000..8765c9fbc6 --- /dev/null +++ b/vendor/go.uber.org/atomic/LICENSE.txt @@ -0,0 +1,19 @@ +Copyright (c) 2016 Uber Technologies, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/go.uber.org/atomic/Makefile b/vendor/go.uber.org/atomic/Makefile new file mode 100644 index 0000000000..1b1376d425 --- /dev/null +++ b/vendor/go.uber.org/atomic/Makefile @@ -0,0 +1,78 @@ +# Directory to place `go install`ed binaries into. +export GOBIN ?= $(shell pwd)/bin + +GOLINT = $(GOBIN)/golint +GEN_ATOMICINT = $(GOBIN)/gen-atomicint +GEN_ATOMICWRAPPER = $(GOBIN)/gen-atomicwrapper +STATICCHECK = $(GOBIN)/staticcheck + +GO_FILES ?= $(shell find . '(' -path .git -o -path vendor ')' -prune -o -name '*.go' -print) + +# Also update ignore section in .codecov.yml. +COVER_IGNORE_PKGS = \ + go.uber.org/atomic/internal/gen-atomicint \ + go.uber.org/atomic/internal/gen-atomicwrapper + +.PHONY: build +build: + go build ./... + +.PHONY: test +test: + go test -race ./... 
+ +.PHONY: gofmt +gofmt: + $(eval FMT_LOG := $(shell mktemp -t gofmt.XXXXX)) + gofmt -e -s -l $(GO_FILES) > $(FMT_LOG) || true + @[ ! -s "$(FMT_LOG)" ] || (echo "gofmt failed:" && cat $(FMT_LOG) && false) + +$(GOLINT): + cd tools && go install golang.org/x/lint/golint + +$(STATICCHECK): + cd tools && go install honnef.co/go/tools/cmd/staticcheck + +$(GEN_ATOMICWRAPPER): $(wildcard ./internal/gen-atomicwrapper/*) + go build -o $@ ./internal/gen-atomicwrapper + +$(GEN_ATOMICINT): $(wildcard ./internal/gen-atomicint/*) + go build -o $@ ./internal/gen-atomicint + +.PHONY: golint +golint: $(GOLINT) + $(GOLINT) ./... + +.PHONY: staticcheck +staticcheck: $(STATICCHECK) + $(STATICCHECK) ./... + +.PHONY: lint +lint: gofmt golint staticcheck generatenodirty + +# comma separated list of packages to consider for code coverage. +COVER_PKG = $(shell \ + go list -find ./... | \ + grep -v $(foreach pkg,$(COVER_IGNORE_PKGS),-e "^$(pkg)$$") | \ + paste -sd, -) + +.PHONY: cover +cover: + go test -coverprofile=cover.out -coverpkg $(COVER_PKG) -v ./... + go tool cover -html=cover.out -o cover.html + +.PHONY: generate +generate: $(GEN_ATOMICINT) $(GEN_ATOMICWRAPPER) + go generate ./... + +.PHONY: generatenodirty +generatenodirty: + @[ -z "$$(git status --porcelain)" ] || ( \ + echo "Working tree is dirty. Commit your changes first."; \ + exit 1 ) + @make generate + @status=$$(git status --porcelain); \ + [ -z "$$status" ] || ( \ + echo "Working tree is dirty after `make generate`:"; \ + echo "$$status"; \ + echo "Please ensure that the generated code is up-to-date." ) diff --git a/vendor/go.uber.org/atomic/README.md b/vendor/go.uber.org/atomic/README.md new file mode 100644 index 0000000000..ade0c20f16 --- /dev/null +++ b/vendor/go.uber.org/atomic/README.md @@ -0,0 +1,63 @@ +# atomic [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] [![Go Report Card][reportcard-img]][reportcard] + +Simple wrappers for primitive types to enforce atomic access. + +## Installation + +```shell +$ go get -u go.uber.org/atomic@v1 +``` + +### Legacy Import Path + +As of v1.5.0, the import path `go.uber.org/atomic` is the only supported way +of using this package. If you are using Go modules, this package will fail to +compile with the legacy import path path `github.com/uber-go/atomic`. + +We recommend migrating your code to the new import path but if you're unable +to do so, or if your dependencies are still using the old import path, you +will have to add a `replace` directive to your `go.mod` file downgrading the +legacy import path to an older version. + +``` +replace github.com/uber-go/atomic => github.com/uber-go/atomic v1.4.0 +``` + +You can do so automatically by running the following command. + +```shell +$ go mod edit -replace github.com/uber-go/atomic=github.com/uber-go/atomic@v1.4.0 +``` + +## Usage + +The standard library's `sync/atomic` is powerful, but it's easy to forget which +variables must be accessed atomically. `go.uber.org/atomic` preserves all the +functionality of the standard library, but wraps the primitive types to +provide a safer, more convenient API. + +```go +var atom atomic.Uint32 +atom.Store(42) +atom.Sub(2) +atom.CAS(40, 11) +``` + +See the [documentation][doc] for a complete API specification. + +## Development Status + +Stable. + +--- + +Released under the [MIT License](LICENSE.txt). 
+ +[doc-img]: https://godoc.org/github.com/uber-go/atomic?status.svg +[doc]: https://godoc.org/go.uber.org/atomic +[ci-img]: https://travis-ci.com/uber-go/atomic.svg?branch=master +[ci]: https://travis-ci.com/uber-go/atomic +[cov-img]: https://codecov.io/gh/uber-go/atomic/branch/master/graph/badge.svg +[cov]: https://codecov.io/gh/uber-go/atomic +[reportcard-img]: https://goreportcard.com/badge/go.uber.org/atomic +[reportcard]: https://goreportcard.com/report/go.uber.org/atomic diff --git a/vendor/go.uber.org/atomic/bool.go b/vendor/go.uber.org/atomic/bool.go new file mode 100644 index 0000000000..9cf1914b1f --- /dev/null +++ b/vendor/go.uber.org/atomic/bool.go @@ -0,0 +1,81 @@ +// @generated Code generated by gen-atomicwrapper. + +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "encoding/json" +) + +// Bool is an atomic type-safe wrapper for bool values. +type Bool struct { + _ nocmp // disallow non-atomic comparison + + v Uint32 +} + +var _zeroBool bool + +// NewBool creates a new Bool. +func NewBool(v bool) *Bool { + x := &Bool{} + if v != _zeroBool { + x.Store(v) + } + return x +} + +// Load atomically loads the wrapped bool. +func (x *Bool) Load() bool { + return truthy(x.v.Load()) +} + +// Store atomically stores the passed bool. +func (x *Bool) Store(v bool) { + x.v.Store(boolToInt(v)) +} + +// CAS is an atomic compare-and-swap for bool values. +func (x *Bool) CAS(o, n bool) bool { + return x.v.CAS(boolToInt(o), boolToInt(n)) +} + +// Swap atomically stores the given bool and returns the old +// value. +func (x *Bool) Swap(o bool) bool { + return truthy(x.v.Swap(boolToInt(o))) +} + +// MarshalJSON encodes the wrapped bool into JSON. +func (x *Bool) MarshalJSON() ([]byte, error) { + return json.Marshal(x.Load()) +} + +// UnmarshalJSON decodes a bool from JSON. +func (x *Bool) UnmarshalJSON(b []byte) error { + var v bool + if err := json.Unmarshal(b, &v); err != nil { + return err + } + x.Store(v) + return nil +} diff --git a/vendor/go.uber.org/atomic/bool_ext.go b/vendor/go.uber.org/atomic/bool_ext.go new file mode 100644 index 0000000000..c7bf7a827a --- /dev/null +++ b/vendor/go.uber.org/atomic/bool_ext.go @@ -0,0 +1,53 @@ +// Copyright (c) 2020 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "strconv" +) + +//go:generate bin/gen-atomicwrapper -name=Bool -type=bool -wrapped=Uint32 -pack=boolToInt -unpack=truthy -cas -swap -json -file=bool.go + +func truthy(n uint32) bool { + return n == 1 +} + +func boolToInt(b bool) uint32 { + if b { + return 1 + } + return 0 +} + +// Toggle atomically negates the Boolean and returns the previous value. +func (b *Bool) Toggle() bool { + for { + old := b.Load() + if b.CAS(old, !old) { + return old + } + } +} + +// String encodes the wrapped value as a string. +func (b *Bool) String() string { + return strconv.FormatBool(b.Load()) +} diff --git a/vendor/go.uber.org/atomic/doc.go b/vendor/go.uber.org/atomic/doc.go new file mode 100644 index 0000000000..ae7390ee68 --- /dev/null +++ b/vendor/go.uber.org/atomic/doc.go @@ -0,0 +1,23 @@ +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package atomic provides simple wrappers around numerics to enforce atomic +// access. +package atomic diff --git a/vendor/go.uber.org/atomic/duration.go b/vendor/go.uber.org/atomic/duration.go new file mode 100644 index 0000000000..027cfcb20b --- /dev/null +++ b/vendor/go.uber.org/atomic/duration.go @@ -0,0 +1,82 @@ +// @generated Code generated by gen-atomicwrapper. + +// Copyright (c) 2020 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "encoding/json" + "time" +) + +// Duration is an atomic type-safe wrapper for time.Duration values. +type Duration struct { + _ nocmp // disallow non-atomic comparison + + v Int64 +} + +var _zeroDuration time.Duration + +// NewDuration creates a new Duration. +func NewDuration(v time.Duration) *Duration { + x := &Duration{} + if v != _zeroDuration { + x.Store(v) + } + return x +} + +// Load atomically loads the wrapped time.Duration. +func (x *Duration) Load() time.Duration { + return time.Duration(x.v.Load()) +} + +// Store atomically stores the passed time.Duration. +func (x *Duration) Store(v time.Duration) { + x.v.Store(int64(v)) +} + +// CAS is an atomic compare-and-swap for time.Duration values. +func (x *Duration) CAS(o, n time.Duration) bool { + return x.v.CAS(int64(o), int64(n)) +} + +// Swap atomically stores the given time.Duration and returns the old +// value. +func (x *Duration) Swap(o time.Duration) time.Duration { + return time.Duration(x.v.Swap(int64(o))) +} + +// MarshalJSON encodes the wrapped time.Duration into JSON. +func (x *Duration) MarshalJSON() ([]byte, error) { + return json.Marshal(x.Load()) +} + +// UnmarshalJSON decodes a time.Duration from JSON. +func (x *Duration) UnmarshalJSON(b []byte) error { + var v time.Duration + if err := json.Unmarshal(b, &v); err != nil { + return err + } + x.Store(v) + return nil +} diff --git a/vendor/go.uber.org/atomic/duration_ext.go b/vendor/go.uber.org/atomic/duration_ext.go new file mode 100644 index 0000000000..6273b66bd6 --- /dev/null +++ b/vendor/go.uber.org/atomic/duration_ext.go @@ -0,0 +1,40 @@ +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import "time" + +//go:generate bin/gen-atomicwrapper -name=Duration -type=time.Duration -wrapped=Int64 -pack=int64 -unpack=time.Duration -cas -swap -json -imports time -file=duration.go + +// Add atomically adds to the wrapped time.Duration and returns the new value. +func (d *Duration) Add(n time.Duration) time.Duration { + return time.Duration(d.v.Add(int64(n))) +} + +// Sub atomically subtracts from the wrapped time.Duration and returns the new value. +func (d *Duration) Sub(n time.Duration) time.Duration { + return time.Duration(d.v.Sub(int64(n))) +} + +// String encodes the wrapped value as a string. +func (d *Duration) String() string { + return d.Load().String() +} diff --git a/vendor/go.uber.org/atomic/error.go b/vendor/go.uber.org/atomic/error.go new file mode 100644 index 0000000000..a6166fbea0 --- /dev/null +++ b/vendor/go.uber.org/atomic/error.go @@ -0,0 +1,51 @@ +// @generated Code generated by gen-atomicwrapper. + +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +// Error is an atomic type-safe wrapper for error values. +type Error struct { + _ nocmp // disallow non-atomic comparison + + v Value +} + +var _zeroError error + +// NewError creates a new Error. +func NewError(v error) *Error { + x := &Error{} + if v != _zeroError { + x.Store(v) + } + return x +} + +// Load atomically loads the wrapped error. +func (x *Error) Load() error { + return unpackError(x.v.Load()) +} + +// Store atomically stores the passed error. +func (x *Error) Store(v error) { + x.v.Store(packError(v)) +} diff --git a/vendor/go.uber.org/atomic/error_ext.go b/vendor/go.uber.org/atomic/error_ext.go new file mode 100644 index 0000000000..ffe0be21cb --- /dev/null +++ b/vendor/go.uber.org/atomic/error_ext.go @@ -0,0 +1,39 @@ +// Copyright (c) 2020 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +// atomic.Value panics on nil inputs, or if the underlying type changes. +// Stabilize by always storing a custom struct that we control. + +//go:generate bin/gen-atomicwrapper -name=Error -type=error -wrapped=Value -pack=packError -unpack=unpackError -file=error.go + +type packedError struct{ Value error } + +func packError(v error) interface{} { + return packedError{v} +} + +func unpackError(v interface{}) error { + if err, ok := v.(packedError); ok { + return err.Value + } + return nil +} diff --git a/vendor/go.uber.org/atomic/float64.go b/vendor/go.uber.org/atomic/float64.go new file mode 100644 index 0000000000..0719060207 --- /dev/null +++ b/vendor/go.uber.org/atomic/float64.go @@ -0,0 +1,76 @@ +// @generated Code generated by gen-atomicwrapper. + +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "encoding/json" + "math" +) + +// Float64 is an atomic type-safe wrapper for float64 values. +type Float64 struct { + _ nocmp // disallow non-atomic comparison + + v Uint64 +} + +var _zeroFloat64 float64 + +// NewFloat64 creates a new Float64. +func NewFloat64(v float64) *Float64 { + x := &Float64{} + if v != _zeroFloat64 { + x.Store(v) + } + return x +} + +// Load atomically loads the wrapped float64. 
+func (x *Float64) Load() float64 { + return math.Float64frombits(x.v.Load()) +} + +// Store atomically stores the passed float64. +func (x *Float64) Store(v float64) { + x.v.Store(math.Float64bits(v)) +} + +// CAS is an atomic compare-and-swap for float64 values. +func (x *Float64) CAS(o, n float64) bool { + return x.v.CAS(math.Float64bits(o), math.Float64bits(n)) +} + +// MarshalJSON encodes the wrapped float64 into JSON. +func (x *Float64) MarshalJSON() ([]byte, error) { + return json.Marshal(x.Load()) +} + +// UnmarshalJSON decodes a float64 from JSON. +func (x *Float64) UnmarshalJSON(b []byte) error { + var v float64 + if err := json.Unmarshal(b, &v); err != nil { + return err + } + x.Store(v) + return nil +} diff --git a/vendor/go.uber.org/atomic/float64_ext.go b/vendor/go.uber.org/atomic/float64_ext.go new file mode 100644 index 0000000000..927b1add74 --- /dev/null +++ b/vendor/go.uber.org/atomic/float64_ext.go @@ -0,0 +1,47 @@ +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import "strconv" + +//go:generate bin/gen-atomicwrapper -name=Float64 -type=float64 -wrapped=Uint64 -pack=math.Float64bits -unpack=math.Float64frombits -cas -json -imports math -file=float64.go + +// Add atomically adds to the wrapped float64 and returns the new value. +func (f *Float64) Add(s float64) float64 { + for { + old := f.Load() + new := old + s + if f.CAS(old, new) { + return new + } + } +} + +// Sub atomically subtracts from the wrapped float64 and returns the new value. +func (f *Float64) Sub(s float64) float64 { + return f.Add(-s) +} + +// String encodes the wrapped value as a string. +func (f *Float64) String() string { + // 'g' is the behavior for floats with %v. + return strconv.FormatFloat(f.Load(), 'g', -1, 64) +} diff --git a/vendor/go.uber.org/atomic/gen.go b/vendor/go.uber.org/atomic/gen.go new file mode 100644 index 0000000000..50d6b24858 --- /dev/null +++ b/vendor/go.uber.org/atomic/gen.go @@ -0,0 +1,26 @@ +// Copyright (c) 2020 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +//go:generate bin/gen-atomicint -name=Int32 -wrapped=int32 -file=int32.go +//go:generate bin/gen-atomicint -name=Int64 -wrapped=int64 -file=int64.go +//go:generate bin/gen-atomicint -name=Uint32 -wrapped=uint32 -unsigned -file=uint32.go +//go:generate bin/gen-atomicint -name=Uint64 -wrapped=uint64 -unsigned -file=uint64.go diff --git a/vendor/go.uber.org/atomic/int32.go b/vendor/go.uber.org/atomic/int32.go new file mode 100644 index 0000000000..18ae56493e --- /dev/null +++ b/vendor/go.uber.org/atomic/int32.go @@ -0,0 +1,102 @@ +// @generated Code generated by gen-atomicint. + +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "encoding/json" + "strconv" + "sync/atomic" +) + +// Int32 is an atomic wrapper around int32. +type Int32 struct { + _ nocmp // disallow non-atomic comparison + + v int32 +} + +// NewInt32 creates a new Int32. +func NewInt32(i int32) *Int32 { + return &Int32{v: i} +} + +// Load atomically loads the wrapped value. +func (i *Int32) Load() int32 { + return atomic.LoadInt32(&i.v) +} + +// Add atomically adds to the wrapped int32 and returns the new value. +func (i *Int32) Add(n int32) int32 { + return atomic.AddInt32(&i.v, n) +} + +// Sub atomically subtracts from the wrapped int32 and returns the new value. 
+func (i *Int32) Sub(n int32) int32 { + return atomic.AddInt32(&i.v, -n) +} + +// Inc atomically increments the wrapped int32 and returns the new value. +func (i *Int32) Inc() int32 { + return i.Add(1) +} + +// Dec atomically decrements the wrapped int32 and returns the new value. +func (i *Int32) Dec() int32 { + return i.Sub(1) +} + +// CAS is an atomic compare-and-swap. +func (i *Int32) CAS(old, new int32) bool { + return atomic.CompareAndSwapInt32(&i.v, old, new) +} + +// Store atomically stores the passed value. +func (i *Int32) Store(n int32) { + atomic.StoreInt32(&i.v, n) +} + +// Swap atomically swaps the wrapped int32 and returns the old value. +func (i *Int32) Swap(n int32) int32 { + return atomic.SwapInt32(&i.v, n) +} + +// MarshalJSON encodes the wrapped int32 into JSON. +func (i *Int32) MarshalJSON() ([]byte, error) { + return json.Marshal(i.Load()) +} + +// UnmarshalJSON decodes JSON into the wrapped int32. +func (i *Int32) UnmarshalJSON(b []byte) error { + var v int32 + if err := json.Unmarshal(b, &v); err != nil { + return err + } + i.Store(v) + return nil +} + +// String encodes the wrapped value as a string. +func (i *Int32) String() string { + v := i.Load() + return strconv.FormatInt(int64(v), 10) +} diff --git a/vendor/go.uber.org/atomic/int64.go b/vendor/go.uber.org/atomic/int64.go new file mode 100644 index 0000000000..2bcbbfaa95 --- /dev/null +++ b/vendor/go.uber.org/atomic/int64.go @@ -0,0 +1,102 @@ +// @generated Code generated by gen-atomicint. + +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "encoding/json" + "strconv" + "sync/atomic" +) + +// Int64 is an atomic wrapper around int64. +type Int64 struct { + _ nocmp // disallow non-atomic comparison + + v int64 +} + +// NewInt64 creates a new Int64. +func NewInt64(i int64) *Int64 { + return &Int64{v: i} +} + +// Load atomically loads the wrapped value. +func (i *Int64) Load() int64 { + return atomic.LoadInt64(&i.v) +} + +// Add atomically adds to the wrapped int64 and returns the new value. +func (i *Int64) Add(n int64) int64 { + return atomic.AddInt64(&i.v, n) +} + +// Sub atomically subtracts from the wrapped int64 and returns the new value. +func (i *Int64) Sub(n int64) int64 { + return atomic.AddInt64(&i.v, -n) +} + +// Inc atomically increments the wrapped int64 and returns the new value. 
+func (i *Int64) Inc() int64 { + return i.Add(1) +} + +// Dec atomically decrements the wrapped int64 and returns the new value. +func (i *Int64) Dec() int64 { + return i.Sub(1) +} + +// CAS is an atomic compare-and-swap. +func (i *Int64) CAS(old, new int64) bool { + return atomic.CompareAndSwapInt64(&i.v, old, new) +} + +// Store atomically stores the passed value. +func (i *Int64) Store(n int64) { + atomic.StoreInt64(&i.v, n) +} + +// Swap atomically swaps the wrapped int64 and returns the old value. +func (i *Int64) Swap(n int64) int64 { + return atomic.SwapInt64(&i.v, n) +} + +// MarshalJSON encodes the wrapped int64 into JSON. +func (i *Int64) MarshalJSON() ([]byte, error) { + return json.Marshal(i.Load()) +} + +// UnmarshalJSON decodes JSON into the wrapped int64. +func (i *Int64) UnmarshalJSON(b []byte) error { + var v int64 + if err := json.Unmarshal(b, &v); err != nil { + return err + } + i.Store(v) + return nil +} + +// String encodes the wrapped value as a string. +func (i *Int64) String() string { + v := i.Load() + return strconv.FormatInt(int64(v), 10) +} diff --git a/vendor/go.uber.org/atomic/nocmp.go b/vendor/go.uber.org/atomic/nocmp.go new file mode 100644 index 0000000000..a8201cb4a1 --- /dev/null +++ b/vendor/go.uber.org/atomic/nocmp.go @@ -0,0 +1,35 @@ +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +// nocmp is an uncomparable struct. Embed this inside another struct to make +// it uncomparable. +// +// type Foo struct { +// nocmp +// // ... +// } +// +// This DOES NOT: +// +// - Disallow shallow copies of structs +// - Disallow comparison of pointers to uncomparable structs +type nocmp [0]func() diff --git a/vendor/go.uber.org/atomic/string.go b/vendor/go.uber.org/atomic/string.go new file mode 100644 index 0000000000..225b7a2be0 --- /dev/null +++ b/vendor/go.uber.org/atomic/string.go @@ -0,0 +1,54 @@ +// @generated Code generated by gen-atomicwrapper. + +// Copyright (c) 2020 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +// String is an atomic type-safe wrapper for string values. +type String struct { + _ nocmp // disallow non-atomic comparison + + v Value +} + +var _zeroString string + +// NewString creates a new String. +func NewString(v string) *String { + x := &String{} + if v != _zeroString { + x.Store(v) + } + return x +} + +// Load atomically loads the wrapped string. +func (x *String) Load() string { + if v := x.v.Load(); v != nil { + return v.(string) + } + return _zeroString +} + +// Store atomically stores the passed string. +func (x *String) Store(v string) { + x.v.Store(v) +} diff --git a/vendor/go.uber.org/atomic/string_ext.go b/vendor/go.uber.org/atomic/string_ext.go new file mode 100644 index 0000000000..3a9558213d --- /dev/null +++ b/vendor/go.uber.org/atomic/string_ext.go @@ -0,0 +1,43 @@ +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +//go:generate bin/gen-atomicwrapper -name=String -type=string -wrapped=Value -file=string.go + +// String returns the wrapped value. +func (s *String) String() string { + return s.Load() +} + +// MarshalText encodes the wrapped string into a textual form. +// +// This makes it encodable as JSON, YAML, XML, and more. +func (s *String) MarshalText() ([]byte, error) { + return []byte(s.Load()), nil +} + +// UnmarshalText decodes text and replaces the wrapped string with it. 
+// +// This makes it decodable from JSON, YAML, XML, and more. +func (s *String) UnmarshalText(b []byte) error { + s.Store(string(b)) + return nil +} diff --git a/vendor/go.uber.org/atomic/uint32.go b/vendor/go.uber.org/atomic/uint32.go new file mode 100644 index 0000000000..a973aba1a6 --- /dev/null +++ b/vendor/go.uber.org/atomic/uint32.go @@ -0,0 +1,102 @@ +// @generated Code generated by gen-atomicint. + +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "encoding/json" + "strconv" + "sync/atomic" +) + +// Uint32 is an atomic wrapper around uint32. +type Uint32 struct { + _ nocmp // disallow non-atomic comparison + + v uint32 +} + +// NewUint32 creates a new Uint32. +func NewUint32(i uint32) *Uint32 { + return &Uint32{v: i} +} + +// Load atomically loads the wrapped value. +func (i *Uint32) Load() uint32 { + return atomic.LoadUint32(&i.v) +} + +// Add atomically adds to the wrapped uint32 and returns the new value. +func (i *Uint32) Add(n uint32) uint32 { + return atomic.AddUint32(&i.v, n) +} + +// Sub atomically subtracts from the wrapped uint32 and returns the new value. +func (i *Uint32) Sub(n uint32) uint32 { + return atomic.AddUint32(&i.v, ^(n - 1)) +} + +// Inc atomically increments the wrapped uint32 and returns the new value. +func (i *Uint32) Inc() uint32 { + return i.Add(1) +} + +// Dec atomically decrements the wrapped uint32 and returns the new value. +func (i *Uint32) Dec() uint32 { + return i.Sub(1) +} + +// CAS is an atomic compare-and-swap. +func (i *Uint32) CAS(old, new uint32) bool { + return atomic.CompareAndSwapUint32(&i.v, old, new) +} + +// Store atomically stores the passed value. +func (i *Uint32) Store(n uint32) { + atomic.StoreUint32(&i.v, n) +} + +// Swap atomically swaps the wrapped uint32 and returns the old value. +func (i *Uint32) Swap(n uint32) uint32 { + return atomic.SwapUint32(&i.v, n) +} + +// MarshalJSON encodes the wrapped uint32 into JSON. +func (i *Uint32) MarshalJSON() ([]byte, error) { + return json.Marshal(i.Load()) +} + +// UnmarshalJSON decodes JSON into the wrapped uint32. +func (i *Uint32) UnmarshalJSON(b []byte) error { + var v uint32 + if err := json.Unmarshal(b, &v); err != nil { + return err + } + i.Store(v) + return nil +} + +// String encodes the wrapped value as a string. 
+func (i *Uint32) String() string { + v := i.Load() + return strconv.FormatUint(uint64(v), 10) +} diff --git a/vendor/go.uber.org/atomic/uint64.go b/vendor/go.uber.org/atomic/uint64.go new file mode 100644 index 0000000000..3b6c71fd5a --- /dev/null +++ b/vendor/go.uber.org/atomic/uint64.go @@ -0,0 +1,102 @@ +// @generated Code generated by gen-atomicint. + +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "encoding/json" + "strconv" + "sync/atomic" +) + +// Uint64 is an atomic wrapper around uint64. +type Uint64 struct { + _ nocmp // disallow non-atomic comparison + + v uint64 +} + +// NewUint64 creates a new Uint64. +func NewUint64(i uint64) *Uint64 { + return &Uint64{v: i} +} + +// Load atomically loads the wrapped value. +func (i *Uint64) Load() uint64 { + return atomic.LoadUint64(&i.v) +} + +// Add atomically adds to the wrapped uint64 and returns the new value. +func (i *Uint64) Add(n uint64) uint64 { + return atomic.AddUint64(&i.v, n) +} + +// Sub atomically subtracts from the wrapped uint64 and returns the new value. +func (i *Uint64) Sub(n uint64) uint64 { + return atomic.AddUint64(&i.v, ^(n - 1)) +} + +// Inc atomically increments the wrapped uint64 and returns the new value. +func (i *Uint64) Inc() uint64 { + return i.Add(1) +} + +// Dec atomically decrements the wrapped uint64 and returns the new value. +func (i *Uint64) Dec() uint64 { + return i.Sub(1) +} + +// CAS is an atomic compare-and-swap. +func (i *Uint64) CAS(old, new uint64) bool { + return atomic.CompareAndSwapUint64(&i.v, old, new) +} + +// Store atomically stores the passed value. +func (i *Uint64) Store(n uint64) { + atomic.StoreUint64(&i.v, n) +} + +// Swap atomically swaps the wrapped uint64 and returns the old value. +func (i *Uint64) Swap(n uint64) uint64 { + return atomic.SwapUint64(&i.v, n) +} + +// MarshalJSON encodes the wrapped uint64 into JSON. +func (i *Uint64) MarshalJSON() ([]byte, error) { + return json.Marshal(i.Load()) +} + +// UnmarshalJSON decodes JSON into the wrapped uint64. +func (i *Uint64) UnmarshalJSON(b []byte) error { + var v uint64 + if err := json.Unmarshal(b, &v); err != nil { + return err + } + i.Store(v) + return nil +} + +// String encodes the wrapped value as a string. 
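The `Sub` methods on the unsigned wrappers above rely on the identity `^(n - 1) == -n` in two's-complement (modular) arithmetic, since `sync/atomic` only exposes `Add` for unsigned integers. A small standalone check of that identity, not part of the vendored code:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	var v uint64 = 10

	// Subtracting n is expressed as adding its two's complement, ^(n - 1).
	n := uint64(3)
	atomic.AddUint64(&v, ^(n - 1))
	fmt.Println(v) // 7

	// Dec is the n == 1 special case: ^(1 - 1) == ^uint64(0), i.e. "minus one".
	atomic.AddUint64(&v, ^uint64(0))
	fmt.Println(v) // 6
}
```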
+func (i *Uint64) String() string { + v := i.Load() + return strconv.FormatUint(uint64(v), 10) +} diff --git a/vendor/go.uber.org/atomic/value.go b/vendor/go.uber.org/atomic/value.go new file mode 100644 index 0000000000..671f3a3824 --- /dev/null +++ b/vendor/go.uber.org/atomic/value.go @@ -0,0 +1,31 @@ +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import "sync/atomic" + +// Value shadows the type of the same name from sync/atomic +// https://godoc.org/sync/atomic#Value +type Value struct { + atomic.Value + + _ nocmp // disallow non-atomic comparison +} diff --git a/vendor/go.uber.org/multierr/.codecov.yml b/vendor/go.uber.org/multierr/.codecov.yml new file mode 100644 index 0000000000..6d4d1be7b5 --- /dev/null +++ b/vendor/go.uber.org/multierr/.codecov.yml @@ -0,0 +1,15 @@ +coverage: + range: 80..100 + round: down + precision: 2 + + status: + project: # measuring the overall project coverage + default: # context, you can create multiple ones with custom titles + enabled: yes # must be yes|true to enable this status + target: 100 # specify the target coverage for each commit status + # option: "auto" (must increase from parent commit or pull request base) + # option: "X%" a static target percentage to hit + if_not_found: success # if parent is not found report status as success, error, or failure + if_ci_failed: error # if ci fails report status as success, error, or failure + diff --git a/vendor/go.uber.org/multierr/.gitignore b/vendor/go.uber.org/multierr/.gitignore new file mode 100644 index 0000000000..b9a05e3da0 --- /dev/null +++ b/vendor/go.uber.org/multierr/.gitignore @@ -0,0 +1,4 @@ +/vendor +cover.html +cover.out +/bin diff --git a/vendor/go.uber.org/multierr/.travis.yml b/vendor/go.uber.org/multierr/.travis.yml new file mode 100644 index 0000000000..8636ab42ad --- /dev/null +++ b/vendor/go.uber.org/multierr/.travis.yml @@ -0,0 +1,23 @@ +sudo: false +language: go +go_import_path: go.uber.org/multierr + +env: + global: + - GO111MODULE=on + +go: + - oldstable + - stable + +before_install: +- go version + +script: +- | + set -e + make lint + make cover + +after_success: +- bash <(curl -s https://codecov.io/bash) diff --git a/vendor/go.uber.org/multierr/CHANGELOG.md b/vendor/go.uber.org/multierr/CHANGELOG.md new file mode 100644 index 0000000000..6f1db9ef4a --- /dev/null +++ b/vendor/go.uber.org/multierr/CHANGELOG.md @@ -0,0 +1,60 @@ +Releases +======== + +v1.6.0 (2020-09-14) 
+=================== + +- Actually drop library dependency on development-time tooling. + + +v1.5.0 (2020-02-24) +=================== + +- Drop library dependency on development-time tooling. + + +v1.4.0 (2019-11-04) +=================== + +- Add `AppendInto` function to more ergonomically build errors inside a + loop. + + +v1.3.0 (2019-10-29) +=================== + +- Switch to Go modules. + + +v1.2.0 (2019-09-26) +=================== + +- Support extracting and matching against wrapped errors with `errors.As` + and `errors.Is`. + + +v1.1.0 (2017-06-30) +=================== + +- Added an `Errors(error) []error` function to extract the underlying list of + errors for a multierr error. + + +v1.0.0 (2017-05-31) +=================== + +No changes since v0.2.0. This release is committing to making no breaking +changes to the current API in the 1.X series. + + +v0.2.0 (2017-04-11) +=================== + +- Repeatedly appending to the same error is now faster due to fewer + allocations. + + +v0.1.0 (2017-03-31) +=================== + +- Initial release diff --git a/vendor/go.uber.org/multierr/LICENSE.txt b/vendor/go.uber.org/multierr/LICENSE.txt new file mode 100644 index 0000000000..858e02475f --- /dev/null +++ b/vendor/go.uber.org/multierr/LICENSE.txt @@ -0,0 +1,19 @@ +Copyright (c) 2017 Uber Technologies, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/go.uber.org/multierr/Makefile b/vendor/go.uber.org/multierr/Makefile new file mode 100644 index 0000000000..316004400b --- /dev/null +++ b/vendor/go.uber.org/multierr/Makefile @@ -0,0 +1,42 @@ +# Directory to put `go install`ed binaries in. +export GOBIN ?= $(shell pwd)/bin + +GO_FILES := $(shell \ + find . '(' -path '*/.*' -o -path './vendor' ')' -prune \ + -o -name '*.go' -print | cut -b3-) + +.PHONY: build +build: + go build ./... + +.PHONY: test +test: + go test -race ./... + +.PHONY: gofmt +gofmt: + $(eval FMT_LOG := $(shell mktemp -t gofmt.XXXXX)) + @gofmt -e -s -l $(GO_FILES) > $(FMT_LOG) || true + @[ ! -s "$(FMT_LOG)" ] || (echo "gofmt failed:" | cat - $(FMT_LOG) && false) + +.PHONY: golint +golint: + @cd tools && go install golang.org/x/lint/golint + @$(GOBIN)/golint ./... + +.PHONY: staticcheck +staticcheck: + @cd tools && go install honnef.co/go/tools/cmd/staticcheck + @$(GOBIN)/staticcheck ./... + +.PHONY: lint +lint: gofmt golint staticcheck + +.PHONY: cover +cover: + go test -coverprofile=cover.out -coverpkg=./... -v ./...
+ go tool cover -html=cover.out -o cover.html + +update-license: + @cd tools && go install go.uber.org/tools/update-license + @$(GOBIN)/update-license $(GO_FILES) diff --git a/vendor/go.uber.org/multierr/README.md b/vendor/go.uber.org/multierr/README.md new file mode 100644 index 0000000000..751bd65e58 --- /dev/null +++ b/vendor/go.uber.org/multierr/README.md @@ -0,0 +1,23 @@ +# multierr [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] + +`multierr` allows combining one or more Go `error`s together. + +## Installation + + go get -u go.uber.org/multierr + +## Status + +Stable: No breaking changes will be made before 2.0. + +------------------------------------------------------------------------------- + +Released under the [MIT License]. + +[MIT License]: LICENSE.txt +[doc-img]: https://godoc.org/go.uber.org/multierr?status.svg +[doc]: https://godoc.org/go.uber.org/multierr +[ci-img]: https://travis-ci.com/uber-go/multierr.svg?branch=master +[cov-img]: https://codecov.io/gh/uber-go/multierr/branch/master/graph/badge.svg +[ci]: https://travis-ci.com/uber-go/multierr +[cov]: https://codecov.io/gh/uber-go/multierr diff --git a/vendor/go.uber.org/multierr/error.go b/vendor/go.uber.org/multierr/error.go new file mode 100644 index 0000000000..5c9b67d537 --- /dev/null +++ b/vendor/go.uber.org/multierr/error.go @@ -0,0 +1,449 @@ +// Copyright (c) 2019 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package multierr allows combining one or more errors together. +// +// Overview +// +// Errors can be combined with the use of the Combine function. +// +// multierr.Combine( +// reader.Close(), +// writer.Close(), +// conn.Close(), +// ) +// +// If only two errors are being combined, the Append function may be used +// instead. +// +// err = multierr.Append(reader.Close(), writer.Close()) +// +// This makes it possible to record resource cleanup failures from deferred +// blocks with the help of named return values. +// +// func sendRequest(req Request) (err error) { +// conn, err := openConnection() +// if err != nil { +// return err +// } +// defer func() { +// err = multierr.Append(err, conn.Close()) +// }() +// // ... +// } +// +// The underlying list of errors for a returned error object may be retrieved +// with the Errors function. 
+// +// errors := multierr.Errors(err) +// if len(errors) > 0 { +// fmt.Println("The following errors occurred:", errors) +// } +// +// Advanced Usage +// +// Errors returned by Combine and Append MAY implement the following +// interface. +// +// type errorGroup interface { +// // Returns a slice containing the underlying list of errors. +// // +// // This slice MUST NOT be modified by the caller. +// Errors() []error +// } +// +// Note that if you need access to list of errors behind a multierr error, you +// should prefer using the Errors function. That said, if you need cheap +// read-only access to the underlying errors slice, you can attempt to cast +// the error to this interface. You MUST handle the failure case gracefully +// because errors returned by Combine and Append are not guaranteed to +// implement this interface. +// +// var errors []error +// group, ok := err.(errorGroup) +// if ok { +// errors = group.Errors() +// } else { +// errors = []error{err} +// } +package multierr // import "go.uber.org/multierr" + +import ( + "bytes" + "fmt" + "io" + "strings" + "sync" + + "go.uber.org/atomic" +) + +var ( + // Separator for single-line error messages. + _singlelineSeparator = []byte("; ") + + // Prefix for multi-line messages + _multilinePrefix = []byte("the following errors occurred:") + + // Prefix for the first and following lines of an item in a list of + // multi-line error messages. + // + // For example, if a single item is: + // + // foo + // bar + // + // It will become, + // + // - foo + // bar + _multilineSeparator = []byte("\n - ") + _multilineIndent = []byte(" ") +) + +// _bufferPool is a pool of bytes.Buffers. +var _bufferPool = sync.Pool{ + New: func() interface{} { + return &bytes.Buffer{} + }, +} + +type errorGroup interface { + Errors() []error +} + +// Errors returns a slice containing zero or more errors that the supplied +// error is composed of. If the error is nil, a nil slice is returned. +// +// err := multierr.Append(r.Close(), w.Close()) +// errors := multierr.Errors(err) +// +// If the error is not composed of other errors, the returned slice contains +// just the error that was passed in. +// +// Callers of this function are free to modify the returned slice. +func Errors(err error) []error { + if err == nil { + return nil + } + + // Note that we're casting to multiError, not errorGroup. Our contract is + // that returned errors MAY implement errorGroup. Errors, however, only + // has special behavior for multierr-specific error objects. + // + // This behavior can be expanded in the future but I think it's prudent to + // start with as little as possible in terms of contract and possibility + // of misuse. + eg, ok := err.(*multiError) + if !ok { + return []error{err} + } + + errors := eg.Errors() + result := make([]error, len(errors)) + copy(result, errors) + return result +} + +// multiError is an error that holds one or more errors. +// +// An instance of this is guaranteed to be non-empty and flattened. That is, +// none of the errors inside multiError are other multiErrors. +// +// multiError formats to a semi-colon delimited list of error messages with +// %v and with a more readable multi-line format with %+v. +type multiError struct { + copyNeeded atomic.Bool + errors []error +} + +var _ errorGroup = (*multiError)(nil) + +// Errors returns the list of underlying errors. +// +// This slice MUST NOT be modified. 
+func (merr *multiError) Errors() []error { + if merr == nil { + return nil + } + return merr.errors +} + +func (merr *multiError) Error() string { + if merr == nil { + return "" + } + + buff := _bufferPool.Get().(*bytes.Buffer) + buff.Reset() + + merr.writeSingleline(buff) + + result := buff.String() + _bufferPool.Put(buff) + return result +} + +func (merr *multiError) Format(f fmt.State, c rune) { + if c == 'v' && f.Flag('+') { + merr.writeMultiline(f) + } else { + merr.writeSingleline(f) + } +} + +func (merr *multiError) writeSingleline(w io.Writer) { + first := true + for _, item := range merr.errors { + if first { + first = false + } else { + w.Write(_singlelineSeparator) + } + io.WriteString(w, item.Error()) + } +} + +func (merr *multiError) writeMultiline(w io.Writer) { + w.Write(_multilinePrefix) + for _, item := range merr.errors { + w.Write(_multilineSeparator) + writePrefixLine(w, _multilineIndent, fmt.Sprintf("%+v", item)) + } +} + +// Writes s to the writer with the given prefix added before each line after +// the first. +func writePrefixLine(w io.Writer, prefix []byte, s string) { + first := true + for len(s) > 0 { + if first { + first = false + } else { + w.Write(prefix) + } + + idx := strings.IndexByte(s, '\n') + if idx < 0 { + idx = len(s) - 1 + } + + io.WriteString(w, s[:idx+1]) + s = s[idx+1:] + } +} + +type inspectResult struct { + // Number of top-level non-nil errors + Count int + + // Total number of errors including multiErrors + Capacity int + + // Index of the first non-nil error in the list. Value is meaningless if + // Count is zero. + FirstErrorIdx int + + // Whether the list contains at least one multiError + ContainsMultiError bool +} + +// Inspects the given slice of errors so that we can efficiently allocate +// space for it. +func inspect(errors []error) (res inspectResult) { + first := true + for i, err := range errors { + if err == nil { + continue + } + + res.Count++ + if first { + first = false + res.FirstErrorIdx = i + } + + if merr, ok := err.(*multiError); ok { + res.Capacity += len(merr.errors) + res.ContainsMultiError = true + } else { + res.Capacity++ + } + } + return +} + +// fromSlice converts the given list of errors into a single error. +func fromSlice(errors []error) error { + res := inspect(errors) + switch res.Count { + case 0: + return nil + case 1: + // only one non-nil entry + return errors[res.FirstErrorIdx] + case len(errors): + if !res.ContainsMultiError { + // already flat + return &multiError{errors: errors} + } + } + + nonNilErrs := make([]error, 0, res.Capacity) + for _, err := range errors[res.FirstErrorIdx:] { + if err == nil { + continue + } + + if nested, ok := err.(*multiError); ok { + nonNilErrs = append(nonNilErrs, nested.errors...) + } else { + nonNilErrs = append(nonNilErrs, err) + } + } + + return &multiError{errors: nonNilErrs} +} + +// Combine combines the passed errors into a single error. +// +// If zero arguments were passed or if all items are nil, a nil error is +// returned. +// +// Combine(nil, nil) // == nil +// +// If only a single error was passed, it is returned as-is. +// +// Combine(err) // == err +// +// Combine skips over nil arguments so this function may be used to combine +// together errors from operations that fail independently of each other. +// +// multierr.Combine( +// reader.Close(), +// writer.Close(), +// pipe.Close(), +// ) +// +// If any of the passed errors is a multierr error, it will be flattened along +// with the other errors. 
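The `Format` method above switches between the single-line and multi-line renderings. Assuming the package is imported under its public path `go.uber.org/multierr`, a minimal sketch of the two forms:

```go
package main

import (
	"errors"
	"fmt"

	"go.uber.org/multierr"
)

func main() {
	err := multierr.Combine(
		errors.New("failed to close reader"),
		errors.New("failed to close writer"),
	)

	// %v uses the single-line form: messages joined with "; ".
	fmt.Printf("%v\n", err)

	// %+v uses the multi-line form: a header line followed by one
	// indented " - " item per underlying error.
	fmt.Printf("%+v\n", err)
}
```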
+// +// multierr.Combine(multierr.Combine(err1, err2), err3) +// // is the same as +// multierr.Combine(err1, err2, err3) +// +// The returned error formats into a readable multi-line error message if +// formatted with %+v. +// +// fmt.Sprintf("%+v", multierr.Combine(err1, err2)) +func Combine(errors ...error) error { + return fromSlice(errors) +} + +// Append appends the given errors together. Either value may be nil. +// +// This function is a specialization of Combine for the common case where +// there are only two errors. +// +// err = multierr.Append(reader.Close(), writer.Close()) +// +// The following pattern may also be used to record failure of deferred +// operations without losing information about the original error. +// +// func doSomething(..) (err error) { +// f := acquireResource() +// defer func() { +// err = multierr.Append(err, f.Close()) +// }() +func Append(left error, right error) error { + switch { + case left == nil: + return right + case right == nil: + return left + } + + if _, ok := right.(*multiError); !ok { + if l, ok := left.(*multiError); ok && !l.copyNeeded.Swap(true) { + // Common case where the error on the left is constantly being + // appended to. + errs := append(l.errors, right) + return &multiError{errors: errs} + } else if !ok { + // Both errors are single errors. + return &multiError{errors: []error{left, right}} + } + } + + // Either right or both, left and right, are multiErrors. Rely on usual + // expensive logic. + errors := [2]error{left, right} + return fromSlice(errors[0:]) +} + +// AppendInto appends an error into the destination of an error pointer and +// returns whether the error being appended was non-nil. +// +// var err error +// multierr.AppendInto(&err, r.Close()) +// multierr.AppendInto(&err, w.Close()) +// +// The above is equivalent to, +// +// err := multierr.Append(r.Close(), w.Close()) +// +// As AppendInto reports whether the provided error was non-nil, it may be +// used to build a multierr error in a loop more ergonomically. For example: +// +// var err error +// for line := range lines { +// var item Item +// if multierr.AppendInto(&err, parse(line, &item)) { +// continue +// } +// items = append(items, item) +// } +// +// Compare this with a version that relies solely on Append: +// +// var err error +// for line := range lines { +// var item Item +// if parseErr := parse(line, &item); parseErr != nil { +// err = multierr.Append(err, parseErr) +// continue +// } +// items = append(items, item) +// } +func AppendInto(into *error, err error) (errored bool) { + if into == nil { + // We panic if 'into' is nil. This is not documented above + // because suggesting that the pointer must be non-nil may + // confuse users into thinking that the error that it points + // to must be non-nil.
+ panic("misuse of multierr.AppendInto: into pointer must not be nil") + } + + if err == nil { + return false + } + *into = Append(*into, err) + return true +} diff --git a/vendor/go.uber.org/multierr/glide.yaml b/vendor/go.uber.org/multierr/glide.yaml new file mode 100644 index 0000000000..6ef084ec24 --- /dev/null +++ b/vendor/go.uber.org/multierr/glide.yaml @@ -0,0 +1,8 @@ +package: go.uber.org/multierr +import: +- package: go.uber.org/atomic + version: ^1 +testImport: +- package: github.com/stretchr/testify + subpackages: + - assert diff --git a/vendor/go.uber.org/multierr/go113.go b/vendor/go.uber.org/multierr/go113.go new file mode 100644 index 0000000000..264b0eac0d --- /dev/null +++ b/vendor/go.uber.org/multierr/go113.go @@ -0,0 +1,52 @@ +// Copyright (c) 2019 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// +build go1.13 + +package multierr + +import "errors" + +// As attempts to find the first error in the error list that matches the type +// of the value that target points to. +// +// This function allows errors.As to traverse the values stored on the +// multierr error. +func (merr *multiError) As(target interface{}) bool { + for _, err := range merr.Errors() { + if errors.As(err, target) { + return true + } + } + return false +} + +// Is attempts to match the provided error against errors in the error list. +// +// This function allows errors.Is to traverse the values stored on the +// multierr error. 
+func (merr *multiError) Is(target error) bool { + for _, err := range merr.Errors() { + if errors.Is(err, target) { + return true + } + } + return false +} diff --git a/vendor/go.uber.org/zap/.codecov.yml b/vendor/go.uber.org/zap/.codecov.yml new file mode 100644 index 0000000000..8e5ca7d3e2 --- /dev/null +++ b/vendor/go.uber.org/zap/.codecov.yml @@ -0,0 +1,17 @@ +coverage: + range: 80..100 + round: down + precision: 2 + + status: + project: # measuring the overall project coverage + default: # context, you can create multiple ones with custom titles + enabled: yes # must be yes|true to enable this status + target: 95% # specify the target coverage for each commit status + # option: "auto" (must increase from parent commit or pull request base) + # option: "X%" a static target percentage to hit + if_not_found: success # if parent is not found report status as success, error, or failure + if_ci_failed: error # if ci fails report status as success, error, or failure +ignore: + - internal/readme/readme.go + diff --git a/vendor/github.com/pkg/errors/.gitignore b/vendor/go.uber.org/zap/.gitignore similarity index 83% rename from vendor/github.com/pkg/errors/.gitignore rename to vendor/go.uber.org/zap/.gitignore index daf913b1b3..da9d9d00b4 100644 --- a/vendor/github.com/pkg/errors/.gitignore +++ b/vendor/go.uber.org/zap/.gitignore @@ -6,6 +6,7 @@ # Folders _obj _test +vendor # Architecture specific extensions/prefixes *.[568vq] @@ -22,3 +23,10 @@ _testmain.go *.exe *.test *.prof +*.pprof +*.out +*.log + +/bin +cover.out +cover.html diff --git a/vendor/go.uber.org/zap/.readme.tmpl b/vendor/go.uber.org/zap/.readme.tmpl new file mode 100644 index 0000000000..92aa65d660 --- /dev/null +++ b/vendor/go.uber.org/zap/.readme.tmpl @@ -0,0 +1,109 @@ +# :zap: zap [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] + +Blazing fast, structured, leveled logging in Go. + +## Installation + +`go get -u go.uber.org/zap` + +Note that zap only supports the two most recent minor versions of Go. + +## Quick Start + +In contexts where performance is nice, but not critical, use the +`SugaredLogger`. It's 4-10x faster than other structured logging +packages and includes both structured and `printf`-style APIs. + +```go +logger, _ := zap.NewProduction() +defer logger.Sync() // flushes buffer, if any +sugar := logger.Sugar() +sugar.Infow("failed to fetch URL", + // Structured context as loosely typed key-value pairs. + "url", url, + "attempt", 3, + "backoff", time.Second, +) +sugar.Infof("Failed to fetch URL: %s", url) +``` + +When performance and type safety are critical, use the `Logger`. It's even +faster than the `SugaredLogger` and allocates far less, but it only supports +structured logging. + +```go +logger, _ := zap.NewProduction() +defer logger.Sync() +logger.Info("failed to fetch URL", + // Structured context as strongly typed Field values. + zap.String("url", url), + zap.Int("attempt", 3), + zap.Duration("backoff", time.Second), +) +``` + +See the [documentation][doc] and [FAQ](FAQ.md) for more details. + +## Performance + +For applications that log in the hot path, reflection-based serialization and +string formatting are prohibitively expensive — they're CPU-intensive +and make many small allocations. Put differently, using `encoding/json` and +`fmt.Fprintf` to log tons of `interface{}`s makes your application slow. + +Zap takes a different approach. 
It includes a reflection-free, zero-allocation +JSON encoder, and the base `Logger` strives to avoid serialization overhead +and allocations wherever possible. By building the high-level `SugaredLogger` +on that foundation, zap lets users *choose* when they need to count every +allocation and when they'd prefer a more familiar, loosely typed API. + +As measured by its own [benchmarking suite][], not only is zap more performant +than comparable structured logging packages — it's also faster than the +standard library. Like all benchmarks, take these with a grain of salt.[1](#footnote-versions) + +Log a message and 10 fields: + +{{.BenchmarkAddingFields}} + +Log a message with a logger that already has 10 fields of context: + +{{.BenchmarkAccumulatedContext}} + +Log a static string, without any context or `printf`-style templating: + +{{.BenchmarkWithoutFields}} + +## Development Status: Stable + +All APIs are finalized, and no breaking changes will be made in the 1.x series +of releases. Users of semver-aware dependency management systems should pin +zap to `^1`. + +## Contributing + +We encourage and support an active, healthy community of contributors — +including you! Details are in the [contribution guide](CONTRIBUTING.md) and +the [code of conduct](CODE_OF_CONDUCT.md). The zap maintainers keep an eye on +issues and pull requests, but you can also report any negative conduct to +oss-conduct@uber.com. That email list is a private, safe space; even the zap +maintainers don't have access, so don't hesitate to hold us to a high +standard. + +
+ +Released under the [MIT License](LICENSE.txt). + +1 In particular, keep in mind that we may be +benchmarking against slightly older versions of other packages. Versions are +pinned in the [benchmarks/go.mod][] file. [↩](#anchor-versions) + +[doc-img]: https://pkg.go.dev/badge/go.uber.org/zap +[doc]: https://pkg.go.dev/go.uber.org/zap +[ci-img]: https://github.com/uber-go/zap/actions/workflows/go.yml/badge.svg +[ci]: https://github.com/uber-go/zap/actions/workflows/go.yml +[cov-img]: https://codecov.io/gh/uber-go/zap/branch/master/graph/badge.svg +[cov]: https://codecov.io/gh/uber-go/zap +[benchmarking suite]: https://github.com/uber-go/zap/tree/master/benchmarks +[benchmarks/go.mod]: https://github.com/uber-go/zap/blob/master/benchmarks/go.mod + diff --git a/vendor/go.uber.org/zap/CHANGELOG.md b/vendor/go.uber.org/zap/CHANGELOG.md new file mode 100644 index 0000000000..0db1f9f15f --- /dev/null +++ b/vendor/go.uber.org/zap/CHANGELOG.md @@ -0,0 +1,617 @@ +# Changelog +All notable changes to this project will be documented in this file. + +This project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html). + +## 1.24.0 (30 Nov 2022) + +Enhancements: +* [#1148][]: Add `Level` to both `Logger` and `SugaredLogger` that reports the + current minimum enabled log level. +* [#1185][]: `SugaredLogger` turns errors to zap.Error automatically. + +Thanks to @Abirdcfly, @craigpastro, @nnnkkk7, and @sashamelentyev for their +contributions to this release. + +[#1148]: https://github.com/uber-go/zap/pull/1148 +[#1185]: https://github.com/uber-go/zap/pull/1185 + +## 1.23.0 (24 Aug 2022) + +Enhancements: +* [#1147][]: Add a `zapcore.LevelOf` function to determine the level of a + `LevelEnabler` or `Core`. +* [#1155][]: Add `zap.Stringers` field constructor to log arrays of objects + that implement `String() string`. + +[#1147]: https://github.com/uber-go/zap/pull/1147 +[#1155]: https://github.com/uber-go/zap/pull/1155 + + +## 1.22.0 (8 Aug 2022) + +Enhancements: +* [#1071][]: Add `zap.Objects` and `zap.ObjectValues` field constructors to log + arrays of objects. With these two constructors, you don't need to implement + `zapcore.ArrayMarshaler` for use with `zap.Array` if those objects implement + `zapcore.ObjectMarshaler`. +* [#1079][]: Add `SugaredLogger.WithOptions` to build a copy of an existing + `SugaredLogger` with the provided options applied. +* [#1080][]: Add `*ln` variants to `SugaredLogger` for each log level. + These functions provide a string joining behavior similar to `fmt.Println`. +* [#1088][]: Add `zap.WithFatalHook` option to control the behavior of the + logger for `Fatal`-level log entries. This defaults to exiting the program. +* [#1108][]: Add a `zap.Must` function that you can use with `NewProduction` or + `NewDevelopment` to panic if the system was unable to build the logger. +* [#1118][]: Add a `Logger.Log` method that allows specifying the log level for + a statement dynamically. + +Thanks to @cardil, @craigpastro, @sashamelentyev, @shota3506, and @zhupeijun +for their contributions to this release. + +[#1071]: https://github.com/uber-go/zap/pull/1071 +[#1079]: https://github.com/uber-go/zap/pull/1079 +[#1080]: https://github.com/uber-go/zap/pull/1080 +[#1088]: https://github.com/uber-go/zap/pull/1088 +[#1108]: https://github.com/uber-go/zap/pull/1108 +[#1118]: https://github.com/uber-go/zap/pull/1118 + +## 1.21.0 (7 Feb 2022) + +Enhancements: +* [#1047][]: Add `zapcore.ParseLevel` to parse a `Level` from a string.
+* [#1048][]: Add `zap.ParseAtomicLevel` to parse an `AtomicLevel` from a + string. + +Bugfixes: +* [#1058][]: Fix panic in JSON encoder when `EncodeLevel` is unset. + +Other changes: +* [#1052][]: Improve encoding performance when the `AddCaller` and + `AddStacktrace` options are used together. + +[#1047]: https://github.com/uber-go/zap/pull/1047 +[#1048]: https://github.com/uber-go/zap/pull/1048 +[#1052]: https://github.com/uber-go/zap/pull/1052 +[#1058]: https://github.com/uber-go/zap/pull/1058 + +Thanks to @aerosol and @Techassi for their contributions to this release. + +## 1.20.0 (4 Jan 2022) + +Enhancements: +* [#989][]: Add `EncoderConfig.SkipLineEnding` flag to disable adding newline + characters between log statements. +* [#1039][]: Add `EncoderConfig.NewReflectedEncoder` field to customize JSON + encoding of reflected log fields. + +Bugfixes: +* [#1011][]: Fix inaccurate precision when encoding complex64 as JSON. +* [#554][], [#1017][]: Close JSON namespaces opened in `MarshalLogObject` + methods when the methods return. +* [#1033][]: Avoid panicking in Sampler core if `thereafter` is zero. + +Other changes: +* [#1028][]: Drop support for Go < 1.15. + +[#554]: https://github.com/uber-go/zap/pull/554 +[#989]: https://github.com/uber-go/zap/pull/989 +[#1011]: https://github.com/uber-go/zap/pull/1011 +[#1017]: https://github.com/uber-go/zap/pull/1017 +[#1028]: https://github.com/uber-go/zap/pull/1028 +[#1033]: https://github.com/uber-go/zap/pull/1033 +[#1039]: https://github.com/uber-go/zap/pull/1039 + +Thanks to @psrajat, @lruggieri, @sammyrnycreal for their contributions to this release. + +## 1.19.1 (8 Sep 2021) + +Bugfixes: +* [#1001][]: JSON: Fix complex number encoding with negative imaginary part. Thanks to @hemantjadon. +* [#1003][]: JSON: Fix inaccurate precision when encoding float32. + +[#1001]: https://github.com/uber-go/zap/pull/1001 +[#1003]: https://github.com/uber-go/zap/pull/1003 + +## 1.19.0 (9 Aug 2021) + +Enhancements: +* [#975][]: Avoid panicking in Sampler core if the level is out of bounds. +* [#984][]: Reduce the size of BufferedWriteSyncer by aligning the fields + better. + +[#975]: https://github.com/uber-go/zap/pull/975 +[#984]: https://github.com/uber-go/zap/pull/984 + +Thanks to @lancoLiu and @thockin for their contributions to this release. + +## 1.18.1 (28 Jun 2021) + +Bugfixes: +* [#974][]: Fix nil dereference in logger constructed by `zap.NewNop`. + +[#974]: https://github.com/uber-go/zap/pull/974 + +## 1.18.0 (28 Jun 2021) + +Enhancements: +* [#961][]: Add `zapcore.BufferedWriteSyncer`, a new `WriteSyncer` that buffers + messages in-memory and flushes them periodically. +* [#971][]: Add `zapio.Writer` to use a Zap logger as an `io.Writer`. +* [#897][]: Add `zap.WithClock` option to control the source of time via the + new `zapcore.Clock` interface. +* [#949][]: Avoid panicking in `zap.SugaredLogger` when arguments of `*w` + methods don't match expectations. +* [#943][]: Add support for filtering by level or arbitrary matcher function to + `zaptest/observer`. +* [#691][]: Comply with `io.StringWriter` and `io.ByteWriter` in Zap's + `buffer.Buffer`. + +Thanks to @atrn0, @ernado, @heyanfu, @hnlq715, @zchee +for their contributions to this release. 
+ +[#691]: https://github.com/uber-go/zap/pull/691 +[#897]: https://github.com/uber-go/zap/pull/897 +[#943]: https://github.com/uber-go/zap/pull/943 +[#949]: https://github.com/uber-go/zap/pull/949 +[#961]: https://github.com/uber-go/zap/pull/961 +[#971]: https://github.com/uber-go/zap/pull/971 + +## 1.17.0 (25 May 2021) + +Bugfixes: +* [#867][]: Encode `<nil>` for nil `error` instead of a panic. +* [#931][], [#936][]: Update minimum version constraints to address + vulnerabilities in dependencies. + +Enhancements: +* [#865][]: Improve alignment of fields of the Logger struct, reducing its + size from 96 to 80 bytes. +* [#881][]: Support `grpclog.LoggerV2` in zapgrpc. +* [#903][]: Support URL-encoded POST requests to the AtomicLevel HTTP handler + with the `application/x-www-form-urlencoded` content type. +* [#912][]: Support multi-field encoding with `zap.Inline`. +* [#913][]: Speed up SugaredLogger for calls with a single string. +* [#928][]: Add support for filtering by field name to `zaptest/observer`. + +Thanks to @ash2k, @FMLS, @jimmystewpot, @Oncilla, @tsoslow, @tylitianrui, @withshubh, and @wziww for their contributions to this release. + +## 1.16.0 (1 Sep 2020) + +Bugfixes: +* [#828][]: Fix missing newline in IncreaseLevel error messages. +* [#835][]: Fix panic in JSON encoder when encoding times or durations + without specifying a time or duration encoder. +* [#843][]: Honor CallerSkip when taking stack traces. +* [#862][]: Fix the default file permissions to use `0666` and rely on the umask instead. +* [#854][]: Encode `<nil>` for nil `Stringer` instead of a panic error log. + +Enhancements: +* [#629][]: Added `zapcore.TimeEncoderOfLayout` to easily create time encoders + for custom layouts. +* [#697][]: Added support for a configurable delimiter in the console encoder. +* [#852][]: Optimize console encoder by pooling the underlying JSON encoder. +* [#844][]: Add ability to include the calling function as part of logs. +* [#843][]: Add `StackSkip` for including truncated stacks as a field. +* [#861][]: Add options to customize Fatal behaviour for better testability. + +Thanks to @SteelPhase, @tmshn, @lixingwang, @wyxloading, @moul, @segevfiner, @andy-retailnext and @jcorbin for their contributions to this release. + +## 1.15.0 (23 Apr 2020) + +Bugfixes: +* [#804][]: Fix handling of `Time` values out of `UnixNano` range. +* [#812][]: Fix `IncreaseLevel` being reset after a call to `With`. + +Enhancements: +* [#806][]: Add `WithCaller` option to supersede the `AddCaller` option. This + allows disabling annotation of log entries with caller information if + previously enabled with `AddCaller`. +* [#813][]: Deprecate `NewSampler` constructor in favor of + `NewSamplerWithOptions` which supports a `SamplerHook` option. This option + adds support for monitoring sampling decisions through a hook. + +Thanks to @danielbprice for their contributions to this release. + +## 1.14.1 (14 Mar 2020) + +Bugfixes: +* [#791][]: Fix panic on attempting to build a logger with an invalid Config. +* [#795][]: Vendoring Zap with `go mod vendor` no longer includes Zap's + development-time dependencies. +* [#799][]: Fix issue introduced in 1.14.0 that caused invalid JSON output to + be generated for arrays of `time.Time` objects when using string-based time + formats. + +Thanks to @YashishDua for their contributions to this release. + +## 1.14.0 (20 Feb 2020) + +Enhancements: +* [#771][]: Optimize calls for disabled log levels. +* [#773][]: Add millisecond duration encoder.
+* [#775][]: Add option to increase the level of a logger. +* [#786][]: Optimize time formatters using `Time.AppendFormat` where possible. + +Thanks to @caibirdme for their contributions to this release. + +## 1.13.0 (13 Nov 2019) + +Enhancements: +* [#758][]: Add `Intp`, `Stringp`, and other similar `*p` field constructors + to log pointers to primitives with support for `nil` values. + +Thanks to @jbizzle for their contributions to this release. + +## 1.12.0 (29 Oct 2019) + +Enhancements: +* [#751][]: Migrate to Go modules. + +## 1.11.0 (21 Oct 2019) + +Enhancements: +* [#725][]: Add `zapcore.OmitKey` to omit keys in an `EncoderConfig`. +* [#736][]: Add `RFC3339` and `RFC3339Nano` time encoders. + +Thanks to @juicemia, @uhthomas for their contributions to this release. + +## 1.10.0 (29 Apr 2019) + +Bugfixes: +* [#657][]: Fix `MapObjectEncoder.AppendByteString` not adding value as a + string. +* [#706][]: Fix incorrect call depth to determine caller in Go 1.12. + +Enhancements: +* [#610][]: Add `zaptest.WrapOptions` to wrap `zap.Option` for creating test + loggers. +* [#675][]: Don't panic when encoding a String field. +* [#704][]: Disable HTML escaping for JSON objects encoded using the + reflect-based encoder. + +Thanks to @iaroslav-ciupin, @lelenanam, @joa, @NWilson for their contributions +to this release. + +## v1.9.1 (06 Aug 2018) + +Bugfixes: + +* [#614][]: MapObjectEncoder should not ignore empty slices. + +## v1.9.0 (19 Jul 2018) + +Enhancements: +* [#602][]: Reduce number of allocations when logging with reflection. +* [#572][], [#606][]: Expose a registry for third-party logging sinks. + +Thanks to @nfarah86, @AlekSi, @JeanMertz, @philippgille, @etsangsplk, and +@dimroc for their contributions to this release. + +## v1.8.0 (13 Apr 2018) + +Enhancements: +* [#508][]: Make log level configurable when redirecting the standard + library's logger. +* [#518][]: Add a logger that writes to a `*testing.TB`. +* [#577][]: Add a top-level alias for `zapcore.Field` to clean up GoDoc. + +Bugfixes: +* [#574][]: Add a missing import comment to `go.uber.org/zap/buffer`. + +Thanks to @DiSiqueira and @djui for their contributions to this release. + +## v1.7.1 (25 Sep 2017) + +Bugfixes: +* [#504][]: Store strings when using AddByteString with the map encoder. + +## v1.7.0 (21 Sep 2017) + +Enhancements: + +* [#487][]: Add `NewStdLogAt`, which extends `NewStdLog` by allowing the user + to specify the level of the logged messages. + +## v1.6.0 (30 Aug 2017) + +Enhancements: + +* [#491][]: Omit zap stack frames from stacktraces. +* [#490][]: Add a `ContextMap` method to observer logs for simpler + field validation in tests. + +## v1.5.0 (22 Jul 2017) + +Enhancements: + +* [#460][] and [#470][]: Support errors produced by `go.uber.org/multierr`. +* [#465][]: Support user-supplied encoders for logger names. + +Bugfixes: + +* [#477][]: Fix a bug that incorrectly truncated deep stacktraces. + +Thanks to @richard-tunein and @pavius for their contributions to this release. + +## v1.4.1 (08 Jun 2017) + +This release fixes two bugs. + +Bugfixes: + +* [#435][]: Support a variety of case conventions when unmarshaling levels. +* [#444][]: Fix a panic in the observer. + +## v1.4.0 (12 May 2017) + +This release adds a few small features and is fully backward-compatible. + +Enhancements: + +* [#424][]: Add a `LineEnding` field to `EncoderConfig`, allowing users to + override the Unix-style default. +* [#425][]: Preserve time zones when logging times. 
+* [#431][]: Make `zap.AtomicLevel` implement `fmt.Stringer`, which makes a + variety of operations a bit simpler. + +## v1.3.0 (25 Apr 2017) + +This release adds an enhancement to zap's testing helpers as well as the +ability to marshal an AtomicLevel. It is fully backward-compatible. + +Enhancements: + +* [#415][]: Add a substring-filtering helper to zap's observer. This is + particularly useful when testing the `SugaredLogger`. +* [#416][]: Make `AtomicLevel` implement `encoding.TextMarshaler`. + +## v1.2.0 (13 Apr 2017) + +This release adds a gRPC compatibility wrapper. It is fully backward-compatible. + +Enhancements: + +* [#402][]: Add a `zapgrpc` package that wraps zap's Logger and implements + `grpclog.Logger`. + +## v1.1.0 (31 Mar 2017) + +This release fixes two bugs and adds some enhancements to zap's testing helpers. +It is fully backward-compatible. + +Bugfixes: + +* [#385][]: Fix caller path trimming on Windows. +* [#396][]: Fix a panic when attempting to use non-existent directories with + zap's configuration struct. + +Enhancements: + +* [#386][]: Add filtering helpers to zaptest's observing logger. + +Thanks to @moitias for contributing to this release. + +## v1.0.0 (14 Mar 2017) + +This is zap's first stable release. All exported APIs are now final, and no +further breaking changes will be made in the 1.x release series. Anyone using a +semver-aware dependency manager should now pin to `^1`. + +Breaking changes: + +* [#366][]: Add byte-oriented APIs to encoders to log UTF-8 encoded text without + casting from `[]byte` to `string`. +* [#364][]: To support buffering outputs, add `Sync` methods to `zapcore.Core`, + `zap.Logger`, and `zap.SugaredLogger`. +* [#371][]: Rename the `testutils` package to `zaptest`, which is less likely to + clash with other testing helpers. + +Bugfixes: + +* [#362][]: Make the ISO8601 time formatters fixed-width, which is friendlier + for tab-separated console output. +* [#369][]: Remove the automatic locks in `zapcore.NewCore`, which allows zap to + work with concurrency-safe `WriteSyncer` implementations. +* [#347][]: Stop reporting errors when trying to `fsync` standard out on Linux + systems. +* [#373][]: Report the correct caller from zap's standard library + interoperability wrappers. + +Enhancements: + +* [#348][]: Add a registry allowing third-party encodings to work with zap's + built-in `Config`. +* [#327][]: Make the representation of logger callers configurable (like times, + levels, and durations). +* [#376][]: Allow third-party encoders to use their own buffer pools, which + removes the last performance advantage that zap's encoders have over plugins. +* [#346][]: Add `CombineWriteSyncers`, a convenience function to tee multiple + `WriteSyncer`s and lock the result. +* [#365][]: Make zap's stacktraces compatible with mid-stack inlining (coming in + Go 1.9). +* [#372][]: Export zap's observing logger as `zaptest/observer`. This makes it + easier for particularly punctilious users to unit test their application's + logging. + +Thanks to @suyash, @htrendev, @flisky, @Ulexus, and @skipor for their +contributions to this release. + +## v1.0.0-rc.3 (7 Mar 2017) + +This is the third release candidate for zap's stable release. There are no +breaking changes. + +Bugfixes: + +* [#339][]: Byte slices passed to `zap.Any` are now correctly treated as binary blobs + rather than `[]uint8`. + +Enhancements: + +* [#307][]: Users can opt into colored output for log levels. 
+* [#353][]: In addition to hijacking the output of the standard library's + package-global logging functions, users can now construct a zap-backed + `log.Logger` instance. +* [#311][]: Frames from common runtime functions and some of zap's internal + machinery are now omitted from stacktraces. + +Thanks to @ansel1 and @suyash for their contributions to this release. + +## v1.0.0-rc.2 (21 Feb 2017) + +This is the second release candidate for zap's stable release. It includes two +breaking changes. + +Breaking changes: + +* [#316][]: Zap's global loggers are now fully concurrency-safe + (previously, users had to ensure that `ReplaceGlobals` was called before the + loggers were in use). However, they must now be accessed via the `L()` and + `S()` functions. Users can update their projects with + + ``` + gofmt -r "zap.L -> zap.L()" -w . + gofmt -r "zap.S -> zap.S()" -w . + ``` +* [#309][] and [#317][]: RC1 was mistakenly shipped with invalid + JSON and YAML struct tags on all config structs. This release fixes the tags + and adds static analysis to prevent similar bugs in the future. + +Bugfixes: + +* [#321][]: Redirecting the standard library's `log` output now + correctly reports the logger's caller. + +Enhancements: + +* [#325][] and [#333][]: Zap now transparently supports non-standard, rich + errors like those produced by `github.com/pkg/errors`. +* [#326][]: Though `New(nil)` continues to return a no-op logger, `NewNop()` is + now preferred. Users can update their projects with `gofmt -r 'zap.New(nil) -> + zap.NewNop()' -w .`. +* [#300][]: Incorrectly importing zap as `github.com/uber-go/zap` now returns a + more informative error. + +Thanks to @skipor and @chapsuk for their contributions to this release. + +## v1.0.0-rc.1 (14 Feb 2017) + +This is the first release candidate for zap's stable release. There are multiple +breaking changes and improvements from the pre-release version. Most notably: + +* **Zap's import path is now "go.uber.org/zap"** — all users will + need to update their code. +* User-facing types and functions remain in the `zap` package. Code relevant + largely to extension authors is now in the `zapcore` package. +* The `zapcore.Core` type makes it easy for third-party packages to use zap's + internals but provide a different user-facing API. +* `Logger` is now a concrete type instead of an interface. +* A less verbose (though slower) logging API is included by default. +* Package-global loggers `L` and `S` are included. +* A human-friendly console encoder is included. +* A declarative config struct allows common logger configurations to be managed + as configuration instead of code. +* Sampling is more accurate, and doesn't depend on the standard library's shared + timer heap. + +## v0.1.0-beta.1 (6 Feb 2017) + +This is a minor version, tagged to allow users to pin to the pre-1.0 APIs and +upgrade at their leisure. Since this is the first tagged release, there are no +backward compatibility concerns and all functionality is new. + +Early zap adopters should pin to the 0.1.x minor version until they're ready to +upgrade to the upcoming stable release. 
+ +[#316]: https://github.com/uber-go/zap/pull/316 +[#309]: https://github.com/uber-go/zap/pull/309 +[#317]: https://github.com/uber-go/zap/pull/317 +[#321]: https://github.com/uber-go/zap/pull/321 +[#325]: https://github.com/uber-go/zap/pull/325 +[#333]: https://github.com/uber-go/zap/pull/333 +[#326]: https://github.com/uber-go/zap/pull/326 +[#300]: https://github.com/uber-go/zap/pull/300 +[#339]: https://github.com/uber-go/zap/pull/339 +[#307]: https://github.com/uber-go/zap/pull/307 +[#353]: https://github.com/uber-go/zap/pull/353 +[#311]: https://github.com/uber-go/zap/pull/311 +[#366]: https://github.com/uber-go/zap/pull/366 +[#364]: https://github.com/uber-go/zap/pull/364 +[#371]: https://github.com/uber-go/zap/pull/371 +[#362]: https://github.com/uber-go/zap/pull/362 +[#369]: https://github.com/uber-go/zap/pull/369 +[#347]: https://github.com/uber-go/zap/pull/347 +[#373]: https://github.com/uber-go/zap/pull/373 +[#348]: https://github.com/uber-go/zap/pull/348 +[#327]: https://github.com/uber-go/zap/pull/327 +[#376]: https://github.com/uber-go/zap/pull/376 +[#346]: https://github.com/uber-go/zap/pull/346 +[#365]: https://github.com/uber-go/zap/pull/365 +[#372]: https://github.com/uber-go/zap/pull/372 +[#385]: https://github.com/uber-go/zap/pull/385 +[#396]: https://github.com/uber-go/zap/pull/396 +[#386]: https://github.com/uber-go/zap/pull/386 +[#402]: https://github.com/uber-go/zap/pull/402 +[#415]: https://github.com/uber-go/zap/pull/415 +[#416]: https://github.com/uber-go/zap/pull/416 +[#424]: https://github.com/uber-go/zap/pull/424 +[#425]: https://github.com/uber-go/zap/pull/425 +[#431]: https://github.com/uber-go/zap/pull/431 +[#435]: https://github.com/uber-go/zap/pull/435 +[#444]: https://github.com/uber-go/zap/pull/444 +[#477]: https://github.com/uber-go/zap/pull/477 +[#465]: https://github.com/uber-go/zap/pull/465 +[#460]: https://github.com/uber-go/zap/pull/460 +[#470]: https://github.com/uber-go/zap/pull/470 +[#487]: https://github.com/uber-go/zap/pull/487 +[#490]: https://github.com/uber-go/zap/pull/490 +[#491]: https://github.com/uber-go/zap/pull/491 +[#504]: https://github.com/uber-go/zap/pull/504 +[#508]: https://github.com/uber-go/zap/pull/508 +[#518]: https://github.com/uber-go/zap/pull/518 +[#577]: https://github.com/uber-go/zap/pull/577 +[#574]: https://github.com/uber-go/zap/pull/574 +[#602]: https://github.com/uber-go/zap/pull/602 +[#572]: https://github.com/uber-go/zap/pull/572 +[#606]: https://github.com/uber-go/zap/pull/606 +[#614]: https://github.com/uber-go/zap/pull/614 +[#657]: https://github.com/uber-go/zap/pull/657 +[#706]: https://github.com/uber-go/zap/pull/706 +[#610]: https://github.com/uber-go/zap/pull/610 +[#675]: https://github.com/uber-go/zap/pull/675 +[#704]: https://github.com/uber-go/zap/pull/704 +[#725]: https://github.com/uber-go/zap/pull/725 +[#736]: https://github.com/uber-go/zap/pull/736 +[#751]: https://github.com/uber-go/zap/pull/751 +[#758]: https://github.com/uber-go/zap/pull/758 +[#771]: https://github.com/uber-go/zap/pull/771 +[#773]: https://github.com/uber-go/zap/pull/773 +[#775]: https://github.com/uber-go/zap/pull/775 +[#786]: https://github.com/uber-go/zap/pull/786 +[#791]: https://github.com/uber-go/zap/pull/791 +[#795]: https://github.com/uber-go/zap/pull/795 +[#799]: https://github.com/uber-go/zap/pull/799 +[#804]: https://github.com/uber-go/zap/pull/804 +[#812]: https://github.com/uber-go/zap/pull/812 +[#806]: https://github.com/uber-go/zap/pull/806 +[#813]: https://github.com/uber-go/zap/pull/813 +[#629]: 
https://github.com/uber-go/zap/pull/629 +[#697]: https://github.com/uber-go/zap/pull/697 +[#828]: https://github.com/uber-go/zap/pull/828 +[#835]: https://github.com/uber-go/zap/pull/835 +[#843]: https://github.com/uber-go/zap/pull/843 +[#844]: https://github.com/uber-go/zap/pull/844 +[#852]: https://github.com/uber-go/zap/pull/852 +[#854]: https://github.com/uber-go/zap/pull/854 +[#861]: https://github.com/uber-go/zap/pull/861 +[#862]: https://github.com/uber-go/zap/pull/862 +[#865]: https://github.com/uber-go/zap/pull/865 +[#867]: https://github.com/uber-go/zap/pull/867 +[#881]: https://github.com/uber-go/zap/pull/881 +[#903]: https://github.com/uber-go/zap/pull/903 +[#912]: https://github.com/uber-go/zap/pull/912 +[#913]: https://github.com/uber-go/zap/pull/913 +[#928]: https://github.com/uber-go/zap/pull/928 +[#931]: https://github.com/uber-go/zap/pull/931 +[#936]: https://github.com/uber-go/zap/pull/936 diff --git a/vendor/go.uber.org/zap/CODE_OF_CONDUCT.md b/vendor/go.uber.org/zap/CODE_OF_CONDUCT.md new file mode 100644 index 0000000000..e327d9aa5c --- /dev/null +++ b/vendor/go.uber.org/zap/CODE_OF_CONDUCT.md @@ -0,0 +1,75 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, +body size, disability, ethnicity, gender identity and expression, level of +experience, nationality, personal appearance, race, religion, or sexual +identity and orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or + advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an +appointed representative at an online or offline event. Representation of a +project may be further defined and clarified by project maintainers. 
+ +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at oss-conduct@uber.com. The project +team will review and investigate all complaints, and will respond in a way +that it deems appropriate to the circumstances. The project team is obligated +to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], +version 1.4, available at +[http://contributor-covenant.org/version/1/4][version]. + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/go.uber.org/zap/CONTRIBUTING.md b/vendor/go.uber.org/zap/CONTRIBUTING.md new file mode 100644 index 0000000000..ea02f3cae2 --- /dev/null +++ b/vendor/go.uber.org/zap/CONTRIBUTING.md @@ -0,0 +1,70 @@ +# Contributing + +We'd love your help making zap the very best structured logging library in Go! + +If you'd like to add new exported APIs, please [open an issue][open-issue] +describing your proposal — discussing API changes ahead of time makes +pull request review much smoother. In your issue, pull request, and any other +communications, please remember to treat your fellow contributors with +respect! We take our [code of conduct](CODE_OF_CONDUCT.md) seriously. + +Note that you'll need to sign [Uber's Contributor License Agreement][cla] +before we can accept any of your contributions. If necessary, a bot will remind +you to accept the CLA when you open your pull request. + +## Setup + +[Fork][fork], then clone the repository: + +```bash +mkdir -p $GOPATH/src/go.uber.org +cd $GOPATH/src/go.uber.org +git clone git@github.com:your_github_username/zap.git +cd zap +git remote add upstream https://github.com/uber-go/zap.git +git fetch upstream +``` + +Make sure that the tests and the linters pass: + +```bash +make test +make lint +``` + +## Making Changes + +Start by creating a new branch for your changes: + +```bash +cd $GOPATH/src/go.uber.org/zap +git checkout master +git fetch upstream +git rebase upstream/master +git checkout -b cool_new_feature +``` + +Make your changes, then ensure that `make lint` and `make test` still pass. If +you're satisfied with your changes, push them to your fork. + +```bash +git push origin cool_new_feature +``` + +Then use the GitHub UI to open a pull request. + +At this point, you're waiting on us to review your changes. We _try_ to respond +to issues and pull requests within a few business days, and we may suggest some +improvements or alternatives. Once your changes are approved, one of the +project maintainers will merge them. + +We're much more likely to approve your changes if you: + +- Add tests for new functionality. +- Write a [good commit message][commit-message]. +- Maintain backward compatibility. 
+ +[fork]: https://github.com/uber-go/zap/fork +[open-issue]: https://github.com/uber-go/zap/issues/new +[cla]: https://cla-assistant.io/uber-go/zap +[commit-message]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html diff --git a/vendor/go.uber.org/zap/FAQ.md b/vendor/go.uber.org/zap/FAQ.md new file mode 100644 index 0000000000..b183b20bc1 --- /dev/null +++ b/vendor/go.uber.org/zap/FAQ.md @@ -0,0 +1,164 @@ +# Frequently Asked Questions + +## Design + +### Why spend so much effort on logger performance? + +Of course, most applications won't notice the impact of a slow logger: they +already take tens or hundreds of milliseconds for each operation, so an extra +millisecond doesn't matter. + +On the other hand, why *not* make structured logging fast? The `SugaredLogger` +isn't any harder to use than other logging packages, and the `Logger` makes +structured logging possible in performance-sensitive contexts. Across a fleet +of Go microservices, making each application even slightly more efficient adds +up quickly. + +### Why aren't `Logger` and `SugaredLogger` interfaces? + +Unlike the familiar `io.Writer` and `http.Handler`, `Logger` and +`SugaredLogger` interfaces would include *many* methods. As [Rob Pike points +out][go-proverbs], "The bigger the interface, the weaker the abstraction." +Interfaces are also rigid — *any* change requires releasing a new major +version, since it breaks all third-party implementations. + +Making the `Logger` and `SugaredLogger` concrete types doesn't sacrifice much +abstraction, and it lets us add methods without introducing breaking changes. +Your applications should define and depend upon an interface that includes +just the methods you use. + +### Why are some of my logs missing? + +Logs are dropped intentionally by zap when sampling is enabled. The production +configuration (as returned by `NewProductionConfig()` enables sampling which will +cause repeated logs within a second to be sampled. See more details on why sampling +is enabled in [Why sample application logs](https://github.com/uber-go/zap/blob/master/FAQ.md#why-sample-application-logs). + +### Why sample application logs? + +Applications often experience runs of errors, either because of a bug or +because of a misbehaving user. Logging errors is usually a good idea, but it +can easily make this bad situation worse: not only is your application coping +with a flood of errors, it's also spending extra CPU cycles and I/O logging +those errors. Since writes are typically serialized, logging limits throughput +when you need it most. + +Sampling fixes this problem by dropping repetitive log entries. Under normal +conditions, your application writes out every entry. When similar entries are +logged hundreds or thousands of times each second, though, zap begins dropping +duplicates to preserve throughput. + +### Why do the structured logging APIs take a message in addition to fields? + +Subjectively, we find it helpful to accompany structured context with a brief +description. This isn't critical during development, but it makes debugging +and operating unfamiliar systems much easier. + +More concretely, zap's sampling algorithm uses the message to identify +duplicate entries. In our experience, this is a practical middle ground +between random sampling (which often drops the exact entry that you need while +debugging) and hashing the complete entry (which is prohibitively expensive). + +### Why include package-global loggers? 
+ +Since so many other logging packages include a global logger, many +applications aren't designed to accept loggers as explicit parameters. +Changing function signatures is often a breaking change, so zap includes +global loggers to simplify migration. + +Avoid them where possible. + +### Why include dedicated Panic and Fatal log levels? + +In general, application code should handle errors gracefully instead of using +`panic` or `os.Exit`. However, every rule has exceptions, and it's common to +crash when an error is truly unrecoverable. To avoid losing any information +— especially the reason for the crash — the logger must flush any +buffered entries before the process exits. + +Zap makes this easy by offering `Panic` and `Fatal` logging methods that +automatically flush before exiting. Of course, this doesn't guarantee that +logs will never be lost, but it eliminates a common error. + +See the discussion in uber-go/zap#207 for more details. + +### What's `DPanic`? + +`DPanic` stands for "panic in development." In development, it logs at +`PanicLevel`; otherwise, it logs at `ErrorLevel`. `DPanic` makes it easier to +catch errors that are theoretically possible, but shouldn't actually happen, +*without* crashing in production. + +If you've ever written code like this, you need `DPanic`: + +```go +if err != nil { + panic(fmt.Sprintf("shouldn't ever get here: %v", err)) +} +``` + +## Installation + +### What does the error `expects import "go.uber.org/zap"` mean? + +Either zap was installed incorrectly or you're referencing the wrong package +name in your code. + +Zap's source code happens to be hosted on GitHub, but the [import +path][import-path] is `go.uber.org/zap`. This gives us, the project +maintainers, the freedom to move the source code if necessary. However, it +means that you need to take a little care when installing and using the +package. + +If you follow two simple rules, everything should work: install zap with `go +get -u go.uber.org/zap`, and always import it in your code with `import +"go.uber.org/zap"`. Your code shouldn't contain *any* references to +`github.com/uber-go/zap`. + +## Usage + +### Does zap support log rotation? + +Zap doesn't natively support rotating log files, since we prefer to leave this +to an external program like `logrotate`. + +However, it's easy to integrate a log rotation package like +[`gopkg.in/natefinch/lumberjack.v2`][lumberjack] as a `zapcore.WriteSyncer`. + +```go +// lumberjack.Logger is already safe for concurrent use, so we don't need to +// lock it. +w := zapcore.AddSync(&lumberjack.Logger{ + Filename: "/var/log/myapp/foo.log", + MaxSize: 500, // megabytes + MaxBackups: 3, + MaxAge: 28, // days +}) +core := zapcore.NewCore( + zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()), + w, + zap.InfoLevel, +) +logger := zap.New(core) +``` + +## Extensions + +We'd love to support every logging need within zap itself, but we're only +familiar with a handful of log ingestion systems, flag-parsing packages, and +the like. Rather than merging code that we can't effectively debug and +support, we'd rather grow an ecosystem of zap extensions. 
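
Most extensions plug in through the zapcore interfaces or through registration hooks such as `zap.RegisterEncoder`. As a rough, illustrative sketch (the "console-capitals" name below is made up, and a real extension would return its own `zapcore.Encoder` implementation rather than delegating to the built-in console encoder):

```go
package main

import (
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	// Register an encoder under a hypothetical name; Config structs can then
	// select it via Encoding: "console-capitals".
	err := zap.RegisterEncoder("console-capitals", func(cfg zapcore.EncoderConfig) (zapcore.Encoder, error) {
		cfg.EncodeLevel = zapcore.CapitalLevelEncoder
		return zapcore.NewConsoleEncoder(cfg), nil
	})
	if err != nil {
		panic(err)
	}

	cfg := zap.NewDevelopmentConfig()
	cfg.Encoding = "console-capitals"
	logger, err := cfg.Build()
	if err != nil {
		panic(err)
	}
	defer logger.Sync()

	logger.Info("hello from a registered encoder")
}
```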
+ +We're aware of the following extensions, but haven't used them ourselves: + +| Package | Integration | +| --- | --- | +| `github.com/tchap/zapext` | Sentry, syslog | +| `github.com/fgrosse/zaptest` | Ginkgo | +| `github.com/blendle/zapdriver` | Stackdriver | +| `github.com/moul/zapgorm` | Gorm | +| `github.com/moul/zapfilter` | Advanced filtering rules | + +[go-proverbs]: https://go-proverbs.github.io/ +[import-path]: https://golang.org/cmd/go/#hdr-Remote_import_paths +[lumberjack]: https://godoc.org/gopkg.in/natefinch/lumberjack.v2 diff --git a/vendor/go.uber.org/zap/LICENSE.txt b/vendor/go.uber.org/zap/LICENSE.txt new file mode 100644 index 0000000000..6652bed45f --- /dev/null +++ b/vendor/go.uber.org/zap/LICENSE.txt @@ -0,0 +1,19 @@ +Copyright (c) 2016-2017 Uber Technologies, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/go.uber.org/zap/Makefile b/vendor/go.uber.org/zap/Makefile new file mode 100644 index 0000000000..9b1bc3b0e1 --- /dev/null +++ b/vendor/go.uber.org/zap/Makefile @@ -0,0 +1,73 @@ +export GOBIN ?= $(shell pwd)/bin + +GOLINT = $(GOBIN)/golint +STATICCHECK = $(GOBIN)/staticcheck +BENCH_FLAGS ?= -cpuprofile=cpu.pprof -memprofile=mem.pprof -benchmem + +# Directories containing independent Go modules. +# +# We track coverage only for the main module. +MODULE_DIRS = . ./benchmarks ./zapgrpc/internal/test + +# Many Go tools take file globs or directories as arguments instead of packages. +GO_FILES := $(shell \ + find . '(' -path '*/.*' -o -path './vendor' ')' -prune \ + -o -name '*.go' -print | cut -b3-) + +.PHONY: all +all: lint test + +.PHONY: lint +lint: $(GOLINT) $(STATICCHECK) + @rm -rf lint.log + @echo "Checking formatting..." + @gofmt -d -s $(GO_FILES) 2>&1 | tee lint.log + @echo "Checking vet..." + @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && go vet ./... 2>&1) &&) true | tee -a lint.log + @echo "Checking lint..." + @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && $(GOLINT) ./... 2>&1) &&) true | tee -a lint.log + @echo "Checking staticcheck..." + @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && $(STATICCHECK) ./... 2>&1) &&) true | tee -a lint.log + @echo "Checking for unresolved FIXMEs..." + @git grep -i fixme | grep -v -e Makefile | tee -a lint.log + @echo "Checking for license headers..." + @./checklicense.sh | tee -a lint.log + @[ ! -s lint.log ] + @echo "Checking 'go mod tidy'..." + @make tidy + @if ! 
git diff --quiet; then \ + echo "'go mod tidy' resulted in changes or working tree is dirty:"; \ + git --no-pager diff; \ + fi + +$(GOLINT): + cd tools && go install golang.org/x/lint/golint + +$(STATICCHECK): + cd tools && go install honnef.co/go/tools/cmd/staticcheck + +.PHONY: test +test: + @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && go test -race ./...) &&) true + +.PHONY: cover +cover: + go test -race -coverprofile=cover.out -coverpkg=./... ./... + go tool cover -html=cover.out -o cover.html + +.PHONY: bench +BENCH ?= . +bench: + @$(foreach dir,$(MODULE_DIRS), ( \ + cd $(dir) && \ + go list ./... | xargs -n1 go test -bench=$(BENCH) -run="^$$" $(BENCH_FLAGS) \ + ) &&) true + +.PHONY: updatereadme +updatereadme: + rm -f README.md + cat .readme.tmpl | go run internal/readme/readme.go > README.md + +.PHONY: tidy +tidy: + @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && go mod tidy) &&) true diff --git a/vendor/go.uber.org/zap/README.md b/vendor/go.uber.org/zap/README.md new file mode 100644 index 0000000000..a553a428c8 --- /dev/null +++ b/vendor/go.uber.org/zap/README.md @@ -0,0 +1,133 @@ +# :zap: zap [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] + +Blazing fast, structured, leveled logging in Go. + +## Installation + +`go get -u go.uber.org/zap` + +Note that zap only supports the two most recent minor versions of Go. + +## Quick Start + +In contexts where performance is nice, but not critical, use the +`SugaredLogger`. It's 4-10x faster than other structured logging +packages and includes both structured and `printf`-style APIs. + +```go +logger, _ := zap.NewProduction() +defer logger.Sync() // flushes buffer, if any +sugar := logger.Sugar() +sugar.Infow("failed to fetch URL", + // Structured context as loosely typed key-value pairs. + "url", url, + "attempt", 3, + "backoff", time.Second, +) +sugar.Infof("Failed to fetch URL: %s", url) +``` + +When performance and type safety are critical, use the `Logger`. It's even +faster than the `SugaredLogger` and allocates far less, but it only supports +structured logging. + +```go +logger, _ := zap.NewProduction() +defer logger.Sync() +logger.Info("failed to fetch URL", + // Structured context as strongly typed Field values. + zap.String("url", url), + zap.Int("attempt", 3), + zap.Duration("backoff", time.Second), +) +``` + +See the [documentation][doc] and [FAQ](FAQ.md) for more details. + +## Performance + +For applications that log in the hot path, reflection-based serialization and +string formatting are prohibitively expensive — they're CPU-intensive +and make many small allocations. Put differently, using `encoding/json` and +`fmt.Fprintf` to log tons of `interface{}`s makes your application slow. + +Zap takes a different approach. It includes a reflection-free, zero-allocation +JSON encoder, and the base `Logger` strives to avoid serialization overhead +and allocations wherever possible. By building the high-level `SugaredLogger` +on that foundation, zap lets users _choose_ when they need to count every +allocation and when they'd prefer a more familiar, loosely typed API. + +As measured by its own [benchmarking suite][], not only is zap more performant +than comparable structured logging packages — it's also faster than the +standard library. 
Like all benchmarks, take these with a grain of salt.[1](#footnote-versions) + +Log a message and 10 fields: + +| Package | Time | Time % to zap | Objects Allocated | +| :------------------ | :---------: | :-----------: | :---------------: | +| :zap: zap | 2900 ns/op | +0% | 5 allocs/op | +| :zap: zap (sugared) | 3475 ns/op | +20% | 10 allocs/op | +| zerolog | 10639 ns/op | +267% | 32 allocs/op | +| go-kit | 14434 ns/op | +398% | 59 allocs/op | +| logrus | 17104 ns/op | +490% | 81 allocs/op | +| apex/log | 32424 ns/op | +1018% | 66 allocs/op | +| log15 | 33579 ns/op | +1058% | 76 allocs/op | + +Log a message with a logger that already has 10 fields of context: + +| Package | Time | Time % to zap | Objects Allocated | +| :------------------ | :---------: | :-----------: | :---------------: | +| :zap: zap | 373 ns/op | +0% | 0 allocs/op | +| :zap: zap (sugared) | 452 ns/op | +21% | 1 allocs/op | +| zerolog | 288 ns/op | -23% | 0 allocs/op | +| go-kit | 11785 ns/op | +3060% | 58 allocs/op | +| logrus | 19629 ns/op | +5162% | 70 allocs/op | +| log15 | 21866 ns/op | +5762% | 72 allocs/op | +| apex/log | 30890 ns/op | +8182% | 55 allocs/op | + +Log a static string, without any context or `printf`-style templating: + +| Package | Time | Time % to zap | Objects Allocated | +| :------------------ | :--------: | :-----------: | :---------------: | +| :zap: zap | 381 ns/op | +0% | 0 allocs/op | +| :zap: zap (sugared) | 410 ns/op | +8% | 1 allocs/op | +| zerolog | 369 ns/op | -3% | 0 allocs/op | +| standard library | 385 ns/op | +1% | 2 allocs/op | +| go-kit | 606 ns/op | +59% | 11 allocs/op | +| logrus | 1730 ns/op | +354% | 25 allocs/op | +| apex/log | 1998 ns/op | +424% | 7 allocs/op | +| log15 | 4546 ns/op | +1093% | 22 allocs/op | + +## Development Status: Stable + +All APIs are finalized, and no breaking changes will be made in the 1.x series +of releases. Users of semver-aware dependency management systems should pin +zap to `^1`. + +## Contributing + +We encourage and support an active, healthy community of contributors — +including you! Details are in the [contribution guide](CONTRIBUTING.md) and +the [code of conduct](CODE_OF_CONDUCT.md). The zap maintainers keep an eye on +issues and pull requests, but you can also report any negative conduct to +oss-conduct@uber.com. That email list is a private, safe space; even the zap +maintainers don't have access, so don't hesitate to hold us to a high +standard. + +
+ +Released under the [MIT License](LICENSE.txt). + +1 In particular, keep in mind that we may be +benchmarking against slightly older versions of other packages. Versions are +pinned in the [benchmarks/go.mod][] file. [↩](#anchor-versions) + +[doc-img]: https://pkg.go.dev/badge/go.uber.org/zap +[doc]: https://pkg.go.dev/go.uber.org/zap +[ci-img]: https://github.com/uber-go/zap/actions/workflows/go.yml/badge.svg +[ci]: https://github.com/uber-go/zap/actions/workflows/go.yml +[cov-img]: https://codecov.io/gh/uber-go/zap/branch/master/graph/badge.svg +[cov]: https://codecov.io/gh/uber-go/zap +[benchmarking suite]: https://github.com/uber-go/zap/tree/master/benchmarks +[benchmarks/go.mod]: https://github.com/uber-go/zap/blob/master/benchmarks/go.mod diff --git a/vendor/go.uber.org/zap/array.go b/vendor/go.uber.org/zap/array.go new file mode 100644 index 0000000000..5be3704a3e --- /dev/null +++ b/vendor/go.uber.org/zap/array.go @@ -0,0 +1,320 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "time" + + "go.uber.org/zap/zapcore" +) + +// Array constructs a field with the given key and ArrayMarshaler. It provides +// a flexible, but still type-safe and efficient, way to add array-like types +// to the logging context. The struct's MarshalLogArray method is called lazily. +func Array(key string, val zapcore.ArrayMarshaler) Field { + return Field{Key: key, Type: zapcore.ArrayMarshalerType, Interface: val} +} + +// Bools constructs a field that carries a slice of bools. +func Bools(key string, bs []bool) Field { + return Array(key, bools(bs)) +} + +// ByteStrings constructs a field that carries a slice of []byte, each of which +// must be UTF-8 encoded text. +func ByteStrings(key string, bss [][]byte) Field { + return Array(key, byteStringsArray(bss)) +} + +// Complex128s constructs a field that carries a slice of complex numbers. +func Complex128s(key string, nums []complex128) Field { + return Array(key, complex128s(nums)) +} + +// Complex64s constructs a field that carries a slice of complex numbers. +func Complex64s(key string, nums []complex64) Field { + return Array(key, complex64s(nums)) +} + +// Durations constructs a field that carries a slice of time.Durations. +func Durations(key string, ds []time.Duration) Field { + return Array(key, durations(ds)) +} + +// Float64s constructs a field that carries a slice of floats. 
+func Float64s(key string, nums []float64) Field { + return Array(key, float64s(nums)) +} + +// Float32s constructs a field that carries a slice of floats. +func Float32s(key string, nums []float32) Field { + return Array(key, float32s(nums)) +} + +// Ints constructs a field that carries a slice of integers. +func Ints(key string, nums []int) Field { + return Array(key, ints(nums)) +} + +// Int64s constructs a field that carries a slice of integers. +func Int64s(key string, nums []int64) Field { + return Array(key, int64s(nums)) +} + +// Int32s constructs a field that carries a slice of integers. +func Int32s(key string, nums []int32) Field { + return Array(key, int32s(nums)) +} + +// Int16s constructs a field that carries a slice of integers. +func Int16s(key string, nums []int16) Field { + return Array(key, int16s(nums)) +} + +// Int8s constructs a field that carries a slice of integers. +func Int8s(key string, nums []int8) Field { + return Array(key, int8s(nums)) +} + +// Strings constructs a field that carries a slice of strings. +func Strings(key string, ss []string) Field { + return Array(key, stringArray(ss)) +} + +// Times constructs a field that carries a slice of time.Times. +func Times(key string, ts []time.Time) Field { + return Array(key, times(ts)) +} + +// Uints constructs a field that carries a slice of unsigned integers. +func Uints(key string, nums []uint) Field { + return Array(key, uints(nums)) +} + +// Uint64s constructs a field that carries a slice of unsigned integers. +func Uint64s(key string, nums []uint64) Field { + return Array(key, uint64s(nums)) +} + +// Uint32s constructs a field that carries a slice of unsigned integers. +func Uint32s(key string, nums []uint32) Field { + return Array(key, uint32s(nums)) +} + +// Uint16s constructs a field that carries a slice of unsigned integers. +func Uint16s(key string, nums []uint16) Field { + return Array(key, uint16s(nums)) +} + +// Uint8s constructs a field that carries a slice of unsigned integers. +func Uint8s(key string, nums []uint8) Field { + return Array(key, uint8s(nums)) +} + +// Uintptrs constructs a field that carries a slice of pointer addresses. +func Uintptrs(key string, us []uintptr) Field { + return Array(key, uintptrs(us)) +} + +// Errors constructs a field that carries a slice of errors. 
+func Errors(key string, errs []error) Field { + return Array(key, errArray(errs)) +} + +type bools []bool + +func (bs bools) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range bs { + arr.AppendBool(bs[i]) + } + return nil +} + +type byteStringsArray [][]byte + +func (bss byteStringsArray) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range bss { + arr.AppendByteString(bss[i]) + } + return nil +} + +type complex128s []complex128 + +func (nums complex128s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendComplex128(nums[i]) + } + return nil +} + +type complex64s []complex64 + +func (nums complex64s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendComplex64(nums[i]) + } + return nil +} + +type durations []time.Duration + +func (ds durations) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range ds { + arr.AppendDuration(ds[i]) + } + return nil +} + +type float64s []float64 + +func (nums float64s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendFloat64(nums[i]) + } + return nil +} + +type float32s []float32 + +func (nums float32s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendFloat32(nums[i]) + } + return nil +} + +type ints []int + +func (nums ints) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendInt(nums[i]) + } + return nil +} + +type int64s []int64 + +func (nums int64s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendInt64(nums[i]) + } + return nil +} + +type int32s []int32 + +func (nums int32s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendInt32(nums[i]) + } + return nil +} + +type int16s []int16 + +func (nums int16s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendInt16(nums[i]) + } + return nil +} + +type int8s []int8 + +func (nums int8s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendInt8(nums[i]) + } + return nil +} + +type stringArray []string + +func (ss stringArray) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range ss { + arr.AppendString(ss[i]) + } + return nil +} + +type times []time.Time + +func (ts times) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range ts { + arr.AppendTime(ts[i]) + } + return nil +} + +type uints []uint + +func (nums uints) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendUint(nums[i]) + } + return nil +} + +type uint64s []uint64 + +func (nums uint64s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendUint64(nums[i]) + } + return nil +} + +type uint32s []uint32 + +func (nums uint32s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendUint32(nums[i]) + } + return nil +} + +type uint16s []uint16 + +func (nums uint16s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendUint16(nums[i]) + } + return nil +} + +type uint8s []uint8 + +func (nums uint8s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendUint8(nums[i]) + } + return nil +} + +type uintptrs []uintptr + +func (nums uintptrs) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendUintptr(nums[i]) + } + return nil +} diff --git a/vendor/go.uber.org/zap/array_go118.go 
b/vendor/go.uber.org/zap/array_go118.go new file mode 100644 index 0000000000..d0d2c49d69 --- /dev/null +++ b/vendor/go.uber.org/zap/array_go118.go @@ -0,0 +1,156 @@ +// Copyright (c) 2022 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +//go:build go1.18 +// +build go1.18 + +package zap + +import ( + "fmt" + + "go.uber.org/zap/zapcore" +) + +// Objects constructs a field with the given key, holding a list of the +// provided objects that can be marshaled by Zap. +// +// Note that these objects must implement zapcore.ObjectMarshaler directly. +// That is, if you're trying to marshal a []Request, the MarshalLogObject +// method must be declared on the Request type, not its pointer (*Request). +// If it's on the pointer, use ObjectValues. +// +// Given an object that implements MarshalLogObject on the value receiver, you +// can log a slice of those objects with Objects like so: +// +// type Author struct{ ... } +// func (a Author) MarshalLogObject(enc zapcore.ObjectEncoder) error +// +// var authors []Author = ... +// logger.Info("loading article", zap.Objects("authors", authors)) +// +// Similarly, given a type that implements MarshalLogObject on its pointer +// receiver, you can log a slice of pointers to that object with Objects like +// so: +// +// type Request struct{ ... } +// func (r *Request) MarshalLogObject(enc zapcore.ObjectEncoder) error +// +// var requests []*Request = ... +// logger.Info("sending requests", zap.Objects("requests", requests)) +// +// If instead, you have a slice of values of such an object, use the +// ObjectValues constructor. +// +// var requests []Request = ... +// logger.Info("sending requests", zap.ObjectValues("requests", requests)) +func Objects[T zapcore.ObjectMarshaler](key string, values []T) Field { + return Array(key, objects[T](values)) +} + +type objects[T zapcore.ObjectMarshaler] []T + +func (os objects[T]) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for _, o := range os { + if err := arr.AppendObject(o); err != nil { + return err + } + } + return nil +} + +// ObjectMarshalerPtr is a constraint that specifies that the given type +// implements zapcore.ObjectMarshaler on a pointer receiver. +type ObjectMarshalerPtr[T any] interface { + *T + zapcore.ObjectMarshaler +} + +// ObjectValues constructs a field with the given key, holding a list of the +// provided objects, where pointers to these objects can be marshaled by Zap. 
+// +// Note that pointers to these objects must implement zapcore.ObjectMarshaler. +// That is, if you're trying to marshal a []Request, the MarshalLogObject +// method must be declared on the *Request type, not the value (Request). +// If it's on the value, use Objects. +// +// Given an object that implements MarshalLogObject on the pointer receiver, +// you can log a slice of those objects with ObjectValues like so: +// +// type Request struct{ ... } +// func (r *Request) MarshalLogObject(enc zapcore.ObjectEncoder) error +// +// var requests []Request = ... +// logger.Info("sending requests", zap.ObjectValues("requests", requests)) +// +// If instead, you have a slice of pointers of such an object, use the Objects +// field constructor. +// +// var requests []*Request = ... +// logger.Info("sending requests", zap.Objects("requests", requests)) +func ObjectValues[T any, P ObjectMarshalerPtr[T]](key string, values []T) Field { + return Array(key, objectValues[T, P](values)) +} + +type objectValues[T any, P ObjectMarshalerPtr[T]] []T + +func (os objectValues[T, P]) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range os { + // It is necessary for us to explicitly reference the "P" type. + // We cannot simply pass "&os[i]" to AppendObject because its type + // is "*T", which the type system does not consider as + // implementing ObjectMarshaler. + // Only the type "P" satisfies ObjectMarshaler, which we have + // to convert "*T" to explicitly. + var p P = &os[i] + if err := arr.AppendObject(p); err != nil { + return err + } + } + return nil +} + +// Stringers constructs a field with the given key, holding a list of the +// output provided by the value's String method +// +// Given an object that implements String on the value receiver, you +// can log a slice of those objects with Objects like so: +// +// type Request struct{ ... } +// func (a Request) String() string +// +// var requests []Request = ... +// logger.Info("sending requests", zap.Stringers("requests", requests)) +// +// Note that these objects must implement fmt.Stringer directly. +// That is, if you're trying to marshal a []Request, the String method +// must be declared on the Request type, not its pointer (*Request). +func Stringers[T fmt.Stringer](key string, values []T) Field { + return Array(key, stringers[T](values)) +} + +type stringers[T fmt.Stringer] []T + +func (os stringers[T]) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for _, o := range os { + arr.AppendString(o.String()) + } + return nil +} diff --git a/vendor/go.uber.org/zap/buffer/buffer.go b/vendor/go.uber.org/zap/buffer/buffer.go new file mode 100644 index 0000000000..9e929cd98e --- /dev/null +++ b/vendor/go.uber.org/zap/buffer/buffer.go @@ -0,0 +1,141 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package buffer provides a thin wrapper around a byte slice. Unlike the +// standard library's bytes.Buffer, it supports a portion of the strconv +// package's zero-allocation formatters. +package buffer // import "go.uber.org/zap/buffer" + +import ( + "strconv" + "time" +) + +const _size = 1024 // by default, create 1 KiB buffers + +// Buffer is a thin wrapper around a byte slice. It's intended to be pooled, so +// the only way to construct one is via a Pool. +type Buffer struct { + bs []byte + pool Pool +} + +// AppendByte writes a single byte to the Buffer. +func (b *Buffer) AppendByte(v byte) { + b.bs = append(b.bs, v) +} + +// AppendString writes a string to the Buffer. +func (b *Buffer) AppendString(s string) { + b.bs = append(b.bs, s...) +} + +// AppendInt appends an integer to the underlying buffer (assuming base 10). +func (b *Buffer) AppendInt(i int64) { + b.bs = strconv.AppendInt(b.bs, i, 10) +} + +// AppendTime appends the time formatted using the specified layout. +func (b *Buffer) AppendTime(t time.Time, layout string) { + b.bs = t.AppendFormat(b.bs, layout) +} + +// AppendUint appends an unsigned integer to the underlying buffer (assuming +// base 10). +func (b *Buffer) AppendUint(i uint64) { + b.bs = strconv.AppendUint(b.bs, i, 10) +} + +// AppendBool appends a bool to the underlying buffer. +func (b *Buffer) AppendBool(v bool) { + b.bs = strconv.AppendBool(b.bs, v) +} + +// AppendFloat appends a float to the underlying buffer. It doesn't quote NaN +// or +/- Inf. +func (b *Buffer) AppendFloat(f float64, bitSize int) { + b.bs = strconv.AppendFloat(b.bs, f, 'f', -1, bitSize) +} + +// Len returns the length of the underlying byte slice. +func (b *Buffer) Len() int { + return len(b.bs) +} + +// Cap returns the capacity of the underlying byte slice. +func (b *Buffer) Cap() int { + return cap(b.bs) +} + +// Bytes returns a mutable reference to the underlying byte slice. +func (b *Buffer) Bytes() []byte { + return b.bs +} + +// String returns a string copy of the underlying byte slice. +func (b *Buffer) String() string { + return string(b.bs) +} + +// Reset resets the underlying byte slice. Subsequent writes re-use the slice's +// backing array. +func (b *Buffer) Reset() { + b.bs = b.bs[:0] +} + +// Write implements io.Writer. +func (b *Buffer) Write(bs []byte) (int, error) { + b.bs = append(b.bs, bs...) + return len(bs), nil +} + +// WriteByte writes a single byte to the Buffer. +// +// Error returned is always nil, function signature is compatible +// with bytes.Buffer and bufio.Writer +func (b *Buffer) WriteByte(v byte) error { + b.AppendByte(v) + return nil +} + +// WriteString writes a string to the Buffer. +// +// Error returned is always nil, function signature is compatible +// with bytes.Buffer and bufio.Writer +func (b *Buffer) WriteString(s string) (int, error) { + b.AppendString(s) + return len(s), nil +} + +// TrimNewline trims any final "\n" byte from the end of the buffer. 
+func (b *Buffer) TrimNewline() { + if i := len(b.bs) - 1; i >= 0 { + if b.bs[i] == '\n' { + b.bs = b.bs[:i] + } + } +} + +// Free returns the Buffer to its Pool. +// +// Callers must not retain references to the Buffer after calling Free. +func (b *Buffer) Free() { + b.pool.put(b) +} diff --git a/vendor/go.uber.org/zap/buffer/pool.go b/vendor/go.uber.org/zap/buffer/pool.go new file mode 100644 index 0000000000..8fb3e202cf --- /dev/null +++ b/vendor/go.uber.org/zap/buffer/pool.go @@ -0,0 +1,49 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package buffer + +import "sync" + +// A Pool is a type-safe wrapper around a sync.Pool. +type Pool struct { + p *sync.Pool +} + +// NewPool constructs a new Pool. +func NewPool() Pool { + return Pool{p: &sync.Pool{ + New: func() interface{} { + return &Buffer{bs: make([]byte, 0, _size)} + }, + }} +} + +// Get retrieves a Buffer from the pool, creating one if necessary. +func (p Pool) Get() *Buffer { + buf := p.p.Get().(*Buffer) + buf.Reset() + buf.pool = p + return buf +} + +func (p Pool) put(buf *Buffer) { + p.p.Put(buf) +} diff --git a/vendor/go.uber.org/zap/checklicense.sh b/vendor/go.uber.org/zap/checklicense.sh new file mode 100644 index 0000000000..345ac8b89a --- /dev/null +++ b/vendor/go.uber.org/zap/checklicense.sh @@ -0,0 +1,17 @@ +#!/bin/bash -e + +ERROR_COUNT=0 +while read -r file +do + case "$(head -1 "${file}")" in + *"Copyright (c) "*" Uber Technologies, Inc.") + # everything's cool + ;; + *) + echo "$file is missing license header." + (( ERROR_COUNT++ )) + ;; + esac +done < <(git ls-files "*\.go") + +exit $ERROR_COUNT diff --git a/vendor/go.uber.org/zap/config.go b/vendor/go.uber.org/zap/config.go new file mode 100644 index 0000000000..ee6096766a --- /dev/null +++ b/vendor/go.uber.org/zap/config.go @@ -0,0 +1,264 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "errors" + "sort" + "time" + + "go.uber.org/zap/zapcore" +) + +// SamplingConfig sets a sampling strategy for the logger. Sampling caps the +// global CPU and I/O load that logging puts on your process while attempting +// to preserve a representative subset of your logs. +// +// If specified, the Sampler will invoke the Hook after each decision. +// +// Values configured here are per-second. See zapcore.NewSamplerWithOptions for +// details. +type SamplingConfig struct { + Initial int `json:"initial" yaml:"initial"` + Thereafter int `json:"thereafter" yaml:"thereafter"` + Hook func(zapcore.Entry, zapcore.SamplingDecision) `json:"-" yaml:"-"` +} + +// Config offers a declarative way to construct a logger. It doesn't do +// anything that can't be done with New, Options, and the various +// zapcore.WriteSyncer and zapcore.Core wrappers, but it's a simpler way to +// toggle common options. +// +// Note that Config intentionally supports only the most common options. More +// unusual logging setups (logging to network connections or message queues, +// splitting output between multiple files, etc.) are possible, but require +// direct use of the zapcore package. For sample code, see the package-level +// BasicConfiguration and AdvancedConfiguration examples. +// +// For an example showing runtime log level changes, see the documentation for +// AtomicLevel. +type Config struct { + // Level is the minimum enabled logging level. Note that this is a dynamic + // level, so calling Config.Level.SetLevel will atomically change the log + // level of all loggers descended from this config. + Level AtomicLevel `json:"level" yaml:"level"` + // Development puts the logger in development mode, which changes the + // behavior of DPanicLevel and takes stacktraces more liberally. + Development bool `json:"development" yaml:"development"` + // DisableCaller stops annotating logs with the calling function's file + // name and line number. By default, all logs are annotated. + DisableCaller bool `json:"disableCaller" yaml:"disableCaller"` + // DisableStacktrace completely disables automatic stacktrace capturing. By + // default, stacktraces are captured for WarnLevel and above logs in + // development and ErrorLevel and above in production. + DisableStacktrace bool `json:"disableStacktrace" yaml:"disableStacktrace"` + // Sampling sets a sampling policy. A nil SamplingConfig disables sampling. + Sampling *SamplingConfig `json:"sampling" yaml:"sampling"` + // Encoding sets the logger's encoding. Valid values are "json" and + // "console", as well as any third-party encodings registered via + // RegisterEncoder. + Encoding string `json:"encoding" yaml:"encoding"` + // EncoderConfig sets options for the chosen encoder. See + // zapcore.EncoderConfig for details. + EncoderConfig zapcore.EncoderConfig `json:"encoderConfig" yaml:"encoderConfig"` + // OutputPaths is a list of URLs or file paths to write logging output to. + // See Open for details. 
+ OutputPaths []string `json:"outputPaths" yaml:"outputPaths"` + // ErrorOutputPaths is a list of URLs to write internal logger errors to. + // The default is standard error. + // + // Note that this setting only affects internal errors; for sample code that + // sends error-level logs to a different location from info- and debug-level + // logs, see the package-level AdvancedConfiguration example. + ErrorOutputPaths []string `json:"errorOutputPaths" yaml:"errorOutputPaths"` + // InitialFields is a collection of fields to add to the root logger. + InitialFields map[string]interface{} `json:"initialFields" yaml:"initialFields"` +} + +// NewProductionEncoderConfig returns an opinionated EncoderConfig for +// production environments. +func NewProductionEncoderConfig() zapcore.EncoderConfig { + return zapcore.EncoderConfig{ + TimeKey: "ts", + LevelKey: "level", + NameKey: "logger", + CallerKey: "caller", + FunctionKey: zapcore.OmitKey, + MessageKey: "msg", + StacktraceKey: "stacktrace", + LineEnding: zapcore.DefaultLineEnding, + EncodeLevel: zapcore.LowercaseLevelEncoder, + EncodeTime: zapcore.EpochTimeEncoder, + EncodeDuration: zapcore.SecondsDurationEncoder, + EncodeCaller: zapcore.ShortCallerEncoder, + } +} + +// NewProductionConfig is a reasonable production logging configuration. +// Logging is enabled at InfoLevel and above. +// +// It uses a JSON encoder, writes to standard error, and enables sampling. +// Stacktraces are automatically included on logs of ErrorLevel and above. +func NewProductionConfig() Config { + return Config{ + Level: NewAtomicLevelAt(InfoLevel), + Development: false, + Sampling: &SamplingConfig{ + Initial: 100, + Thereafter: 100, + }, + Encoding: "json", + EncoderConfig: NewProductionEncoderConfig(), + OutputPaths: []string{"stderr"}, + ErrorOutputPaths: []string{"stderr"}, + } +} + +// NewDevelopmentEncoderConfig returns an opinionated EncoderConfig for +// development environments. +func NewDevelopmentEncoderConfig() zapcore.EncoderConfig { + return zapcore.EncoderConfig{ + // Keys can be anything except the empty string. + TimeKey: "T", + LevelKey: "L", + NameKey: "N", + CallerKey: "C", + FunctionKey: zapcore.OmitKey, + MessageKey: "M", + StacktraceKey: "S", + LineEnding: zapcore.DefaultLineEnding, + EncodeLevel: zapcore.CapitalLevelEncoder, + EncodeTime: zapcore.ISO8601TimeEncoder, + EncodeDuration: zapcore.StringDurationEncoder, + EncodeCaller: zapcore.ShortCallerEncoder, + } +} + +// NewDevelopmentConfig is a reasonable development logging configuration. +// Logging is enabled at DebugLevel and above. +// +// It enables development mode (which makes DPanicLevel logs panic), uses a +// console encoder, writes to standard error, and disables sampling. +// Stacktraces are automatically included on logs of WarnLevel and above. +func NewDevelopmentConfig() Config { + return Config{ + Level: NewAtomicLevelAt(DebugLevel), + Development: true, + Encoding: "console", + EncoderConfig: NewDevelopmentEncoderConfig(), + OutputPaths: []string{"stderr"}, + ErrorOutputPaths: []string{"stderr"}, + } +} + +// Build constructs a logger from the Config and Options. 
+func (cfg Config) Build(opts ...Option) (*Logger, error) { + enc, err := cfg.buildEncoder() + if err != nil { + return nil, err + } + + sink, errSink, err := cfg.openSinks() + if err != nil { + return nil, err + } + + if cfg.Level == (AtomicLevel{}) { + return nil, errors.New("missing Level") + } + + log := New( + zapcore.NewCore(enc, sink, cfg.Level), + cfg.buildOptions(errSink)..., + ) + if len(opts) > 0 { + log = log.WithOptions(opts...) + } + return log, nil +} + +func (cfg Config) buildOptions(errSink zapcore.WriteSyncer) []Option { + opts := []Option{ErrorOutput(errSink)} + + if cfg.Development { + opts = append(opts, Development()) + } + + if !cfg.DisableCaller { + opts = append(opts, AddCaller()) + } + + stackLevel := ErrorLevel + if cfg.Development { + stackLevel = WarnLevel + } + if !cfg.DisableStacktrace { + opts = append(opts, AddStacktrace(stackLevel)) + } + + if scfg := cfg.Sampling; scfg != nil { + opts = append(opts, WrapCore(func(core zapcore.Core) zapcore.Core { + var samplerOpts []zapcore.SamplerOption + if scfg.Hook != nil { + samplerOpts = append(samplerOpts, zapcore.SamplerHook(scfg.Hook)) + } + return zapcore.NewSamplerWithOptions( + core, + time.Second, + cfg.Sampling.Initial, + cfg.Sampling.Thereafter, + samplerOpts..., + ) + })) + } + + if len(cfg.InitialFields) > 0 { + fs := make([]Field, 0, len(cfg.InitialFields)) + keys := make([]string, 0, len(cfg.InitialFields)) + for k := range cfg.InitialFields { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + fs = append(fs, Any(k, cfg.InitialFields[k])) + } + opts = append(opts, Fields(fs...)) + } + + return opts +} + +func (cfg Config) openSinks() (zapcore.WriteSyncer, zapcore.WriteSyncer, error) { + sink, closeOut, err := Open(cfg.OutputPaths...) + if err != nil { + return nil, nil, err + } + errSink, _, err := Open(cfg.ErrorOutputPaths...) + if err != nil { + closeOut() + return nil, nil, err + } + return sink, errSink, nil +} + +func (cfg Config) buildEncoder() (zapcore.Encoder, error) { + return newEncoder(cfg.Encoding, cfg.EncoderConfig) +} diff --git a/vendor/go.uber.org/zap/doc.go b/vendor/go.uber.org/zap/doc.go new file mode 100644 index 0000000000..3c50d7b4d3 --- /dev/null +++ b/vendor/go.uber.org/zap/doc.go @@ -0,0 +1,117 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package zap provides fast, structured, leveled logging. 
+// +// For applications that log in the hot path, reflection-based serialization +// and string formatting are prohibitively expensive - they're CPU-intensive +// and make many small allocations. Put differently, using json.Marshal and +// fmt.Fprintf to log tons of interface{} makes your application slow. +// +// Zap takes a different approach. It includes a reflection-free, +// zero-allocation JSON encoder, and the base Logger strives to avoid +// serialization overhead and allocations wherever possible. By building the +// high-level SugaredLogger on that foundation, zap lets users choose when +// they need to count every allocation and when they'd prefer a more familiar, +// loosely typed API. +// +// # Choosing a Logger +// +// In contexts where performance is nice, but not critical, use the +// SugaredLogger. It's 4-10x faster than other structured logging packages and +// supports both structured and printf-style logging. Like log15 and go-kit, +// the SugaredLogger's structured logging APIs are loosely typed and accept a +// variadic number of key-value pairs. (For more advanced use cases, they also +// accept strongly typed fields - see the SugaredLogger.With documentation for +// details.) +// +// sugar := zap.NewExample().Sugar() +// defer sugar.Sync() +// sugar.Infow("failed to fetch URL", +// "url", "http://example.com", +// "attempt", 3, +// "backoff", time.Second, +// ) +// sugar.Infof("failed to fetch URL: %s", "http://example.com") +// +// By default, loggers are unbuffered. However, since zap's low-level APIs +// allow buffering, calling Sync before letting your process exit is a good +// habit. +// +// In the rare contexts where every microsecond and every allocation matter, +// use the Logger. It's even faster than the SugaredLogger and allocates far +// less, but it only supports strongly-typed, structured logging. +// +// logger := zap.NewExample() +// defer logger.Sync() +// logger.Info("failed to fetch URL", +// zap.String("url", "http://example.com"), +// zap.Int("attempt", 3), +// zap.Duration("backoff", time.Second), +// ) +// +// Choosing between the Logger and SugaredLogger doesn't need to be an +// application-wide decision: converting between the two is simple and +// inexpensive. +// +// logger := zap.NewExample() +// defer logger.Sync() +// sugar := logger.Sugar() +// plain := sugar.Desugar() +// +// # Configuring Zap +// +// The simplest way to build a Logger is to use zap's opinionated presets: +// NewExample, NewProduction, and NewDevelopment. These presets build a logger +// with a single function call: +// +// logger, err := zap.NewProduction() +// if err != nil { +// log.Fatalf("can't initialize zap logger: %v", err) +// } +// defer logger.Sync() +// +// Presets are fine for small projects, but larger projects and organizations +// naturally require a bit more customization. For most users, zap's Config +// struct strikes the right balance between flexibility and convenience. See +// the package-level BasicConfiguration example for sample code. +// +// More unusual configurations (splitting output between files, sending logs +// to a message queue, etc.) are possible, but require direct use of +// go.uber.org/zap/zapcore. See the package-level AdvancedConfiguration +// example for sample code. +// +// # Extending Zap +// +// The zap package itself is a relatively thin wrapper around the interfaces +// in go.uber.org/zap/zapcore. 
Extending zap to support a new encoding (e.g., +// BSON), a new log sink (e.g., Kafka), or something more exotic (perhaps an +// exception aggregation service, like Sentry or Rollbar) typically requires +// implementing the zapcore.Encoder, zapcore.WriteSyncer, or zapcore.Core +// interfaces. See the zapcore documentation for details. +// +// Similarly, package authors can use the high-performance Encoder and Core +// implementations in the zapcore package to build their own loggers. +// +// # Frequently Asked Questions +// +// An FAQ covering everything from installation errors to design decisions is +// available at https://github.com/uber-go/zap/blob/master/FAQ.md. +package zap // import "go.uber.org/zap" diff --git a/vendor/go.uber.org/zap/encoder.go b/vendor/go.uber.org/zap/encoder.go new file mode 100644 index 0000000000..caa04ceefd --- /dev/null +++ b/vendor/go.uber.org/zap/encoder.go @@ -0,0 +1,79 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "errors" + "fmt" + "sync" + + "go.uber.org/zap/zapcore" +) + +var ( + errNoEncoderNameSpecified = errors.New("no encoder name specified") + + _encoderNameToConstructor = map[string]func(zapcore.EncoderConfig) (zapcore.Encoder, error){ + "console": func(encoderConfig zapcore.EncoderConfig) (zapcore.Encoder, error) { + return zapcore.NewConsoleEncoder(encoderConfig), nil + }, + "json": func(encoderConfig zapcore.EncoderConfig) (zapcore.Encoder, error) { + return zapcore.NewJSONEncoder(encoderConfig), nil + }, + } + _encoderMutex sync.RWMutex +) + +// RegisterEncoder registers an encoder constructor, which the Config struct +// can then reference. By default, the "json" and "console" encoders are +// registered. +// +// Attempting to register an encoder whose name is already taken returns an +// error. 
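A minimal sketch (not part of this patch) of how a program might use this registry: it registers the built-in JSON constructor under a made-up name and points Config.Encoding at it. The "custom-json" name and the log.Fatal error handling are assumptions for illustration only.

	package main

	import (
		"log"

		"go.uber.org/zap"
		"go.uber.org/zap/zapcore"
	)

	func main() {
		// "custom-json" is a hypothetical encoder name chosen for this sketch;
		// the constructor simply reuses the built-in JSON encoder.
		if err := zap.RegisterEncoder("custom-json", func(cfg zapcore.EncoderConfig) (zapcore.Encoder, error) {
			return zapcore.NewJSONEncoder(cfg), nil
		}); err != nil {
			log.Fatal(err)
		}

		cfg := zap.NewProductionConfig()
		cfg.Encoding = "custom-json" // Config.buildEncoder resolves this name via the registry
		logger := zap.Must(cfg.Build())
		defer logger.Sync()
		logger.Info("hello from the custom-registered encoder")
	}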
+func RegisterEncoder(name string, constructor func(zapcore.EncoderConfig) (zapcore.Encoder, error)) error { + _encoderMutex.Lock() + defer _encoderMutex.Unlock() + if name == "" { + return errNoEncoderNameSpecified + } + if _, ok := _encoderNameToConstructor[name]; ok { + return fmt.Errorf("encoder already registered for name %q", name) + } + _encoderNameToConstructor[name] = constructor + return nil +} + +func newEncoder(name string, encoderConfig zapcore.EncoderConfig) (zapcore.Encoder, error) { + if encoderConfig.TimeKey != "" && encoderConfig.EncodeTime == nil { + return nil, errors.New("missing EncodeTime in EncoderConfig") + } + + _encoderMutex.RLock() + defer _encoderMutex.RUnlock() + if name == "" { + return nil, errNoEncoderNameSpecified + } + constructor, ok := _encoderNameToConstructor[name] + if !ok { + return nil, fmt.Errorf("no encoder registered for name %q", name) + } + return constructor(encoderConfig) +} diff --git a/vendor/go.uber.org/zap/error.go b/vendor/go.uber.org/zap/error.go new file mode 100644 index 0000000000..65982a51e5 --- /dev/null +++ b/vendor/go.uber.org/zap/error.go @@ -0,0 +1,80 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "sync" + + "go.uber.org/zap/zapcore" +) + +var _errArrayElemPool = sync.Pool{New: func() interface{} { + return &errArrayElem{} +}} + +// Error is shorthand for the common idiom NamedError("error", err). +func Error(err error) Field { + return NamedError("error", err) +} + +// NamedError constructs a field that lazily stores err.Error() under the +// provided key. Errors which also implement fmt.Formatter (like those produced +// by github.com/pkg/errors) will also have their verbose representation stored +// under key+"Verbose". If passed a nil error, the field is a no-op. +// +// For the common case in which the key is simply "error", the Error function +// is shorter and less repetitive. +func NamedError(key string, err error) Field { + if err == nil { + return Skip() + } + return Field{Key: key, Type: zapcore.ErrorType, Interface: err} +} + +type errArray []error + +func (errs errArray) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range errs { + if errs[i] == nil { + continue + } + // To represent each error as an object with an "error" attribute and + // potentially an "errorVerbose" attribute, we need to wrap it in a + // type that implements LogObjectMarshaler. 
To prevent this from + // allocating, pool the wrapper type. + elem := _errArrayElemPool.Get().(*errArrayElem) + elem.error = errs[i] + arr.AppendObject(elem) + elem.error = nil + _errArrayElemPool.Put(elem) + } + return nil +} + +type errArrayElem struct { + error +} + +func (e *errArrayElem) MarshalLogObject(enc zapcore.ObjectEncoder) error { + // Re-use the error field's logic, which supports non-standard error types. + Error(e.error).AddTo(enc) + return nil +} diff --git a/vendor/go.uber.org/zap/field.go b/vendor/go.uber.org/zap/field.go new file mode 100644 index 0000000000..bbb745db5b --- /dev/null +++ b/vendor/go.uber.org/zap/field.go @@ -0,0 +1,549 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "fmt" + "math" + "time" + + "go.uber.org/zap/zapcore" +) + +// Field is an alias for Field. Aliasing this type dramatically +// improves the navigability of this package's API documentation. +type Field = zapcore.Field + +var ( + _minTimeInt64 = time.Unix(0, math.MinInt64) + _maxTimeInt64 = time.Unix(0, math.MaxInt64) +) + +// Skip constructs a no-op field, which is often useful when handling invalid +// inputs in other Field constructors. +func Skip() Field { + return Field{Type: zapcore.SkipType} +} + +// nilField returns a field which will marshal explicitly as nil. See motivation +// in https://github.com/uber-go/zap/issues/753 . If we ever make breaking +// changes and add zapcore.NilType and zapcore.ObjectEncoder.AddNil, the +// implementation here should be changed to reflect that. +func nilField(key string) Field { return Reflect(key, nil) } + +// Binary constructs a field that carries an opaque binary blob. +// +// Binary data is serialized in an encoding-appropriate format. For example, +// zap's JSON encoder base64-encodes binary blobs. To log UTF-8 encoded text, +// use ByteString. +func Binary(key string, val []byte) Field { + return Field{Key: key, Type: zapcore.BinaryType, Interface: val} +} + +// Bool constructs a field that carries a bool. +func Bool(key string, val bool) Field { + var ival int64 + if val { + ival = 1 + } + return Field{Key: key, Type: zapcore.BoolType, Integer: ival} +} + +// Boolp constructs a field that carries a *bool. The returned Field will safely +// and explicitly represent `nil` when appropriate. 
+func Boolp(key string, val *bool) Field { + if val == nil { + return nilField(key) + } + return Bool(key, *val) +} + +// ByteString constructs a field that carries UTF-8 encoded text as a []byte. +// To log opaque binary blobs (which aren't necessarily valid UTF-8), use +// Binary. +func ByteString(key string, val []byte) Field { + return Field{Key: key, Type: zapcore.ByteStringType, Interface: val} +} + +// Complex128 constructs a field that carries a complex number. Unlike most +// numeric fields, this costs an allocation (to convert the complex128 to +// interface{}). +func Complex128(key string, val complex128) Field { + return Field{Key: key, Type: zapcore.Complex128Type, Interface: val} +} + +// Complex128p constructs a field that carries a *complex128. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Complex128p(key string, val *complex128) Field { + if val == nil { + return nilField(key) + } + return Complex128(key, *val) +} + +// Complex64 constructs a field that carries a complex number. Unlike most +// numeric fields, this costs an allocation (to convert the complex64 to +// interface{}). +func Complex64(key string, val complex64) Field { + return Field{Key: key, Type: zapcore.Complex64Type, Interface: val} +} + +// Complex64p constructs a field that carries a *complex64. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Complex64p(key string, val *complex64) Field { + if val == nil { + return nilField(key) + } + return Complex64(key, *val) +} + +// Float64 constructs a field that carries a float64. The way the +// floating-point value is represented is encoder-dependent, so marshaling is +// necessarily lazy. +func Float64(key string, val float64) Field { + return Field{Key: key, Type: zapcore.Float64Type, Integer: int64(math.Float64bits(val))} +} + +// Float64p constructs a field that carries a *float64. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Float64p(key string, val *float64) Field { + if val == nil { + return nilField(key) + } + return Float64(key, *val) +} + +// Float32 constructs a field that carries a float32. The way the +// floating-point value is represented is encoder-dependent, so marshaling is +// necessarily lazy. +func Float32(key string, val float32) Field { + return Field{Key: key, Type: zapcore.Float32Type, Integer: int64(math.Float32bits(val))} +} + +// Float32p constructs a field that carries a *float32. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Float32p(key string, val *float32) Field { + if val == nil { + return nilField(key) + } + return Float32(key, *val) +} + +// Int constructs a field with the given key and value. +func Int(key string, val int) Field { + return Int64(key, int64(val)) +} + +// Intp constructs a field that carries a *int. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Intp(key string, val *int) Field { + if val == nil { + return nilField(key) + } + return Int(key, *val) +} + +// Int64 constructs a field with the given key and value. +func Int64(key string, val int64) Field { + return Field{Key: key, Type: zapcore.Int64Type, Integer: val} +} + +// Int64p constructs a field that carries a *int64. The returned Field will safely +// and explicitly represent `nil` when appropriate. 
+func Int64p(key string, val *int64) Field { + if val == nil { + return nilField(key) + } + return Int64(key, *val) +} + +// Int32 constructs a field with the given key and value. +func Int32(key string, val int32) Field { + return Field{Key: key, Type: zapcore.Int32Type, Integer: int64(val)} +} + +// Int32p constructs a field that carries a *int32. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Int32p(key string, val *int32) Field { + if val == nil { + return nilField(key) + } + return Int32(key, *val) +} + +// Int16 constructs a field with the given key and value. +func Int16(key string, val int16) Field { + return Field{Key: key, Type: zapcore.Int16Type, Integer: int64(val)} +} + +// Int16p constructs a field that carries a *int16. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Int16p(key string, val *int16) Field { + if val == nil { + return nilField(key) + } + return Int16(key, *val) +} + +// Int8 constructs a field with the given key and value. +func Int8(key string, val int8) Field { + return Field{Key: key, Type: zapcore.Int8Type, Integer: int64(val)} +} + +// Int8p constructs a field that carries a *int8. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Int8p(key string, val *int8) Field { + if val == nil { + return nilField(key) + } + return Int8(key, *val) +} + +// String constructs a field with the given key and value. +func String(key string, val string) Field { + return Field{Key: key, Type: zapcore.StringType, String: val} +} + +// Stringp constructs a field that carries a *string. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Stringp(key string, val *string) Field { + if val == nil { + return nilField(key) + } + return String(key, *val) +} + +// Uint constructs a field with the given key and value. +func Uint(key string, val uint) Field { + return Uint64(key, uint64(val)) +} + +// Uintp constructs a field that carries a *uint. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Uintp(key string, val *uint) Field { + if val == nil { + return nilField(key) + } + return Uint(key, *val) +} + +// Uint64 constructs a field with the given key and value. +func Uint64(key string, val uint64) Field { + return Field{Key: key, Type: zapcore.Uint64Type, Integer: int64(val)} +} + +// Uint64p constructs a field that carries a *uint64. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Uint64p(key string, val *uint64) Field { + if val == nil { + return nilField(key) + } + return Uint64(key, *val) +} + +// Uint32 constructs a field with the given key and value. +func Uint32(key string, val uint32) Field { + return Field{Key: key, Type: zapcore.Uint32Type, Integer: int64(val)} +} + +// Uint32p constructs a field that carries a *uint32. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Uint32p(key string, val *uint32) Field { + if val == nil { + return nilField(key) + } + return Uint32(key, *val) +} + +// Uint16 constructs a field with the given key and value. +func Uint16(key string, val uint16) Field { + return Field{Key: key, Type: zapcore.Uint16Type, Integer: int64(val)} +} + +// Uint16p constructs a field that carries a *uint16. The returned Field will safely +// and explicitly represent `nil` when appropriate. 
+func Uint16p(key string, val *uint16) Field { + if val == nil { + return nilField(key) + } + return Uint16(key, *val) +} + +// Uint8 constructs a field with the given key and value. +func Uint8(key string, val uint8) Field { + return Field{Key: key, Type: zapcore.Uint8Type, Integer: int64(val)} +} + +// Uint8p constructs a field that carries a *uint8. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Uint8p(key string, val *uint8) Field { + if val == nil { + return nilField(key) + } + return Uint8(key, *val) +} + +// Uintptr constructs a field with the given key and value. +func Uintptr(key string, val uintptr) Field { + return Field{Key: key, Type: zapcore.UintptrType, Integer: int64(val)} +} + +// Uintptrp constructs a field that carries a *uintptr. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Uintptrp(key string, val *uintptr) Field { + if val == nil { + return nilField(key) + } + return Uintptr(key, *val) +} + +// Reflect constructs a field with the given key and an arbitrary object. It uses +// an encoding-appropriate, reflection-based function to lazily serialize nearly +// any object into the logging context, but it's relatively slow and +// allocation-heavy. Outside tests, Any is always a better choice. +// +// If encoding fails (e.g., trying to serialize a map[int]string to JSON), Reflect +// includes the error message in the final log output. +func Reflect(key string, val interface{}) Field { + return Field{Key: key, Type: zapcore.ReflectType, Interface: val} +} + +// Namespace creates a named, isolated scope within the logger's context. All +// subsequent fields will be added to the new namespace. +// +// This helps prevent key collisions when injecting loggers into sub-components +// or third-party libraries. +func Namespace(key string) Field { + return Field{Key: key, Type: zapcore.NamespaceType} +} + +// Stringer constructs a field with the given key and the output of the value's +// String method. The Stringer's String method is called lazily. +func Stringer(key string, val fmt.Stringer) Field { + return Field{Key: key, Type: zapcore.StringerType, Interface: val} +} + +// Time constructs a Field with the given key and value. The encoder +// controls how the time is serialized. +func Time(key string, val time.Time) Field { + if val.Before(_minTimeInt64) || val.After(_maxTimeInt64) { + return Field{Key: key, Type: zapcore.TimeFullType, Interface: val} + } + return Field{Key: key, Type: zapcore.TimeType, Integer: val.UnixNano(), Interface: val.Location()} +} + +// Timep constructs a field that carries a *time.Time. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Timep(key string, val *time.Time) Field { + if val == nil { + return nilField(key) + } + return Time(key, *val) +} + +// Stack constructs a field that stores a stacktrace of the current goroutine +// under provided key. Keep in mind that taking a stacktrace is eager and +// expensive (relatively speaking); this function both makes an allocation and +// takes about two microseconds. +func Stack(key string) Field { + return StackSkip(key, 1) // skip Stack +} + +// StackSkip constructs a field similarly to Stack, but also skips the given +// number of frames from the top of the stacktrace. +func StackSkip(key string, skip int) Field { + // Returning the stacktrace as a string costs an allocation, but saves us + // from expanding the zapcore.Field union struct to include a byte slice. 
Since + // taking a stacktrace is already so expensive (~10us), the extra allocation + // is okay. + return String(key, takeStacktrace(skip+1)) // skip StackSkip +} + +// Duration constructs a field with the given key and value. The encoder +// controls how the duration is serialized. +func Duration(key string, val time.Duration) Field { + return Field{Key: key, Type: zapcore.DurationType, Integer: int64(val)} +} + +// Durationp constructs a field that carries a *time.Duration. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Durationp(key string, val *time.Duration) Field { + if val == nil { + return nilField(key) + } + return Duration(key, *val) +} + +// Object constructs a field with the given key and ObjectMarshaler. It +// provides a flexible, but still type-safe and efficient, way to add map- or +// struct-like user-defined types to the logging context. The struct's +// MarshalLogObject method is called lazily. +func Object(key string, val zapcore.ObjectMarshaler) Field { + return Field{Key: key, Type: zapcore.ObjectMarshalerType, Interface: val} +} + +// Inline constructs a Field that is similar to Object, but it +// will add the elements of the provided ObjectMarshaler to the +// current namespace. +func Inline(val zapcore.ObjectMarshaler) Field { + return zapcore.Field{ + Type: zapcore.InlineMarshalerType, + Interface: val, + } +} + +// Any takes a key and an arbitrary value and chooses the best way to represent +// them as a field, falling back to a reflection-based approach only if +// necessary. +// +// Since byte/uint8 and rune/int32 are aliases, Any can't differentiate between +// them. To minimize surprises, []byte values are treated as binary blobs, byte +// values are treated as uint8, and runes are always treated as integers. 
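A short sketch (not part of the vendored file) of Any's dispatch behavior described above; the keys and values are arbitrary examples.

	package main

	import (
		"time"

		"go.uber.org/zap"
	)

	func main() {
		logger := zap.NewExample()
		defer logger.Sync()

		// Each Any call resolves to a typed constructor: bool -> Bool,
		// time.Duration -> Duration, []byte -> Binary, and a plain struct
		// (matching no earlier case) falls back to the reflection-based Reflect.
		logger.Info("any dispatch",
			zap.Any("ready", true),
			zap.Any("backoff", 250*time.Millisecond),
			zap.Any("blob", []byte{0x01, 0x02}),
			zap.Any("extra", struct{ Attempt int }{Attempt: 3}),
		)
	}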
+func Any(key string, value interface{}) Field { + switch val := value.(type) { + case zapcore.ObjectMarshaler: + return Object(key, val) + case zapcore.ArrayMarshaler: + return Array(key, val) + case bool: + return Bool(key, val) + case *bool: + return Boolp(key, val) + case []bool: + return Bools(key, val) + case complex128: + return Complex128(key, val) + case *complex128: + return Complex128p(key, val) + case []complex128: + return Complex128s(key, val) + case complex64: + return Complex64(key, val) + case *complex64: + return Complex64p(key, val) + case []complex64: + return Complex64s(key, val) + case float64: + return Float64(key, val) + case *float64: + return Float64p(key, val) + case []float64: + return Float64s(key, val) + case float32: + return Float32(key, val) + case *float32: + return Float32p(key, val) + case []float32: + return Float32s(key, val) + case int: + return Int(key, val) + case *int: + return Intp(key, val) + case []int: + return Ints(key, val) + case int64: + return Int64(key, val) + case *int64: + return Int64p(key, val) + case []int64: + return Int64s(key, val) + case int32: + return Int32(key, val) + case *int32: + return Int32p(key, val) + case []int32: + return Int32s(key, val) + case int16: + return Int16(key, val) + case *int16: + return Int16p(key, val) + case []int16: + return Int16s(key, val) + case int8: + return Int8(key, val) + case *int8: + return Int8p(key, val) + case []int8: + return Int8s(key, val) + case string: + return String(key, val) + case *string: + return Stringp(key, val) + case []string: + return Strings(key, val) + case uint: + return Uint(key, val) + case *uint: + return Uintp(key, val) + case []uint: + return Uints(key, val) + case uint64: + return Uint64(key, val) + case *uint64: + return Uint64p(key, val) + case []uint64: + return Uint64s(key, val) + case uint32: + return Uint32(key, val) + case *uint32: + return Uint32p(key, val) + case []uint32: + return Uint32s(key, val) + case uint16: + return Uint16(key, val) + case *uint16: + return Uint16p(key, val) + case []uint16: + return Uint16s(key, val) + case uint8: + return Uint8(key, val) + case *uint8: + return Uint8p(key, val) + case []byte: + return Binary(key, val) + case uintptr: + return Uintptr(key, val) + case *uintptr: + return Uintptrp(key, val) + case []uintptr: + return Uintptrs(key, val) + case time.Time: + return Time(key, val) + case *time.Time: + return Timep(key, val) + case []time.Time: + return Times(key, val) + case time.Duration: + return Duration(key, val) + case *time.Duration: + return Durationp(key, val) + case []time.Duration: + return Durations(key, val) + case error: + return NamedError(key, val) + case []error: + return Errors(key, val) + case fmt.Stringer: + return Stringer(key, val) + default: + return Reflect(key, val) + } +} diff --git a/vendor/go.uber.org/zap/flag.go b/vendor/go.uber.org/zap/flag.go new file mode 100644 index 0000000000..1312875072 --- /dev/null +++ b/vendor/go.uber.org/zap/flag.go @@ -0,0 +1,39 @@ +// Copyright (c) 2016 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "flag" + + "go.uber.org/zap/zapcore" +) + +// LevelFlag uses the standard library's flag.Var to declare a global flag +// with the specified name, default, and usage guidance. The returned value is +// a pointer to the value of the flag. +// +// If you don't want to use the flag package's global state, you can use any +// non-nil *Level as a flag.Value with your own *flag.FlagSet. +func LevelFlag(name string, defaultLevel zapcore.Level, usage string) *zapcore.Level { + lvl := defaultLevel + flag.Var(&lvl, name, usage) + return &lvl +} diff --git a/vendor/go.uber.org/zap/glide.yaml b/vendor/go.uber.org/zap/glide.yaml new file mode 100644 index 0000000000..8e1d05e9ab --- /dev/null +++ b/vendor/go.uber.org/zap/glide.yaml @@ -0,0 +1,34 @@ +package: go.uber.org/zap +license: MIT +import: +- package: go.uber.org/atomic + version: ^1 +- package: go.uber.org/multierr + version: ^1 +testImport: +- package: github.com/satori/go.uuid +- package: github.com/sirupsen/logrus +- package: github.com/apex/log + subpackages: + - handlers/json +- package: github.com/go-kit/kit + subpackages: + - log +- package: github.com/stretchr/testify + subpackages: + - assert + - require +- package: gopkg.in/inconshreveable/log15.v2 +- package: github.com/mattn/goveralls +- package: github.com/pborman/uuid +- package: github.com/pkg/errors +- package: github.com/rs/zerolog +- package: golang.org/x/tools + subpackages: + - cover +- package: golang.org/x/lint + subpackages: + - golint +- package: github.com/axw/gocov + subpackages: + - gocov diff --git a/vendor/go.uber.org/zap/global.go b/vendor/go.uber.org/zap/global.go new file mode 100644 index 0000000000..3cb46c9e0a --- /dev/null +++ b/vendor/go.uber.org/zap/global.go @@ -0,0 +1,169 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "bytes" + "fmt" + "log" + "os" + "sync" + + "go.uber.org/zap/zapcore" +) + +const ( + _stdLogDefaultDepth = 1 + _loggerWriterDepth = 2 + _programmerErrorTemplate = "You've found a bug in zap! Please file a bug at " + + "https://github.com/uber-go/zap/issues/new and reference this error: %v" +) + +var ( + _globalMu sync.RWMutex + _globalL = NewNop() + _globalS = _globalL.Sugar() +) + +// L returns the global Logger, which can be reconfigured with ReplaceGlobals. +// It's safe for concurrent use. +func L() *Logger { + _globalMu.RLock() + l := _globalL + _globalMu.RUnlock() + return l +} + +// S returns the global SugaredLogger, which can be reconfigured with +// ReplaceGlobals. It's safe for concurrent use. +func S() *SugaredLogger { + _globalMu.RLock() + s := _globalS + _globalMu.RUnlock() + return s +} + +// ReplaceGlobals replaces the global Logger and SugaredLogger, and returns a +// function to restore the original values. It's safe for concurrent use. +func ReplaceGlobals(logger *Logger) func() { + _globalMu.Lock() + prev := _globalL + _globalL = logger + _globalS = logger.Sugar() + _globalMu.Unlock() + return func() { ReplaceGlobals(prev) } +} + +// NewStdLog returns a *log.Logger which writes to the supplied zap Logger at +// InfoLevel. To redirect the standard library's package-global logging +// functions, use RedirectStdLog instead. +func NewStdLog(l *Logger) *log.Logger { + logger := l.WithOptions(AddCallerSkip(_stdLogDefaultDepth + _loggerWriterDepth)) + f := logger.Info + return log.New(&loggerWriter{f}, "" /* prefix */, 0 /* flags */) +} + +// NewStdLogAt returns *log.Logger which writes to supplied zap logger at +// required level. +func NewStdLogAt(l *Logger, level zapcore.Level) (*log.Logger, error) { + logger := l.WithOptions(AddCallerSkip(_stdLogDefaultDepth + _loggerWriterDepth)) + logFunc, err := levelToFunc(logger, level) + if err != nil { + return nil, err + } + return log.New(&loggerWriter{logFunc}, "" /* prefix */, 0 /* flags */), nil +} + +// RedirectStdLog redirects output from the standard library's package-global +// logger to the supplied logger at InfoLevel. Since zap already handles caller +// annotations, timestamps, etc., it automatically disables the standard +// library's annotations and prefixing. +// +// It returns a function to restore the original prefix and flags and reset the +// standard library's output to os.Stderr. +func RedirectStdLog(l *Logger) func() { + f, err := redirectStdLogAt(l, InfoLevel) + if err != nil { + // Can't get here, since passing InfoLevel to redirectStdLogAt always + // works. + panic(fmt.Sprintf(_programmerErrorTemplate, err)) + } + return f +} + +// RedirectStdLogAt redirects output from the standard library's package-global +// logger to the supplied logger at the specified level. Since zap already +// handles caller annotations, timestamps, etc., it automatically disables the +// standard library's annotations and prefixing. 
+// +// It returns a function to restore the original prefix and flags and reset the +// standard library's output to os.Stderr. +func RedirectStdLogAt(l *Logger, level zapcore.Level) (func(), error) { + return redirectStdLogAt(l, level) +} + +func redirectStdLogAt(l *Logger, level zapcore.Level) (func(), error) { + flags := log.Flags() + prefix := log.Prefix() + log.SetFlags(0) + log.SetPrefix("") + logger := l.WithOptions(AddCallerSkip(_stdLogDefaultDepth + _loggerWriterDepth)) + logFunc, err := levelToFunc(logger, level) + if err != nil { + return nil, err + } + log.SetOutput(&loggerWriter{logFunc}) + return func() { + log.SetFlags(flags) + log.SetPrefix(prefix) + log.SetOutput(os.Stderr) + }, nil +} + +func levelToFunc(logger *Logger, lvl zapcore.Level) (func(string, ...Field), error) { + switch lvl { + case DebugLevel: + return logger.Debug, nil + case InfoLevel: + return logger.Info, nil + case WarnLevel: + return logger.Warn, nil + case ErrorLevel: + return logger.Error, nil + case DPanicLevel: + return logger.DPanic, nil + case PanicLevel: + return logger.Panic, nil + case FatalLevel: + return logger.Fatal, nil + } + return nil, fmt.Errorf("unrecognized level: %q", lvl) +} + +type loggerWriter struct { + logFunc func(msg string, fields ...Field) +} + +func (l *loggerWriter) Write(p []byte) (int, error) { + p = bytes.TrimSpace(p) + l.logFunc(string(p)) + return len(p), nil +} diff --git a/vendor/go.uber.org/zap/http_handler.go b/vendor/go.uber.org/zap/http_handler.go new file mode 100644 index 0000000000..632b6831a8 --- /dev/null +++ b/vendor/go.uber.org/zap/http_handler.go @@ -0,0 +1,133 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + + "go.uber.org/zap/zapcore" +) + +// ServeHTTP is a simple JSON endpoint that can report on or change the current +// logging level. +// +// # GET +// +// The GET request returns a JSON description of the current logging level like: +// +// {"level":"info"} +// +// # PUT +// +// The PUT request changes the logging level. It is perfectly safe to change the +// logging level while a program is running. Two content types are supported: +// +// Content-Type: application/x-www-form-urlencoded +// +// With this content type, the level can be provided through the request body or +// a query parameter. 
The log level is URL encoded like: +// +// level=debug +// +// The request body takes precedence over the query parameter, if both are +// specified. +// +// This content type is the default for a curl PUT request. Following are two +// example curl requests that both set the logging level to debug. +// +// curl -X PUT localhost:8080/log/level?level=debug +// curl -X PUT localhost:8080/log/level -d level=debug +// +// For any other content type, the payload is expected to be JSON encoded and +// look like: +// +// {"level":"info"} +// +// An example curl request could look like this: +// +// curl -X PUT localhost:8080/log/level -H "Content-Type: application/json" -d '{"level":"debug"}' +func (lvl AtomicLevel) ServeHTTP(w http.ResponseWriter, r *http.Request) { + type errorResponse struct { + Error string `json:"error"` + } + type payload struct { + Level zapcore.Level `json:"level"` + } + + enc := json.NewEncoder(w) + + switch r.Method { + case http.MethodGet: + enc.Encode(payload{Level: lvl.Level()}) + case http.MethodPut: + requestedLvl, err := decodePutRequest(r.Header.Get("Content-Type"), r) + if err != nil { + w.WriteHeader(http.StatusBadRequest) + enc.Encode(errorResponse{Error: err.Error()}) + return + } + lvl.SetLevel(requestedLvl) + enc.Encode(payload{Level: lvl.Level()}) + default: + w.WriteHeader(http.StatusMethodNotAllowed) + enc.Encode(errorResponse{ + Error: "Only GET and PUT are supported.", + }) + } +} + +// Decodes incoming PUT requests and returns the requested logging level. +func decodePutRequest(contentType string, r *http.Request) (zapcore.Level, error) { + if contentType == "application/x-www-form-urlencoded" { + return decodePutURL(r) + } + return decodePutJSON(r.Body) +} + +func decodePutURL(r *http.Request) (zapcore.Level, error) { + lvl := r.FormValue("level") + if lvl == "" { + return 0, errors.New("must specify logging level") + } + var l zapcore.Level + if err := l.UnmarshalText([]byte(lvl)); err != nil { + return 0, err + } + return l, nil +} + +func decodePutJSON(body io.Reader) (zapcore.Level, error) { + var pld struct { + Level *zapcore.Level `json:"level"` + } + if err := json.NewDecoder(body).Decode(&pld); err != nil { + return 0, fmt.Errorf("malformed request body: %v", err) + } + if pld.Level == nil { + return 0, errors.New("must specify logging level") + } + return *pld.Level, nil + +} diff --git a/vendor/go.uber.org/zap/internal/bufferpool/bufferpool.go b/vendor/go.uber.org/zap/internal/bufferpool/bufferpool.go new file mode 100644 index 0000000000..dad583aaa5 --- /dev/null +++ b/vendor/go.uber.org/zap/internal/bufferpool/bufferpool.go @@ -0,0 +1,31 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package bufferpool houses zap's shared internal buffer pool. Third-party +// packages can recreate the same functionality with buffers.NewPool. +package bufferpool + +import "go.uber.org/zap/buffer" + +var ( + _pool = buffer.NewPool() + // Get retrieves a buffer from the pool, creating one if necessary. + Get = _pool.Get +) diff --git a/vendor/go.uber.org/zap/internal/color/color.go b/vendor/go.uber.org/zap/internal/color/color.go new file mode 100644 index 0000000000..c4d5d02abc --- /dev/null +++ b/vendor/go.uber.org/zap/internal/color/color.go @@ -0,0 +1,44 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package color adds coloring functionality for TTY output. +package color + +import "fmt" + +// Foreground colors. +const ( + Black Color = iota + 30 + Red + Green + Yellow + Blue + Magenta + Cyan + White +) + +// Color represents a text color. +type Color uint8 + +// Add adds the coloring to the given string. +func (c Color) Add(s string) string { + return fmt.Sprintf("\x1b[%dm%s\x1b[0m", uint8(c), s) +} diff --git a/vendor/go.uber.org/zap/internal/exit/exit.go b/vendor/go.uber.org/zap/internal/exit/exit.go new file mode 100644 index 0000000000..f673f9947b --- /dev/null +++ b/vendor/go.uber.org/zap/internal/exit/exit.go @@ -0,0 +1,66 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package exit provides stubs so that unit tests can exercise code that calls +// os.Exit(1). +package exit + +import "os" + +var _exit = os.Exit + +// With terminates the process by calling os.Exit(code). If the package is +// stubbed, it instead records a call in the testing spy. +func With(code int) { + _exit(code) +} + +// A StubbedExit is a testing fake for os.Exit. +type StubbedExit struct { + Exited bool + Code int + prev func(code int) +} + +// Stub substitutes a fake for the call to os.Exit(1). +func Stub() *StubbedExit { + s := &StubbedExit{prev: _exit} + _exit = s.exit + return s +} + +// WithStub runs the supplied function with Exit stubbed. It returns the stub +// used, so that users can test whether the process would have crashed. +func WithStub(f func()) *StubbedExit { + s := Stub() + defer s.Unstub() + f() + return s +} + +// Unstub restores the previous exit function. +func (se *StubbedExit) Unstub() { + _exit = se.prev +} + +func (se *StubbedExit) exit(code int) { + se.Exited = true + se.Code = code +} diff --git a/vendor/go.uber.org/zap/internal/level_enabler.go b/vendor/go.uber.org/zap/internal/level_enabler.go new file mode 100644 index 0000000000..5f3e3f1b92 --- /dev/null +++ b/vendor/go.uber.org/zap/internal/level_enabler.go @@ -0,0 +1,35 @@ +// Copyright (c) 2022 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package internal + +import "go.uber.org/zap/zapcore" + +// LeveledEnabler is an interface satisfied by LevelEnablers that are able to +// report their own level. +// +// This interface is defined to use more conveniently in tests and non-zapcore +// packages. +// This cannot be imported from zapcore because of the cyclic dependency. +type LeveledEnabler interface { + zapcore.LevelEnabler + + Level() zapcore.Level +} diff --git a/vendor/go.uber.org/zap/level.go b/vendor/go.uber.org/zap/level.go new file mode 100644 index 0000000000..db951e19a5 --- /dev/null +++ b/vendor/go.uber.org/zap/level.go @@ -0,0 +1,152 @@ +// Copyright (c) 2016 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "go.uber.org/atomic" + "go.uber.org/zap/internal" + "go.uber.org/zap/zapcore" +) + +const ( + // DebugLevel logs are typically voluminous, and are usually disabled in + // production. + DebugLevel = zapcore.DebugLevel + // InfoLevel is the default logging priority. + InfoLevel = zapcore.InfoLevel + // WarnLevel logs are more important than Info, but don't need individual + // human review. + WarnLevel = zapcore.WarnLevel + // ErrorLevel logs are high-priority. If an application is running smoothly, + // it shouldn't generate any error-level logs. + ErrorLevel = zapcore.ErrorLevel + // DPanicLevel logs are particularly important errors. In development the + // logger panics after writing the message. + DPanicLevel = zapcore.DPanicLevel + // PanicLevel logs a message, then panics. + PanicLevel = zapcore.PanicLevel + // FatalLevel logs a message, then calls os.Exit(1). + FatalLevel = zapcore.FatalLevel +) + +// LevelEnablerFunc is a convenient way to implement zapcore.LevelEnabler with +// an anonymous function. +// +// It's particularly useful when splitting log output between different +// outputs (e.g., standard error and standard out). For sample code, see the +// package-level AdvancedConfiguration example. +type LevelEnablerFunc func(zapcore.Level) bool + +// Enabled calls the wrapped function. +func (f LevelEnablerFunc) Enabled(lvl zapcore.Level) bool { return f(lvl) } + +// An AtomicLevel is an atomically changeable, dynamic logging level. It lets +// you safely change the log level of a tree of loggers (the root logger and +// any children created by adding context) at runtime. +// +// The AtomicLevel itself is an http.Handler that serves a JSON endpoint to +// alter its level. +// +// AtomicLevels must be created with the NewAtomicLevel constructor to allocate +// their internal atomic pointer. +type AtomicLevel struct { + l *atomic.Int32 +} + +var _ internal.LeveledEnabler = AtomicLevel{} + +// NewAtomicLevel creates an AtomicLevel with InfoLevel and above logging +// enabled. +func NewAtomicLevel() AtomicLevel { + return AtomicLevel{ + l: atomic.NewInt32(int32(InfoLevel)), + } +} + +// NewAtomicLevelAt is a convenience function that creates an AtomicLevel +// and then calls SetLevel with the given level. 
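A minimal sketch (not part of this patch) of changing a logger's level at runtime through an AtomicLevel, both programmatically and over HTTP. The encoder config is a stripped-down assumption, the /log/level path mirrors the curl examples earlier in this file, and actually serving the mux is omitted.

	package main

	import (
		"net/http"
		"os"

		"go.uber.org/zap"
		"go.uber.org/zap/zapcore"
	)

	func main() {
		// Start at InfoLevel; the same AtomicLevel gates every core built with it.
		lvl := zap.NewAtomicLevelAt(zap.InfoLevel)

		encCfg := zapcore.EncoderConfig{
			MessageKey:  "msg",
			LevelKey:    "level",
			EncodeLevel: zapcore.LowercaseLevelEncoder,
		}
		core := zapcore.NewCore(zapcore.NewJSONEncoder(encCfg), zapcore.Lock(os.Stdout), lvl)
		logger := zap.New(core)
		defer logger.Sync()

		logger.Debug("suppressed while the level is info")
		lvl.SetLevel(zap.DebugLevel)
		logger.Debug("emitted after SetLevel")

		// AtomicLevel is also an http.Handler serving the GET/PUT JSON endpoint.
		http.Handle("/log/level", lvl)
	}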
+func NewAtomicLevelAt(l zapcore.Level) AtomicLevel { + a := NewAtomicLevel() + a.SetLevel(l) + return a +} + +// ParseAtomicLevel parses an AtomicLevel based on a lowercase or all-caps ASCII +// representation of the log level. If the provided ASCII representation is +// invalid an error is returned. +// +// This is particularly useful when dealing with text input to configure log +// levels. +func ParseAtomicLevel(text string) (AtomicLevel, error) { + a := NewAtomicLevel() + l, err := zapcore.ParseLevel(text) + if err != nil { + return a, err + } + + a.SetLevel(l) + return a, nil +} + +// Enabled implements the zapcore.LevelEnabler interface, which allows the +// AtomicLevel to be used in place of traditional static levels. +func (lvl AtomicLevel) Enabled(l zapcore.Level) bool { + return lvl.Level().Enabled(l) +} + +// Level returns the minimum enabled log level. +func (lvl AtomicLevel) Level() zapcore.Level { + return zapcore.Level(int8(lvl.l.Load())) +} + +// SetLevel alters the logging level. +func (lvl AtomicLevel) SetLevel(l zapcore.Level) { + lvl.l.Store(int32(l)) +} + +// String returns the string representation of the underlying Level. +func (lvl AtomicLevel) String() string { + return lvl.Level().String() +} + +// UnmarshalText unmarshals the text to an AtomicLevel. It uses the same text +// representations as the static zapcore.Levels ("debug", "info", "warn", +// "error", "dpanic", "panic", and "fatal"). +func (lvl *AtomicLevel) UnmarshalText(text []byte) error { + if lvl.l == nil { + lvl.l = &atomic.Int32{} + } + + var l zapcore.Level + if err := l.UnmarshalText(text); err != nil { + return err + } + + lvl.SetLevel(l) + return nil +} + +// MarshalText marshals the AtomicLevel to a byte slice. It uses the same +// text representation as the static zapcore.Levels ("debug", "info", "warn", +// "error", "dpanic", "panic", and "fatal"). +func (lvl AtomicLevel) MarshalText() (text []byte, err error) { + return lvl.Level().MarshalText() +} diff --git a/vendor/go.uber.org/zap/logger.go b/vendor/go.uber.org/zap/logger.go new file mode 100644 index 0000000000..cd44030d13 --- /dev/null +++ b/vendor/go.uber.org/zap/logger.go @@ -0,0 +1,400 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "fmt" + "io" + "os" + "strings" + + "go.uber.org/zap/internal/bufferpool" + "go.uber.org/zap/zapcore" +) + +// A Logger provides fast, leveled, structured logging. 
All methods are safe +// for concurrent use. +// +// The Logger is designed for contexts in which every microsecond and every +// allocation matters, so its API intentionally favors performance and type +// safety over brevity. For most applications, the SugaredLogger strikes a +// better balance between performance and ergonomics. +type Logger struct { + core zapcore.Core + + development bool + addCaller bool + onFatal zapcore.CheckWriteHook // default is WriteThenFatal + + name string + errorOutput zapcore.WriteSyncer + + addStack zapcore.LevelEnabler + + callerSkip int + + clock zapcore.Clock +} + +// New constructs a new Logger from the provided zapcore.Core and Options. If +// the passed zapcore.Core is nil, it falls back to using a no-op +// implementation. +// +// This is the most flexible way to construct a Logger, but also the most +// verbose. For typical use cases, the highly-opinionated presets +// (NewProduction, NewDevelopment, and NewExample) or the Config struct are +// more convenient. +// +// For sample code, see the package-level AdvancedConfiguration example. +func New(core zapcore.Core, options ...Option) *Logger { + if core == nil { + return NewNop() + } + log := &Logger{ + core: core, + errorOutput: zapcore.Lock(os.Stderr), + addStack: zapcore.FatalLevel + 1, + clock: zapcore.DefaultClock, + } + return log.WithOptions(options...) +} + +// NewNop returns a no-op Logger. It never writes out logs or internal errors, +// and it never runs user-defined hooks. +// +// Using WithOptions to replace the Core or error output of a no-op Logger can +// re-enable logging. +func NewNop() *Logger { + return &Logger{ + core: zapcore.NewNopCore(), + errorOutput: zapcore.AddSync(io.Discard), + addStack: zapcore.FatalLevel + 1, + clock: zapcore.DefaultClock, + } +} + +// NewProduction builds a sensible production Logger that writes InfoLevel and +// above logs to standard error as JSON. +// +// It's a shortcut for NewProductionConfig().Build(...Option). +func NewProduction(options ...Option) (*Logger, error) { + return NewProductionConfig().Build(options...) +} + +// NewDevelopment builds a development Logger that writes DebugLevel and above +// logs to standard error in a human-friendly format. +// +// It's a shortcut for NewDevelopmentConfig().Build(...Option). +func NewDevelopment(options ...Option) (*Logger, error) { + return NewDevelopmentConfig().Build(options...) +} + +// Must is a helper that wraps a call to a function returning (*Logger, error) +// and panics if the error is non-nil. It is intended for use in variable +// initialization such as: +// +// var logger = zap.Must(zap.NewProduction()) +func Must(logger *Logger, err error) *Logger { + if err != nil { + panic(err) + } + + return logger +} + +// NewExample builds a Logger that's designed for use in zap's testable +// examples. It writes DebugLevel and above logs to standard out as JSON, but +// omits the timestamp and calling function to keep example output +// short and deterministic. +func NewExample(options ...Option) *Logger { + encoderCfg := zapcore.EncoderConfig{ + MessageKey: "msg", + LevelKey: "level", + NameKey: "logger", + EncodeLevel: zapcore.LowercaseLevelEncoder, + EncodeTime: zapcore.ISO8601TimeEncoder, + EncodeDuration: zapcore.StringDurationEncoder, + } + core := zapcore.NewCore(zapcore.NewJSONEncoder(encoderCfg), os.Stdout, DebugLevel) + return New(core).WithOptions(options...) +} + +// Sugar wraps the Logger to provide a more ergonomic, but slightly slower, +// API. 
Sugaring a Logger is quite inexpensive, so it's reasonable for a +// single application to use both Loggers and SugaredLoggers, converting +// between them on the boundaries of performance-sensitive code. +func (log *Logger) Sugar() *SugaredLogger { + core := log.clone() + core.callerSkip += 2 + return &SugaredLogger{core} +} + +// Named adds a new path segment to the logger's name. Segments are joined by +// periods. By default, Loggers are unnamed. +func (log *Logger) Named(s string) *Logger { + if s == "" { + return log + } + l := log.clone() + if log.name == "" { + l.name = s + } else { + l.name = strings.Join([]string{l.name, s}, ".") + } + return l +} + +// WithOptions clones the current Logger, applies the supplied Options, and +// returns the resulting Logger. It's safe to use concurrently. +func (log *Logger) WithOptions(opts ...Option) *Logger { + c := log.clone() + for _, opt := range opts { + opt.apply(c) + } + return c +} + +// With creates a child logger and adds structured context to it. Fields added +// to the child don't affect the parent, and vice versa. +func (log *Logger) With(fields ...Field) *Logger { + if len(fields) == 0 { + return log + } + l := log.clone() + l.core = l.core.With(fields) + return l +} + +// Level reports the minimum enabled level for this logger. +// +// For NopLoggers, this is [zapcore.InvalidLevel]. +func (log *Logger) Level() zapcore.Level { + return zapcore.LevelOf(log.core) +} + +// Check returns a CheckedEntry if logging a message at the specified level +// is enabled. It's a completely optional optimization; in high-performance +// applications, Check can help avoid allocating a slice to hold fields. +func (log *Logger) Check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry { + return log.check(lvl, msg) +} + +// Log logs a message at the specified level. The message includes any fields +// passed at the log site, as well as any fields accumulated on the logger. +func (log *Logger) Log(lvl zapcore.Level, msg string, fields ...Field) { + if ce := log.check(lvl, msg); ce != nil { + ce.Write(fields...) + } +} + +// Debug logs a message at DebugLevel. The message includes any fields passed +// at the log site, as well as any fields accumulated on the logger. +func (log *Logger) Debug(msg string, fields ...Field) { + if ce := log.check(DebugLevel, msg); ce != nil { + ce.Write(fields...) + } +} + +// Info logs a message at InfoLevel. The message includes any fields passed +// at the log site, as well as any fields accumulated on the logger. +func (log *Logger) Info(msg string, fields ...Field) { + if ce := log.check(InfoLevel, msg); ce != nil { + ce.Write(fields...) + } +} + +// Warn logs a message at WarnLevel. The message includes any fields passed +// at the log site, as well as any fields accumulated on the logger. +func (log *Logger) Warn(msg string, fields ...Field) { + if ce := log.check(WarnLevel, msg); ce != nil { + ce.Write(fields...) + } +} + +// Error logs a message at ErrorLevel. The message includes any fields passed +// at the log site, as well as any fields accumulated on the logger. +func (log *Logger) Error(msg string, fields ...Field) { + if ce := log.check(ErrorLevel, msg); ce != nil { + ce.Write(fields...) + } +} + +// DPanic logs a message at DPanicLevel. The message includes any fields +// passed at the log site, as well as any fields accumulated on the logger. +// +// If the logger is in development mode, it then panics (DPanic means +// "development panic"). 
This is useful for catching errors that are +// recoverable, but shouldn't ever happen. +func (log *Logger) DPanic(msg string, fields ...Field) { + if ce := log.check(DPanicLevel, msg); ce != nil { + ce.Write(fields...) + } +} + +// Panic logs a message at PanicLevel. The message includes any fields passed +// at the log site, as well as any fields accumulated on the logger. +// +// The logger then panics, even if logging at PanicLevel is disabled. +func (log *Logger) Panic(msg string, fields ...Field) { + if ce := log.check(PanicLevel, msg); ce != nil { + ce.Write(fields...) + } +} + +// Fatal logs a message at FatalLevel. The message includes any fields passed +// at the log site, as well as any fields accumulated on the logger. +// +// The logger then calls os.Exit(1), even if logging at FatalLevel is +// disabled. +func (log *Logger) Fatal(msg string, fields ...Field) { + if ce := log.check(FatalLevel, msg); ce != nil { + ce.Write(fields...) + } +} + +// Sync calls the underlying Core's Sync method, flushing any buffered log +// entries. Applications should take care to call Sync before exiting. +func (log *Logger) Sync() error { + return log.core.Sync() +} + +// Core returns the Logger's underlying zapcore.Core. +func (log *Logger) Core() zapcore.Core { + return log.core +} + +func (log *Logger) clone() *Logger { + copy := *log + return &copy +} + +func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry { + // Logger.check must always be called directly by a method in the + // Logger interface (e.g., Check, Info, Fatal). + // This skips Logger.check and the Info/Fatal/Check/etc. method that + // called it. + const callerSkipOffset = 2 + + // Check the level first to reduce the cost of disabled log calls. + // Since Panic and higher may exit, we skip the optimization for those levels. + if lvl < zapcore.DPanicLevel && !log.core.Enabled(lvl) { + return nil + } + + // Create basic checked entry thru the core; this will be non-nil if the + // log message will actually be written somewhere. + ent := zapcore.Entry{ + LoggerName: log.name, + Time: log.clock.Now(), + Level: lvl, + Message: msg, + } + ce := log.core.Check(ent, nil) + willWrite := ce != nil + + // Set up any required terminal behavior. + switch ent.Level { + case zapcore.PanicLevel: + ce = ce.After(ent, zapcore.WriteThenPanic) + case zapcore.FatalLevel: + onFatal := log.onFatal + // nil or WriteThenNoop will lead to continued execution after + // a Fatal log entry, which is unexpected. For example, + // + // f, err := os.Open(..) + // if err != nil { + // log.Fatal("cannot open", zap.Error(err)) + // } + // fmt.Println(f.Name()) + // + // The f.Name() will panic if we continue execution after the + // log.Fatal. + if onFatal == nil || onFatal == zapcore.WriteThenNoop { + onFatal = zapcore.WriteThenFatal + } + ce = ce.After(ent, onFatal) + case zapcore.DPanicLevel: + if log.development { + ce = ce.After(ent, zapcore.WriteThenPanic) + } + } + + // Only do further annotation if we're going to write this message; checked + // entries that exist only for terminal behavior don't benefit from + // annotation. + if !willWrite { + return ce + } + + // Thread the error output through to the CheckedEntry. + ce.ErrorOutput = log.errorOutput + + addStack := log.addStack.Enabled(ce.Level) + if !log.addCaller && !addStack { + return ce + } + + // Adding the caller or stack trace requires capturing the callers of + // this function. We'll share information between these two.
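+ // Capture just the first frame when only the caller is needed; capture the
+ // full stack when a stacktrace was requested for this entry.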
+ stackDepth := stacktraceFirst + if addStack { + stackDepth = stacktraceFull + } + stack := captureStacktrace(log.callerSkip+callerSkipOffset, stackDepth) + defer stack.Free() + + if stack.Count() == 0 { + if log.addCaller { + fmt.Fprintf(log.errorOutput, "%v Logger.check error: failed to get caller\n", ent.Time.UTC()) + log.errorOutput.Sync() + } + return ce + } + + frame, more := stack.Next() + + if log.addCaller { + ce.Caller = zapcore.EntryCaller{ + Defined: frame.PC != 0, + PC: frame.PC, + File: frame.File, + Line: frame.Line, + Function: frame.Function, + } + } + + if addStack { + buffer := bufferpool.Get() + defer buffer.Free() + + stackfmt := newStackFormatter(buffer) + + // We've already extracted the first frame, so format that + // separately and defer to stackfmt for the rest. + stackfmt.FormatFrame(frame) + if more { + stackfmt.FormatStack(stack) + } + ce.Stack = buffer.String() + } + + return ce +} diff --git a/vendor/go.uber.org/zap/options.go b/vendor/go.uber.org/zap/options.go new file mode 100644 index 0000000000..c4f3bca3d2 --- /dev/null +++ b/vendor/go.uber.org/zap/options.go @@ -0,0 +1,167 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "fmt" + + "go.uber.org/zap/zapcore" +) + +// An Option configures a Logger. +type Option interface { + apply(*Logger) +} + +// optionFunc wraps a func so it satisfies the Option interface. +type optionFunc func(*Logger) + +func (f optionFunc) apply(log *Logger) { + f(log) +} + +// WrapCore wraps or replaces the Logger's underlying zapcore.Core. +func WrapCore(f func(zapcore.Core) zapcore.Core) Option { + return optionFunc(func(log *Logger) { + log.core = f(log.core) + }) +} + +// Hooks registers functions which will be called each time the Logger writes +// out an Entry. Repeated use of Hooks is additive. +// +// Hooks are useful for simple side effects, like capturing metrics for the +// number of emitted logs. More complex side effects, including anything that +// requires access to the Entry's structured fields, should be implemented as +// a zapcore.Core instead. See zapcore.RegisterHooks for details. +func Hooks(hooks ...func(zapcore.Entry) error) Option { + return optionFunc(func(log *Logger) { + log.core = zapcore.RegisterHooks(log.core, hooks...) + }) +} + +// Fields adds fields to the Logger. 
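+//
+// For example, a minimal sketch (assuming core is an existing zapcore.Core)
+// that stamps every entry with a static service name:
+//
+//	logger := zap.New(core, zap.Fields(zap.String("service", "billing")))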
+func Fields(fs ...Field) Option { + return optionFunc(func(log *Logger) { + log.core = log.core.With(fs) + }) +} + +// ErrorOutput sets the destination for errors generated by the Logger. Note +// that this option only affects internal errors; for sample code that sends +// error-level logs to a different location from info- and debug-level logs, +// see the package-level AdvancedConfiguration example. +// +// The supplied WriteSyncer must be safe for concurrent use. The Open and +// zapcore.Lock functions are the simplest ways to protect files with a mutex. +func ErrorOutput(w zapcore.WriteSyncer) Option { + return optionFunc(func(log *Logger) { + log.errorOutput = w + }) +} + +// Development puts the logger in development mode, which makes DPanic-level +// logs panic instead of simply logging an error. +func Development() Option { + return optionFunc(func(log *Logger) { + log.development = true + }) +} + +// AddCaller configures the Logger to annotate each message with the filename, +// line number, and function name of zap's caller. See also WithCaller. +func AddCaller() Option { + return WithCaller(true) +} + +// WithCaller configures the Logger to annotate each message with the filename, +// line number, and function name of zap's caller, or not, depending on the +// value of enabled. This is a generalized form of AddCaller. +func WithCaller(enabled bool) Option { + return optionFunc(func(log *Logger) { + log.addCaller = enabled + }) +} + +// AddCallerSkip increases the number of callers skipped by caller annotation +// (as enabled by the AddCaller option). When building wrappers around the +// Logger and SugaredLogger, supplying this Option prevents zap from always +// reporting the wrapper code as the caller. +func AddCallerSkip(skip int) Option { + return optionFunc(func(log *Logger) { + log.callerSkip += skip + }) +} + +// AddStacktrace configures the Logger to record a stack trace for all messages at +// or above a given level. +func AddStacktrace(lvl zapcore.LevelEnabler) Option { + return optionFunc(func(log *Logger) { + log.addStack = lvl + }) +} + +// IncreaseLevel increase the level of the logger. It has no effect if +// the passed in level tries to decrease the level of the logger. +func IncreaseLevel(lvl zapcore.LevelEnabler) Option { + return optionFunc(func(log *Logger) { + core, err := zapcore.NewIncreaseLevelCore(log.core, lvl) + if err != nil { + fmt.Fprintf(log.errorOutput, "failed to IncreaseLevel: %v\n", err) + } else { + log.core = core + } + }) +} + +// OnFatal sets the action to take on fatal logs. +// +// Deprecated: Use [WithFatalHook] instead. +func OnFatal(action zapcore.CheckWriteAction) Option { + return WithFatalHook(action) +} + +// WithFatalHook sets a CheckWriteHook to run on fatal logs. +// Zap will call this hook after writing a log statement with a Fatal level. +// +// For example, the following builds a logger that will exit the current +// goroutine after writing a fatal log message, but it will not exit the +// program. +// +// zap.New(core, zap.WithFatalHook(zapcore.WriteThenGoexit)) +// +// It is important that the provided CheckWriteHook stops the control flow at +// the current statement to meet expectations of callers of the logger. +// We recommend calling os.Exit or runtime.Goexit inside custom hooks at +// minimum. 
+func WithFatalHook(hook zapcore.CheckWriteHook) Option { + return optionFunc(func(log *Logger) { + log.onFatal = hook + }) +} + +// WithClock specifies the clock used by the logger to determine the current +// time for logged entries. Defaults to the system clock with time.Now. +func WithClock(clock zapcore.Clock) Option { + return optionFunc(func(log *Logger) { + log.clock = clock + }) +} diff --git a/vendor/go.uber.org/zap/sink.go b/vendor/go.uber.org/zap/sink.go new file mode 100644 index 0000000000..478c9a10ff --- /dev/null +++ b/vendor/go.uber.org/zap/sink.go @@ -0,0 +1,179 @@ +// Copyright (c) 2016-2022 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "errors" + "fmt" + "io" + "net/url" + "os" + "path/filepath" + "strings" + "sync" + + "go.uber.org/zap/zapcore" +) + +const schemeFile = "file" + +var _sinkRegistry = newSinkRegistry() + +// Sink defines the interface to write to and close logger destinations. +type Sink interface { + zapcore.WriteSyncer + io.Closer +} + +type errSinkNotFound struct { + scheme string +} + +func (e *errSinkNotFound) Error() string { + return fmt.Sprintf("no sink found for scheme %q", e.scheme) +} + +type nopCloserSink struct{ zapcore.WriteSyncer } + +func (nopCloserSink) Close() error { return nil } + +type sinkRegistry struct { + mu sync.Mutex + factories map[string]func(*url.URL) (Sink, error) // keyed by scheme + openFile func(string, int, os.FileMode) (*os.File, error) // type matches os.OpenFile +} + +func newSinkRegistry() *sinkRegistry { + sr := &sinkRegistry{ + factories: make(map[string]func(*url.URL) (Sink, error)), + openFile: os.OpenFile, + } + sr.RegisterSink(schemeFile, sr.newFileSinkFromURL) + return sr +} + +// RegisterScheme registers the given factory for the specific scheme. 
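+// For illustration only, a caller-side registration might look like the
+// following, where the "memory" scheme and newMemorySink are hypothetical:
+//
+//	zap.RegisterSink("memory", func(*url.URL) (zap.Sink, error) {
+//		return newMemorySink(), nil
+//	})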
+func (sr *sinkRegistry) RegisterSink(scheme string, factory func(*url.URL) (Sink, error)) error { + sr.mu.Lock() + defer sr.mu.Unlock() + + if scheme == "" { + return errors.New("can't register a sink factory for empty string") + } + normalized, err := normalizeScheme(scheme) + if err != nil { + return fmt.Errorf("%q is not a valid scheme: %v", scheme, err) + } + if _, ok := sr.factories[normalized]; ok { + return fmt.Errorf("sink factory already registered for scheme %q", normalized) + } + sr.factories[normalized] = factory + return nil +} + +func (sr *sinkRegistry) newSink(rawURL string) (Sink, error) { + // URL parsing doesn't work well for Windows paths such as `c:\log.txt`, as scheme is set to + // the drive, and path is unset unless `c:/log.txt` is used. + // To avoid Windows-specific URL handling, we instead check IsAbs to open as a file. + // filepath.IsAbs is OS-specific, so IsAbs('c:/log.txt') is false outside of Windows. + if filepath.IsAbs(rawURL) { + return sr.newFileSinkFromPath(rawURL) + } + + u, err := url.Parse(rawURL) + if err != nil { + return nil, fmt.Errorf("can't parse %q as a URL: %v", rawURL, err) + } + if u.Scheme == "" { + u.Scheme = schemeFile + } + + sr.mu.Lock() + factory, ok := sr.factories[u.Scheme] + sr.mu.Unlock() + if !ok { + return nil, &errSinkNotFound{u.Scheme} + } + return factory(u) +} + +// RegisterSink registers a user-supplied factory for all sinks with a +// particular scheme. +// +// All schemes must be ASCII, valid under section 0.1 of RFC 3986 +// (https://tools.ietf.org/html/rfc3983#section-3.1), and must not already +// have a factory registered. Zap automatically registers a factory for the +// "file" scheme. +func RegisterSink(scheme string, factory func(*url.URL) (Sink, error)) error { + return _sinkRegistry.RegisterSink(scheme, factory) +} + +func (sr *sinkRegistry) newFileSinkFromURL(u *url.URL) (Sink, error) { + if u.User != nil { + return nil, fmt.Errorf("user and password not allowed with file URLs: got %v", u) + } + if u.Fragment != "" { + return nil, fmt.Errorf("fragments not allowed with file URLs: got %v", u) + } + if u.RawQuery != "" { + return nil, fmt.Errorf("query parameters not allowed with file URLs: got %v", u) + } + // Error messages are better if we check hostname and port separately. + if u.Port() != "" { + return nil, fmt.Errorf("ports not allowed with file URLs: got %v", u) + } + if hn := u.Hostname(); hn != "" && hn != "localhost" { + return nil, fmt.Errorf("file URLs must leave host empty or use localhost: got %v", u) + } + + return sr.newFileSinkFromPath(u.Path) +} + +func (sr *sinkRegistry) newFileSinkFromPath(path string) (Sink, error) { + switch path { + case "stdout": + return nopCloserSink{os.Stdout}, nil + case "stderr": + return nopCloserSink{os.Stderr}, nil + } + return sr.openFile(path, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0666) +} + +func normalizeScheme(s string) (string, error) { + // https://tools.ietf.org/html/rfc3986#section-3.1 + s = strings.ToLower(s) + if first := s[0]; 'a' > first || 'z' < first { + return "", errors.New("must start with a letter") + } + for i := 1; i < len(s); i++ { // iterate over bytes, not runes + c := s[i] + switch { + case 'a' <= c && c <= 'z': + continue + case '0' <= c && c <= '9': + continue + case c == '.' 
|| c == '+' || c == '-': + continue + } + return "", fmt.Errorf("may not contain %q", c) + } + return s, nil +} diff --git a/vendor/go.uber.org/zap/stacktrace.go b/vendor/go.uber.org/zap/stacktrace.go new file mode 100644 index 0000000000..817a3bde8b --- /dev/null +++ b/vendor/go.uber.org/zap/stacktrace.go @@ -0,0 +1,176 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "runtime" + "sync" + + "go.uber.org/zap/buffer" + "go.uber.org/zap/internal/bufferpool" +) + +var _stacktracePool = sync.Pool{ + New: func() interface{} { + return &stacktrace{ + storage: make([]uintptr, 64), + } + }, +} + +type stacktrace struct { + pcs []uintptr // program counters; always a subslice of storage + frames *runtime.Frames + + // The size of pcs varies depending on requirements: + // it will be one if the only the first frame was requested, + // and otherwise it will reflect the depth of the call stack. + // + // storage decouples the slice we need (pcs) from the slice we pool. + // We will always allocate a reasonably large storage, but we'll use + // only as much of it as we need. + storage []uintptr +} + +// stacktraceDepth specifies how deep of a stack trace should be captured. +type stacktraceDepth int + +const ( + // stacktraceFirst captures only the first frame. + stacktraceFirst stacktraceDepth = iota + + // stacktraceFull captures the entire call stack, allocating more + // storage for it if needed. + stacktraceFull +) + +// captureStacktrace captures a stack trace of the specified depth, skipping +// the provided number of frames. skip=0 identifies the caller of +// captureStacktrace. +// +// The caller must call Free on the returned stacktrace after using it. +func captureStacktrace(skip int, depth stacktraceDepth) *stacktrace { + stack := _stacktracePool.Get().(*stacktrace) + + switch depth { + case stacktraceFirst: + stack.pcs = stack.storage[:1] + case stacktraceFull: + stack.pcs = stack.storage + } + + // Unlike other "skip"-based APIs, skip=0 identifies runtime.Callers + // itself. +2 to skip captureStacktrace and runtime.Callers. + numFrames := runtime.Callers( + skip+2, + stack.pcs, + ) + + // runtime.Callers truncates the recorded stacktrace if there is no + // room in the provided slice. For the full stack trace, keep expanding + // storage until there are fewer frames than there is room. 
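+ // For example, with the default 64-slot buffer a 200-frame stack is retried
+ // with 128 and then 256 slots before it fits (illustrative numbers).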
+ if depth == stacktraceFull { + pcs := stack.pcs + for numFrames == len(pcs) { + pcs = make([]uintptr, len(pcs)*2) + numFrames = runtime.Callers(skip+2, pcs) + } + + // Discard old storage instead of returning it to the pool. + // This will adjust the pool size over time if stack traces are + // consistently very deep. + stack.storage = pcs + stack.pcs = pcs[:numFrames] + } else { + stack.pcs = stack.pcs[:numFrames] + } + + stack.frames = runtime.CallersFrames(stack.pcs) + return stack +} + +// Free releases resources associated with this stacktrace +// and returns it back to the pool. +func (st *stacktrace) Free() { + st.frames = nil + st.pcs = nil + _stacktracePool.Put(st) +} + +// Count reports the total number of frames in this stacktrace. +// Count DOES NOT change as Next is called. +func (st *stacktrace) Count() int { + return len(st.pcs) +} + +// Next returns the next frame in the stack trace, +// and a boolean indicating whether there are more after it. +func (st *stacktrace) Next() (_ runtime.Frame, more bool) { + return st.frames.Next() +} + +func takeStacktrace(skip int) string { + stack := captureStacktrace(skip+1, stacktraceFull) + defer stack.Free() + + buffer := bufferpool.Get() + defer buffer.Free() + + stackfmt := newStackFormatter(buffer) + stackfmt.FormatStack(stack) + return buffer.String() +} + +// stackFormatter formats a stack trace into a readable string representation. +type stackFormatter struct { + b *buffer.Buffer + nonEmpty bool // whether we've written at least one frame already +} + +// newStackFormatter builds a new stackFormatter. +func newStackFormatter(b *buffer.Buffer) stackFormatter { + return stackFormatter{b: b} +} + +// FormatStack formats all remaining frames in the provided stacktrace -- minus +// the final runtime.main/runtime.goexit frame. +func (sf *stackFormatter) FormatStack(stack *stacktrace) { + // Note: On the last iteration, frames.Next() returns false, with a valid + // frame, but we ignore this frame. The last frame is a runtime frame which + // adds noise, since it's only either runtime.main or runtime.goexit. + for frame, more := stack.Next(); more; frame, more = stack.Next() { + sf.FormatFrame(frame) + } +} + +// FormatFrame formats the given frame. +func (sf *stackFormatter) FormatFrame(frame runtime.Frame) { + if sf.nonEmpty { + sf.b.AppendByte('\n') + } + sf.nonEmpty = true + sf.b.AppendString(frame.Function) + sf.b.AppendByte('\n') + sf.b.AppendByte('\t') + sf.b.AppendString(frame.File) + sf.b.AppendByte(':') + sf.b.AppendInt(int64(frame.Line)) +} diff --git a/vendor/go.uber.org/zap/sugar.go b/vendor/go.uber.org/zap/sugar.go new file mode 100644 index 0000000000..ac387b3e47 --- /dev/null +++ b/vendor/go.uber.org/zap/sugar.go @@ -0,0 +1,416 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software.
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "fmt" + + "go.uber.org/zap/zapcore" + + "go.uber.org/multierr" +) + +const ( + _oddNumberErrMsg = "Ignored key without a value." + _nonStringKeyErrMsg = "Ignored key-value pairs with non-string keys." + _multipleErrMsg = "Multiple errors without a key." +) + +// A SugaredLogger wraps the base Logger functionality in a slower, but less +// verbose, API. Any Logger can be converted to a SugaredLogger with its Sugar +// method. +// +// Unlike the Logger, the SugaredLogger doesn't insist on structured logging. +// For each log level, it exposes four methods: +// +// - methods named after the log level for log.Print-style logging +// - methods ending in "w" for loosely-typed structured logging +// - methods ending in "f" for log.Printf-style logging +// - methods ending in "ln" for log.Println-style logging +// +// For example, the methods for InfoLevel are: +// +// Info(...any) Print-style logging +// Infow(...any) Structured logging (read as "info with") +// Infof(string, ...any) Printf-style logging +// Infoln(...any) Println-style logging +type SugaredLogger struct { + base *Logger +} + +// Desugar unwraps a SugaredLogger, exposing the original Logger. Desugaring +// is quite inexpensive, so it's reasonable for a single application to use +// both Loggers and SugaredLoggers, converting between them on the boundaries +// of performance-sensitive code. +func (s *SugaredLogger) Desugar() *Logger { + base := s.base.clone() + base.callerSkip -= 2 + return base +} + +// Named adds a sub-scope to the logger's name. See Logger.Named for details. +func (s *SugaredLogger) Named(name string) *SugaredLogger { + return &SugaredLogger{base: s.base.Named(name)} +} + +// WithOptions clones the current SugaredLogger, applies the supplied Options, +// and returns the result. It's safe to use concurrently. +func (s *SugaredLogger) WithOptions(opts ...Option) *SugaredLogger { + base := s.base.clone() + for _, opt := range opts { + opt.apply(base) + } + return &SugaredLogger{base: base} +} + +// With adds a variadic number of fields to the logging context. It accepts a +// mix of strongly-typed Field objects and loosely-typed key-value pairs. When +// processing pairs, the first element of the pair is used as the field key +// and the second as the field value. +// +// For example, +// +// sugaredLogger.With( +// "hello", "world", +// "failure", errors.New("oh no"), +// Stack(), +// "count", 42, +// "user", User{Name: "alice"}, +// ) +// +// is the equivalent of +// +// unsugared.With( +// String("hello", "world"), +// String("failure", "oh no"), +// Stack(), +// Int("count", 42), +// Object("user", User{Name: "alice"}), +// ) +// +// Note that the keys in key-value pairs should be strings. In development, +// passing a non-string key panics. In production, the logger is more +// forgiving: a separate error is logged, but the key-value pair is skipped +// and execution continues. Passing an orphaned key triggers similar behavior: +// panics in development and errors in production. 
+func (s *SugaredLogger) With(args ...interface{}) *SugaredLogger { + return &SugaredLogger{base: s.base.With(s.sweetenFields(args)...)} +} + +// Level reports the minimum enabled level for this logger. +// +// For NopLoggers, this is [zapcore.InvalidLevel]. +func (s *SugaredLogger) Level() zapcore.Level { + return zapcore.LevelOf(s.base.core) +} + +// Debug uses fmt.Sprint to construct and log a message. +func (s *SugaredLogger) Debug(args ...interface{}) { + s.log(DebugLevel, "", args, nil) +} + +// Info uses fmt.Sprint to construct and log a message. +func (s *SugaredLogger) Info(args ...interface{}) { + s.log(InfoLevel, "", args, nil) +} + +// Warn uses fmt.Sprint to construct and log a message. +func (s *SugaredLogger) Warn(args ...interface{}) { + s.log(WarnLevel, "", args, nil) +} + +// Error uses fmt.Sprint to construct and log a message. +func (s *SugaredLogger) Error(args ...interface{}) { + s.log(ErrorLevel, "", args, nil) +} + +// DPanic uses fmt.Sprint to construct and log a message. In development, the +// logger then panics. (See DPanicLevel for details.) +func (s *SugaredLogger) DPanic(args ...interface{}) { + s.log(DPanicLevel, "", args, nil) +} + +// Panic uses fmt.Sprint to construct and log a message, then panics. +func (s *SugaredLogger) Panic(args ...interface{}) { + s.log(PanicLevel, "", args, nil) +} + +// Fatal uses fmt.Sprint to construct and log a message, then calls os.Exit. +func (s *SugaredLogger) Fatal(args ...interface{}) { + s.log(FatalLevel, "", args, nil) +} + +// Debugf uses fmt.Sprintf to log a templated message. +func (s *SugaredLogger) Debugf(template string, args ...interface{}) { + s.log(DebugLevel, template, args, nil) +} + +// Infof uses fmt.Sprintf to log a templated message. +func (s *SugaredLogger) Infof(template string, args ...interface{}) { + s.log(InfoLevel, template, args, nil) +} + +// Warnf uses fmt.Sprintf to log a templated message. +func (s *SugaredLogger) Warnf(template string, args ...interface{}) { + s.log(WarnLevel, template, args, nil) +} + +// Errorf uses fmt.Sprintf to log a templated message. +func (s *SugaredLogger) Errorf(template string, args ...interface{}) { + s.log(ErrorLevel, template, args, nil) +} + +// DPanicf uses fmt.Sprintf to log a templated message. In development, the +// logger then panics. (See DPanicLevel for details.) +func (s *SugaredLogger) DPanicf(template string, args ...interface{}) { + s.log(DPanicLevel, template, args, nil) +} + +// Panicf uses fmt.Sprintf to log a templated message, then panics. +func (s *SugaredLogger) Panicf(template string, args ...interface{}) { + s.log(PanicLevel, template, args, nil) +} + +// Fatalf uses fmt.Sprintf to log a templated message, then calls os.Exit. +func (s *SugaredLogger) Fatalf(template string, args ...interface{}) { + s.log(FatalLevel, template, args, nil) +} + +// Debugw logs a message with some additional context. The variadic key-value +// pairs are treated as they are in With. +// +// When debug-level logging is disabled, this is much faster than +// +// s.With(keysAndValues).Debug(msg) +func (s *SugaredLogger) Debugw(msg string, keysAndValues ...interface{}) { + s.log(DebugLevel, msg, nil, keysAndValues) +} + +// Infow logs a message with some additional context. The variadic key-value +// pairs are treated as they are in With. +func (s *SugaredLogger) Infow(msg string, keysAndValues ...interface{}) { + s.log(InfoLevel, msg, nil, keysAndValues) +} + +// Warnw logs a message with some additional context. 
The variadic key-value +// pairs are treated as they are in With. +func (s *SugaredLogger) Warnw(msg string, keysAndValues ...interface{}) { + s.log(WarnLevel, msg, nil, keysAndValues) +} + +// Errorw logs a message with some additional context. The variadic key-value +// pairs are treated as they are in With. +func (s *SugaredLogger) Errorw(msg string, keysAndValues ...interface{}) { + s.log(ErrorLevel, msg, nil, keysAndValues) +} + +// DPanicw logs a message with some additional context. In development, the +// logger then panics. (See DPanicLevel for details.) The variadic key-value +// pairs are treated as they are in With. +func (s *SugaredLogger) DPanicw(msg string, keysAndValues ...interface{}) { + s.log(DPanicLevel, msg, nil, keysAndValues) +} + +// Panicw logs a message with some additional context, then panics. The +// variadic key-value pairs are treated as they are in With. +func (s *SugaredLogger) Panicw(msg string, keysAndValues ...interface{}) { + s.log(PanicLevel, msg, nil, keysAndValues) +} + +// Fatalw logs a message with some additional context, then calls os.Exit. The +// variadic key-value pairs are treated as they are in With. +func (s *SugaredLogger) Fatalw(msg string, keysAndValues ...interface{}) { + s.log(FatalLevel, msg, nil, keysAndValues) +} + +// Debugln uses fmt.Sprintln to construct and log a message. +func (s *SugaredLogger) Debugln(args ...interface{}) { + s.logln(DebugLevel, args, nil) +} + +// Infoln uses fmt.Sprintln to construct and log a message. +func (s *SugaredLogger) Infoln(args ...interface{}) { + s.logln(InfoLevel, args, nil) +} + +// Warnln uses fmt.Sprintln to construct and log a message. +func (s *SugaredLogger) Warnln(args ...interface{}) { + s.logln(WarnLevel, args, nil) +} + +// Errorln uses fmt.Sprintln to construct and log a message. +func (s *SugaredLogger) Errorln(args ...interface{}) { + s.logln(ErrorLevel, args, nil) +} + +// DPanicln uses fmt.Sprintln to construct and log a message. In development, the +// logger then panics. (See DPanicLevel for details.) +func (s *SugaredLogger) DPanicln(args ...interface{}) { + s.logln(DPanicLevel, args, nil) +} + +// Panicln uses fmt.Sprintln to construct and log a message, then panics. +func (s *SugaredLogger) Panicln(args ...interface{}) { + s.logln(PanicLevel, args, nil) +} + +// Fatalln uses fmt.Sprintln to construct and log a message, then calls os.Exit. +func (s *SugaredLogger) Fatalln(args ...interface{}) { + s.logln(FatalLevel, args, nil) +} + +// Sync flushes any buffered log entries. +func (s *SugaredLogger) Sync() error { + return s.base.Sync() +} + +// log message with Sprint, Sprintf, or neither. +func (s *SugaredLogger) log(lvl zapcore.Level, template string, fmtArgs []interface{}, context []interface{}) { + // If logging at this level is completely disabled, skip the overhead of + // string formatting. + if lvl < DPanicLevel && !s.base.Core().Enabled(lvl) { + return + } + + msg := getMessage(template, fmtArgs) + if ce := s.base.Check(lvl, msg); ce != nil { + ce.Write(s.sweetenFields(context)...) + } +} + +// logln message with Sprintln +func (s *SugaredLogger) logln(lvl zapcore.Level, fmtArgs []interface{}, context []interface{}) { + if lvl < DPanicLevel && !s.base.Core().Enabled(lvl) { + return + } + + msg := getMessageln(fmtArgs) + if ce := s.base.Check(lvl, msg); ce != nil { + ce.Write(s.sweetenFields(context)...) + } +} + +// getMessage format with Sprint, Sprintf, or neither. 
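+// For example, getMessage("", []interface{}{"hi"}) returns "hi", while
+// getMessage("count=%d", []interface{}{42}) returns "count=42" (illustrative
+// inputs only).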
+func getMessage(template string, fmtArgs []interface{}) string { + if len(fmtArgs) == 0 { + return template + } + + if template != "" { + return fmt.Sprintf(template, fmtArgs...) + } + + if len(fmtArgs) == 1 { + if str, ok := fmtArgs[0].(string); ok { + return str + } + } + return fmt.Sprint(fmtArgs...) +} + +// getMessageln format with Sprintln. +func getMessageln(fmtArgs []interface{}) string { + msg := fmt.Sprintln(fmtArgs...) + return msg[:len(msg)-1] +} + +func (s *SugaredLogger) sweetenFields(args []interface{}) []Field { + if len(args) == 0 { + return nil + } + + var ( + // Allocate enough space for the worst case; if users pass only structured + // fields, we shouldn't penalize them with extra allocations. + fields = make([]Field, 0, len(args)) + invalid invalidPairs + seenError bool + ) + + for i := 0; i < len(args); { + // This is a strongly-typed field. Consume it and move on. + if f, ok := args[i].(Field); ok { + fields = append(fields, f) + i++ + continue + } + + // If it is an error, consume it and move on. + if err, ok := args[i].(error); ok { + if !seenError { + seenError = true + fields = append(fields, Error(err)) + } else { + s.base.Error(_multipleErrMsg, Error(err)) + } + i++ + continue + } + + // Make sure this element isn't a dangling key. + if i == len(args)-1 { + s.base.Error(_oddNumberErrMsg, Any("ignored", args[i])) + break + } + + // Consume this value and the next, treating them as a key-value pair. If the + // key isn't a string, add this pair to the slice of invalid pairs. + key, val := args[i], args[i+1] + if keyStr, ok := key.(string); !ok { + // Subsequent errors are likely, so allocate once up front. + if cap(invalid) == 0 { + invalid = make(invalidPairs, 0, len(args)/2) + } + invalid = append(invalid, invalidPair{i, key, val}) + } else { + fields = append(fields, Any(keyStr, val)) + } + i += 2 + } + + // If we encountered any invalid key-value pairs, log an error. + if len(invalid) > 0 { + s.base.Error(_nonStringKeyErrMsg, Array("invalid", invalid)) + } + return fields +} + +type invalidPair struct { + position int + key, value interface{} +} + +func (p invalidPair) MarshalLogObject(enc zapcore.ObjectEncoder) error { + enc.AddInt64("position", int64(p.position)) + Any("key", p.key).AddTo(enc) + Any("value", p.value).AddTo(enc) + return nil +} + +type invalidPairs []invalidPair + +func (ps invalidPairs) MarshalLogArray(enc zapcore.ArrayEncoder) error { + var err error + for i := range ps { + err = multierr.Append(err, enc.AppendObject(ps[i])) + } + return err +} diff --git a/vendor/go.uber.org/zap/time.go b/vendor/go.uber.org/zap/time.go new file mode 100644 index 0000000000..c5a1f16225 --- /dev/null +++ b/vendor/go.uber.org/zap/time.go @@ -0,0 +1,27 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import "time" + +func timeToMillis(t time.Time) int64 { + return t.UnixNano() / int64(time.Millisecond) +} diff --git a/vendor/go.uber.org/zap/writer.go b/vendor/go.uber.org/zap/writer.go new file mode 100644 index 0000000000..f08728e1ec --- /dev/null +++ b/vendor/go.uber.org/zap/writer.go @@ -0,0 +1,98 @@ +// Copyright (c) 2016-2022 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "fmt" + "io" + + "go.uber.org/zap/zapcore" + + "go.uber.org/multierr" +) + +// Open is a high-level wrapper that takes a variadic number of URLs, opens or +// creates each of the specified resources, and combines them into a locked +// WriteSyncer. It also returns any error encountered and a function to close +// any opened files. +// +// Passing no URLs returns a no-op WriteSyncer. Zap handles URLs without a +// scheme and URLs with the "file" scheme. Third-party code may register +// factories for other schemes using RegisterSink. +// +// URLs with the "file" scheme must use absolute paths on the local +// filesystem. No user, password, port, fragments, or query parameters are +// allowed, and the hostname must be empty or "localhost". +// +// Since it's common to write logs to the local filesystem, URLs without a +// scheme (e.g., "/var/log/foo.log") are treated as local file paths. Without +// a scheme, the special paths "stdout" and "stderr" are interpreted as +// os.Stdout and os.Stderr. When specified without a scheme, relative file +// paths also work. +func Open(paths ...string) (zapcore.WriteSyncer, func(), error) { + writers, close, err := open(paths) + if err != nil { + return nil, nil, err + } + + writer := CombineWriteSyncers(writers...) 
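+ // CombineWriteSyncers has already wrapped the combined writer in a mutex,
+ // so callers can hand it straight to zapcore.NewCore.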
+ return writer, close, nil +} + +func open(paths []string) ([]zapcore.WriteSyncer, func(), error) { + writers := make([]zapcore.WriteSyncer, 0, len(paths)) + closers := make([]io.Closer, 0, len(paths)) + close := func() { + for _, c := range closers { + c.Close() + } + } + + var openErr error + for _, path := range paths { + sink, err := _sinkRegistry.newSink(path) + if err != nil { + openErr = multierr.Append(openErr, fmt.Errorf("open sink %q: %w", path, err)) + continue + } + writers = append(writers, sink) + closers = append(closers, sink) + } + if openErr != nil { + close() + return nil, nil, openErr + } + + return writers, close, nil +} + +// CombineWriteSyncers is a utility that combines multiple WriteSyncers into a +// single, locked WriteSyncer. If no inputs are supplied, it returns a no-op +// WriteSyncer. +// +// It's provided purely as a convenience; the result is no different from +// using zapcore.NewMultiWriteSyncer and zapcore.Lock individually. +func CombineWriteSyncers(writers ...zapcore.WriteSyncer) zapcore.WriteSyncer { + if len(writers) == 0 { + return zapcore.AddSync(io.Discard) + } + return zapcore.Lock(zapcore.NewMultiWriteSyncer(writers...)) +} diff --git a/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go b/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go new file mode 100644 index 0000000000..a40e93b3ec --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go @@ -0,0 +1,219 @@ +// Copyright (c) 2021 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "bufio" + "sync" + "time" + + "go.uber.org/multierr" +) + +const ( + // _defaultBufferSize specifies the default size used by Buffer. + _defaultBufferSize = 256 * 1024 // 256 kB + + // _defaultFlushInterval specifies the default flush interval for + // Buffer. + _defaultFlushInterval = 30 * time.Second +) + +// A BufferedWriteSyncer is a WriteSyncer that buffers writes in-memory before +// flushing them to a wrapped WriteSyncer after reaching some limit, or at some +// fixed interval--whichever comes first. +// +// BufferedWriteSyncer is safe for concurrent use. You don't need to use +// zapcore.Lock for WriteSyncers with BufferedWriteSyncer. +// +// To set up a BufferedWriteSyncer, construct a WriteSyncer for your log +// destination (*os.File is a valid WriteSyncer), wrap it with +// BufferedWriteSyncer, and defer a Stop() call for when you no longer need the +// object. +// +// func main() { +// ws := ... 
// your log destination +// bws := &zapcore.BufferedWriteSyncer{WS: ws} +// defer bws.Stop() +// +// // ... +// core := zapcore.NewCore(enc, bws, lvl) +// logger := zap.New(core) +// +// // ... +// } +// +// By default, a BufferedWriteSyncer will buffer up to 256 kilobytes of logs, +// waiting at most 30 seconds between flushes. +// You can customize these parameters by setting the Size or FlushInterval +// fields. +// For example, the following buffers up to 512 kB of logs before flushing them +// to Stderr, with a maximum of one minute between each flush. +// +// ws := &BufferedWriteSyncer{ +// WS: os.Stderr, +// Size: 512 * 1024, // 512 kB +// FlushInterval: time.Minute, +// } +// defer ws.Stop() +type BufferedWriteSyncer struct { + // WS is the WriteSyncer around which BufferedWriteSyncer will buffer + // writes. + // + // This field is required. + WS WriteSyncer + + // Size specifies the maximum amount of data the writer will buffered + // before flushing. + // + // Defaults to 256 kB if unspecified. + Size int + + // FlushInterval specifies how often the writer should flush data if + // there have been no writes. + // + // Defaults to 30 seconds if unspecified. + FlushInterval time.Duration + + // Clock, if specified, provides control of the source of time for the + // writer. + // + // Defaults to the system clock. + Clock Clock + + // unexported fields for state + mu sync.Mutex + initialized bool // whether initialize() has run + stopped bool // whether Stop() has run + writer *bufio.Writer + ticker *time.Ticker + stop chan struct{} // closed when flushLoop should stop + done chan struct{} // closed when flushLoop has stopped +} + +func (s *BufferedWriteSyncer) initialize() { + size := s.Size + if size == 0 { + size = _defaultBufferSize + } + + flushInterval := s.FlushInterval + if flushInterval == 0 { + flushInterval = _defaultFlushInterval + } + + if s.Clock == nil { + s.Clock = DefaultClock + } + + s.ticker = s.Clock.NewTicker(flushInterval) + s.writer = bufio.NewWriterSize(s.WS, size) + s.stop = make(chan struct{}) + s.done = make(chan struct{}) + s.initialized = true + go s.flushLoop() +} + +// Write writes log data into buffer syncer directly, multiple Write calls will be batched, +// and log data will be flushed to disk when the buffer is full or periodically. +func (s *BufferedWriteSyncer) Write(bs []byte) (int, error) { + s.mu.Lock() + defer s.mu.Unlock() + + if !s.initialized { + s.initialize() + } + + // To avoid partial writes from being flushed, we manually flush the existing buffer if: + // * The current write doesn't fit into the buffer fully, and + // * The buffer is not empty (since bufio will not split large writes when the buffer is empty) + if len(bs) > s.writer.Available() && s.writer.Buffered() > 0 { + if err := s.writer.Flush(); err != nil { + return 0, err + } + } + + return s.writer.Write(bs) +} + +// Sync flushes buffered log data into disk directly. +func (s *BufferedWriteSyncer) Sync() error { + s.mu.Lock() + defer s.mu.Unlock() + + var err error + if s.initialized { + err = s.writer.Flush() + } + + return multierr.Append(err, s.WS.Sync()) +} + +// flushLoop flushes the buffer at the configured interval until Stop is +// called. 
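+// It exits when the stop channel is closed and signals completion by closing
+// done, which Stop waits on before flushing the final buffer contents.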
+func (s *BufferedWriteSyncer) flushLoop() { + defer close(s.done) + + for { + select { + case <-s.ticker.C: + // we just simply ignore error here + // because the underlying bufio writer stores any errors + // and we return any error from Sync() as part of the close + _ = s.Sync() + case <-s.stop: + return + } + } +} + +// Stop closes the buffer, cleans up background goroutines, and flushes +// remaining unwritten data. +func (s *BufferedWriteSyncer) Stop() (err error) { + var stopped bool + + // Critical section. + func() { + s.mu.Lock() + defer s.mu.Unlock() + + if !s.initialized { + return + } + + stopped = s.stopped + if stopped { + return + } + s.stopped = true + + s.ticker.Stop() + close(s.stop) // tell flushLoop to stop + <-s.done // and wait until it has + }() + + // Don't call Sync on consecutive Stops. + if !stopped { + err = s.Sync() + } + + return err +} diff --git a/vendor/go.uber.org/zap/zapcore/clock.go b/vendor/go.uber.org/zap/zapcore/clock.go new file mode 100644 index 0000000000..422fd82a6b --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/clock.go @@ -0,0 +1,48 @@ +// Copyright (c) 2021 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import "time" + +// DefaultClock is the default clock used by Zap in operations that require +// time. This clock uses the system clock for all operations. +var DefaultClock = systemClock{} + +// Clock is a source of time for logged entries. +type Clock interface { + // Now returns the current local time. + Now() time.Time + + // NewTicker returns *time.Ticker that holds a channel + // that delivers "ticks" of a clock. + NewTicker(time.Duration) *time.Ticker +} + +// systemClock implements default Clock that uses system time. +type systemClock struct{} + +func (systemClock) Now() time.Time { + return time.Now() +} + +func (systemClock) NewTicker(duration time.Duration) *time.Ticker { + return time.NewTicker(duration) +} diff --git a/vendor/go.uber.org/zap/zapcore/console_encoder.go b/vendor/go.uber.org/zap/zapcore/console_encoder.go new file mode 100644 index 0000000000..1aa5dc3646 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/console_encoder.go @@ -0,0 +1,157 @@ +// Copyright (c) 2016 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "fmt" + "sync" + + "go.uber.org/zap/buffer" + "go.uber.org/zap/internal/bufferpool" +) + +var _sliceEncoderPool = sync.Pool{ + New: func() interface{} { + return &sliceArrayEncoder{elems: make([]interface{}, 0, 2)} + }, +} + +func getSliceEncoder() *sliceArrayEncoder { + return _sliceEncoderPool.Get().(*sliceArrayEncoder) +} + +func putSliceEncoder(e *sliceArrayEncoder) { + e.elems = e.elems[:0] + _sliceEncoderPool.Put(e) +} + +type consoleEncoder struct { + *jsonEncoder +} + +// NewConsoleEncoder creates an encoder whose output is designed for human - +// rather than machine - consumption. It serializes the core log entry data +// (message, level, timestamp, etc.) in a plain-text format and leaves the +// structured context as JSON. +// +// Note that although the console encoder doesn't use the keys specified in the +// encoder configuration, it will omit any element whose key is set to the empty +// string. +func NewConsoleEncoder(cfg EncoderConfig) Encoder { + if cfg.ConsoleSeparator == "" { + // Use a default delimiter of '\t' for backwards compatibility + cfg.ConsoleSeparator = "\t" + } + return consoleEncoder{newJSONEncoder(cfg, true)} +} + +func (c consoleEncoder) Clone() Encoder { + return consoleEncoder{c.jsonEncoder.Clone().(*jsonEncoder)} +} + +func (c consoleEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer, error) { + line := bufferpool.Get() + + // We don't want the entry's metadata to be quoted and escaped (if it's + // encoded as strings), which means that we can't use the JSON encoder. The + // simplest option is to use the memory encoder and fmt.Fprint. + // + // If this ever becomes a performance bottleneck, we can implement + // ArrayEncoder for our plain-text format. + arr := getSliceEncoder() + if c.TimeKey != "" && c.EncodeTime != nil { + c.EncodeTime(ent.Time, arr) + } + if c.LevelKey != "" && c.EncodeLevel != nil { + c.EncodeLevel(ent.Level, arr) + } + if ent.LoggerName != "" && c.NameKey != "" { + nameEncoder := c.EncodeName + + if nameEncoder == nil { + // Fall back to FullNameEncoder for backward compatibility. 
+ nameEncoder = FullNameEncoder + } + + nameEncoder(ent.LoggerName, arr) + } + if ent.Caller.Defined { + if c.CallerKey != "" && c.EncodeCaller != nil { + c.EncodeCaller(ent.Caller, arr) + } + if c.FunctionKey != "" { + arr.AppendString(ent.Caller.Function) + } + } + for i := range arr.elems { + if i > 0 { + line.AppendString(c.ConsoleSeparator) + } + fmt.Fprint(line, arr.elems[i]) + } + putSliceEncoder(arr) + + // Add the message itself. + if c.MessageKey != "" { + c.addSeparatorIfNecessary(line) + line.AppendString(ent.Message) + } + + // Add any structured context. + c.writeContext(line, fields) + + // If there's no stacktrace key, honor that; this allows users to force + // single-line output. + if ent.Stack != "" && c.StacktraceKey != "" { + line.AppendByte('\n') + line.AppendString(ent.Stack) + } + + line.AppendString(c.LineEnding) + return line, nil +} + +func (c consoleEncoder) writeContext(line *buffer.Buffer, extra []Field) { + context := c.jsonEncoder.Clone().(*jsonEncoder) + defer func() { + // putJSONEncoder assumes the buffer is still used, but we write out the buffer so + // we can free it. + context.buf.Free() + putJSONEncoder(context) + }() + + addFields(context, extra) + context.closeOpenNamespaces() + if context.buf.Len() == 0 { + return + } + + c.addSeparatorIfNecessary(line) + line.AppendByte('{') + line.Write(context.buf.Bytes()) + line.AppendByte('}') +} + +func (c consoleEncoder) addSeparatorIfNecessary(line *buffer.Buffer) { + if line.Len() > 0 { + line.AppendString(c.ConsoleSeparator) + } +} diff --git a/vendor/go.uber.org/zap/zapcore/core.go b/vendor/go.uber.org/zap/zapcore/core.go new file mode 100644 index 0000000000..9dfd64051f --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/core.go @@ -0,0 +1,122 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +// Core is a minimal, fast logger interface. It's designed for library authors +// to wrap in a more user-friendly API. +type Core interface { + LevelEnabler + + // With adds structured context to the Core. + With([]Field) Core + // Check determines whether the supplied Entry should be logged (using the + // embedded LevelEnabler and possibly some extra logic). If the entry + // should be logged, the Core adds itself to the CheckedEntry and returns + // the result. + // + // Callers must use Check before calling Write. 
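+ //
+ // A sketch of that pattern (names are illustrative):
+ //
+ //	if ce := core.Check(ent, nil); ce != nil {
+ //		ce.Write(fields...)
+ //	}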
+ Check(Entry, *CheckedEntry) *CheckedEntry + // Write serializes the Entry and any Fields supplied at the log site and + // writes them to their destination. + // + // If called, Write should always log the Entry and Fields; it should not + // replicate the logic of Check. + Write(Entry, []Field) error + // Sync flushes buffered logs (if any). + Sync() error +} + +type nopCore struct{} + +// NewNopCore returns a no-op Core. +func NewNopCore() Core { return nopCore{} } +func (nopCore) Enabled(Level) bool { return false } +func (n nopCore) With([]Field) Core { return n } +func (nopCore) Check(_ Entry, ce *CheckedEntry) *CheckedEntry { return ce } +func (nopCore) Write(Entry, []Field) error { return nil } +func (nopCore) Sync() error { return nil } + +// NewCore creates a Core that writes logs to a WriteSyncer. +func NewCore(enc Encoder, ws WriteSyncer, enab LevelEnabler) Core { + return &ioCore{ + LevelEnabler: enab, + enc: enc, + out: ws, + } +} + +type ioCore struct { + LevelEnabler + enc Encoder + out WriteSyncer +} + +var ( + _ Core = (*ioCore)(nil) + _ leveledEnabler = (*ioCore)(nil) +) + +func (c *ioCore) Level() Level { + return LevelOf(c.LevelEnabler) +} + +func (c *ioCore) With(fields []Field) Core { + clone := c.clone() + addFields(clone.enc, fields) + return clone +} + +func (c *ioCore) Check(ent Entry, ce *CheckedEntry) *CheckedEntry { + if c.Enabled(ent.Level) { + return ce.AddCore(ent, c) + } + return ce +} + +func (c *ioCore) Write(ent Entry, fields []Field) error { + buf, err := c.enc.EncodeEntry(ent, fields) + if err != nil { + return err + } + _, err = c.out.Write(buf.Bytes()) + buf.Free() + if err != nil { + return err + } + if ent.Level > ErrorLevel { + // Since we may be crashing the program, sync the output. Ignore Sync + // errors, pending a clean solution to issue #370. + c.Sync() + } + return nil +} + +func (c *ioCore) Sync() error { + return c.out.Sync() +} + +func (c *ioCore) clone() *ioCore { + return &ioCore{ + LevelEnabler: c.LevelEnabler, + enc: c.enc.Clone(), + out: c.out, + } +} diff --git a/vendor/go.uber.org/zap/zapcore/doc.go b/vendor/go.uber.org/zap/zapcore/doc.go new file mode 100644 index 0000000000..31000e91f7 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/doc.go @@ -0,0 +1,24 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package zapcore defines and implements the low-level interfaces upon which +// zap is built. 
By providing alternate implementations of these interfaces, +// external packages can extend zap's capabilities. +package zapcore // import "go.uber.org/zap/zapcore" diff --git a/vendor/go.uber.org/zap/zapcore/encoder.go b/vendor/go.uber.org/zap/zapcore/encoder.go new file mode 100644 index 0000000000..5769ff3e4e --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/encoder.go @@ -0,0 +1,451 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "encoding/json" + "io" + "time" + + "go.uber.org/zap/buffer" +) + +// DefaultLineEnding defines the default line ending when writing logs. +// Alternate line endings specified in EncoderConfig can override this +// behavior. +const DefaultLineEnding = "\n" + +// OmitKey defines the key to use when callers want to remove a key from log output. +const OmitKey = "" + +// A LevelEncoder serializes a Level to a primitive type. +type LevelEncoder func(Level, PrimitiveArrayEncoder) + +// LowercaseLevelEncoder serializes a Level to a lowercase string. For example, +// InfoLevel is serialized to "info". +func LowercaseLevelEncoder(l Level, enc PrimitiveArrayEncoder) { + enc.AppendString(l.String()) +} + +// LowercaseColorLevelEncoder serializes a Level to a lowercase string and adds coloring. +// For example, InfoLevel is serialized to "info" and colored blue. +func LowercaseColorLevelEncoder(l Level, enc PrimitiveArrayEncoder) { + s, ok := _levelToLowercaseColorString[l] + if !ok { + s = _unknownLevelColor.Add(l.String()) + } + enc.AppendString(s) +} + +// CapitalLevelEncoder serializes a Level to an all-caps string. For example, +// InfoLevel is serialized to "INFO". +func CapitalLevelEncoder(l Level, enc PrimitiveArrayEncoder) { + enc.AppendString(l.CapitalString()) +} + +// CapitalColorLevelEncoder serializes a Level to an all-caps string and adds color. +// For example, InfoLevel is serialized to "INFO" and colored blue. +func CapitalColorLevelEncoder(l Level, enc PrimitiveArrayEncoder) { + s, ok := _levelToCapitalColorString[l] + if !ok { + s = _unknownLevelColor.Add(l.CapitalString()) + } + enc.AppendString(s) +} + +// UnmarshalText unmarshals text to a LevelEncoder. "capital" is unmarshaled to +// CapitalLevelEncoder, "coloredCapital" is unmarshaled to CapitalColorLevelEncoder, +// "colored" is unmarshaled to LowercaseColorLevelEncoder, and anything else +// is unmarshaled to LowercaseLevelEncoder. 
+func (e *LevelEncoder) UnmarshalText(text []byte) error { + switch string(text) { + case "capital": + *e = CapitalLevelEncoder + case "capitalColor": + *e = CapitalColorLevelEncoder + case "color": + *e = LowercaseColorLevelEncoder + default: + *e = LowercaseLevelEncoder + } + return nil +} + +// A TimeEncoder serializes a time.Time to a primitive type. +type TimeEncoder func(time.Time, PrimitiveArrayEncoder) + +// EpochTimeEncoder serializes a time.Time to a floating-point number of seconds +// since the Unix epoch. +func EpochTimeEncoder(t time.Time, enc PrimitiveArrayEncoder) { + nanos := t.UnixNano() + sec := float64(nanos) / float64(time.Second) + enc.AppendFloat64(sec) +} + +// EpochMillisTimeEncoder serializes a time.Time to a floating-point number of +// milliseconds since the Unix epoch. +func EpochMillisTimeEncoder(t time.Time, enc PrimitiveArrayEncoder) { + nanos := t.UnixNano() + millis := float64(nanos) / float64(time.Millisecond) + enc.AppendFloat64(millis) +} + +// EpochNanosTimeEncoder serializes a time.Time to an integer number of +// nanoseconds since the Unix epoch. +func EpochNanosTimeEncoder(t time.Time, enc PrimitiveArrayEncoder) { + enc.AppendInt64(t.UnixNano()) +} + +func encodeTimeLayout(t time.Time, layout string, enc PrimitiveArrayEncoder) { + type appendTimeEncoder interface { + AppendTimeLayout(time.Time, string) + } + + if enc, ok := enc.(appendTimeEncoder); ok { + enc.AppendTimeLayout(t, layout) + return + } + + enc.AppendString(t.Format(layout)) +} + +// ISO8601TimeEncoder serializes a time.Time to an ISO8601-formatted string +// with millisecond precision. +// +// If enc supports AppendTimeLayout(t time.Time,layout string), it's used +// instead of appending a pre-formatted string value. +func ISO8601TimeEncoder(t time.Time, enc PrimitiveArrayEncoder) { + encodeTimeLayout(t, "2006-01-02T15:04:05.000Z0700", enc) +} + +// RFC3339TimeEncoder serializes a time.Time to an RFC3339-formatted string. +// +// If enc supports AppendTimeLayout(t time.Time,layout string), it's used +// instead of appending a pre-formatted string value. +func RFC3339TimeEncoder(t time.Time, enc PrimitiveArrayEncoder) { + encodeTimeLayout(t, time.RFC3339, enc) +} + +// RFC3339NanoTimeEncoder serializes a time.Time to an RFC3339-formatted string +// with nanosecond precision. +// +// If enc supports AppendTimeLayout(t time.Time,layout string), it's used +// instead of appending a pre-formatted string value. +func RFC3339NanoTimeEncoder(t time.Time, enc PrimitiveArrayEncoder) { + encodeTimeLayout(t, time.RFC3339Nano, enc) +} + +// TimeEncoderOfLayout returns TimeEncoder which serializes a time.Time using +// given layout. +func TimeEncoderOfLayout(layout string) TimeEncoder { + return func(t time.Time, enc PrimitiveArrayEncoder) { + encodeTimeLayout(t, layout, enc) + } +} + +// UnmarshalText unmarshals text to a TimeEncoder. +// "rfc3339nano" and "RFC3339Nano" are unmarshaled to RFC3339NanoTimeEncoder. +// "rfc3339" and "RFC3339" are unmarshaled to RFC3339TimeEncoder. +// "iso8601" and "ISO8601" are unmarshaled to ISO8601TimeEncoder. +// "millis" is unmarshaled to EpochMillisTimeEncoder. +// "nanos" is unmarshaled to EpochNanosEncoder. +// Anything else is unmarshaled to EpochTimeEncoder. 
+func (e *TimeEncoder) UnmarshalText(text []byte) error { + switch string(text) { + case "rfc3339nano", "RFC3339Nano": + *e = RFC3339NanoTimeEncoder + case "rfc3339", "RFC3339": + *e = RFC3339TimeEncoder + case "iso8601", "ISO8601": + *e = ISO8601TimeEncoder + case "millis": + *e = EpochMillisTimeEncoder + case "nanos": + *e = EpochNanosTimeEncoder + default: + *e = EpochTimeEncoder + } + return nil +} + +// UnmarshalYAML unmarshals YAML to a TimeEncoder. +// If value is an object with a "layout" field, it will be unmarshaled to TimeEncoder with given layout. +// +// timeEncoder: +// layout: 06/01/02 03:04pm +// +// If value is string, it uses UnmarshalText. +// +// timeEncoder: iso8601 +func (e *TimeEncoder) UnmarshalYAML(unmarshal func(interface{}) error) error { + var o struct { + Layout string `json:"layout" yaml:"layout"` + } + if err := unmarshal(&o); err == nil { + *e = TimeEncoderOfLayout(o.Layout) + return nil + } + + var s string + if err := unmarshal(&s); err != nil { + return err + } + return e.UnmarshalText([]byte(s)) +} + +// UnmarshalJSON unmarshals JSON to a TimeEncoder as same way UnmarshalYAML does. +func (e *TimeEncoder) UnmarshalJSON(data []byte) error { + return e.UnmarshalYAML(func(v interface{}) error { + return json.Unmarshal(data, v) + }) +} + +// A DurationEncoder serializes a time.Duration to a primitive type. +type DurationEncoder func(time.Duration, PrimitiveArrayEncoder) + +// SecondsDurationEncoder serializes a time.Duration to a floating-point number of seconds elapsed. +func SecondsDurationEncoder(d time.Duration, enc PrimitiveArrayEncoder) { + enc.AppendFloat64(float64(d) / float64(time.Second)) +} + +// NanosDurationEncoder serializes a time.Duration to an integer number of +// nanoseconds elapsed. +func NanosDurationEncoder(d time.Duration, enc PrimitiveArrayEncoder) { + enc.AppendInt64(int64(d)) +} + +// MillisDurationEncoder serializes a time.Duration to an integer number of +// milliseconds elapsed. +func MillisDurationEncoder(d time.Duration, enc PrimitiveArrayEncoder) { + enc.AppendInt64(d.Nanoseconds() / 1e6) +} + +// StringDurationEncoder serializes a time.Duration using its built-in String +// method. +func StringDurationEncoder(d time.Duration, enc PrimitiveArrayEncoder) { + enc.AppendString(d.String()) +} + +// UnmarshalText unmarshals text to a DurationEncoder. "string" is unmarshaled +// to StringDurationEncoder, and anything else is unmarshaled to +// NanosDurationEncoder. +func (e *DurationEncoder) UnmarshalText(text []byte) error { + switch string(text) { + case "string": + *e = StringDurationEncoder + case "nanos": + *e = NanosDurationEncoder + case "ms": + *e = MillisDurationEncoder + default: + *e = SecondsDurationEncoder + } + return nil +} + +// A CallerEncoder serializes an EntryCaller to a primitive type. +type CallerEncoder func(EntryCaller, PrimitiveArrayEncoder) + +// FullCallerEncoder serializes a caller in /full/path/to/package/file:line +// format. +func FullCallerEncoder(caller EntryCaller, enc PrimitiveArrayEncoder) { + // TODO: consider using a byte-oriented API to save an allocation. + enc.AppendString(caller.String()) +} + +// ShortCallerEncoder serializes a caller in package/file:line format, trimming +// all but the final directory from the full path. +func ShortCallerEncoder(caller EntryCaller, enc PrimitiveArrayEncoder) { + // TODO: consider using a byte-oriented API to save an allocation. + enc.AppendString(caller.TrimmedPath()) +} + +// UnmarshalText unmarshals text to a CallerEncoder. 
"full" is unmarshaled to +// FullCallerEncoder and anything else is unmarshaled to ShortCallerEncoder. +func (e *CallerEncoder) UnmarshalText(text []byte) error { + switch string(text) { + case "full": + *e = FullCallerEncoder + default: + *e = ShortCallerEncoder + } + return nil +} + +// A NameEncoder serializes a period-separated logger name to a primitive +// type. +type NameEncoder func(string, PrimitiveArrayEncoder) + +// FullNameEncoder serializes the logger name as-is. +func FullNameEncoder(loggerName string, enc PrimitiveArrayEncoder) { + enc.AppendString(loggerName) +} + +// UnmarshalText unmarshals text to a NameEncoder. Currently, everything is +// unmarshaled to FullNameEncoder. +func (e *NameEncoder) UnmarshalText(text []byte) error { + switch string(text) { + case "full": + *e = FullNameEncoder + default: + *e = FullNameEncoder + } + return nil +} + +// An EncoderConfig allows users to configure the concrete encoders supplied by +// zapcore. +type EncoderConfig struct { + // Set the keys used for each log entry. If any key is empty, that portion + // of the entry is omitted. + MessageKey string `json:"messageKey" yaml:"messageKey"` + LevelKey string `json:"levelKey" yaml:"levelKey"` + TimeKey string `json:"timeKey" yaml:"timeKey"` + NameKey string `json:"nameKey" yaml:"nameKey"` + CallerKey string `json:"callerKey" yaml:"callerKey"` + FunctionKey string `json:"functionKey" yaml:"functionKey"` + StacktraceKey string `json:"stacktraceKey" yaml:"stacktraceKey"` + SkipLineEnding bool `json:"skipLineEnding" yaml:"skipLineEnding"` + LineEnding string `json:"lineEnding" yaml:"lineEnding"` + // Configure the primitive representations of common complex types. For + // example, some users may want all time.Times serialized as floating-point + // seconds since epoch, while others may prefer ISO8601 strings. + EncodeLevel LevelEncoder `json:"levelEncoder" yaml:"levelEncoder"` + EncodeTime TimeEncoder `json:"timeEncoder" yaml:"timeEncoder"` + EncodeDuration DurationEncoder `json:"durationEncoder" yaml:"durationEncoder"` + EncodeCaller CallerEncoder `json:"callerEncoder" yaml:"callerEncoder"` + // Unlike the other primitive type encoders, EncodeName is optional. The + // zero value falls back to FullNameEncoder. + EncodeName NameEncoder `json:"nameEncoder" yaml:"nameEncoder"` + // Configure the encoder for interface{} type objects. + // If not provided, objects are encoded using json.Encoder + NewReflectedEncoder func(io.Writer) ReflectedEncoder `json:"-" yaml:"-"` + // Configures the field separator used by the console encoder. Defaults + // to tab. + ConsoleSeparator string `json:"consoleSeparator" yaml:"consoleSeparator"` +} + +// ObjectEncoder is a strongly-typed, encoding-agnostic interface for adding a +// map- or struct-like object to the logging context. Like maps, ObjectEncoders +// aren't safe for concurrent use (though typical use shouldn't require locks). +type ObjectEncoder interface { + // Logging-specific marshalers. + AddArray(key string, marshaler ArrayMarshaler) error + AddObject(key string, marshaler ObjectMarshaler) error + + // Built-in types. 
+ AddBinary(key string, value []byte) // for arbitrary bytes + AddByteString(key string, value []byte) // for UTF-8 encoded bytes + AddBool(key string, value bool) + AddComplex128(key string, value complex128) + AddComplex64(key string, value complex64) + AddDuration(key string, value time.Duration) + AddFloat64(key string, value float64) + AddFloat32(key string, value float32) + AddInt(key string, value int) + AddInt64(key string, value int64) + AddInt32(key string, value int32) + AddInt16(key string, value int16) + AddInt8(key string, value int8) + AddString(key, value string) + AddTime(key string, value time.Time) + AddUint(key string, value uint) + AddUint64(key string, value uint64) + AddUint32(key string, value uint32) + AddUint16(key string, value uint16) + AddUint8(key string, value uint8) + AddUintptr(key string, value uintptr) + + // AddReflected uses reflection to serialize arbitrary objects, so it can be + // slow and allocation-heavy. + AddReflected(key string, value interface{}) error + // OpenNamespace opens an isolated namespace where all subsequent fields will + // be added. Applications can use namespaces to prevent key collisions when + // injecting loggers into sub-components or third-party libraries. + OpenNamespace(key string) +} + +// ArrayEncoder is a strongly-typed, encoding-agnostic interface for adding +// array-like objects to the logging context. Of note, it supports mixed-type +// arrays even though they aren't typical in Go. Like slices, ArrayEncoders +// aren't safe for concurrent use (though typical use shouldn't require locks). +type ArrayEncoder interface { + // Built-in types. + PrimitiveArrayEncoder + + // Time-related types. + AppendDuration(time.Duration) + AppendTime(time.Time) + + // Logging-specific marshalers. + AppendArray(ArrayMarshaler) error + AppendObject(ObjectMarshaler) error + + // AppendReflected uses reflection to serialize arbitrary objects, so it's + // slow and allocation-heavy. + AppendReflected(value interface{}) error +} + +// PrimitiveArrayEncoder is the subset of the ArrayEncoder interface that deals +// only in Go's built-in types. It's included only so that Duration- and +// TimeEncoders cannot trigger infinite recursion. +type PrimitiveArrayEncoder interface { + // Built-in types. + AppendBool(bool) + AppendByteString([]byte) // for UTF-8 encoded bytes + AppendComplex128(complex128) + AppendComplex64(complex64) + AppendFloat64(float64) + AppendFloat32(float32) + AppendInt(int) + AppendInt64(int64) + AppendInt32(int32) + AppendInt16(int16) + AppendInt8(int8) + AppendString(string) + AppendUint(uint) + AppendUint64(uint64) + AppendUint32(uint32) + AppendUint16(uint16) + AppendUint8(uint8) + AppendUintptr(uintptr) +} + +// Encoder is a format-agnostic interface for all log entry marshalers. Since +// log encoders don't need to support the same wide range of use cases as +// general-purpose marshalers, it's possible to make them faster and +// lower-allocation. +// +// Implementations of the ObjectEncoder interface's methods can, of course, +// freely modify the receiver. However, the Clone and EncodeEntry methods will +// be called concurrently and shouldn't modify the receiver. +type Encoder interface { + ObjectEncoder + + // Clone copies the encoder, ensuring that adding fields to the copy doesn't + // affect the original. + Clone() Encoder + + // EncodeEntry encodes an entry and fields, along with any accumulated + // context, into a byte buffer and returns it. 
Any fields that are empty, + // including fields on the `Entry` type, should be omitted. + EncodeEntry(Entry, []Field) (*buffer.Buffer, error) +} diff --git a/vendor/go.uber.org/zap/zapcore/entry.go b/vendor/go.uber.org/zap/zapcore/entry.go new file mode 100644 index 0000000000..9d326e95ea --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/entry.go @@ -0,0 +1,300 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "fmt" + "runtime" + "strings" + "sync" + "time" + + "go.uber.org/multierr" + "go.uber.org/zap/internal/bufferpool" + "go.uber.org/zap/internal/exit" +) + +var ( + _cePool = sync.Pool{New: func() interface{} { + // Pre-allocate some space for cores. + return &CheckedEntry{ + cores: make([]Core, 4), + } + }} +) + +func getCheckedEntry() *CheckedEntry { + ce := _cePool.Get().(*CheckedEntry) + ce.reset() + return ce +} + +func putCheckedEntry(ce *CheckedEntry) { + if ce == nil { + return + } + _cePool.Put(ce) +} + +// NewEntryCaller makes an EntryCaller from the return signature of +// runtime.Caller. +func NewEntryCaller(pc uintptr, file string, line int, ok bool) EntryCaller { + if !ok { + return EntryCaller{} + } + return EntryCaller{ + PC: pc, + File: file, + Line: line, + Defined: true, + } +} + +// EntryCaller represents the caller of a logging function. +type EntryCaller struct { + Defined bool + PC uintptr + File string + Line int + Function string +} + +// String returns the full path and line number of the caller. +func (ec EntryCaller) String() string { + return ec.FullPath() +} + +// FullPath returns a /full/path/to/package/file:line description of the +// caller. +func (ec EntryCaller) FullPath() string { + if !ec.Defined { + return "undefined" + } + buf := bufferpool.Get() + buf.AppendString(ec.File) + buf.AppendByte(':') + buf.AppendInt(int64(ec.Line)) + caller := buf.String() + buf.Free() + return caller +} + +// TrimmedPath returns a package/file:line description of the caller, +// preserving only the leaf directory name and file name. +func (ec EntryCaller) TrimmedPath() string { + if !ec.Defined { + return "undefined" + } + // nb. To make sure we trim the path correctly on Windows too, we + // counter-intuitively need to use '/' and *not* os.PathSeparator here, + // because the path given originates from Go stdlib, specifically + // runtime.Caller() which (as of Mar/17) returns forward slashes even on + // Windows. 
+ // + // See https://github.com/golang/go/issues/3335 + // and https://github.com/golang/go/issues/18151 + // + // for discussion on the issue on Go side. + // + // Find the last separator. + // + idx := strings.LastIndexByte(ec.File, '/') + if idx == -1 { + return ec.FullPath() + } + // Find the penultimate separator. + idx = strings.LastIndexByte(ec.File[:idx], '/') + if idx == -1 { + return ec.FullPath() + } + buf := bufferpool.Get() + // Keep everything after the penultimate separator. + buf.AppendString(ec.File[idx+1:]) + buf.AppendByte(':') + buf.AppendInt(int64(ec.Line)) + caller := buf.String() + buf.Free() + return caller +} + +// An Entry represents a complete log message. The entry's structured context +// is already serialized, but the log level, time, message, and call site +// information are available for inspection and modification. Any fields left +// empty will be omitted when encoding. +// +// Entries are pooled, so any functions that accept them MUST be careful not to +// retain references to them. +type Entry struct { + Level Level + Time time.Time + LoggerName string + Message string + Caller EntryCaller + Stack string +} + +// CheckWriteHook is a custom action that may be executed after an entry is +// written. +// +// Register one on a CheckedEntry with the After method. +// +// if ce := logger.Check(...); ce != nil { +// ce = ce.After(hook) +// ce.Write(...) +// } +// +// You can configure the hook for Fatal log statements at the logger level with +// the zap.WithFatalHook option. +type CheckWriteHook interface { + // OnWrite is invoked with the CheckedEntry that was written and a list + // of fields added with that entry. + // + // The list of fields DOES NOT include fields that were already added + // to the logger with the With method. + OnWrite(*CheckedEntry, []Field) +} + +// CheckWriteAction indicates what action to take after a log entry is +// processed. Actions are ordered in increasing severity. +type CheckWriteAction uint8 + +const ( + // WriteThenNoop indicates that nothing special needs to be done. It's the + // default behavior. + WriteThenNoop CheckWriteAction = iota + // WriteThenGoexit runs runtime.Goexit after Write. + WriteThenGoexit + // WriteThenPanic causes a panic after Write. + WriteThenPanic + // WriteThenFatal causes an os.Exit(1) after Write. + WriteThenFatal +) + +// OnWrite implements the OnWrite method to keep CheckWriteAction compatible +// with the new CheckWriteHook interface which deprecates CheckWriteAction. +func (a CheckWriteAction) OnWrite(ce *CheckedEntry, _ []Field) { + switch a { + case WriteThenGoexit: + runtime.Goexit() + case WriteThenPanic: + panic(ce.Message) + case WriteThenFatal: + exit.With(1) + } +} + +var _ CheckWriteHook = CheckWriteAction(0) + +// CheckedEntry is an Entry together with a collection of Cores that have +// already agreed to log it. +// +// CheckedEntry references should be created by calling AddCore or After on a +// nil *CheckedEntry. References are returned to a pool after Write, and MUST +// NOT be retained after calling their Write method. 
+type CheckedEntry struct { + Entry + ErrorOutput WriteSyncer + dirty bool // best-effort detection of pool misuse + after CheckWriteHook + cores []Core +} + +func (ce *CheckedEntry) reset() { + ce.Entry = Entry{} + ce.ErrorOutput = nil + ce.dirty = false + ce.after = nil + for i := range ce.cores { + // don't keep references to cores + ce.cores[i] = nil + } + ce.cores = ce.cores[:0] +} + +// Write writes the entry to the stored Cores, returns any errors, and returns +// the CheckedEntry reference to a pool for immediate re-use. Finally, it +// executes any required CheckWriteAction. +func (ce *CheckedEntry) Write(fields ...Field) { + if ce == nil { + return + } + + if ce.dirty { + if ce.ErrorOutput != nil { + // Make a best effort to detect unsafe re-use of this CheckedEntry. + // If the entry is dirty, log an internal error; because the + // CheckedEntry is being used after it was returned to the pool, + // the message may be an amalgamation from multiple call sites. + fmt.Fprintf(ce.ErrorOutput, "%v Unsafe CheckedEntry re-use near Entry %+v.\n", ce.Time, ce.Entry) + ce.ErrorOutput.Sync() + } + return + } + ce.dirty = true + + var err error + for i := range ce.cores { + err = multierr.Append(err, ce.cores[i].Write(ce.Entry, fields)) + } + if err != nil && ce.ErrorOutput != nil { + fmt.Fprintf(ce.ErrorOutput, "%v write error: %v\n", ce.Time, err) + ce.ErrorOutput.Sync() + } + + hook := ce.after + if hook != nil { + hook.OnWrite(ce, fields) + } + putCheckedEntry(ce) +} + +// AddCore adds a Core that has agreed to log this CheckedEntry. It's intended to be +// used by Core.Check implementations, and is safe to call on nil CheckedEntry +// references. +func (ce *CheckedEntry) AddCore(ent Entry, core Core) *CheckedEntry { + if ce == nil { + ce = getCheckedEntry() + ce.Entry = ent + } + ce.cores = append(ce.cores, core) + return ce +} + +// Should sets this CheckedEntry's CheckWriteAction, which controls whether a +// Core will panic or fatal after writing this log entry. Like AddCore, it's +// safe to call on nil CheckedEntry references. +// +// Deprecated: Use [CheckedEntry.After] instead. +func (ce *CheckedEntry) Should(ent Entry, should CheckWriteAction) *CheckedEntry { + return ce.After(ent, should) +} + +// After sets this CheckEntry's CheckWriteHook, which will be called after this +// log entry has been written. It's safe to call this on nil CheckedEntry +// references. +func (ce *CheckedEntry) After(ent Entry, hook CheckWriteHook) *CheckedEntry { + if ce == nil { + ce = getCheckedEntry() + ce.Entry = ent + } + ce.after = hook + return ce +} diff --git a/vendor/go.uber.org/zap/zapcore/error.go b/vendor/go.uber.org/zap/zapcore/error.go new file mode 100644 index 0000000000..06359907af --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/error.go @@ -0,0 +1,132 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "fmt" + "reflect" + "sync" +) + +// Encodes the given error into fields of an object. A field with the given +// name is added for the error message. +// +// If the error implements fmt.Formatter, a field with the name ${key}Verbose +// is also added with the full verbose error message. +// +// Finally, if the error implements errorGroup (from go.uber.org/multierr) or +// causer (from github.com/pkg/errors), a ${key}Causes field is added with an +// array of objects containing the errors this error was comprised of. +// +// { +// "error": err.Error(), +// "errorVerbose": fmt.Sprintf("%+v", err), +// "errorCauses": [ +// ... +// ], +// } +func encodeError(key string, err error, enc ObjectEncoder) (retErr error) { + // Try to capture panics (from nil references or otherwise) when calling + // the Error() method + defer func() { + if rerr := recover(); rerr != nil { + // If it's a nil pointer, just say "". The likeliest causes are a + // error that fails to guard against nil or a nil pointer for a + // value receiver, and in either case, "" is a nice result. + if v := reflect.ValueOf(err); v.Kind() == reflect.Ptr && v.IsNil() { + enc.AddString(key, "") + return + } + + retErr = fmt.Errorf("PANIC=%v", rerr) + } + }() + + basic := err.Error() + enc.AddString(key, basic) + + switch e := err.(type) { + case errorGroup: + return enc.AddArray(key+"Causes", errArray(e.Errors())) + case fmt.Formatter: + verbose := fmt.Sprintf("%+v", e) + if verbose != basic { + // This is a rich error type, like those produced by + // github.com/pkg/errors. + enc.AddString(key+"Verbose", verbose) + } + } + return nil +} + +type errorGroup interface { + // Provides read-only access to the underlying list of errors, preferably + // without causing any allocs. + Errors() []error +} + +// Note that errArray and errArrayElem are very similar to the version +// implemented in the top-level error.go file. We can't re-use this because +// that would require exporting errArray as part of the zapcore API. + +// Encodes a list of errors using the standard error encoding logic. +type errArray []error + +func (errs errArray) MarshalLogArray(arr ArrayEncoder) error { + for i := range errs { + if errs[i] == nil { + continue + } + + el := newErrArrayElem(errs[i]) + arr.AppendObject(el) + el.Free() + } + return nil +} + +var _errArrayElemPool = sync.Pool{New: func() interface{} { + return &errArrayElem{} +}} + +// Encodes any error into a {"error": ...} re-using the same errors logic. +// +// May be passed in place of an array to build a single-element array. 
+type errArrayElem struct{ err error } + +func newErrArrayElem(err error) *errArrayElem { + e := _errArrayElemPool.Get().(*errArrayElem) + e.err = err + return e +} + +func (e *errArrayElem) MarshalLogArray(arr ArrayEncoder) error { + return arr.AppendObject(e) +} + +func (e *errArrayElem) MarshalLogObject(enc ObjectEncoder) error { + return encodeError("error", e.err, enc) +} + +func (e *errArrayElem) Free() { + e.err = nil + _errArrayElemPool.Put(e) +} diff --git a/vendor/go.uber.org/zap/zapcore/field.go b/vendor/go.uber.org/zap/zapcore/field.go new file mode 100644 index 0000000000..95bdb0a126 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/field.go @@ -0,0 +1,233 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "bytes" + "fmt" + "math" + "reflect" + "time" +) + +// A FieldType indicates which member of the Field union struct should be used +// and how it should be serialized. +type FieldType uint8 + +const ( + // UnknownType is the default field type. Attempting to add it to an encoder will panic. + UnknownType FieldType = iota + // ArrayMarshalerType indicates that the field carries an ArrayMarshaler. + ArrayMarshalerType + // ObjectMarshalerType indicates that the field carries an ObjectMarshaler. + ObjectMarshalerType + // BinaryType indicates that the field carries an opaque binary blob. + BinaryType + // BoolType indicates that the field carries a bool. + BoolType + // ByteStringType indicates that the field carries UTF-8 encoded bytes. + ByteStringType + // Complex128Type indicates that the field carries a complex128. + Complex128Type + // Complex64Type indicates that the field carries a complex128. + Complex64Type + // DurationType indicates that the field carries a time.Duration. + DurationType + // Float64Type indicates that the field carries a float64. + Float64Type + // Float32Type indicates that the field carries a float32. + Float32Type + // Int64Type indicates that the field carries an int64. + Int64Type + // Int32Type indicates that the field carries an int32. + Int32Type + // Int16Type indicates that the field carries an int16. + Int16Type + // Int8Type indicates that the field carries an int8. + Int8Type + // StringType indicates that the field carries a string. + StringType + // TimeType indicates that the field carries a time.Time that is + // representable by a UnixNano() stored as an int64. 
+ TimeType + // TimeFullType indicates that the field carries a time.Time stored as-is. + TimeFullType + // Uint64Type indicates that the field carries a uint64. + Uint64Type + // Uint32Type indicates that the field carries a uint32. + Uint32Type + // Uint16Type indicates that the field carries a uint16. + Uint16Type + // Uint8Type indicates that the field carries a uint8. + Uint8Type + // UintptrType indicates that the field carries a uintptr. + UintptrType + // ReflectType indicates that the field carries an interface{}, which should + // be serialized using reflection. + ReflectType + // NamespaceType signals the beginning of an isolated namespace. All + // subsequent fields should be added to the new namespace. + NamespaceType + // StringerType indicates that the field carries a fmt.Stringer. + StringerType + // ErrorType indicates that the field carries an error. + ErrorType + // SkipType indicates that the field is a no-op. + SkipType + + // InlineMarshalerType indicates that the field carries an ObjectMarshaler + // that should be inlined. + InlineMarshalerType +) + +// A Field is a marshaling operation used to add a key-value pair to a logger's +// context. Most fields are lazily marshaled, so it's inexpensive to add fields +// to disabled debug-level log statements. +type Field struct { + Key string + Type FieldType + Integer int64 + String string + Interface interface{} +} + +// AddTo exports a field through the ObjectEncoder interface. It's primarily +// useful to library authors, and shouldn't be necessary in most applications. +func (f Field) AddTo(enc ObjectEncoder) { + var err error + + switch f.Type { + case ArrayMarshalerType: + err = enc.AddArray(f.Key, f.Interface.(ArrayMarshaler)) + case ObjectMarshalerType: + err = enc.AddObject(f.Key, f.Interface.(ObjectMarshaler)) + case InlineMarshalerType: + err = f.Interface.(ObjectMarshaler).MarshalLogObject(enc) + case BinaryType: + enc.AddBinary(f.Key, f.Interface.([]byte)) + case BoolType: + enc.AddBool(f.Key, f.Integer == 1) + case ByteStringType: + enc.AddByteString(f.Key, f.Interface.([]byte)) + case Complex128Type: + enc.AddComplex128(f.Key, f.Interface.(complex128)) + case Complex64Type: + enc.AddComplex64(f.Key, f.Interface.(complex64)) + case DurationType: + enc.AddDuration(f.Key, time.Duration(f.Integer)) + case Float64Type: + enc.AddFloat64(f.Key, math.Float64frombits(uint64(f.Integer))) + case Float32Type: + enc.AddFloat32(f.Key, math.Float32frombits(uint32(f.Integer))) + case Int64Type: + enc.AddInt64(f.Key, f.Integer) + case Int32Type: + enc.AddInt32(f.Key, int32(f.Integer)) + case Int16Type: + enc.AddInt16(f.Key, int16(f.Integer)) + case Int8Type: + enc.AddInt8(f.Key, int8(f.Integer)) + case StringType: + enc.AddString(f.Key, f.String) + case TimeType: + if f.Interface != nil { + enc.AddTime(f.Key, time.Unix(0, f.Integer).In(f.Interface.(*time.Location))) + } else { + // Fall back to UTC if location is nil. 
+ enc.AddTime(f.Key, time.Unix(0, f.Integer)) + } + case TimeFullType: + enc.AddTime(f.Key, f.Interface.(time.Time)) + case Uint64Type: + enc.AddUint64(f.Key, uint64(f.Integer)) + case Uint32Type: + enc.AddUint32(f.Key, uint32(f.Integer)) + case Uint16Type: + enc.AddUint16(f.Key, uint16(f.Integer)) + case Uint8Type: + enc.AddUint8(f.Key, uint8(f.Integer)) + case UintptrType: + enc.AddUintptr(f.Key, uintptr(f.Integer)) + case ReflectType: + err = enc.AddReflected(f.Key, f.Interface) + case NamespaceType: + enc.OpenNamespace(f.Key) + case StringerType: + err = encodeStringer(f.Key, f.Interface, enc) + case ErrorType: + err = encodeError(f.Key, f.Interface.(error), enc) + case SkipType: + break + default: + panic(fmt.Sprintf("unknown field type: %v", f)) + } + + if err != nil { + enc.AddString(fmt.Sprintf("%sError", f.Key), err.Error()) + } +} + +// Equals returns whether two fields are equal. For non-primitive types such as +// errors, marshalers, or reflect types, it uses reflect.DeepEqual. +func (f Field) Equals(other Field) bool { + if f.Type != other.Type { + return false + } + if f.Key != other.Key { + return false + } + + switch f.Type { + case BinaryType, ByteStringType: + return bytes.Equal(f.Interface.([]byte), other.Interface.([]byte)) + case ArrayMarshalerType, ObjectMarshalerType, ErrorType, ReflectType: + return reflect.DeepEqual(f.Interface, other.Interface) + default: + return f == other + } +} + +func addFields(enc ObjectEncoder, fields []Field) { + for i := range fields { + fields[i].AddTo(enc) + } +} + +func encodeStringer(key string, stringer interface{}, enc ObjectEncoder) (retErr error) { + // Try to capture panics (from nil references or otherwise) when calling + // the String() method, similar to https://golang.org/src/fmt/print.go#L540 + defer func() { + if err := recover(); err != nil { + // If it's a nil pointer, just say "". The likeliest causes are a + // Stringer that fails to guard against nil or a nil pointer for a + // value receiver, and in either case, "" is a nice result. + if v := reflect.ValueOf(stringer); v.Kind() == reflect.Ptr && v.IsNil() { + enc.AddString(key, "") + return + } + + retErr = fmt.Errorf("PANIC=%v", err) + } + }() + + enc.AddString(key, stringer.(fmt.Stringer).String()) + return nil +} diff --git a/vendor/go.uber.org/zap/zapcore/hook.go b/vendor/go.uber.org/zap/zapcore/hook.go new file mode 100644 index 0000000000..198def9917 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/hook.go @@ -0,0 +1,77 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import "go.uber.org/multierr" + +type hooked struct { + Core + funcs []func(Entry) error +} + +var ( + _ Core = (*hooked)(nil) + _ leveledEnabler = (*hooked)(nil) +) + +// RegisterHooks wraps a Core and runs a collection of user-defined callback +// hooks each time a message is logged. Execution of the callbacks is blocking. +// +// This offers users an easy way to register simple callbacks (e.g., metrics +// collection) without implementing the full Core interface. +func RegisterHooks(core Core, hooks ...func(Entry) error) Core { + funcs := append([]func(Entry) error{}, hooks...) + return &hooked{ + Core: core, + funcs: funcs, + } +} + +func (h *hooked) Level() Level { + return LevelOf(h.Core) +} + +func (h *hooked) Check(ent Entry, ce *CheckedEntry) *CheckedEntry { + // Let the wrapped Core decide whether to log this message or not. This + // also gives the downstream a chance to register itself directly with the + // CheckedEntry. + if downstream := h.Core.Check(ent, ce); downstream != nil { + return downstream.AddCore(ent, h) + } + return ce +} + +func (h *hooked) With(fields []Field) Core { + return &hooked{ + Core: h.Core.With(fields), + funcs: h.funcs, + } +} + +func (h *hooked) Write(ent Entry, _ []Field) error { + // Since our downstream had a chance to register itself directly with the + // CheckedMessage, we don't need to call it here. + var err error + for i := range h.funcs { + err = multierr.Append(err, h.funcs[i](ent)) + } + return err +} diff --git a/vendor/go.uber.org/zap/zapcore/increase_level.go b/vendor/go.uber.org/zap/zapcore/increase_level.go new file mode 100644 index 0000000000..7a11237ae9 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/increase_level.go @@ -0,0 +1,75 @@ +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import "fmt" + +type levelFilterCore struct { + core Core + level LevelEnabler +} + +var ( + _ Core = (*levelFilterCore)(nil) + _ leveledEnabler = (*levelFilterCore)(nil) +) + +// NewIncreaseLevelCore creates a core that can be used to increase the level of +// an existing Core. 
It cannot be used to decrease the logging level, as it acts +// as a filter before calling the underlying core. If level decreases the log level, +// an error is returned. +func NewIncreaseLevelCore(core Core, level LevelEnabler) (Core, error) { + for l := _maxLevel; l >= _minLevel; l-- { + if !core.Enabled(l) && level.Enabled(l) { + return nil, fmt.Errorf("invalid increase level, as level %q is allowed by increased level, but not by existing core", l) + } + } + + return &levelFilterCore{core, level}, nil +} + +func (c *levelFilterCore) Enabled(lvl Level) bool { + return c.level.Enabled(lvl) +} + +func (c *levelFilterCore) Level() Level { + return LevelOf(c.level) +} + +func (c *levelFilterCore) With(fields []Field) Core { + return &levelFilterCore{c.core.With(fields), c.level} +} + +func (c *levelFilterCore) Check(ent Entry, ce *CheckedEntry) *CheckedEntry { + if !c.Enabled(ent.Level) { + return ce + } + + return c.core.Check(ent, ce) +} + +func (c *levelFilterCore) Write(ent Entry, fields []Field) error { + return c.core.Write(ent, fields) +} + +func (c *levelFilterCore) Sync() error { + return c.core.Sync() +} diff --git a/vendor/go.uber.org/zap/zapcore/json_encoder.go b/vendor/go.uber.org/zap/zapcore/json_encoder.go new file mode 100644 index 0000000000..3921c5cd33 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/json_encoder.go @@ -0,0 +1,562 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "encoding/base64" + "math" + "sync" + "time" + "unicode/utf8" + + "go.uber.org/zap/buffer" + "go.uber.org/zap/internal/bufferpool" +) + +// For JSON-escaping; see jsonEncoder.safeAddString below. +const _hex = "0123456789abcdef" + +var _jsonPool = sync.Pool{New: func() interface{} { + return &jsonEncoder{} +}} + +func getJSONEncoder() *jsonEncoder { + return _jsonPool.Get().(*jsonEncoder) +} + +func putJSONEncoder(enc *jsonEncoder) { + if enc.reflectBuf != nil { + enc.reflectBuf.Free() + } + enc.EncoderConfig = nil + enc.buf = nil + enc.spaced = false + enc.openNamespaces = 0 + enc.reflectBuf = nil + enc.reflectEnc = nil + _jsonPool.Put(enc) +} + +type jsonEncoder struct { + *EncoderConfig + buf *buffer.Buffer + spaced bool // include spaces after colons and commas + openNamespaces int + + // for encoding generic values by reflection + reflectBuf *buffer.Buffer + reflectEnc ReflectedEncoder +} + +// NewJSONEncoder creates a fast, low-allocation JSON encoder. 
The encoder +// appropriately escapes all field keys and values. +// +// Note that the encoder doesn't deduplicate keys, so it's possible to produce +// a message like +// +// {"foo":"bar","foo":"baz"} +// +// This is permitted by the JSON specification, but not encouraged. Many +// libraries will ignore duplicate key-value pairs (typically keeping the last +// pair) when unmarshaling, but users should attempt to avoid adding duplicate +// keys. +func NewJSONEncoder(cfg EncoderConfig) Encoder { + return newJSONEncoder(cfg, false) +} + +func newJSONEncoder(cfg EncoderConfig, spaced bool) *jsonEncoder { + if cfg.SkipLineEnding { + cfg.LineEnding = "" + } else if cfg.LineEnding == "" { + cfg.LineEnding = DefaultLineEnding + } + + // If no EncoderConfig.NewReflectedEncoder is provided by the user, then use default + if cfg.NewReflectedEncoder == nil { + cfg.NewReflectedEncoder = defaultReflectedEncoder + } + + return &jsonEncoder{ + EncoderConfig: &cfg, + buf: bufferpool.Get(), + spaced: spaced, + } +} + +func (enc *jsonEncoder) AddArray(key string, arr ArrayMarshaler) error { + enc.addKey(key) + return enc.AppendArray(arr) +} + +func (enc *jsonEncoder) AddObject(key string, obj ObjectMarshaler) error { + enc.addKey(key) + return enc.AppendObject(obj) +} + +func (enc *jsonEncoder) AddBinary(key string, val []byte) { + enc.AddString(key, base64.StdEncoding.EncodeToString(val)) +} + +func (enc *jsonEncoder) AddByteString(key string, val []byte) { + enc.addKey(key) + enc.AppendByteString(val) +} + +func (enc *jsonEncoder) AddBool(key string, val bool) { + enc.addKey(key) + enc.AppendBool(val) +} + +func (enc *jsonEncoder) AddComplex128(key string, val complex128) { + enc.addKey(key) + enc.AppendComplex128(val) +} + +func (enc *jsonEncoder) AddComplex64(key string, val complex64) { + enc.addKey(key) + enc.AppendComplex64(val) +} + +func (enc *jsonEncoder) AddDuration(key string, val time.Duration) { + enc.addKey(key) + enc.AppendDuration(val) +} + +func (enc *jsonEncoder) AddFloat64(key string, val float64) { + enc.addKey(key) + enc.AppendFloat64(val) +} + +func (enc *jsonEncoder) AddFloat32(key string, val float32) { + enc.addKey(key) + enc.AppendFloat32(val) +} + +func (enc *jsonEncoder) AddInt64(key string, val int64) { + enc.addKey(key) + enc.AppendInt64(val) +} + +func (enc *jsonEncoder) resetReflectBuf() { + if enc.reflectBuf == nil { + enc.reflectBuf = bufferpool.Get() + enc.reflectEnc = enc.NewReflectedEncoder(enc.reflectBuf) + } else { + enc.reflectBuf.Reset() + } +} + +var nullLiteralBytes = []byte("null") + +// Only invoke the standard JSON encoder if there is actually something to +// encode; otherwise write JSON null literal directly. 
+func (enc *jsonEncoder) encodeReflected(obj interface{}) ([]byte, error) { + if obj == nil { + return nullLiteralBytes, nil + } + enc.resetReflectBuf() + if err := enc.reflectEnc.Encode(obj); err != nil { + return nil, err + } + enc.reflectBuf.TrimNewline() + return enc.reflectBuf.Bytes(), nil +} + +func (enc *jsonEncoder) AddReflected(key string, obj interface{}) error { + valueBytes, err := enc.encodeReflected(obj) + if err != nil { + return err + } + enc.addKey(key) + _, err = enc.buf.Write(valueBytes) + return err +} + +func (enc *jsonEncoder) OpenNamespace(key string) { + enc.addKey(key) + enc.buf.AppendByte('{') + enc.openNamespaces++ +} + +func (enc *jsonEncoder) AddString(key, val string) { + enc.addKey(key) + enc.AppendString(val) +} + +func (enc *jsonEncoder) AddTime(key string, val time.Time) { + enc.addKey(key) + enc.AppendTime(val) +} + +func (enc *jsonEncoder) AddUint64(key string, val uint64) { + enc.addKey(key) + enc.AppendUint64(val) +} + +func (enc *jsonEncoder) AppendArray(arr ArrayMarshaler) error { + enc.addElementSeparator() + enc.buf.AppendByte('[') + err := arr.MarshalLogArray(enc) + enc.buf.AppendByte(']') + return err +} + +func (enc *jsonEncoder) AppendObject(obj ObjectMarshaler) error { + // Close ONLY new openNamespaces that are created during + // AppendObject(). + old := enc.openNamespaces + enc.openNamespaces = 0 + enc.addElementSeparator() + enc.buf.AppendByte('{') + err := obj.MarshalLogObject(enc) + enc.buf.AppendByte('}') + enc.closeOpenNamespaces() + enc.openNamespaces = old + return err +} + +func (enc *jsonEncoder) AppendBool(val bool) { + enc.addElementSeparator() + enc.buf.AppendBool(val) +} + +func (enc *jsonEncoder) AppendByteString(val []byte) { + enc.addElementSeparator() + enc.buf.AppendByte('"') + enc.safeAddByteString(val) + enc.buf.AppendByte('"') +} + +// appendComplex appends the encoded form of the provided complex128 value. +// precision specifies the encoding precision for the real and imaginary +// components of the complex number. +func (enc *jsonEncoder) appendComplex(val complex128, precision int) { + enc.addElementSeparator() + // Cast to a platform-independent, fixed-size type. + r, i := float64(real(val)), float64(imag(val)) + enc.buf.AppendByte('"') + // Because we're always in a quoted string, we can use strconv without + // special-casing NaN and +/-Inf. + enc.buf.AppendFloat(r, precision) + // If imaginary part is less than 0, minus (-) sign is added by default + // by AppendFloat. + if i >= 0 { + enc.buf.AppendByte('+') + } + enc.buf.AppendFloat(i, precision) + enc.buf.AppendByte('i') + enc.buf.AppendByte('"') +} + +func (enc *jsonEncoder) AppendDuration(val time.Duration) { + cur := enc.buf.Len() + if e := enc.EncodeDuration; e != nil { + e(val, enc) + } + if cur == enc.buf.Len() { + // User-supplied EncodeDuration is a no-op. Fall back to nanoseconds to keep + // JSON valid. 
+ enc.AppendInt64(int64(val)) + } +} + +func (enc *jsonEncoder) AppendInt64(val int64) { + enc.addElementSeparator() + enc.buf.AppendInt(val) +} + +func (enc *jsonEncoder) AppendReflected(val interface{}) error { + valueBytes, err := enc.encodeReflected(val) + if err != nil { + return err + } + enc.addElementSeparator() + _, err = enc.buf.Write(valueBytes) + return err +} + +func (enc *jsonEncoder) AppendString(val string) { + enc.addElementSeparator() + enc.buf.AppendByte('"') + enc.safeAddString(val) + enc.buf.AppendByte('"') +} + +func (enc *jsonEncoder) AppendTimeLayout(time time.Time, layout string) { + enc.addElementSeparator() + enc.buf.AppendByte('"') + enc.buf.AppendTime(time, layout) + enc.buf.AppendByte('"') +} + +func (enc *jsonEncoder) AppendTime(val time.Time) { + cur := enc.buf.Len() + if e := enc.EncodeTime; e != nil { + e(val, enc) + } + if cur == enc.buf.Len() { + // User-supplied EncodeTime is a no-op. Fall back to nanos since epoch to keep + // output JSON valid. + enc.AppendInt64(val.UnixNano()) + } +} + +func (enc *jsonEncoder) AppendUint64(val uint64) { + enc.addElementSeparator() + enc.buf.AppendUint(val) +} + +func (enc *jsonEncoder) AddInt(k string, v int) { enc.AddInt64(k, int64(v)) } +func (enc *jsonEncoder) AddInt32(k string, v int32) { enc.AddInt64(k, int64(v)) } +func (enc *jsonEncoder) AddInt16(k string, v int16) { enc.AddInt64(k, int64(v)) } +func (enc *jsonEncoder) AddInt8(k string, v int8) { enc.AddInt64(k, int64(v)) } +func (enc *jsonEncoder) AddUint(k string, v uint) { enc.AddUint64(k, uint64(v)) } +func (enc *jsonEncoder) AddUint32(k string, v uint32) { enc.AddUint64(k, uint64(v)) } +func (enc *jsonEncoder) AddUint16(k string, v uint16) { enc.AddUint64(k, uint64(v)) } +func (enc *jsonEncoder) AddUint8(k string, v uint8) { enc.AddUint64(k, uint64(v)) } +func (enc *jsonEncoder) AddUintptr(k string, v uintptr) { enc.AddUint64(k, uint64(v)) } +func (enc *jsonEncoder) AppendComplex64(v complex64) { enc.appendComplex(complex128(v), 32) } +func (enc *jsonEncoder) AppendComplex128(v complex128) { enc.appendComplex(complex128(v), 64) } +func (enc *jsonEncoder) AppendFloat64(v float64) { enc.appendFloat(v, 64) } +func (enc *jsonEncoder) AppendFloat32(v float32) { enc.appendFloat(float64(v), 32) } +func (enc *jsonEncoder) AppendInt(v int) { enc.AppendInt64(int64(v)) } +func (enc *jsonEncoder) AppendInt32(v int32) { enc.AppendInt64(int64(v)) } +func (enc *jsonEncoder) AppendInt16(v int16) { enc.AppendInt64(int64(v)) } +func (enc *jsonEncoder) AppendInt8(v int8) { enc.AppendInt64(int64(v)) } +func (enc *jsonEncoder) AppendUint(v uint) { enc.AppendUint64(uint64(v)) } +func (enc *jsonEncoder) AppendUint32(v uint32) { enc.AppendUint64(uint64(v)) } +func (enc *jsonEncoder) AppendUint16(v uint16) { enc.AppendUint64(uint64(v)) } +func (enc *jsonEncoder) AppendUint8(v uint8) { enc.AppendUint64(uint64(v)) } +func (enc *jsonEncoder) AppendUintptr(v uintptr) { enc.AppendUint64(uint64(v)) } + +func (enc *jsonEncoder) Clone() Encoder { + clone := enc.clone() + clone.buf.Write(enc.buf.Bytes()) + return clone +} + +func (enc *jsonEncoder) clone() *jsonEncoder { + clone := getJSONEncoder() + clone.EncoderConfig = enc.EncoderConfig + clone.spaced = enc.spaced + clone.openNamespaces = enc.openNamespaces + clone.buf = bufferpool.Get() + return clone +} + +func (enc *jsonEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer, error) { + final := enc.clone() + final.buf.AppendByte('{') + + if final.LevelKey != "" && final.EncodeLevel != nil { + final.addKey(final.LevelKey) + 
cur := final.buf.Len() + final.EncodeLevel(ent.Level, final) + if cur == final.buf.Len() { + // User-supplied EncodeLevel was a no-op. Fall back to strings to keep + // output JSON valid. + final.AppendString(ent.Level.String()) + } + } + if final.TimeKey != "" { + final.AddTime(final.TimeKey, ent.Time) + } + if ent.LoggerName != "" && final.NameKey != "" { + final.addKey(final.NameKey) + cur := final.buf.Len() + nameEncoder := final.EncodeName + + // if no name encoder provided, fall back to FullNameEncoder for backwards + // compatibility + if nameEncoder == nil { + nameEncoder = FullNameEncoder + } + + nameEncoder(ent.LoggerName, final) + if cur == final.buf.Len() { + // User-supplied EncodeName was a no-op. Fall back to strings to + // keep output JSON valid. + final.AppendString(ent.LoggerName) + } + } + if ent.Caller.Defined { + if final.CallerKey != "" { + final.addKey(final.CallerKey) + cur := final.buf.Len() + final.EncodeCaller(ent.Caller, final) + if cur == final.buf.Len() { + // User-supplied EncodeCaller was a no-op. Fall back to strings to + // keep output JSON valid. + final.AppendString(ent.Caller.String()) + } + } + if final.FunctionKey != "" { + final.addKey(final.FunctionKey) + final.AppendString(ent.Caller.Function) + } + } + if final.MessageKey != "" { + final.addKey(enc.MessageKey) + final.AppendString(ent.Message) + } + if enc.buf.Len() > 0 { + final.addElementSeparator() + final.buf.Write(enc.buf.Bytes()) + } + addFields(final, fields) + final.closeOpenNamespaces() + if ent.Stack != "" && final.StacktraceKey != "" { + final.AddString(final.StacktraceKey, ent.Stack) + } + final.buf.AppendByte('}') + final.buf.AppendString(final.LineEnding) + + ret := final.buf + putJSONEncoder(final) + return ret, nil +} + +func (enc *jsonEncoder) truncate() { + enc.buf.Reset() +} + +func (enc *jsonEncoder) closeOpenNamespaces() { + for i := 0; i < enc.openNamespaces; i++ { + enc.buf.AppendByte('}') + } + enc.openNamespaces = 0 +} + +func (enc *jsonEncoder) addKey(key string) { + enc.addElementSeparator() + enc.buf.AppendByte('"') + enc.safeAddString(key) + enc.buf.AppendByte('"') + enc.buf.AppendByte(':') + if enc.spaced { + enc.buf.AppendByte(' ') + } +} + +func (enc *jsonEncoder) addElementSeparator() { + last := enc.buf.Len() - 1 + if last < 0 { + return + } + switch enc.buf.Bytes()[last] { + case '{', '[', ':', ',', ' ': + return + default: + enc.buf.AppendByte(',') + if enc.spaced { + enc.buf.AppendByte(' ') + } + } +} + +func (enc *jsonEncoder) appendFloat(val float64, bitSize int) { + enc.addElementSeparator() + switch { + case math.IsNaN(val): + enc.buf.AppendString(`"NaN"`) + case math.IsInf(val, 1): + enc.buf.AppendString(`"+Inf"`) + case math.IsInf(val, -1): + enc.buf.AppendString(`"-Inf"`) + default: + enc.buf.AppendFloat(val, bitSize) + } +} + +// safeAddString JSON-escapes a string and appends it to the internal buffer. +// Unlike the standard library's encoder, it doesn't attempt to protect the +// user from browser vulnerabilities or JSONP-related problems. +func (enc *jsonEncoder) safeAddString(s string) { + for i := 0; i < len(s); { + if enc.tryAddRuneSelf(s[i]) { + i++ + continue + } + r, size := utf8.DecodeRuneInString(s[i:]) + if enc.tryAddRuneError(r, size) { + i++ + continue + } + enc.buf.AppendString(s[i : i+size]) + i += size + } +} + +// safeAddByteString is no-alloc equivalent of safeAddString(string(s)) for s []byte. 
+func (enc *jsonEncoder) safeAddByteString(s []byte) { + for i := 0; i < len(s); { + if enc.tryAddRuneSelf(s[i]) { + i++ + continue + } + r, size := utf8.DecodeRune(s[i:]) + if enc.tryAddRuneError(r, size) { + i++ + continue + } + enc.buf.Write(s[i : i+size]) + i += size + } +} + +// tryAddRuneSelf appends b if it is valid UTF-8 character represented in a single byte. +func (enc *jsonEncoder) tryAddRuneSelf(b byte) bool { + if b >= utf8.RuneSelf { + return false + } + if 0x20 <= b && b != '\\' && b != '"' { + enc.buf.AppendByte(b) + return true + } + switch b { + case '\\', '"': + enc.buf.AppendByte('\\') + enc.buf.AppendByte(b) + case '\n': + enc.buf.AppendByte('\\') + enc.buf.AppendByte('n') + case '\r': + enc.buf.AppendByte('\\') + enc.buf.AppendByte('r') + case '\t': + enc.buf.AppendByte('\\') + enc.buf.AppendByte('t') + default: + // Encode bytes < 0x20, except for the escape sequences above. + enc.buf.AppendString(`\u00`) + enc.buf.AppendByte(_hex[b>>4]) + enc.buf.AppendByte(_hex[b&0xF]) + } + return true +} + +func (enc *jsonEncoder) tryAddRuneError(r rune, size int) bool { + if r == utf8.RuneError && size == 1 { + enc.buf.AppendString(`\ufffd`) + return true + } + return false +} diff --git a/vendor/go.uber.org/zap/zapcore/level.go b/vendor/go.uber.org/zap/zapcore/level.go new file mode 100644 index 0000000000..e01a241316 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/level.go @@ -0,0 +1,229 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "bytes" + "errors" + "fmt" +) + +var errUnmarshalNilLevel = errors.New("can't unmarshal a nil *Level") + +// A Level is a logging priority. Higher levels are more important. +type Level int8 + +const ( + // DebugLevel logs are typically voluminous, and are usually disabled in + // production. + DebugLevel Level = iota - 1 + // InfoLevel is the default logging priority. + InfoLevel + // WarnLevel logs are more important than Info, but don't need individual + // human review. + WarnLevel + // ErrorLevel logs are high-priority. If an application is running smoothly, + // it shouldn't generate any error-level logs. + ErrorLevel + // DPanicLevel logs are particularly important errors. In development the + // logger panics after writing the message. + DPanicLevel + // PanicLevel logs a message, then panics. + PanicLevel + // FatalLevel logs a message, then calls os.Exit(1). 
+ FatalLevel + + _minLevel = DebugLevel + _maxLevel = FatalLevel + + // InvalidLevel is an invalid value for Level. + // + // Core implementations may panic if they see messages of this level. + InvalidLevel = _maxLevel + 1 +) + +// ParseLevel parses a level based on the lower-case or all-caps ASCII +// representation of the log level. If the provided ASCII representation is +// invalid an error is returned. +// +// This is particularly useful when dealing with text input to configure log +// levels. +func ParseLevel(text string) (Level, error) { + var level Level + err := level.UnmarshalText([]byte(text)) + return level, err +} + +type leveledEnabler interface { + LevelEnabler + + Level() Level +} + +// LevelOf reports the minimum enabled log level for the given LevelEnabler +// from Zap's supported log levels, or [InvalidLevel] if none of them are +// enabled. +// +// A LevelEnabler may implement a 'Level() Level' method to override the +// behavior of this function. +// +// func (c *core) Level() Level { +// return c.currentLevel +// } +// +// It is recommended that [Core] implementations that wrap other cores use +// LevelOf to retrieve the level of the wrapped core. For example, +// +// func (c *coreWrapper) Level() Level { +// return zapcore.LevelOf(c.wrappedCore) +// } +func LevelOf(enab LevelEnabler) Level { + if lvler, ok := enab.(leveledEnabler); ok { + return lvler.Level() + } + + for lvl := _minLevel; lvl <= _maxLevel; lvl++ { + if enab.Enabled(lvl) { + return lvl + } + } + + return InvalidLevel +} + +// String returns a lower-case ASCII representation of the log level. +func (l Level) String() string { + switch l { + case DebugLevel: + return "debug" + case InfoLevel: + return "info" + case WarnLevel: + return "warn" + case ErrorLevel: + return "error" + case DPanicLevel: + return "dpanic" + case PanicLevel: + return "panic" + case FatalLevel: + return "fatal" + default: + return fmt.Sprintf("Level(%d)", l) + } +} + +// CapitalString returns an all-caps ASCII representation of the log level. +func (l Level) CapitalString() string { + // Printing levels in all-caps is common enough that we should export this + // functionality. + switch l { + case DebugLevel: + return "DEBUG" + case InfoLevel: + return "INFO" + case WarnLevel: + return "WARN" + case ErrorLevel: + return "ERROR" + case DPanicLevel: + return "DPANIC" + case PanicLevel: + return "PANIC" + case FatalLevel: + return "FATAL" + default: + return fmt.Sprintf("LEVEL(%d)", l) + } +} + +// MarshalText marshals the Level to text. Note that the text representation +// drops the -Level suffix (see example). +func (l Level) MarshalText() ([]byte, error) { + return []byte(l.String()), nil +} + +// UnmarshalText unmarshals text to a level. Like MarshalText, UnmarshalText +// expects the text representation of a Level to drop the -Level suffix (see +// example). +// +// In particular, this makes it easy to configure logging levels using YAML, +// TOML, or JSON files. 
+func (l *Level) UnmarshalText(text []byte) error { + if l == nil { + return errUnmarshalNilLevel + } + if !l.unmarshalText(text) && !l.unmarshalText(bytes.ToLower(text)) { + return fmt.Errorf("unrecognized level: %q", text) + } + return nil +} + +func (l *Level) unmarshalText(text []byte) bool { + switch string(text) { + case "debug", "DEBUG": + *l = DebugLevel + case "info", "INFO", "": // make the zero value useful + *l = InfoLevel + case "warn", "WARN": + *l = WarnLevel + case "error", "ERROR": + *l = ErrorLevel + case "dpanic", "DPANIC": + *l = DPanicLevel + case "panic", "PANIC": + *l = PanicLevel + case "fatal", "FATAL": + *l = FatalLevel + default: + return false + } + return true +} + +// Set sets the level for the flag.Value interface. +func (l *Level) Set(s string) error { + return l.UnmarshalText([]byte(s)) +} + +// Get gets the level for the flag.Getter interface. +func (l *Level) Get() interface{} { + return *l +} + +// Enabled returns true if the given level is at or above this level. +func (l Level) Enabled(lvl Level) bool { + return lvl >= l +} + +// LevelEnabler decides whether a given logging level is enabled when logging a +// message. +// +// Enablers are intended to be used to implement deterministic filters; +// concerns like sampling are better implemented as a Core. +// +// Each concrete Level value implements a static LevelEnabler which returns +// true for itself and all higher logging levels. For example WarnLevel.Enabled() +// will return true for WarnLevel, ErrorLevel, DPanicLevel, PanicLevel, and +// FatalLevel, but return false for InfoLevel and DebugLevel. +type LevelEnabler interface { + Enabled(Level) bool +} diff --git a/vendor/go.uber.org/zap/zapcore/level_strings.go b/vendor/go.uber.org/zap/zapcore/level_strings.go new file mode 100644 index 0000000000..7af8dadcb3 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/level_strings.go @@ -0,0 +1,46 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +package zapcore + +import "go.uber.org/zap/internal/color" + +var ( + _levelToColor = map[Level]color.Color{ + DebugLevel: color.Magenta, + InfoLevel: color.Blue, + WarnLevel: color.Yellow, + ErrorLevel: color.Red, + DPanicLevel: color.Red, + PanicLevel: color.Red, + FatalLevel: color.Red, + } + _unknownLevelColor = color.Red + + _levelToLowercaseColorString = make(map[Level]string, len(_levelToColor)) + _levelToCapitalColorString = make(map[Level]string, len(_levelToColor)) +) + +func init() { + for level, color := range _levelToColor { + _levelToLowercaseColorString[level] = color.Add(level.String()) + _levelToCapitalColorString[level] = color.Add(level.CapitalString()) + } +} diff --git a/vendor/go.uber.org/zap/zapcore/marshaler.go b/vendor/go.uber.org/zap/zapcore/marshaler.go new file mode 100644 index 0000000000..c3c55ba0d9 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/marshaler.go @@ -0,0 +1,61 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +// ObjectMarshaler allows user-defined types to efficiently add themselves to the +// logging context, and to selectively omit information which shouldn't be +// included in logs (e.g., passwords). +// +// Note: ObjectMarshaler is only used when zap.Object is used or when +// passed directly to zap.Any. It is not used when reflection-based +// encoding is used. +type ObjectMarshaler interface { + MarshalLogObject(ObjectEncoder) error +} + +// ObjectMarshalerFunc is a type adapter that turns a function into an +// ObjectMarshaler. +type ObjectMarshalerFunc func(ObjectEncoder) error + +// MarshalLogObject calls the underlying function. +func (f ObjectMarshalerFunc) MarshalLogObject(enc ObjectEncoder) error { + return f(enc) +} + +// ArrayMarshaler allows user-defined types to efficiently add themselves to the +// logging context, and to selectively omit information which shouldn't be +// included in logs (e.g., passwords). +// +// Note: ArrayMarshaler is only used when zap.Array is used or when +// passed directly to zap.Any. It is not used when reflection-based +// encoding is used. +type ArrayMarshaler interface { + MarshalLogArray(ArrayEncoder) error +} + +// ArrayMarshalerFunc is a type adapter that turns a function into an +// ArrayMarshaler. +type ArrayMarshalerFunc func(ArrayEncoder) error + +// MarshalLogArray calls the underlying function. 
+func (f ArrayMarshalerFunc) MarshalLogArray(enc ArrayEncoder) error { + return f(enc) +} diff --git a/vendor/go.uber.org/zap/zapcore/memory_encoder.go b/vendor/go.uber.org/zap/zapcore/memory_encoder.go new file mode 100644 index 0000000000..dfead0829d --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/memory_encoder.go @@ -0,0 +1,179 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import "time" + +// MapObjectEncoder is an ObjectEncoder backed by a simple +// map[string]interface{}. It's not fast enough for production use, but it's +// helpful in tests. +type MapObjectEncoder struct { + // Fields contains the entire encoded log context. + Fields map[string]interface{} + // cur is a pointer to the namespace we're currently writing to. + cur map[string]interface{} +} + +// NewMapObjectEncoder creates a new map-backed ObjectEncoder. +func NewMapObjectEncoder() *MapObjectEncoder { + m := make(map[string]interface{}) + return &MapObjectEncoder{ + Fields: m, + cur: m, + } +} + +// AddArray implements ObjectEncoder. +func (m *MapObjectEncoder) AddArray(key string, v ArrayMarshaler) error { + arr := &sliceArrayEncoder{elems: make([]interface{}, 0)} + err := v.MarshalLogArray(arr) + m.cur[key] = arr.elems + return err +} + +// AddObject implements ObjectEncoder. +func (m *MapObjectEncoder) AddObject(k string, v ObjectMarshaler) error { + newMap := NewMapObjectEncoder() + m.cur[k] = newMap.Fields + return v.MarshalLogObject(newMap) +} + +// AddBinary implements ObjectEncoder. +func (m *MapObjectEncoder) AddBinary(k string, v []byte) { m.cur[k] = v } + +// AddByteString implements ObjectEncoder. +func (m *MapObjectEncoder) AddByteString(k string, v []byte) { m.cur[k] = string(v) } + +// AddBool implements ObjectEncoder. +func (m *MapObjectEncoder) AddBool(k string, v bool) { m.cur[k] = v } + +// AddDuration implements ObjectEncoder. +func (m MapObjectEncoder) AddDuration(k string, v time.Duration) { m.cur[k] = v } + +// AddComplex128 implements ObjectEncoder. +func (m *MapObjectEncoder) AddComplex128(k string, v complex128) { m.cur[k] = v } + +// AddComplex64 implements ObjectEncoder. +func (m *MapObjectEncoder) AddComplex64(k string, v complex64) { m.cur[k] = v } + +// AddFloat64 implements ObjectEncoder. +func (m *MapObjectEncoder) AddFloat64(k string, v float64) { m.cur[k] = v } + +// AddFloat32 implements ObjectEncoder. 
+func (m *MapObjectEncoder) AddFloat32(k string, v float32) { m.cur[k] = v } + +// AddInt implements ObjectEncoder. +func (m *MapObjectEncoder) AddInt(k string, v int) { m.cur[k] = v } + +// AddInt64 implements ObjectEncoder. +func (m *MapObjectEncoder) AddInt64(k string, v int64) { m.cur[k] = v } + +// AddInt32 implements ObjectEncoder. +func (m *MapObjectEncoder) AddInt32(k string, v int32) { m.cur[k] = v } + +// AddInt16 implements ObjectEncoder. +func (m *MapObjectEncoder) AddInt16(k string, v int16) { m.cur[k] = v } + +// AddInt8 implements ObjectEncoder. +func (m *MapObjectEncoder) AddInt8(k string, v int8) { m.cur[k] = v } + +// AddString implements ObjectEncoder. +func (m *MapObjectEncoder) AddString(k string, v string) { m.cur[k] = v } + +// AddTime implements ObjectEncoder. +func (m MapObjectEncoder) AddTime(k string, v time.Time) { m.cur[k] = v } + +// AddUint implements ObjectEncoder. +func (m *MapObjectEncoder) AddUint(k string, v uint) { m.cur[k] = v } + +// AddUint64 implements ObjectEncoder. +func (m *MapObjectEncoder) AddUint64(k string, v uint64) { m.cur[k] = v } + +// AddUint32 implements ObjectEncoder. +func (m *MapObjectEncoder) AddUint32(k string, v uint32) { m.cur[k] = v } + +// AddUint16 implements ObjectEncoder. +func (m *MapObjectEncoder) AddUint16(k string, v uint16) { m.cur[k] = v } + +// AddUint8 implements ObjectEncoder. +func (m *MapObjectEncoder) AddUint8(k string, v uint8) { m.cur[k] = v } + +// AddUintptr implements ObjectEncoder. +func (m *MapObjectEncoder) AddUintptr(k string, v uintptr) { m.cur[k] = v } + +// AddReflected implements ObjectEncoder. +func (m *MapObjectEncoder) AddReflected(k string, v interface{}) error { + m.cur[k] = v + return nil +} + +// OpenNamespace implements ObjectEncoder. +func (m *MapObjectEncoder) OpenNamespace(k string) { + ns := make(map[string]interface{}) + m.cur[k] = ns + m.cur = ns +} + +// sliceArrayEncoder is an ArrayEncoder backed by a simple []interface{}. Like +// the MapObjectEncoder, it's not designed for production use. 
+type sliceArrayEncoder struct { + elems []interface{} +} + +func (s *sliceArrayEncoder) AppendArray(v ArrayMarshaler) error { + enc := &sliceArrayEncoder{} + err := v.MarshalLogArray(enc) + s.elems = append(s.elems, enc.elems) + return err +} + +func (s *sliceArrayEncoder) AppendObject(v ObjectMarshaler) error { + m := NewMapObjectEncoder() + err := v.MarshalLogObject(m) + s.elems = append(s.elems, m.Fields) + return err +} + +func (s *sliceArrayEncoder) AppendReflected(v interface{}) error { + s.elems = append(s.elems, v) + return nil +} + +func (s *sliceArrayEncoder) AppendBool(v bool) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendByteString(v []byte) { s.elems = append(s.elems, string(v)) } +func (s *sliceArrayEncoder) AppendComplex128(v complex128) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendComplex64(v complex64) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendDuration(v time.Duration) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendFloat64(v float64) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendFloat32(v float32) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendInt(v int) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendInt64(v int64) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendInt32(v int32) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendInt16(v int16) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendInt8(v int8) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendString(v string) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendTime(v time.Time) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendUint(v uint) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendUint64(v uint64) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendUint32(v uint32) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendUint16(v uint16) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendUint8(v uint8) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendUintptr(v uintptr) { s.elems = append(s.elems, v) } diff --git a/vendor/go.uber.org/zap/zapcore/reflected_encoder.go b/vendor/go.uber.org/zap/zapcore/reflected_encoder.go new file mode 100644 index 0000000000..8746360eca --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/reflected_encoder.go @@ -0,0 +1,41 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "encoding/json" + "io" +) + +// ReflectedEncoder serializes log fields that can't be serialized with Zap's +// JSON encoder. These have the ReflectType field type. +// Use EncoderConfig.NewReflectedEncoder to set this. +type ReflectedEncoder interface { + // Encode encodes and writes to the underlying data stream. + Encode(interface{}) error +} + +func defaultReflectedEncoder(w io.Writer) ReflectedEncoder { + enc := json.NewEncoder(w) + // For consistency with our custom JSON encoder. + enc.SetEscapeHTML(false) + return enc +} diff --git a/vendor/go.uber.org/zap/zapcore/sampler.go b/vendor/go.uber.org/zap/zapcore/sampler.go new file mode 100644 index 0000000000..dc518055a4 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/sampler.go @@ -0,0 +1,230 @@ +// Copyright (c) 2016-2022 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "time" + + "go.uber.org/atomic" +) + +const ( + _numLevels = _maxLevel - _minLevel + 1 + _countersPerLevel = 4096 +) + +type counter struct { + resetAt atomic.Int64 + counter atomic.Uint64 +} + +type counters [_numLevels][_countersPerLevel]counter + +func newCounters() *counters { + return &counters{} +} + +func (cs *counters) get(lvl Level, key string) *counter { + i := lvl - _minLevel + j := fnv32a(key) % _countersPerLevel + return &cs[i][j] +} + +// fnv32a, adapted from "hash/fnv", but without a []byte(string) alloc +func fnv32a(s string) uint32 { + const ( + offset32 = 2166136261 + prime32 = 16777619 + ) + hash := uint32(offset32) + for i := 0; i < len(s); i++ { + hash ^= uint32(s[i]) + hash *= prime32 + } + return hash +} + +func (c *counter) IncCheckReset(t time.Time, tick time.Duration) uint64 { + tn := t.UnixNano() + resetAfter := c.resetAt.Load() + if resetAfter > tn { + return c.counter.Inc() + } + + c.counter.Store(1) + + newResetAfter := tn + tick.Nanoseconds() + if !c.resetAt.CAS(resetAfter, newResetAfter) { + // We raced with another goroutine trying to reset, and it also reset + // the counter to 1, so we need to reincrement the counter. + return c.counter.Inc() + } + + return 1 +} + +// SamplingDecision is a decision represented as a bit field made by sampler. 
+// More decisions may be added in the future. +type SamplingDecision uint32 + +const ( + // LogDropped indicates that the Sampler dropped a log entry. + LogDropped SamplingDecision = 1 << iota + // LogSampled indicates that the Sampler sampled a log entry. + LogSampled +) + +// optionFunc wraps a func so it satisfies the SamplerOption interface. +type optionFunc func(*sampler) + +func (f optionFunc) apply(s *sampler) { + f(s) +} + +// SamplerOption configures a Sampler. +type SamplerOption interface { + apply(*sampler) +} + +// nopSamplingHook is the default hook used by sampler. +func nopSamplingHook(Entry, SamplingDecision) {} + +// SamplerHook registers a function which will be called when Sampler makes a +// decision. +// +// This hook may be used to get visibility into the performance of the sampler. +// For example, use it to track metrics of dropped versus sampled logs. +// +// var dropped atomic.Int64 +// zapcore.SamplerHook(func(ent zapcore.Entry, dec zapcore.SamplingDecision) { +// if dec&zapcore.LogDropped > 0 { +// dropped.Inc() +// } +// }) +func SamplerHook(hook func(entry Entry, dec SamplingDecision)) SamplerOption { + return optionFunc(func(s *sampler) { + s.hook = hook + }) +} + +// NewSamplerWithOptions creates a Core that samples incoming entries, which +// caps the CPU and I/O load of logging while attempting to preserve a +// representative subset of your logs. +// +// Zap samples by logging the first N entries with a given level and message +// each tick. If more Entries with the same level and message are seen during +// the same interval, every Mth message is logged and the rest are dropped. +// +// For example, +// +// core = NewSamplerWithOptions(core, time.Second, 10, 5) +// +// This will log the first 10 log entries with the same level and message +// in a one second interval as-is. Following that, it will allow through +// every 5th log entry with the same level and message in that interval. +// +// If thereafter is zero, the Core will drop all log entries after the first N +// in that interval. +// +// Sampler can be configured to report sampling decisions with the SamplerHook +// option. +// +// Keep in mind that Zap's sampling implementation is optimized for speed over +// absolute precision; under load, each tick may be slightly over- or +// under-sampled. +func NewSamplerWithOptions(core Core, tick time.Duration, first, thereafter int, opts ...SamplerOption) Core { + s := &sampler{ + Core: core, + tick: tick, + counts: newCounters(), + first: uint64(first), + thereafter: uint64(thereafter), + hook: nopSamplingHook, + } + for _, opt := range opts { + opt.apply(s) + } + + return s +} + +type sampler struct { + Core + + counts *counters + tick time.Duration + first, thereafter uint64 + hook func(Entry, SamplingDecision) +} + +var ( + _ Core = (*sampler)(nil) + _ leveledEnabler = (*sampler)(nil) +) + +// NewSampler creates a Core that samples incoming entries, which +// caps the CPU and I/O load of logging while attempting to preserve a +// representative subset of your logs. +// +// Zap samples by logging the first N entries with a given level and message +// each tick. If more Entries with the same level and message are seen during +// the same interval, every Mth message is logged and the rest are dropped. +// +// Keep in mind that zap's sampling implementation is optimized for speed over +// absolute precision; under load, each tick may be slightly over- or +// under-sampled. +// +// Deprecated: use NewSamplerWithOptions. 
+func NewSampler(core Core, tick time.Duration, first, thereafter int) Core { + return NewSamplerWithOptions(core, tick, first, thereafter) +} + +func (s *sampler) Level() Level { + return LevelOf(s.Core) +} + +func (s *sampler) With(fields []Field) Core { + return &sampler{ + Core: s.Core.With(fields), + tick: s.tick, + counts: s.counts, + first: s.first, + thereafter: s.thereafter, + hook: s.hook, + } +} + +func (s *sampler) Check(ent Entry, ce *CheckedEntry) *CheckedEntry { + if !s.Enabled(ent.Level) { + return ce + } + + if ent.Level >= _minLevel && ent.Level <= _maxLevel { + counter := s.counts.get(ent.Level, ent.Message) + n := counter.IncCheckReset(ent.Time, s.tick) + if n > s.first && (s.thereafter == 0 || (n-s.first)%s.thereafter != 0) { + s.hook(ent, LogDropped) + return ce + } + s.hook(ent, LogSampled) + } + return s.Core.Check(ent, ce) +} diff --git a/vendor/go.uber.org/zap/zapcore/tee.go b/vendor/go.uber.org/zap/zapcore/tee.go new file mode 100644 index 0000000000..9bb32f0557 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/tee.go @@ -0,0 +1,96 @@ +// Copyright (c) 2016-2022 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import "go.uber.org/multierr" + +type multiCore []Core + +var ( + _ leveledEnabler = multiCore(nil) + _ Core = multiCore(nil) +) + +// NewTee creates a Core that duplicates log entries into two or more +// underlying Cores. +// +// Calling it with a single Core returns the input unchanged, and calling +// it with no input returns a no-op Core. 
+func NewTee(cores ...Core) Core { + switch len(cores) { + case 0: + return NewNopCore() + case 1: + return cores[0] + default: + return multiCore(cores) + } +} + +func (mc multiCore) With(fields []Field) Core { + clone := make(multiCore, len(mc)) + for i := range mc { + clone[i] = mc[i].With(fields) + } + return clone +} + +func (mc multiCore) Level() Level { + minLvl := _maxLevel // mc is never empty + for i := range mc { + if lvl := LevelOf(mc[i]); lvl < minLvl { + minLvl = lvl + } + } + return minLvl +} + +func (mc multiCore) Enabled(lvl Level) bool { + for i := range mc { + if mc[i].Enabled(lvl) { + return true + } + } + return false +} + +func (mc multiCore) Check(ent Entry, ce *CheckedEntry) *CheckedEntry { + for i := range mc { + ce = mc[i].Check(ent, ce) + } + return ce +} + +func (mc multiCore) Write(ent Entry, fields []Field) error { + var err error + for i := range mc { + err = multierr.Append(err, mc[i].Write(ent, fields)) + } + return err +} + +func (mc multiCore) Sync() error { + var err error + for i := range mc { + err = multierr.Append(err, mc[i].Sync()) + } + return err +} diff --git a/vendor/go.uber.org/zap/zapcore/write_syncer.go b/vendor/go.uber.org/zap/zapcore/write_syncer.go new file mode 100644 index 0000000000..d4a1af3d07 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/write_syncer.go @@ -0,0 +1,122 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "io" + "sync" + + "go.uber.org/multierr" +) + +// A WriteSyncer is an io.Writer that can also flush any buffered data. Note +// that *os.File (and thus, os.Stderr and os.Stdout) implement WriteSyncer. +type WriteSyncer interface { + io.Writer + Sync() error +} + +// AddSync converts an io.Writer to a WriteSyncer. It attempts to be +// intelligent: if the concrete type of the io.Writer implements WriteSyncer, +// we'll use the existing Sync method. If it doesn't, we'll add a no-op Sync. +func AddSync(w io.Writer) WriteSyncer { + switch w := w.(type) { + case WriteSyncer: + return w + default: + return writerWrapper{w} + } +} + +type lockedWriteSyncer struct { + sync.Mutex + ws WriteSyncer +} + +// Lock wraps a WriteSyncer in a mutex to make it safe for concurrent use. In +// particular, *os.Files must be locked before use. 
+func Lock(ws WriteSyncer) WriteSyncer { + if _, ok := ws.(*lockedWriteSyncer); ok { + // no need to layer on another lock + return ws + } + return &lockedWriteSyncer{ws: ws} +} + +func (s *lockedWriteSyncer) Write(bs []byte) (int, error) { + s.Lock() + n, err := s.ws.Write(bs) + s.Unlock() + return n, err +} + +func (s *lockedWriteSyncer) Sync() error { + s.Lock() + err := s.ws.Sync() + s.Unlock() + return err +} + +type writerWrapper struct { + io.Writer +} + +func (w writerWrapper) Sync() error { + return nil +} + +type multiWriteSyncer []WriteSyncer + +// NewMultiWriteSyncer creates a WriteSyncer that duplicates its writes +// and sync calls, much like io.MultiWriter. +func NewMultiWriteSyncer(ws ...WriteSyncer) WriteSyncer { + if len(ws) == 1 { + return ws[0] + } + return multiWriteSyncer(ws) +} + +// See https://golang.org/src/io/multi.go +// When not all underlying syncers write the same number of bytes, +// the smallest number is returned even though Write() is called on +// all of them. +func (ws multiWriteSyncer) Write(p []byte) (int, error) { + var writeErr error + nWritten := 0 + for _, w := range ws { + n, err := w.Write(p) + writeErr = multierr.Append(writeErr, err) + if nWritten == 0 && n != 0 { + nWritten = n + } else if n < nWritten { + nWritten = n + } + } + return nWritten, writeErr +} + +func (ws multiWriteSyncer) Sync() error { + var err error + for _, w := range ws { + err = multierr.Append(err, w.Sync()) + } + return err +} diff --git a/vendor/golang.org/x/exp/LICENSE b/vendor/golang.org/x/exp/LICENSE new file mode 100644 index 0000000000..6a66aea5ea --- /dev/null +++ b/vendor/golang.org/x/exp/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/exp/PATENTS b/vendor/golang.org/x/exp/PATENTS new file mode 100644 index 0000000000..733099041f --- /dev/null +++ b/vendor/golang.org/x/exp/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. 
+ +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/exp/maps/maps.go b/vendor/golang.org/x/exp/maps/maps.go new file mode 100644 index 0000000000..ecc0dabb74 --- /dev/null +++ b/vendor/golang.org/x/exp/maps/maps.go @@ -0,0 +1,94 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package maps defines various functions useful with maps of any type. +package maps + +// Keys returns the keys of the map m. +// The keys will be in an indeterminate order. +func Keys[M ~map[K]V, K comparable, V any](m M) []K { + r := make([]K, 0, len(m)) + for k := range m { + r = append(r, k) + } + return r +} + +// Values returns the values of the map m. +// The values will be in an indeterminate order. +func Values[M ~map[K]V, K comparable, V any](m M) []V { + r := make([]V, 0, len(m)) + for _, v := range m { + r = append(r, v) + } + return r +} + +// Equal reports whether two maps contain the same key/value pairs. +// Values are compared using ==. +func Equal[M1, M2 ~map[K]V, K, V comparable](m1 M1, m2 M2) bool { + if len(m1) != len(m2) { + return false + } + for k, v1 := range m1 { + if v2, ok := m2[k]; !ok || v1 != v2 { + return false + } + } + return true +} + +// EqualFunc is like Equal, but compares values using eq. +// Keys are still compared with ==. +func EqualFunc[M1 ~map[K]V1, M2 ~map[K]V2, K comparable, V1, V2 any](m1 M1, m2 M2, eq func(V1, V2) bool) bool { + if len(m1) != len(m2) { + return false + } + for k, v1 := range m1 { + if v2, ok := m2[k]; !ok || !eq(v1, v2) { + return false + } + } + return true +} + +// Clear removes all entries from m, leaving it empty. +func Clear[M ~map[K]V, K comparable, V any](m M) { + for k := range m { + delete(m, k) + } +} + +// Clone returns a copy of m. This is a shallow clone: +// the new keys and values are set using ordinary assignment. +func Clone[M ~map[K]V, K comparable, V any](m M) M { + // Preserve nil in case it matters. + if m == nil { + return nil + } + r := make(M, len(m)) + for k, v := range m { + r[k] = v + } + return r +} + +// Copy copies all key/value pairs in src adding them to dst. +// When a key in src is already present in dst, +// the value in dst will be overwritten by the value associated +// with the key in src. 
+func Copy[M1 ~map[K]V, M2 ~map[K]V, K comparable, V any](dst M1, src M2) { + for k, v := range src { + dst[k] = v + } +} + +// DeleteFunc deletes any key/value pairs from m for which del returns true. +func DeleteFunc[M ~map[K]V, K comparable, V any](m M, del func(K, V) bool) { + for k, v := range m { + if del(k, v) { + delete(m, k) + } + } +} diff --git a/vendor/golang.org/x/exp/typeparams/typeparams_go117.go b/vendor/golang.org/x/exp/typeparams/typeparams_go117.go index efc33f10f3..c1da793168 100644 --- a/vendor/golang.org/x/exp/typeparams/typeparams_go117.go +++ b/vendor/golang.org/x/exp/typeparams/typeparams_go117.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !go1.18 -// +build !go1.18 package typeparams diff --git a/vendor/golang.org/x/exp/typeparams/typeparams_go118.go b/vendor/golang.org/x/exp/typeparams/typeparams_go118.go index 1176104b20..0b35449d15 100644 --- a/vendor/golang.org/x/exp/typeparams/typeparams_go118.go +++ b/vendor/golang.org/x/exp/typeparams/typeparams_go118.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build go1.18 -// +build go1.18 package typeparams diff --git a/vendor/golang.org/x/mod/internal/lazyregexp/lazyre.go b/vendor/golang.org/x/mod/internal/lazyregexp/lazyre.go index 2681af35af..150f887e7a 100644 --- a/vendor/golang.org/x/mod/internal/lazyregexp/lazyre.go +++ b/vendor/golang.org/x/mod/internal/lazyregexp/lazyre.go @@ -13,7 +13,7 @@ import ( "sync" ) -// Regexp is a wrapper around regexp.Regexp, where the underlying regexp will be +// Regexp is a wrapper around [regexp.Regexp], where the underlying regexp will be // compiled the first time it is needed. type Regexp struct { str string diff --git a/vendor/golang.org/x/mod/modfile/print.go b/vendor/golang.org/x/mod/modfile/print.go index 524f93022a..2a0123d4b9 100644 --- a/vendor/golang.org/x/mod/modfile/print.go +++ b/vendor/golang.org/x/mod/modfile/print.go @@ -16,7 +16,13 @@ import ( func Format(f *FileSyntax) []byte { pr := &printer{} pr.file(f) - return pr.Bytes() + + // remove trailing blank lines + b := pr.Bytes() + for len(b) > 0 && b[len(b)-1] == '\n' && (len(b) == 1 || b[len(b)-2] == '\n') { + b = b[:len(b)-1] + } + return b } // A printer collects the state during printing of a file or expression. @@ -59,7 +65,11 @@ func (p *printer) newline() { } p.trim() - p.printf("\n") + if b := p.Bytes(); len(b) == 0 || (len(b) >= 2 && b[len(b)-1] == '\n' && b[len(b)-2] == '\n') { + // skip the blank line at top of file or after a blank line + } else { + p.printf("\n") + } for i := 0; i < p.margin; i++ { p.printf("\t") } diff --git a/vendor/golang.org/x/mod/modfile/read.go b/vendor/golang.org/x/mod/modfile/read.go index a503bc2105..5b5bb5e115 100644 --- a/vendor/golang.org/x/mod/modfile/read.go +++ b/vendor/golang.org/x/mod/modfile/read.go @@ -65,7 +65,7 @@ type Comments struct { } // Comment returns the receiver. This isn't useful by itself, but -// a Comments struct is embedded into all the expression +// a [Comments] struct is embedded into all the expression // implementation types, and this gives each of those a Comment // method to satisfy the Expr interface. func (c *Comments) Comment() *Comments { diff --git a/vendor/golang.org/x/mod/modfile/rule.go b/vendor/golang.org/x/mod/modfile/rule.go index 6bcde8fabe..35fd1f534c 100644 --- a/vendor/golang.org/x/mod/modfile/rule.go +++ b/vendor/golang.org/x/mod/modfile/rule.go @@ -5,17 +5,17 @@ // Package modfile implements a parser and formatter for go.mod files. 
// // The go.mod syntax is described in -// https://golang.org/cmd/go/#hdr-The_go_mod_file. +// https://pkg.go.dev/cmd/go/#hdr-The_go_mod_file. // -// The Parse and ParseLax functions both parse a go.mod file and return an +// The [Parse] and [ParseLax] functions both parse a go.mod file and return an // abstract syntax tree. ParseLax ignores unknown statements and may be used to // parse go.mod files that may have been developed with newer versions of Go. // -// The File struct returned by Parse and ParseLax represent an abstract -// go.mod file. File has several methods like AddNewRequire and DropReplace -// that can be used to programmatically edit a file. +// The [File] struct returned by Parse and ParseLax represent an abstract +// go.mod file. File has several methods like [File.AddNewRequire] and +// [File.DropReplace] that can be used to programmatically edit a file. // -// The Format function formats a File back to a byte slice which can be +// The [Format] function formats a File back to a byte slice which can be // written to a file. package modfile @@ -35,12 +35,13 @@ import ( // A File is the parsed, interpreted form of a go.mod file. type File struct { - Module *Module - Go *Go - Require []*Require - Exclude []*Exclude - Replace []*Replace - Retract []*Retract + Module *Module + Go *Go + Toolchain *Toolchain + Require []*Require + Exclude []*Exclude + Replace []*Replace + Retract []*Retract Syntax *FileSyntax } @@ -58,6 +59,12 @@ type Go struct { Syntax *Line } +// A Toolchain is the toolchain statement. +type Toolchain struct { + Name string // "go1.21rc1" + Syntax *Line +} + // An Exclude is a single exclude statement. type Exclude struct { Mod module.Version @@ -219,7 +226,7 @@ var dontFixRetract VersionFixer = func(_, vers string) (string, error) { // data is the content of the file. // // fix is an optional function that canonicalizes module versions. -// If fix is nil, all module versions must be canonical (module.CanonicalVersion +// If fix is nil, all module versions must be canonical ([module.CanonicalVersion] // must return the same string). func Parse(file string, data []byte, fix VersionFixer) (*File, error) { return parseToFile(file, data, fix, true) @@ -296,9 +303,13 @@ func parseToFile(file string, data []byte, fix VersionFixer, strict bool) (parse return f, nil } -var GoVersionRE = lazyregexp.New(`^([1-9][0-9]*)\.(0|[1-9][0-9]*)$`) +var GoVersionRE = lazyregexp.New(`^([1-9][0-9]*)\.(0|[1-9][0-9]*)(\.(0|[1-9][0-9]*))?([a-z]+[0-9]+)?$`) var laxGoVersionRE = lazyregexp.New(`^v?(([1-9][0-9]*)\.(0|[1-9][0-9]*))([^0-9].*)$`) +// Toolchains must be named beginning with `go1`, +// like "go1.20.3" or "go1.20.3-gccgo". As a special case, "default" is also permitted. +var ToolchainRE = lazyregexp.New(`^default$|^go1($|\.)`) + func (f *File) add(errs *ErrorList, block *LineBlock, line *Line, verb string, args []string, fix VersionFixer, strict bool) { // If strict is false, this module is a dependency. 
// We ignore all unknown directives as well as main-module-only @@ -356,7 +367,7 @@ func (f *File) add(errs *ErrorList, block *LineBlock, line *Line, verb string, a } } if !fixed { - errorf("invalid go version '%s': must match format 1.23", args[0]) + errorf("invalid go version '%s': must match format 1.23.0", args[0]) return } } @@ -364,6 +375,21 @@ func (f *File) add(errs *ErrorList, block *LineBlock, line *Line, verb string, a f.Go = &Go{Syntax: line} f.Go.Version = args[0] + case "toolchain": + if f.Toolchain != nil { + errorf("repeated toolchain statement") + return + } + if len(args) != 1 { + errorf("toolchain directive expects exactly one argument") + return + } else if strict && !ToolchainRE.MatchString(args[0]) { + errorf("invalid toolchain version '%s': must match format go1.23.0 or local", args[0]) + return + } + f.Toolchain = &Toolchain{Syntax: line} + f.Toolchain.Name = args[0] + case "module": if f.Module != nil { errorf("repeated module statement") @@ -516,7 +542,7 @@ func parseReplace(filename string, line *Line, verb string, args []string, fix V if strings.Contains(ns, "@") { return nil, errorf("replacement module must match format 'path version', not 'path@version'") } - return nil, errorf("replacement module without version must be directory path (rooted or starting with ./ or ../)") + return nil, errorf("replacement module without version must be directory path (rooted or starting with . or ..)") } if filepath.Separator == '/' && strings.Contains(ns, `\`) { return nil, errorf("replacement directory appears to be Windows path (on a non-windows system)") @@ -529,7 +555,6 @@ func parseReplace(filename string, line *Line, verb string, args []string, fix V } if IsDirectoryPath(ns) { return nil, errorf("replacement module directory path %q cannot have version", ns) - } } return &Replace{ @@ -612,6 +637,22 @@ func (f *WorkFile) add(errs *ErrorList, line *Line, verb string, args []string, f.Go = &Go{Syntax: line} f.Go.Version = args[0] + case "toolchain": + if f.Toolchain != nil { + errorf("repeated toolchain statement") + return + } + if len(args) != 1 { + errorf("toolchain directive expects exactly one argument") + return + } else if !ToolchainRE.MatchString(args[0]) { + errorf("invalid toolchain version '%s': must match format go1.23 or local", args[0]) + return + } + + f.Toolchain = &Toolchain{Syntax: line} + f.Toolchain.Name = args[0] + case "use": if len(args) != 1 { errorf("usage: %s local/dir", verb) @@ -637,14 +678,15 @@ func (f *WorkFile) add(errs *ErrorList, line *Line, verb string, args []string, } } -// IsDirectoryPath reports whether the given path should be interpreted -// as a directory path. Just like on the go command line, relative paths +// IsDirectoryPath reports whether the given path should be interpreted as a directory path. +// Just like on the go command line, relative paths starting with a '.' or '..' path component // and rooted paths are directory paths; the rest are module paths. func IsDirectoryPath(ns string) bool { // Because go.mod files can move from one system to another, // we check all known path syntaxes, both Unix and Windows. - return strings.HasPrefix(ns, "./") || strings.HasPrefix(ns, "../") || strings.HasPrefix(ns, "/") || - strings.HasPrefix(ns, `.\`) || strings.HasPrefix(ns, `..\`) || strings.HasPrefix(ns, `\`) || + return ns == "." || strings.HasPrefix(ns, "./") || strings.HasPrefix(ns, `.\`) || + ns == ".." 
|| strings.HasPrefix(ns, "../") || strings.HasPrefix(ns, `..\`) || + strings.HasPrefix(ns, "/") || strings.HasPrefix(ns, `\`) || len(ns) >= 2 && ('A' <= ns[0] && ns[0] <= 'Z' || 'a' <= ns[0] && ns[0] <= 'z') && ns[1] == ':' } @@ -881,7 +923,7 @@ func (f *File) Format() ([]byte, error) { } // Cleanup cleans up the file f after any edit operations. -// To avoid quadratic behavior, modifications like DropRequire +// To avoid quadratic behavior, modifications like [File.DropRequire] // clear the entry but do not remove it from the slice. // Cleanup cleans out all the cleared entries. func (f *File) Cleanup() { @@ -926,7 +968,7 @@ func (f *File) Cleanup() { func (f *File) AddGoStmt(version string) error { if !GoVersionRE.MatchString(version) { - return fmt.Errorf("invalid language version string %q", version) + return fmt.Errorf("invalid language version %q", version) } if f.Go == nil { var hint Expr @@ -944,6 +986,44 @@ func (f *File) AddGoStmt(version string) error { return nil } +// DropGoStmt deletes the go statement from the file. +func (f *File) DropGoStmt() { + if f.Go != nil { + f.Go.Syntax.markRemoved() + f.Go = nil + } +} + +// DropToolchainStmt deletes the toolchain statement from the file. +func (f *File) DropToolchainStmt() { + if f.Toolchain != nil { + f.Toolchain.Syntax.markRemoved() + f.Toolchain = nil + } +} + +func (f *File) AddToolchainStmt(name string) error { + if !ToolchainRE.MatchString(name) { + return fmt.Errorf("invalid toolchain name %q", name) + } + if f.Toolchain == nil { + var hint Expr + if f.Go != nil && f.Go.Syntax != nil { + hint = f.Go.Syntax + } else if f.Module != nil && f.Module.Syntax != nil { + hint = f.Module.Syntax + } + f.Toolchain = &Toolchain{ + Name: name, + Syntax: f.Syntax.addLine(hint, "toolchain", name), + } + } else { + f.Toolchain.Name = name + f.Syntax.updateLine(f.Toolchain.Syntax, "toolchain", name) + } + return nil +} + // AddRequire sets the first require line for path to version vers, // preserving any existing comments for that line and removing all // other lines for path. @@ -995,8 +1075,8 @@ func (f *File) AddNewRequire(path, vers string, indirect bool) { // The requirements in req must specify at most one distinct version for each // module path. // -// If any existing requirements may be removed, the caller should call Cleanup -// after all edits are complete. +// If any existing requirements may be removed, the caller should call +// [File.Cleanup] after all edits are complete. func (f *File) SetRequire(req []*Require) { type elem struct { version string @@ -1387,13 +1467,21 @@ func (f *File) DropRetract(vi VersionInterval) error { func (f *File) SortBlocks() { f.removeDups() // otherwise sorting is unsafe + // semanticSortForExcludeVersionV is the Go version (plus leading "v") at which + // lines in exclude blocks start to use semantic sort instead of lexicographic sort. + // See go.dev/issue/60028. 
+ const semanticSortForExcludeVersionV = "v1.21" + useSemanticSortForExclude := f.Go != nil && semver.Compare("v"+f.Go.Version, semanticSortForExcludeVersionV) >= 0 + for _, stmt := range f.Syntax.Stmt { block, ok := stmt.(*LineBlock) if !ok { continue } less := lineLess - if block.Token[0] == "retract" { + if block.Token[0] == "exclude" && useSemanticSortForExclude { + less = lineExcludeLess + } else if block.Token[0] == "retract" { less = lineRetractLess } sort.SliceStable(block.Line, func(i, j int) bool { @@ -1496,6 +1584,22 @@ func lineLess(li, lj *Line) bool { return len(li.Token) < len(lj.Token) } +// lineExcludeLess reports whether li should be sorted before lj for lines in +// an "exclude" block. +func lineExcludeLess(li, lj *Line) bool { + if len(li.Token) != 2 || len(lj.Token) != 2 { + // Not a known exclude specification. + // Fall back to sorting lexicographically. + return lineLess(li, lj) + } + // An exclude specification has two tokens: ModulePath and Version. + // Compare module path by string order and version by semver rules. + if pi, pj := li.Token[0], lj.Token[0]; pi != pj { + return pi < pj + } + return semver.Compare(li.Token[1], lj.Token[1]) < 0 +} + // lineRetractLess returns whether li should be sorted before lj for lines in // a "retract" block. It treats each line as a version interval. Single versions // are compared as if they were intervals with the same low and high version. diff --git a/vendor/golang.org/x/mod/modfile/work.go b/vendor/golang.org/x/mod/modfile/work.go index 0c0e521525..d7b99376eb 100644 --- a/vendor/golang.org/x/mod/modfile/work.go +++ b/vendor/golang.org/x/mod/modfile/work.go @@ -12,9 +12,10 @@ import ( // A WorkFile is the parsed, interpreted form of a go.work file. type WorkFile struct { - Go *Go - Use []*Use - Replace []*Replace + Go *Go + Toolchain *Toolchain + Use []*Use + Replace []*Replace Syntax *FileSyntax } @@ -33,7 +34,7 @@ type Use struct { // data is the content of the file. // // fix is an optional function that canonicalizes module versions. -// If fix is nil, all module versions must be canonical (module.CanonicalVersion +// If fix is nil, all module versions must be canonical ([module.CanonicalVersion] // must return the same string). func ParseWork(file string, data []byte, fix VersionFixer) (*WorkFile, error) { fs, err := parse(file, data) @@ -82,7 +83,7 @@ func ParseWork(file string, data []byte, fix VersionFixer) (*WorkFile, error) { } // Cleanup cleans up the file f after any edit operations. -// To avoid quadratic behavior, modifications like DropRequire +// To avoid quadratic behavior, modifications like [WorkFile.DropRequire] // clear the entry but do not remove it from the slice. // Cleanup cleans out all the cleared entries. func (f *WorkFile) Cleanup() { @@ -109,7 +110,7 @@ func (f *WorkFile) Cleanup() { func (f *WorkFile) AddGoStmt(version string) error { if !GoVersionRE.MatchString(version) { - return fmt.Errorf("invalid language version string %q", version) + return fmt.Errorf("invalid language version %q", version) } if f.Go == nil { stmt := &Line{Token: []string{"go", version}} @@ -117,7 +118,7 @@ func (f *WorkFile) AddGoStmt(version string) error { Version: version, Syntax: stmt, } - // Find the first non-comment-only block that's and add + // Find the first non-comment-only block and add // the go statement before it. That will keep file comments at the top. 
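Aside, not part of the vendored diff: a minimal sketch of how the toolchain support added above in golang.org/x/mod/modfile can be exercised. The module path and version strings are made up for illustration.

package main

import (
	"fmt"
	"log"

	"golang.org/x/mod/modfile"
)

func main() {
	// A hypothetical go.mod edited in memory.
	src := []byte("module example.com/m\n\ngo 1.20\n")

	f, err := modfile.Parse("go.mod", src, nil)
	if err != nil {
		log.Fatal(err)
	}
	// Bump the language version and add a toolchain line; AddToolchainStmt
	// places the new line after the existing go directive.
	if err := f.AddGoStmt("1.21"); err != nil {
		log.Fatal(err)
	}
	if err := f.AddToolchainStmt("go1.21.0"); err != nil {
		log.Fatal(err)
	}
	out, err := f.Format()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Print(string(out)) // prints the module, go, and toolchain lines
}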
i := 0 for i = 0; i < len(f.Syntax.Stmt); i++ { @@ -133,6 +134,56 @@ func (f *WorkFile) AddGoStmt(version string) error { return nil } +func (f *WorkFile) AddToolchainStmt(name string) error { + if !ToolchainRE.MatchString(name) { + return fmt.Errorf("invalid toolchain name %q", name) + } + if f.Toolchain == nil { + stmt := &Line{Token: []string{"toolchain", name}} + f.Toolchain = &Toolchain{ + Name: name, + Syntax: stmt, + } + // Find the go line and add the toolchain line after it. + // Or else find the first non-comment-only block and add + // the toolchain line before it. That will keep file comments at the top. + i := 0 + for i = 0; i < len(f.Syntax.Stmt); i++ { + if line, ok := f.Syntax.Stmt[i].(*Line); ok && len(line.Token) > 0 && line.Token[0] == "go" { + i++ + goto Found + } + } + for i = 0; i < len(f.Syntax.Stmt); i++ { + if _, ok := f.Syntax.Stmt[i].(*CommentBlock); !ok { + break + } + } + Found: + f.Syntax.Stmt = append(append(f.Syntax.Stmt[:i:i], stmt), f.Syntax.Stmt[i:]...) + } else { + f.Toolchain.Name = name + f.Syntax.updateLine(f.Toolchain.Syntax, "toolchain", name) + } + return nil +} + +// DropGoStmt deletes the go statement from the file. +func (f *WorkFile) DropGoStmt() { + if f.Go != nil { + f.Go.Syntax.markRemoved() + f.Go = nil + } +} + +// DropToolchainStmt deletes the toolchain statement from the file. +func (f *WorkFile) DropToolchainStmt() { + if f.Toolchain != nil { + f.Toolchain.Syntax.markRemoved() + f.Toolchain = nil + } +} + func (f *WorkFile) AddUse(diskPath, modulePath string) error { need := true for _, d := range f.Use { diff --git a/vendor/golang.org/x/mod/module/module.go b/vendor/golang.org/x/mod/module/module.go index e9dec6e614..2a364b229b 100644 --- a/vendor/golang.org/x/mod/module/module.go +++ b/vendor/golang.org/x/mod/module/module.go @@ -4,7 +4,7 @@ // Package module defines the module.Version type along with support code. // -// The module.Version type is a simple Path, Version pair: +// The [module.Version] type is a simple Path, Version pair: // // type Version struct { // Path string @@ -12,7 +12,7 @@ // } // // There are no restrictions imposed directly by use of this structure, -// but additional checking functions, most notably Check, verify that +// but additional checking functions, most notably [Check], verify that // a particular path, version pair is valid. // // # Escaped Paths @@ -140,7 +140,7 @@ type ModuleError struct { Err error } -// VersionError returns a ModuleError derived from a Version and error, +// VersionError returns a [ModuleError] derived from a [Version] and error, // or err itself if it is already such an error. func VersionError(v Version, err error) error { var mErr *ModuleError @@ -169,7 +169,7 @@ func (e *ModuleError) Unwrap() error { return e.Err } // An InvalidVersionError indicates an error specific to a version, with the // module path unknown or specified externally. // -// A ModuleError may wrap an InvalidVersionError, but an InvalidVersionError +// A [ModuleError] may wrap an InvalidVersionError, but an InvalidVersionError // must not wrap a ModuleError. type InvalidVersionError struct { Version string @@ -193,8 +193,8 @@ func (e *InvalidVersionError) Error() string { func (e *InvalidVersionError) Unwrap() error { return e.Err } // An InvalidPathError indicates a module, import, or file path doesn't -// satisfy all naming constraints. See CheckPath, CheckImportPath, -// and CheckFilePath for specific restrictions. +// satisfy all naming constraints. 
See [CheckPath], [CheckImportPath], +// and [CheckFilePath] for specific restrictions. type InvalidPathError struct { Kind string // "module", "import", or "file" Path string @@ -294,7 +294,7 @@ func fileNameOK(r rune) bool { } // CheckPath checks that a module path is valid. -// A valid module path is a valid import path, as checked by CheckImportPath, +// A valid module path is a valid import path, as checked by [CheckImportPath], // with three additional constraints. // First, the leading path element (up to the first slash, if any), // by convention a domain name, must contain only lower-case ASCII letters, @@ -380,7 +380,7 @@ const ( // checkPath returns an error describing why the path is not valid. // Because these checks apply to module, import, and file paths, // and because other checks may be applied, the caller is expected to wrap -// this error with InvalidPathError. +// this error with [InvalidPathError]. func checkPath(path string, kind pathKind) error { if !utf8.ValidString(path) { return fmt.Errorf("invalid UTF-8") @@ -532,7 +532,7 @@ var badWindowsNames = []string{ // they require ".vN" instead of "/vN", and for all N, not just N >= 2. // SplitPathVersion returns with ok = false when presented with // a path whose last path element does not satisfy the constraints -// applied by CheckPath, such as "example.com/pkg/v1" or "example.com/pkg/v1.2". +// applied by [CheckPath], such as "example.com/pkg/v1" or "example.com/pkg/v1.2". func SplitPathVersion(path string) (prefix, pathMajor string, ok bool) { if strings.HasPrefix(path, "gopkg.in/") { return splitGopkgIn(path) @@ -582,7 +582,7 @@ func splitGopkgIn(path string) (prefix, pathMajor string, ok bool) { // MatchPathMajor reports whether the semantic version v // matches the path major version pathMajor. // -// MatchPathMajor returns true if and only if CheckPathMajor returns nil. +// MatchPathMajor returns true if and only if [CheckPathMajor] returns nil. func MatchPathMajor(v, pathMajor string) bool { return CheckPathMajor(v, pathMajor) == nil } @@ -622,7 +622,7 @@ func CheckPathMajor(v, pathMajor string) error { // PathMajorPrefix returns the major-version tag prefix implied by pathMajor. // An empty PathMajorPrefix allows either v0 or v1. // -// Note that MatchPathMajor may accept some versions that do not actually begin +// Note that [MatchPathMajor] may accept some versions that do not actually begin // with this prefix: namely, it accepts a 'v0.0.0-' prefix for a '.v1' // pathMajor, even though that pathMajor implies 'v1' tagging. func PathMajorPrefix(pathMajor string) string { @@ -643,7 +643,7 @@ func PathMajorPrefix(pathMajor string) string { } // CanonicalVersion returns the canonical form of the version string v. -// It is the same as semver.Canonical(v) except that it preserves the special build suffix "+incompatible". +// It is the same as [semver.Canonical] except that it preserves the special build suffix "+incompatible". func CanonicalVersion(v string) string { cv := semver.Canonical(v) if semver.Build(v) == "+incompatible" { @@ -652,8 +652,8 @@ func CanonicalVersion(v string) string { return cv } -// Sort sorts the list by Path, breaking ties by comparing Version fields. -// The Version fields are interpreted as semantic versions (using semver.Compare) +// Sort sorts the list by Path, breaking ties by comparing [Version] fields. 
+// The Version fields are interpreted as semantic versions (using [semver.Compare]) // optionally followed by a tie-breaking suffix introduced by a slash character, // like in "v0.0.1/go.mod". func Sort(list []Version) { @@ -793,7 +793,7 @@ func unescapeString(escaped string) (string, bool) { } // MatchPrefixPatterns reports whether any path prefix of target matches one of -// the glob patterns (as defined by path.Match) in the comma-separated globs +// the glob patterns (as defined by [path.Match]) in the comma-separated globs // list. This implements the algorithm used when matching a module path to the // GOPRIVATE environment variable, as described by 'go help module-private'. // diff --git a/vendor/golang.org/x/mod/module/pseudo.go b/vendor/golang.org/x/mod/module/pseudo.go index f04ad37886..9cf19d3254 100644 --- a/vendor/golang.org/x/mod/module/pseudo.go +++ b/vendor/golang.org/x/mod/module/pseudo.go @@ -125,7 +125,7 @@ func IsPseudoVersion(v string) bool { } // IsZeroPseudoVersion returns whether v is a pseudo-version with a zero base, -// timestamp, and revision, as returned by ZeroPseudoVersion. +// timestamp, and revision, as returned by [ZeroPseudoVersion]. func IsZeroPseudoVersion(v string) bool { return v == ZeroPseudoVersion(semver.Major(v)) } diff --git a/vendor/golang.org/x/mod/semver/semver.go b/vendor/golang.org/x/mod/semver/semver.go index a30a22bf20..9a2dfd33a7 100644 --- a/vendor/golang.org/x/mod/semver/semver.go +++ b/vendor/golang.org/x/mod/semver/semver.go @@ -140,7 +140,7 @@ func Compare(v, w string) int { // Max canonicalizes its arguments and then returns the version string // that compares greater. // -// Deprecated: use Compare instead. In most cases, returning a canonicalized +// Deprecated: use [Compare] instead. In most cases, returning a canonicalized // version is not expected or desired. func Max(v, w string) string { v = Canonical(v) @@ -151,7 +151,7 @@ func Max(v, w string) string { return w } -// ByVersion implements sort.Interface for sorting semantic version strings. +// ByVersion implements [sort.Interface] for sorting semantic version strings. type ByVersion []string func (vs ByVersion) Len() int { return len(vs) } @@ -164,7 +164,7 @@ func (vs ByVersion) Less(i, j int) bool { return vs[i] < vs[j] } -// Sort sorts a list of semantic version strings using ByVersion. +// Sort sorts a list of semantic version strings using [ByVersion]. func Sort(list []string) { sort.Sort(ByVersion(list)) } diff --git a/vendor/golang.org/x/sync/errgroup/errgroup.go b/vendor/golang.org/x/sync/errgroup/errgroup.go new file mode 100644 index 0000000000..948a3ee63d --- /dev/null +++ b/vendor/golang.org/x/sync/errgroup/errgroup.go @@ -0,0 +1,135 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package errgroup provides synchronization, error propagation, and Context +// cancelation for groups of goroutines working on subtasks of a common task. +// +// [errgroup.Group] is related to [sync.WaitGroup] but adds handling of tasks +// returning errors. +package errgroup + +import ( + "context" + "fmt" + "sync" +) + +type token struct{} + +// A Group is a collection of goroutines working on subtasks that are part of +// the same overall task. +// +// A zero Group is valid, has no limit on the number of active goroutines, +// and does not cancel on error. 
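Aside, not part of the vendored diff: a short sketch of typical use of the newly vendored errgroup package, fetching several URLs concurrently and stopping on the first error. The URLs are placeholders.

package main

import (
	"context"
	"fmt"
	"net/http"

	"golang.org/x/sync/errgroup"
)

func main() {
	g, ctx := errgroup.WithContext(context.Background())
	urls := []string{
		"https://example.com/",
		"https://example.org/",
	}
	for _, url := range urls {
		url := url // capture the loop variable for the goroutine
		g.Go(func() error {
			req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
			if err != nil {
				return err
			}
			resp, err := http.DefaultClient.Do(req)
			if err != nil {
				return err
			}
			return resp.Body.Close()
		})
	}
	// Wait returns the first non-nil error (if any) and cancels ctx.
	if err := g.Wait(); err != nil {
		fmt.Println("fetch failed:", err)
	}
}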
+type Group struct { + cancel func(error) + + wg sync.WaitGroup + + sem chan token + + errOnce sync.Once + err error +} + +func (g *Group) done() { + if g.sem != nil { + <-g.sem + } + g.wg.Done() +} + +// WithContext returns a new Group and an associated Context derived from ctx. +// +// The derived Context is canceled the first time a function passed to Go +// returns a non-nil error or the first time Wait returns, whichever occurs +// first. +func WithContext(ctx context.Context) (*Group, context.Context) { + ctx, cancel := withCancelCause(ctx) + return &Group{cancel: cancel}, ctx +} + +// Wait blocks until all function calls from the Go method have returned, then +// returns the first non-nil error (if any) from them. +func (g *Group) Wait() error { + g.wg.Wait() + if g.cancel != nil { + g.cancel(g.err) + } + return g.err +} + +// Go calls the given function in a new goroutine. +// It blocks until the new goroutine can be added without the number of +// active goroutines in the group exceeding the configured limit. +// +// The first call to return a non-nil error cancels the group's context, if the +// group was created by calling WithContext. The error will be returned by Wait. +func (g *Group) Go(f func() error) { + if g.sem != nil { + g.sem <- token{} + } + + g.wg.Add(1) + go func() { + defer g.done() + + if err := f(); err != nil { + g.errOnce.Do(func() { + g.err = err + if g.cancel != nil { + g.cancel(g.err) + } + }) + } + }() +} + +// TryGo calls the given function in a new goroutine only if the number of +// active goroutines in the group is currently below the configured limit. +// +// The return value reports whether the goroutine was started. +func (g *Group) TryGo(f func() error) bool { + if g.sem != nil { + select { + case g.sem <- token{}: + // Note: this allows barging iff channels in general allow barging. + default: + return false + } + } + + g.wg.Add(1) + go func() { + defer g.done() + + if err := f(); err != nil { + g.errOnce.Do(func() { + g.err = err + if g.cancel != nil { + g.cancel(g.err) + } + }) + } + }() + return true +} + +// SetLimit limits the number of active goroutines in this group to at most n. +// A negative value indicates no limit. +// +// Any subsequent call to the Go method will block until it can add an active +// goroutine without exceeding the configured limit. +// +// The limit must not be modified while any goroutines in the group are active. +func (g *Group) SetLimit(n int) { + if n < 0 { + g.sem = nil + return + } + if len(g.sem) != 0 { + panic(fmt.Errorf("errgroup: modify limit while %v goroutines in the group are still active", len(g.sem))) + } + g.sem = make(chan token, n) +} diff --git a/vendor/golang.org/x/sync/errgroup/go120.go b/vendor/golang.org/x/sync/errgroup/go120.go new file mode 100644 index 0000000000..f93c740b63 --- /dev/null +++ b/vendor/golang.org/x/sync/errgroup/go120.go @@ -0,0 +1,13 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.20 + +package errgroup + +import "context" + +func withCancelCause(parent context.Context) (context.Context, func(error)) { + return context.WithCancelCause(parent) +} diff --git a/vendor/golang.org/x/sync/errgroup/pre_go120.go b/vendor/golang.org/x/sync/errgroup/pre_go120.go new file mode 100644 index 0000000000..88ce33434e --- /dev/null +++ b/vendor/golang.org/x/sync/errgroup/pre_go120.go @@ -0,0 +1,14 @@ +// Copyright 2023 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.20 + +package errgroup + +import "context" + +func withCancelCause(parent context.Context) (context.Context, func(error)) { + ctx, cancel := context.WithCancel(parent) + return ctx, func(error) { cancel() } +} diff --git a/vendor/golang.org/x/sys/execabs/execabs.go b/vendor/golang.org/x/sys/execabs/execabs.go deleted file mode 100644 index 3bf40fdfec..0000000000 --- a/vendor/golang.org/x/sys/execabs/execabs.go +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package execabs is a drop-in replacement for os/exec -// that requires PATH lookups to find absolute paths. -// That is, execabs.Command("cmd") runs the same PATH lookup -// as exec.Command("cmd"), but if the result is a path -// which is relative, the Run and Start methods will report -// an error instead of running the executable. -// -// See https://blog.golang.org/path-security for more information -// about when it may be necessary or appropriate to use this package. -package execabs - -import ( - "context" - "fmt" - "os/exec" - "path/filepath" - "reflect" - "unsafe" -) - -// ErrNotFound is the error resulting if a path search failed to find an executable file. -// It is an alias for exec.ErrNotFound. -var ErrNotFound = exec.ErrNotFound - -// Cmd represents an external command being prepared or run. -// It is an alias for exec.Cmd. -type Cmd = exec.Cmd - -// Error is returned by LookPath when it fails to classify a file as an executable. -// It is an alias for exec.Error. -type Error = exec.Error - -// An ExitError reports an unsuccessful exit by a command. -// It is an alias for exec.ExitError. -type ExitError = exec.ExitError - -func relError(file, path string) error { - return fmt.Errorf("%s resolves to executable in current directory (.%c%s)", file, filepath.Separator, path) -} - -// LookPath searches for an executable named file in the directories -// named by the PATH environment variable. If file contains a slash, -// it is tried directly and the PATH is not consulted. The result will be -// an absolute path. -// -// LookPath differs from exec.LookPath in its handling of PATH lookups, -// which are used for file names without slashes. If exec.LookPath's -// PATH lookup would have returned an executable from the current directory, -// LookPath instead returns an error. -func LookPath(file string) (string, error) { - path, err := exec.LookPath(file) - if err != nil && !isGo119ErrDot(err) { - return "", err - } - if filepath.Base(file) == file && !filepath.IsAbs(path) { - return "", relError(file, path) - } - return path, nil -} - -func fixCmd(name string, cmd *exec.Cmd) { - if filepath.Base(name) == name && !filepath.IsAbs(cmd.Path) && !isGo119ErrFieldSet(cmd) { - // exec.Command was called with a bare binary name and - // exec.LookPath returned a path which is not absolute. - // Set cmd.lookPathErr and clear cmd.Path so that it - // cannot be run. - lookPathErr := (*error)(unsafe.Pointer(reflect.ValueOf(cmd).Elem().FieldByName("lookPathErr").Addr().Pointer())) - if *lookPathErr == nil { - *lookPathErr = relError(name, cmd.Path) - } - cmd.Path = "" - } -} - -// CommandContext is like Command but includes a context. 
-// -// The provided context is used to kill the process (by calling os.Process.Kill) -// if the context becomes done before the command completes on its own. -func CommandContext(ctx context.Context, name string, arg ...string) *exec.Cmd { - cmd := exec.CommandContext(ctx, name, arg...) - fixCmd(name, cmd) - return cmd - -} - -// Command returns the Cmd struct to execute the named program with the given arguments. -// See exec.Command for most details. -// -// Command differs from exec.Command in its handling of PATH lookups, -// which are used when the program name contains no slashes. -// If exec.Command would have returned an exec.Cmd configured to run an -// executable from the current directory, Command instead -// returns an exec.Cmd that will return an error from Start or Run. -func Command(name string, arg ...string) *exec.Cmd { - cmd := exec.Command(name, arg...) - fixCmd(name, cmd) - return cmd -} diff --git a/vendor/golang.org/x/sys/execabs/execabs_go118.go b/vendor/golang.org/x/sys/execabs/execabs_go118.go deleted file mode 100644 index 5627d70e39..0000000000 --- a/vendor/golang.org/x/sys/execabs/execabs_go118.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.19 - -package execabs - -import "os/exec" - -func isGo119ErrDot(err error) bool { - return false -} - -func isGo119ErrFieldSet(cmd *exec.Cmd) bool { - return false -} diff --git a/vendor/golang.org/x/sys/execabs/execabs_go119.go b/vendor/golang.org/x/sys/execabs/execabs_go119.go deleted file mode 100644 index d60ab1b419..0000000000 --- a/vendor/golang.org/x/sys/execabs/execabs_go119.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.19 - -package execabs - -import ( - "errors" - "os/exec" -) - -func isGo119ErrDot(err error) bool { - return errors.Is(err, exec.ErrDot) -} - -func isGo119ErrFieldSet(cmd *exec.Cmd) bool { - return cmd.Err != nil -} diff --git a/vendor/golang.org/x/text/runes/cond.go b/vendor/golang.org/x/text/runes/cond.go new file mode 100644 index 0000000000..df7aa02db6 --- /dev/null +++ b/vendor/golang.org/x/text/runes/cond.go @@ -0,0 +1,187 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runes + +import ( + "unicode/utf8" + + "golang.org/x/text/transform" +) + +// Note: below we pass invalid UTF-8 to the tIn and tNotIn transformers as is. +// This is done for various reasons: +// - To retain the semantics of the Nop transformer: if input is passed to a Nop +// one would expect it to be unchanged. +// - It would be very expensive to pass a converted RuneError to a transformer: +// a transformer might need more source bytes after RuneError, meaning that +// the only way to pass it safely is to create a new buffer and manage the +// intermingling of RuneErrors and normal input. +// - Many transformers leave ill-formed UTF-8 as is, so this is not +// inconsistent. Generally ill-formed UTF-8 is only replaced if it is a +// logical consequence of the operation (as for Map) or if it otherwise would +// pose security concerns (as for Remove). 
+// - An alternative would be to return an error on ill-formed UTF-8, but this +// would be inconsistent with other operations. + +// If returns a transformer that applies tIn to consecutive runes for which +// s.Contains(r) and tNotIn to consecutive runes for which !s.Contains(r). Reset +// is called on tIn and tNotIn at the start of each run. A Nop transformer will +// substitute a nil value passed to tIn or tNotIn. Invalid UTF-8 is translated +// to RuneError to determine which transformer to apply, but is passed as is to +// the respective transformer. +func If(s Set, tIn, tNotIn transform.Transformer) Transformer { + if tIn == nil && tNotIn == nil { + return Transformer{transform.Nop} + } + if tIn == nil { + tIn = transform.Nop + } + if tNotIn == nil { + tNotIn = transform.Nop + } + sIn, ok := tIn.(transform.SpanningTransformer) + if !ok { + sIn = dummySpan{tIn} + } + sNotIn, ok := tNotIn.(transform.SpanningTransformer) + if !ok { + sNotIn = dummySpan{tNotIn} + } + + a := &cond{ + tIn: sIn, + tNotIn: sNotIn, + f: s.Contains, + } + a.Reset() + return Transformer{a} +} + +type dummySpan struct{ transform.Transformer } + +func (d dummySpan) Span(src []byte, atEOF bool) (n int, err error) { + return 0, transform.ErrEndOfSpan +} + +type cond struct { + tIn, tNotIn transform.SpanningTransformer + f func(rune) bool + check func(rune) bool // current check to perform + t transform.SpanningTransformer // current transformer to use +} + +// Reset implements transform.Transformer. +func (t *cond) Reset() { + t.check = t.is + t.t = t.tIn + t.t.Reset() // notIn will be reset on first usage. +} + +func (t *cond) is(r rune) bool { + if t.f(r) { + return true + } + t.check = t.isNot + t.t = t.tNotIn + t.tNotIn.Reset() + return false +} + +func (t *cond) isNot(r rune) bool { + if !t.f(r) { + return true + } + t.check = t.is + t.t = t.tIn + t.tIn.Reset() + return false +} + +// This implementation of Span doesn't help all too much, but it needs to be +// there to satisfy this package's Transformer interface. +// TODO: there are certainly room for improvements, though. For example, if +// t.t == transform.Nop (which will a common occurrence) it will save a bundle +// to special-case that loop. +func (t *cond) Span(src []byte, atEOF bool) (n int, err error) { + p := 0 + for n < len(src) && err == nil { + // Don't process too much at a time as the Spanner that will be + // called on this block may terminate early. + const maxChunk = 4096 + max := len(src) + if v := n + maxChunk; v < max { + max = v + } + atEnd := false + size := 0 + current := t.t + for ; p < max; p += size { + r := rune(src[p]) + if r < utf8.RuneSelf { + size = 1 + } else if r, size = utf8.DecodeRune(src[p:]); size == 1 { + if !atEOF && !utf8.FullRune(src[p:]) { + err = transform.ErrShortSrc + break + } + } + if !t.check(r) { + // The next rune will be the start of a new run. + atEnd = true + break + } + } + n2, err2 := current.Span(src[n:p], atEnd || (atEOF && p == len(src))) + n += n2 + if err2 != nil { + return n, err2 + } + // At this point either err != nil or t.check will pass for the rune at p. + p = n + size + } + return n, err +} + +func (t *cond) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + p := 0 + for nSrc < len(src) && err == nil { + // Don't process too much at a time, as the work might be wasted if the + // destination buffer isn't large enough to hold the result or a + // transform returns an error early. 
+ const maxChunk = 4096 + max := len(src) + if n := nSrc + maxChunk; n < len(src) { + max = n + } + atEnd := false + size := 0 + current := t.t + for ; p < max; p += size { + r := rune(src[p]) + if r < utf8.RuneSelf { + size = 1 + } else if r, size = utf8.DecodeRune(src[p:]); size == 1 { + if !atEOF && !utf8.FullRune(src[p:]) { + err = transform.ErrShortSrc + break + } + } + if !t.check(r) { + // The next rune will be the start of a new run. + atEnd = true + break + } + } + nDst2, nSrc2, err2 := current.Transform(dst[nDst:], src[nSrc:p], atEnd || (atEOF && p == len(src))) + nDst += nDst2 + nSrc += nSrc2 + if err2 != nil { + return nDst, nSrc, err2 + } + // At this point either err != nil or t.check will pass for the rune at p. + p = nSrc + size + } + return nDst, nSrc, err +} diff --git a/vendor/golang.org/x/text/runes/runes.go b/vendor/golang.org/x/text/runes/runes.go new file mode 100644 index 0000000000..930e87fedb --- /dev/null +++ b/vendor/golang.org/x/text/runes/runes.go @@ -0,0 +1,355 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package runes provide transforms for UTF-8 encoded text. +package runes // import "golang.org/x/text/runes" + +import ( + "unicode" + "unicode/utf8" + + "golang.org/x/text/transform" +) + +// A Set is a collection of runes. +type Set interface { + // Contains returns true if r is contained in the set. + Contains(r rune) bool +} + +type setFunc func(rune) bool + +func (s setFunc) Contains(r rune) bool { + return s(r) +} + +// Note: using funcs here instead of wrapping types result in cleaner +// documentation and a smaller API. + +// In creates a Set with a Contains method that returns true for all runes in +// the given RangeTable. +func In(rt *unicode.RangeTable) Set { + return setFunc(func(r rune) bool { return unicode.Is(rt, r) }) +} + +// NotIn creates a Set with a Contains method that returns true for all runes not +// in the given RangeTable. +func NotIn(rt *unicode.RangeTable) Set { + return setFunc(func(r rune) bool { return !unicode.Is(rt, r) }) +} + +// Predicate creates a Set with a Contains method that returns f(r). +func Predicate(f func(rune) bool) Set { + return setFunc(f) +} + +// Transformer implements the transform.Transformer interface. +type Transformer struct { + t transform.SpanningTransformer +} + +func (t Transformer) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + return t.t.Transform(dst, src, atEOF) +} + +func (t Transformer) Span(b []byte, atEOF bool) (n int, err error) { + return t.t.Span(b, atEOF) +} + +func (t Transformer) Reset() { t.t.Reset() } + +// Bytes returns a new byte slice with the result of converting b using t. It +// calls Reset on t. It returns nil if any error was found. This can only happen +// if an error-producing Transformer is passed to If. +func (t Transformer) Bytes(b []byte) []byte { + b, _, err := transform.Bytes(t, b) + if err != nil { + return nil + } + return b +} + +// String returns a string with the result of converting s using t. It calls +// Reset on t. It returns the empty string if any error was found. This can only +// happen if an error-producing Transformer is passed to If. +func (t Transformer) String(s string) string { + s, _, err := transform.String(t, s) + if err != nil { + return "" + } + return s +} + +// TODO: +// - Copy: copying strings and bytes in whole-rune units. 
+// - Validation (maybe) +// - Well-formed-ness (maybe) + +const runeErrorString = string(utf8.RuneError) + +// Remove returns a Transformer that removes runes r for which s.Contains(r). +// Illegal input bytes are replaced by RuneError before being passed to f. +func Remove(s Set) Transformer { + if f, ok := s.(setFunc); ok { + // This little trick cuts the running time of BenchmarkRemove for sets + // created by Predicate roughly in half. + // TODO: special-case RangeTables as well. + return Transformer{remove(f)} + } + return Transformer{remove(s.Contains)} +} + +// TODO: remove transform.RemoveFunc. + +type remove func(r rune) bool + +func (remove) Reset() {} + +// Span implements transform.Spanner. +func (t remove) Span(src []byte, atEOF bool) (n int, err error) { + for r, size := rune(0), 0; n < len(src); { + if r = rune(src[n]); r < utf8.RuneSelf { + size = 1 + } else if r, size = utf8.DecodeRune(src[n:]); size == 1 { + // Invalid rune. + if !atEOF && !utf8.FullRune(src[n:]) { + err = transform.ErrShortSrc + } else { + err = transform.ErrEndOfSpan + } + break + } + if t(r) { + err = transform.ErrEndOfSpan + break + } + n += size + } + return +} + +// Transform implements transform.Transformer. +func (t remove) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + for r, size := rune(0), 0; nSrc < len(src); { + if r = rune(src[nSrc]); r < utf8.RuneSelf { + size = 1 + } else if r, size = utf8.DecodeRune(src[nSrc:]); size == 1 { + // Invalid rune. + if !atEOF && !utf8.FullRune(src[nSrc:]) { + err = transform.ErrShortSrc + break + } + // We replace illegal bytes with RuneError. Not doing so might + // otherwise turn a sequence of invalid UTF-8 into valid UTF-8. + // The resulting byte sequence may subsequently contain runes + // for which t(r) is true that were passed unnoticed. + if !t(utf8.RuneError) { + if nDst+3 > len(dst) { + err = transform.ErrShortDst + break + } + dst[nDst+0] = runeErrorString[0] + dst[nDst+1] = runeErrorString[1] + dst[nDst+2] = runeErrorString[2] + nDst += 3 + } + nSrc++ + continue + } + if t(r) { + nSrc += size + continue + } + if nDst+size > len(dst) { + err = transform.ErrShortDst + break + } + for i := 0; i < size; i++ { + dst[nDst] = src[nSrc] + nDst++ + nSrc++ + } + } + return +} + +// Map returns a Transformer that maps the runes in the input using the given +// mapping. Illegal bytes in the input are converted to utf8.RuneError before +// being passed to the mapping func. +func Map(mapping func(rune) rune) Transformer { + return Transformer{mapper(mapping)} +} + +type mapper func(rune) rune + +func (mapper) Reset() {} + +// Span implements transform.Spanner. +func (t mapper) Span(src []byte, atEOF bool) (n int, err error) { + for r, size := rune(0), 0; n < len(src); n += size { + if r = rune(src[n]); r < utf8.RuneSelf { + size = 1 + } else if r, size = utf8.DecodeRune(src[n:]); size == 1 { + // Invalid rune. + if !atEOF && !utf8.FullRune(src[n:]) { + err = transform.ErrShortSrc + } else { + err = transform.ErrEndOfSpan + } + break + } + if t(r) != r { + err = transform.ErrEndOfSpan + break + } + } + return n, err +} + +// Transform implements transform.Transformer. 
+func (t mapper) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + var replacement rune + var b [utf8.UTFMax]byte + + for r, size := rune(0), 0; nSrc < len(src); { + if r = rune(src[nSrc]); r < utf8.RuneSelf { + if replacement = t(r); replacement < utf8.RuneSelf { + if nDst == len(dst) { + err = transform.ErrShortDst + break + } + dst[nDst] = byte(replacement) + nDst++ + nSrc++ + continue + } + size = 1 + } else if r, size = utf8.DecodeRune(src[nSrc:]); size == 1 { + // Invalid rune. + if !atEOF && !utf8.FullRune(src[nSrc:]) { + err = transform.ErrShortSrc + break + } + + if replacement = t(utf8.RuneError); replacement == utf8.RuneError { + if nDst+3 > len(dst) { + err = transform.ErrShortDst + break + } + dst[nDst+0] = runeErrorString[0] + dst[nDst+1] = runeErrorString[1] + dst[nDst+2] = runeErrorString[2] + nDst += 3 + nSrc++ + continue + } + } else if replacement = t(r); replacement == r { + if nDst+size > len(dst) { + err = transform.ErrShortDst + break + } + for i := 0; i < size; i++ { + dst[nDst] = src[nSrc] + nDst++ + nSrc++ + } + continue + } + + n := utf8.EncodeRune(b[:], replacement) + + if nDst+n > len(dst) { + err = transform.ErrShortDst + break + } + for i := 0; i < n; i++ { + dst[nDst] = b[i] + nDst++ + } + nSrc += size + } + return +} + +// ReplaceIllFormed returns a transformer that replaces all input bytes that are +// not part of a well-formed UTF-8 code sequence with utf8.RuneError. +func ReplaceIllFormed() Transformer { + return Transformer{&replaceIllFormed{}} +} + +type replaceIllFormed struct{ transform.NopResetter } + +func (t replaceIllFormed) Span(src []byte, atEOF bool) (n int, err error) { + for n < len(src) { + // ASCII fast path. + if src[n] < utf8.RuneSelf { + n++ + continue + } + + r, size := utf8.DecodeRune(src[n:]) + + // Look for a valid non-ASCII rune. + if r != utf8.RuneError || size != 1 { + n += size + continue + } + + // Look for short source data. + if !atEOF && !utf8.FullRune(src[n:]) { + err = transform.ErrShortSrc + break + } + + // We have an invalid rune. + err = transform.ErrEndOfSpan + break + } + return n, err +} + +func (t replaceIllFormed) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + for nSrc < len(src) { + // ASCII fast path. + if r := src[nSrc]; r < utf8.RuneSelf { + if nDst == len(dst) { + err = transform.ErrShortDst + break + } + dst[nDst] = r + nDst++ + nSrc++ + continue + } + + // Look for a valid non-ASCII rune. + if _, size := utf8.DecodeRune(src[nSrc:]); size != 1 { + if size != copy(dst[nDst:], src[nSrc:nSrc+size]) { + err = transform.ErrShortDst + break + } + nDst += size + nSrc += size + continue + } + + // Look for short source data. + if !atEOF && !utf8.FullRune(src[nSrc:]) { + err = transform.ErrShortSrc + break + } + + // We have an invalid rune. + if nDst+3 > len(dst) { + err = transform.ErrShortDst + break + } + dst[nDst+0] = runeErrorString[0] + dst[nDst+1] = runeErrorString[1] + dst[nDst+2] = runeErrorString[2] + nDst += 3 + nSrc++ + } + return nDst, nSrc, err +} diff --git a/vendor/golang.org/x/time/LICENSE b/vendor/golang.org/x/time/LICENSE new file mode 100644 index 0000000000..6a66aea5ea --- /dev/null +++ b/vendor/golang.org/x/time/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/time/PATENTS b/vendor/golang.org/x/time/PATENTS new file mode 100644 index 0000000000..733099041f --- /dev/null +++ b/vendor/golang.org/x/time/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/time/rate/rate.go b/vendor/golang.org/x/time/rate/rate.go new file mode 100644 index 0000000000..8f6c7f493f --- /dev/null +++ b/vendor/golang.org/x/time/rate/rate.go @@ -0,0 +1,430 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package rate provides a rate limiter. +package rate + +import ( + "context" + "fmt" + "math" + "sync" + "time" +) + +// Limit defines the maximum frequency of some events. 
+// Limit is represented as number of events per second. +// A zero Limit allows no events. +type Limit float64 + +// Inf is the infinite rate limit; it allows all events (even if burst is zero). +const Inf = Limit(math.MaxFloat64) + +// Every converts a minimum time interval between events to a Limit. +func Every(interval time.Duration) Limit { + if interval <= 0 { + return Inf + } + return 1 / Limit(interval.Seconds()) +} + +// A Limiter controls how frequently events are allowed to happen. +// It implements a "token bucket" of size b, initially full and refilled +// at rate r tokens per second. +// Informally, in any large enough time interval, the Limiter limits the +// rate to r tokens per second, with a maximum burst size of b events. +// As a special case, if r == Inf (the infinite rate), b is ignored. +// See https://en.wikipedia.org/wiki/Token_bucket for more about token buckets. +// +// The zero value is a valid Limiter, but it will reject all events. +// Use NewLimiter to create non-zero Limiters. +// +// Limiter has three main methods, Allow, Reserve, and Wait. +// Most callers should use Wait. +// +// Each of the three methods consumes a single token. +// They differ in their behavior when no token is available. +// If no token is available, Allow returns false. +// If no token is available, Reserve returns a reservation for a future token +// and the amount of time the caller must wait before using it. +// If no token is available, Wait blocks until one can be obtained +// or its associated context.Context is canceled. +// +// The methods AllowN, ReserveN, and WaitN consume n tokens. +// +// Limiter is safe for simultaneous use by multiple goroutines. +type Limiter struct { + mu sync.Mutex + limit Limit + burst int + tokens float64 + // last is the last time the limiter's tokens field was updated + last time.Time + // lastEvent is the latest time of a rate-limited event (past or future) + lastEvent time.Time +} + +// Limit returns the maximum overall event rate. +func (lim *Limiter) Limit() Limit { + lim.mu.Lock() + defer lim.mu.Unlock() + return lim.limit +} + +// Burst returns the maximum burst size. Burst is the maximum number of tokens +// that can be consumed in a single call to Allow, Reserve, or Wait, so higher +// Burst values allow more events to happen at once. +// A zero Burst allows no events, unless limit == Inf. +func (lim *Limiter) Burst() int { + lim.mu.Lock() + defer lim.mu.Unlock() + return lim.burst +} + +// TokensAt returns the number of tokens available at time t. +func (lim *Limiter) TokensAt(t time.Time) float64 { + lim.mu.Lock() + _, tokens := lim.advance(t) // does not mutate lim + lim.mu.Unlock() + return tokens +} + +// Tokens returns the number of tokens available now. +func (lim *Limiter) Tokens() float64 { + return lim.TokensAt(time.Now()) +} + +// NewLimiter returns a new Limiter that allows events up to rate r and permits +// bursts of at most b tokens. +func NewLimiter(r Limit, b int) *Limiter { + return &Limiter{ + limit: r, + burst: b, + } +} + +// Allow reports whether an event may happen now. +func (lim *Limiter) Allow() bool { + return lim.AllowN(time.Now(), 1) +} + +// AllowN reports whether n events may happen at time t. +// Use this method if you intend to drop / skip events that exceed the rate limit. +// Otherwise use Reserve or Wait. +func (lim *Limiter) AllowN(t time.Time, n int) bool { + return lim.reserveN(t, n, 0).ok +} + +// A Reservation holds information about events that are permitted by a Limiter to happen after a delay. 
+// A Reservation may be canceled, which may enable the Limiter to permit additional events. +type Reservation struct { + ok bool + lim *Limiter + tokens int + timeToAct time.Time + // This is the Limit at reservation time, it can change later. + limit Limit +} + +// OK returns whether the limiter can provide the requested number of tokens +// within the maximum wait time. If OK is false, Delay returns InfDuration, and +// Cancel does nothing. +func (r *Reservation) OK() bool { + return r.ok +} + +// Delay is shorthand for DelayFrom(time.Now()). +func (r *Reservation) Delay() time.Duration { + return r.DelayFrom(time.Now()) +} + +// InfDuration is the duration returned by Delay when a Reservation is not OK. +const InfDuration = time.Duration(math.MaxInt64) + +// DelayFrom returns the duration for which the reservation holder must wait +// before taking the reserved action. Zero duration means act immediately. +// InfDuration means the limiter cannot grant the tokens requested in this +// Reservation within the maximum wait time. +func (r *Reservation) DelayFrom(t time.Time) time.Duration { + if !r.ok { + return InfDuration + } + delay := r.timeToAct.Sub(t) + if delay < 0 { + return 0 + } + return delay +} + +// Cancel is shorthand for CancelAt(time.Now()). +func (r *Reservation) Cancel() { + r.CancelAt(time.Now()) +} + +// CancelAt indicates that the reservation holder will not perform the reserved action +// and reverses the effects of this Reservation on the rate limit as much as possible, +// considering that other reservations may have already been made. +func (r *Reservation) CancelAt(t time.Time) { + if !r.ok { + return + } + + r.lim.mu.Lock() + defer r.lim.mu.Unlock() + + if r.lim.limit == Inf || r.tokens == 0 || r.timeToAct.Before(t) { + return + } + + // calculate tokens to restore + // The duration between lim.lastEvent and r.timeToAct tells us how many tokens were reserved + // after r was obtained. These tokens should not be restored. + restoreTokens := float64(r.tokens) - r.limit.tokensFromDuration(r.lim.lastEvent.Sub(r.timeToAct)) + if restoreTokens <= 0 { + return + } + // advance time to now + t, tokens := r.lim.advance(t) + // calculate new number of tokens + tokens += restoreTokens + if burst := float64(r.lim.burst); tokens > burst { + tokens = burst + } + // update state + r.lim.last = t + r.lim.tokens = tokens + if r.timeToAct == r.lim.lastEvent { + prevEvent := r.timeToAct.Add(r.limit.durationFromTokens(float64(-r.tokens))) + if !prevEvent.Before(t) { + r.lim.lastEvent = prevEvent + } + } +} + +// Reserve is shorthand for ReserveN(time.Now(), 1). +func (lim *Limiter) Reserve() *Reservation { + return lim.ReserveN(time.Now(), 1) +} + +// ReserveN returns a Reservation that indicates how long the caller must wait before n events happen. +// The Limiter takes this Reservation into account when allowing future events. +// The returned Reservation’s OK() method returns false if n exceeds the Limiter's burst size. +// Usage example: +// +// r := lim.ReserveN(time.Now(), 1) +// if !r.OK() { +// // Not allowed to act! Did you remember to set lim.burst to be > 0 ? +// return +// } +// time.Sleep(r.Delay()) +// Act() +// +// Use this method if you wish to wait and slow down in accordance with the rate limit without dropping events. +// If you need to respect a deadline or cancel the delay, use Wait instead. +// To drop or skip events exceeding rate limit, use Allow instead. 
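Aside, not part of the vendored diff: a small sketch of pacing work with the newly vendored rate.Limiter using Wait; the limit and burst values are arbitrary.

package main

import (
	"context"
	"fmt"

	"golang.org/x/time/rate"
)

func main() {
	// Allow roughly 5 events per second with bursts of up to 2.
	lim := rate.NewLimiter(rate.Limit(5), 2)
	ctx := context.Background()

	for i := 0; i < 10; i++ {
		// Wait blocks until a token is available or ctx is canceled.
		if err := lim.Wait(ctx); err != nil {
			fmt.Println("rate limiter:", err)
			return
		}
		fmt.Println("event", i)
	}
}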
+func (lim *Limiter) ReserveN(t time.Time, n int) *Reservation { + r := lim.reserveN(t, n, InfDuration) + return &r +} + +// Wait is shorthand for WaitN(ctx, 1). +func (lim *Limiter) Wait(ctx context.Context) (err error) { + return lim.WaitN(ctx, 1) +} + +// WaitN blocks until lim permits n events to happen. +// It returns an error if n exceeds the Limiter's burst size, the Context is +// canceled, or the expected wait time exceeds the Context's Deadline. +// The burst limit is ignored if the rate limit is Inf. +func (lim *Limiter) WaitN(ctx context.Context, n int) (err error) { + // The test code calls lim.wait with a fake timer generator. + // This is the real timer generator. + newTimer := func(d time.Duration) (<-chan time.Time, func() bool, func()) { + timer := time.NewTimer(d) + return timer.C, timer.Stop, func() {} + } + + return lim.wait(ctx, n, time.Now(), newTimer) +} + +// wait is the internal implementation of WaitN. +func (lim *Limiter) wait(ctx context.Context, n int, t time.Time, newTimer func(d time.Duration) (<-chan time.Time, func() bool, func())) error { + lim.mu.Lock() + burst := lim.burst + limit := lim.limit + lim.mu.Unlock() + + if n > burst && limit != Inf { + return fmt.Errorf("rate: Wait(n=%d) exceeds limiter's burst %d", n, burst) + } + // Check if ctx is already cancelled + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + // Determine wait limit + waitLimit := InfDuration + if deadline, ok := ctx.Deadline(); ok { + waitLimit = deadline.Sub(t) + } + // Reserve + r := lim.reserveN(t, n, waitLimit) + if !r.ok { + return fmt.Errorf("rate: Wait(n=%d) would exceed context deadline", n) + } + // Wait if necessary + delay := r.DelayFrom(t) + if delay == 0 { + return nil + } + ch, stop, advance := newTimer(delay) + defer stop() + advance() // only has an effect when testing + select { + case <-ch: + // We can proceed. + return nil + case <-ctx.Done(): + // Context was canceled before we could proceed. Cancel the + // reservation, which may permit other events to proceed sooner. + r.Cancel() + return ctx.Err() + } +} + +// SetLimit is shorthand for SetLimitAt(time.Now(), newLimit). +func (lim *Limiter) SetLimit(newLimit Limit) { + lim.SetLimitAt(time.Now(), newLimit) +} + +// SetLimitAt sets a new Limit for the limiter. The new Limit, and Burst, may be violated +// or underutilized by those which reserved (using Reserve or Wait) but did not yet act +// before SetLimitAt was called. +func (lim *Limiter) SetLimitAt(t time.Time, newLimit Limit) { + lim.mu.Lock() + defer lim.mu.Unlock() + + t, tokens := lim.advance(t) + + lim.last = t + lim.tokens = tokens + lim.limit = newLimit +} + +// SetBurst is shorthand for SetBurstAt(time.Now(), newBurst). +func (lim *Limiter) SetBurst(newBurst int) { + lim.SetBurstAt(time.Now(), newBurst) +} + +// SetBurstAt sets a new burst size for the limiter. +func (lim *Limiter) SetBurstAt(t time.Time, newBurst int) { + lim.mu.Lock() + defer lim.mu.Unlock() + + t, tokens := lim.advance(t) + + lim.last = t + lim.tokens = tokens + lim.burst = newBurst +} + +// reserveN is a helper method for AllowN, ReserveN, and WaitN. +// maxFutureReserve specifies the maximum reservation wait duration allowed. +// reserveN returns Reservation, not *Reservation, to avoid allocation in AllowN and WaitN. 
+func (lim *Limiter) reserveN(t time.Time, n int, maxFutureReserve time.Duration) Reservation { + lim.mu.Lock() + defer lim.mu.Unlock() + + if lim.limit == Inf { + return Reservation{ + ok: true, + lim: lim, + tokens: n, + timeToAct: t, + } + } else if lim.limit == 0 { + var ok bool + if lim.burst >= n { + ok = true + lim.burst -= n + } + return Reservation{ + ok: ok, + lim: lim, + tokens: lim.burst, + timeToAct: t, + } + } + + t, tokens := lim.advance(t) + + // Calculate the remaining number of tokens resulting from the request. + tokens -= float64(n) + + // Calculate the wait duration + var waitDuration time.Duration + if tokens < 0 { + waitDuration = lim.limit.durationFromTokens(-tokens) + } + + // Decide result + ok := n <= lim.burst && waitDuration <= maxFutureReserve + + // Prepare reservation + r := Reservation{ + ok: ok, + lim: lim, + limit: lim.limit, + } + if ok { + r.tokens = n + r.timeToAct = t.Add(waitDuration) + + // Update state + lim.last = t + lim.tokens = tokens + lim.lastEvent = r.timeToAct + } + + return r +} + +// advance calculates and returns an updated state for lim resulting from the passage of time. +// lim is not changed. +// advance requires that lim.mu is held. +func (lim *Limiter) advance(t time.Time) (newT time.Time, newTokens float64) { + last := lim.last + if t.Before(last) { + last = t + } + + // Calculate the new number of tokens, due to time that passed. + elapsed := t.Sub(last) + delta := lim.limit.tokensFromDuration(elapsed) + tokens := lim.tokens + delta + if burst := float64(lim.burst); tokens > burst { + tokens = burst + } + return t, tokens +} + +// durationFromTokens is a unit conversion function from the number of tokens to the duration +// of time it takes to accumulate them at a rate of limit tokens per second. +func (limit Limit) durationFromTokens(tokens float64) time.Duration { + if limit <= 0 { + return InfDuration + } + seconds := tokens / float64(limit) + return time.Duration(float64(time.Second) * seconds) +} + +// tokensFromDuration is a unit conversion function from a time duration to the number of tokens +// which could be accumulated during that duration at a rate of limit tokens per second. +func (limit Limit) tokensFromDuration(d time.Duration) float64 { + if limit <= 0 { + return 0 + } + return d.Seconds() * float64(limit) +} diff --git a/vendor/golang.org/x/time/rate/sometimes.go b/vendor/golang.org/x/time/rate/sometimes.go new file mode 100644 index 0000000000..6ba99ddb67 --- /dev/null +++ b/vendor/golang.org/x/time/rate/sometimes.go @@ -0,0 +1,67 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package rate + +import ( + "sync" + "time" +) + +// Sometimes will perform an action occasionally. The First, Every, and +// Interval fields govern the behavior of Do, which performs the action. +// A zero Sometimes value will perform an action exactly once. +// +// # Example: logging with rate limiting +// +// var sometimes = rate.Sometimes{First: 3, Interval: 10*time.Second} +// func Spammy() { +// sometimes.Do(func() { log.Info("here I am!") }) +// } +type Sometimes struct { + First int // if non-zero, the first N calls to Do will run f. + Every int // if non-zero, every Nth call to Do will run f. + Interval time.Duration // if non-zero and Interval has elapsed since f's last run, Do will run f. 
+ + mu sync.Mutex + count int // number of Do calls + last time.Time // last time f was run +} + +// Do runs the function f as allowed by First, Every, and Interval. +// +// The model is a union (not intersection) of filters. The first call to Do +// always runs f. Subsequent calls to Do run f if allowed by First or Every or +// Interval. +// +// A non-zero First:N causes the first N Do(f) calls to run f. +// +// A non-zero Every:M causes every Mth Do(f) call, starting with the first, to +// run f. +// +// A non-zero Interval causes Do(f) to run f if Interval has elapsed since +// Do last ran f. +// +// Specifying multiple filters produces the union of these execution streams. +// For example, specifying both First:N and Every:M causes the first N Do(f) +// calls and every Mth Do(f) call, starting with the first, to run f. See +// Examples for more. +// +// If Do is called multiple times simultaneously, the calls will block and run +// serially. Therefore, Do is intended for lightweight operations. +// +// Because a call to Do may block until f returns, if f causes Do to be called, +// it will deadlock. +func (s *Sometimes) Do(f func()) { + s.mu.Lock() + defer s.mu.Unlock() + if s.count == 0 || + (s.First > 0 && s.count < s.First) || + (s.Every > 0 && s.count%s.Every == 0) || + (s.Interval > 0 && time.Since(s.last) >= s.Interval) { + f() + s.last = time.Now() + } + s.count++ +} diff --git a/vendor/golang.org/x/tools/go/analysis/analysis.go b/vendor/golang.org/x/tools/go/analysis/analysis.go index 44ada22a03..5da33c7e6e 100644 --- a/vendor/golang.org/x/tools/go/analysis/analysis.go +++ b/vendor/golang.org/x/tools/go/analysis/analysis.go @@ -24,6 +24,10 @@ type Analyzer struct { // (no capital or period, max ~60 letters). Doc string + // URL holds an optional link to a web page with additional + // documentation for this analyzer. + URL string + // Flags defines any flags accepted by the analyzer. // The manner in which these flags are exposed to the user // depends on the driver which runs the analyzer. @@ -135,32 +139,24 @@ type Pass struct { // See comments for ExportObjectFact. ExportPackageFact func(fact Fact) - // AllPackageFacts returns a new slice containing all package facts of the analysis's FactTypes - // in unspecified order. - // WARNING: This is an experimental API and may change in the future. + // AllPackageFacts returns a new slice containing all package + // facts of the analysis's FactTypes in unspecified order. AllPackageFacts func() []PackageFact - // AllObjectFacts returns a new slice containing all object facts of the analysis's FactTypes - // in unspecified order. - // WARNING: This is an experimental API and may change in the future. + // AllObjectFacts returns a new slice containing all object + // facts of the analysis's FactTypes in unspecified order. AllObjectFacts func() []ObjectFact - // typeErrors contains types.Errors that are associated with the pkg. - typeErrors []types.Error - /* Further fields may be added in future. */ - // For example, suggested or applied refactorings. } // PackageFact is a package together with an associated fact. -// WARNING: This is an experimental API and may change in the future. type PackageFact struct { Package *types.Package Fact Fact } // ObjectFact is an object together with an associated fact. -// WARNING: This is an experimental API and may change in the future. 
type ObjectFact struct { Object types.Object Fact Fact diff --git a/vendor/golang.org/x/tools/go/analysis/diagnostic.go b/vendor/golang.org/x/tools/go/analysis/diagnostic.go index 5cdcf46d2a..f67c97294b 100644 --- a/vendor/golang.org/x/tools/go/analysis/diagnostic.go +++ b/vendor/golang.org/x/tools/go/analysis/diagnostic.go @@ -20,14 +20,24 @@ type Diagnostic struct { Category string // optional Message string - // SuggestedFixes contains suggested fixes for a diagnostic which can be used to perform - // edits to a file that address the diagnostic. - // TODO(matloob): Should multiple SuggestedFixes be allowed for a diagnostic? + // URL is the optional location of a web page that provides + // additional documentation for this diagnostic. + // + // If URL is empty but a Category is specified, then the + // Analysis driver should treat the URL as "#"+Category. + // + // The URL may be relative. If so, the base URL is that of the + // Analyzer that produced the diagnostic; + // see https://pkg.go.dev/net/url#URL.ResolveReference. + URL string + + // SuggestedFixes contains suggested fixes for a diagnostic + // which can be used to perform edits to a file that address + // the diagnostic. + // // Diagnostics should not contain SuggestedFixes that overlap. - // Experimental: This API is experimental and may change in the future. SuggestedFixes []SuggestedFix // optional - // Experimental: This API is experimental and may change in the future. Related []RelatedInformation // optional } @@ -41,12 +51,12 @@ type RelatedInformation struct { Message string } -// A SuggestedFix is a code change associated with a Diagnostic that a user can choose -// to apply to their code. Usually the SuggestedFix is meant to fix the issue flagged -// by the diagnostic. -// TextEdits for a SuggestedFix should not overlap. TextEdits for a SuggestedFix -// should not contain edits for other packages. -// Experimental: This API is experimental and may change in the future. +// A SuggestedFix is a code change associated with a Diagnostic that a +// user can choose to apply to their code. Usually the SuggestedFix is +// meant to fix the issue flagged by the diagnostic. +// +// TextEdits for a SuggestedFix should not overlap, +// nor contain edits for other packages. type SuggestedFix struct { // A description for this suggested fix to be shown to a user deciding // whether to accept it. @@ -56,7 +66,6 @@ type SuggestedFix struct { // A TextEdit represents the replacement of the code between Pos and End with the new text. // Each TextEdit should apply to a single file. End should not be earlier in the file than Pos. -// Experimental: This API is experimental and may change in the future. type TextEdit struct { // For a pure insertion, End can either be set to Pos or token.NoPos. Pos token.Pos diff --git a/vendor/golang.org/x/tools/go/analysis/doc.go b/vendor/golang.org/x/tools/go/analysis/doc.go index c5429c9e23..44867d599e 100644 --- a/vendor/golang.org/x/tools/go/analysis/doc.go +++ b/vendor/golang.org/x/tools/go/analysis/doc.go @@ -191,7 +191,7 @@ and buildtag, inspect the raw text of Go source files or even non-Go files such as assembly. To report a diagnostic against a line of a raw text file, use the following sequence: - content, err := ioutil.ReadFile(filename) + content, err := os.ReadFile(filename) if err != nil { ... 
} tf := fset.AddFile(filename, -1, len(content)) tf.SetLinesForContent(content) diff --git a/vendor/golang.org/x/tools/go/analysis/passes/appends/appends.go b/vendor/golang.org/x/tools/go/analysis/passes/appends/appends.go new file mode 100644 index 0000000000..6976f0d909 --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/appends/appends.go @@ -0,0 +1,47 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package appends defines an Analyzer that detects +// if there is only one variable in append. +package appends + +import ( + _ "embed" + "go/ast" + "go/types" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/analysis/passes/internal/analysisutil" + "golang.org/x/tools/go/ast/inspector" + "golang.org/x/tools/go/types/typeutil" +) + +//go:embed doc.go +var doc string + +var Analyzer = &analysis.Analyzer{ + Name: "appends", + Doc: analysisutil.MustExtractDoc(doc, "appends"), + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/appends", + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Run: run, +} + +func run(pass *analysis.Pass) (interface{}, error) { + inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + + nodeFilter := []ast.Node{ + (*ast.CallExpr)(nil), + } + inspect.Preorder(nodeFilter, func(n ast.Node) { + call := n.(*ast.CallExpr) + b, ok := typeutil.Callee(pass.TypesInfo, call).(*types.Builtin) + if ok && b.Name() == "append" && len(call.Args) == 1 { + pass.ReportRangef(call, "append with no values") + } + }) + + return nil, nil +} diff --git a/vendor/golang.org/x/tools/go/analysis/passes/appends/doc.go b/vendor/golang.org/x/tools/go/analysis/passes/appends/doc.go new file mode 100644 index 0000000000..2e6a2e010b --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/appends/doc.go @@ -0,0 +1,20 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package appends defines an Analyzer that detects +// if there is only one variable in append. +// +// # Analyzer appends +// +// appends: check for missing values after append +// +// This checker reports calls to append that pass +// no values to be appended to the slice. +// +// s := []string{"a", "b", "c"} +// _ = append(s) +// +// Such calls are always no-ops and often indicate an +// underlying mistake. 
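For completeness, the usual remediation for the pattern described above is either to delete the call or to supply the values to be appended; a minimal sketch with hypothetical values (not part of the vendored code):

    s := []string{"a", "b", "c"}
    s = append(s, "d") // values supplied, so the call is no longer a no-op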
+package appends diff --git a/vendor/golang.org/x/tools/go/analysis/passes/asmdecl/asmdecl.go b/vendor/golang.org/x/tools/go/analysis/passes/asmdecl/asmdecl.go index 7288559fc0..e24dac9865 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/asmdecl/asmdecl.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/asmdecl/asmdecl.go @@ -27,6 +27,7 @@ const Doc = "report mismatches between assembly files and Go declarations" var Analyzer = &analysis.Analyzer{ Name: "asmdecl", Doc: Doc, + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/asmdecl", Run: run, } diff --git a/vendor/golang.org/x/tools/go/analysis/passes/assign/assign.go b/vendor/golang.org/x/tools/go/analysis/passes/assign/assign.go index 89146b7334..3bfd501226 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/assign/assign.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/assign/assign.go @@ -2,13 +2,13 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package assign defines an Analyzer that detects useless assignments. package assign // TODO(adonovan): check also for assignments to struct fields inside // methods that are on T instead of *T. import ( + _ "embed" "fmt" "go/ast" "go/token" @@ -18,18 +18,17 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/analysis/passes/internal/analysisutil" + "golang.org/x/tools/go/ast/astutil" "golang.org/x/tools/go/ast/inspector" ) -const Doc = `check for useless assignments - -This checker reports assignments of the form x = x or a[i] = a[i]. -These are almost always useless, and even when they aren't they are -usually a mistake.` +//go:embed doc.go +var doc string var Analyzer = &analysis.Analyzer{ Name: "assign", - Doc: Doc, + Doc: analysisutil.MustExtractDoc(doc, "assign"), + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/assign", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, } @@ -79,7 +78,7 @@ func run(pass *analysis.Pass) (interface{}, error) { // isMapIndex returns true if e is a map index expression. func isMapIndex(info *types.Info, e ast.Expr) bool { - if idx, ok := analysisutil.Unparen(e).(*ast.IndexExpr); ok { + if idx, ok := astutil.Unparen(e).(*ast.IndexExpr); ok { if typ := info.Types[idx.X].Type; typ != nil { _, ok := typ.Underlying().(*types.Map) return ok diff --git a/vendor/golang.org/x/tools/go/analysis/passes/assign/doc.go b/vendor/golang.org/x/tools/go/analysis/passes/assign/doc.go new file mode 100644 index 0000000000..a4b1b64c51 --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/assign/doc.go @@ -0,0 +1,14 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package assign defines an Analyzer that detects useless assignments. +// +// # Analyzer assign +// +// assign: check for useless assignments +// +// This checker reports assignments of the form x = x or a[i] = a[i]. +// These are almost always useless, and even when they aren't they are +// usually a mistake. 
+package assign diff --git a/vendor/golang.org/x/tools/go/analysis/passes/atomic/atomic.go b/vendor/golang.org/x/tools/go/analysis/passes/atomic/atomic.go index 9261db7e4e..931f9ca754 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/atomic/atomic.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/atomic/atomic.go @@ -2,38 +2,37 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package atomic defines an Analyzer that checks for common mistakes -// using the sync/atomic package. package atomic import ( + _ "embed" "go/ast" "go/token" - "go/types" "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" + "golang.org/x/tools/go/types/typeutil" ) -const Doc = `check for common mistakes using the sync/atomic package - -The atomic checker looks for assignment statements of the form: - - x = atomic.AddUint64(&x, 1) - -which are not atomic.` +//go:embed doc.go +var doc string var Analyzer = &analysis.Analyzer{ Name: "atomic", - Doc: Doc, + Doc: analysisutil.MustExtractDoc(doc, "atomic"), + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/atomic", Requires: []*analysis.Analyzer{inspect.Analyzer}, RunDespiteErrors: true, Run: run, } func run(pass *analysis.Pass) (interface{}, error) { + if !analysisutil.Imports(pass.Pkg, "sync/atomic") { + return nil, nil // doesn't directly import sync/atomic + } + inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) nodeFilter := []ast.Node{ @@ -53,18 +52,8 @@ func run(pass *analysis.Pass) (interface{}, error) { if !ok { continue } - sel, ok := call.Fun.(*ast.SelectorExpr) - if !ok { - continue - } - pkgIdent, _ := sel.X.(*ast.Ident) - pkgName, ok := pass.TypesInfo.Uses[pkgIdent].(*types.PkgName) - if !ok || pkgName.Imported().Path() != "sync/atomic" { - continue - } - - switch sel.Sel.Name { - case "AddInt32", "AddInt64", "AddUint32", "AddUint64", "AddUintptr": + fn := typeutil.StaticCallee(pass.TypesInfo, call) + if analysisutil.IsFunctionNamed(fn, "sync/atomic", "AddInt32", "AddInt64", "AddUint32", "AddUint64", "AddUintptr") { checkAtomicAddAssignment(pass, n.Lhs[i], call) } } diff --git a/vendor/golang.org/x/tools/go/analysis/passes/atomic/doc.go b/vendor/golang.org/x/tools/go/analysis/passes/atomic/doc.go new file mode 100644 index 0000000000..5aafe25d32 --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/atomic/doc.go @@ -0,0 +1,17 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package atomic defines an Analyzer that checks for common mistakes +// using the sync/atomic package. +// +// # Analyzer atomic +// +// atomic: check for common mistakes using the sync/atomic package +// +// The atomic checker looks for assignment statements of the form: +// +// x = atomic.AddUint64(&x, 1) +// +// which are not atomic. 
+package atomic diff --git a/vendor/golang.org/x/tools/go/analysis/passes/atomicalign/atomicalign.go b/vendor/golang.org/x/tools/go/analysis/passes/atomicalign/atomicalign.go index e2e1a4f67c..aff6d25b3e 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/atomicalign/atomicalign.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/atomicalign/atomicalign.go @@ -18,6 +18,7 @@ import ( "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" + "golang.org/x/tools/go/types/typeutil" ) const Doc = "check for non-64-bits-aligned arguments to sync/atomic functions" @@ -25,6 +26,7 @@ const Doc = "check for non-64-bits-aligned arguments to sync/atomic functions" var Analyzer = &analysis.Analyzer{ Name: "atomicalign", Doc: Doc, + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/atomicalign", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, } @@ -41,31 +43,20 @@ func run(pass *analysis.Pass) (interface{}, error) { nodeFilter := []ast.Node{ (*ast.CallExpr)(nil), } + funcNames := []string{ + "AddInt64", "AddUint64", + "LoadInt64", "LoadUint64", + "StoreInt64", "StoreUint64", + "SwapInt64", "SwapUint64", + "CompareAndSwapInt64", "CompareAndSwapUint64", + } inspect.Preorder(nodeFilter, func(node ast.Node) { call := node.(*ast.CallExpr) - sel, ok := call.Fun.(*ast.SelectorExpr) - if !ok { - return - } - pkgIdent, ok := sel.X.(*ast.Ident) - if !ok { - return - } - pkgName, ok := pass.TypesInfo.Uses[pkgIdent].(*types.PkgName) - if !ok || pkgName.Imported().Path() != "sync/atomic" { - return - } - - switch sel.Sel.Name { - case "AddInt64", "AddUint64", - "LoadInt64", "LoadUint64", - "StoreInt64", "StoreUint64", - "SwapInt64", "SwapUint64", - "CompareAndSwapInt64", "CompareAndSwapUint64": - + fn := typeutil.StaticCallee(pass.TypesInfo, call) + if analysisutil.IsFunctionNamed(fn, "sync/atomic", funcNames...) { // For all the listed functions, the expression to check is always the first function argument. - check64BitAlignment(pass, sel.Sel.Name, call.Args[0]) + check64BitAlignment(pass, fn.Name(), call.Args[0]) } }) @@ -74,8 +65,8 @@ func run(pass *analysis.Pass) (interface{}, error) { func check64BitAlignment(pass *analysis.Pass, funcName string, arg ast.Expr) { // Checks the argument is made of the address operator (&) applied to - // to a struct field (as opposed to a variable as the first word of - // uint64 and int64 variables can be relied upon to be 64-bit aligned. + // a struct field (as opposed to a variable as the first word of + // uint64 and int64 variables can be relied upon to be 64-bit aligned). 
unary, ok := arg.(*ast.UnaryExpr) if !ok || unary.Op != token.AND { return diff --git a/vendor/golang.org/x/tools/go/analysis/passes/bools/bools.go b/vendor/golang.org/x/tools/go/analysis/passes/bools/bools.go index 0d8b0bf4f1..564329774e 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/bools/bools.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/bools/bools.go @@ -14,6 +14,7 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/analysis/passes/internal/analysisutil" + "golang.org/x/tools/go/ast/astutil" "golang.org/x/tools/go/ast/inspector" ) @@ -22,6 +23,7 @@ const Doc = "check for common mistakes involving boolean operators" var Analyzer = &analysis.Analyzer{ Name: "bools", Doc: Doc, + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/bools", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, } @@ -82,7 +84,7 @@ func (op boolOp) commutativeSets(info *types.Info, e *ast.BinaryExpr, seen map[* i := 0 var sets [][]ast.Expr for j := 0; j <= len(exprs); j++ { - if j == len(exprs) || hasSideEffects(info, exprs[j]) { + if j == len(exprs) || analysisutil.HasSideEffects(info, exprs[j]) { if i < j { sets = append(sets, exprs[i:j]) } @@ -161,46 +163,13 @@ func (op boolOp) checkSuspect(pass *analysis.Pass, exprs []ast.Expr) { } } -// hasSideEffects reports whether evaluation of e has side effects. -func hasSideEffects(info *types.Info, e ast.Expr) bool { - safe := true - ast.Inspect(e, func(node ast.Node) bool { - switch n := node.(type) { - case *ast.CallExpr: - typVal := info.Types[n.Fun] - switch { - case typVal.IsType(): - // Type conversion, which is safe. - case typVal.IsBuiltin(): - // Builtin func, conservatively assumed to not - // be safe for now. - safe = false - return false - default: - // A non-builtin func or method call. - // Conservatively assume that all of them have - // side effects for now. - safe = false - return false - } - case *ast.UnaryExpr: - if n.Op == token.ARROW { - safe = false - return false - } - } - return true - }) - return !safe -} - // split returns a slice of all subexpressions in e that are connected by op. // For example, given 'a || (b || c) || d' with the or op, // split returns []{d, c, b, a}. // seen[e] is already true; any newly processed exprs are added to seen. func (op boolOp) split(e ast.Expr, seen map[*ast.BinaryExpr]bool) (exprs []ast.Expr) { for { - e = unparen(e) + e = astutil.Unparen(e) if b, ok := e.(*ast.BinaryExpr); ok && b.Op == op.tok { seen[b] = true exprs = append(exprs, op.split(b.Y, seen)...) @@ -212,14 +181,3 @@ func (op boolOp) split(e ast.Expr, seen map[*ast.BinaryExpr]bool) (exprs []ast.E } return } - -// unparen returns e with any enclosing parentheses stripped. -func unparen(e ast.Expr) ast.Expr { - for { - p, ok := e.(*ast.ParenExpr) - if !ok { - return e - } - e = p.X - } -} diff --git a/vendor/golang.org/x/tools/go/analysis/passes/buildssa/buildssa.go b/vendor/golang.org/x/tools/go/analysis/passes/buildssa/buildssa.go index 02b7b18b3f..f077ea2824 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/buildssa/buildssa.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/buildssa/buildssa.go @@ -6,8 +6,6 @@ // representation of an error-free package and returns the set of all // functions within it. It does not report any diagnostics itself but // may be used as an input to other analyzers. -// -// THIS INTERFACE IS EXPERIMENTAL AND MAY BE SUBJECT TO INCOMPATIBLE CHANGE. 
package buildssa import ( @@ -22,20 +20,19 @@ import ( var Analyzer = &analysis.Analyzer{ Name: "buildssa", Doc: "build SSA-form IR for later passes", + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/buildssa", Run: run, ResultType: reflect.TypeOf(new(SSA)), } // SSA provides SSA-form intermediate representation for all the -// non-blank source functions in the current package. +// source functions in the current package. type SSA struct { Pkg *ssa.Package SrcFuncs []*ssa.Function } func run(pass *analysis.Pass) (interface{}, error) { - // Plundered from ssautil.BuildPackage. - // We must create a new Program for each Package because the // analysis API provides no place to hang a Program shared by // all Packages. Consequently, SSA Packages and Functions do not @@ -52,20 +49,10 @@ func run(pass *analysis.Pass) (interface{}, error) { prog := ssa.NewProgram(pass.Fset, mode) - // Create SSA packages for all imports. - // Order is not significant. - created := make(map[*types.Package]bool) - var createAll func(pkgs []*types.Package) - createAll = func(pkgs []*types.Package) { - for _, p := range pkgs { - if !created[p] { - created[p] = true - prog.CreatePackage(p, nil, nil, true) - createAll(p.Imports()) - } - } + // Create SSA packages for direct imports. + for _, p := range pass.Pkg.Imports() { + prog.CreatePackage(p, nil, nil, true) } - createAll(pass.Pkg.Imports()) // Create and build the primary package. ssapkg := prog.CreatePackage(pass.Pkg, pass.Files, pass.TypesInfo, false) @@ -77,16 +64,6 @@ func run(pass *analysis.Pass) (interface{}, error) { for _, f := range pass.Files { for _, decl := range f.Decls { if fdecl, ok := decl.(*ast.FuncDecl); ok { - - // SSA will not build a Function - // for a FuncDecl named blank. - // That's arguably too strict but - // relaxing it would break uniqueness of - // names of package members. - if fdecl.Name.Name == "_" { - continue - } - // (init functions have distinct Func // objects named "init" and distinct // ssa.Functions named "init#1", ...) diff --git a/vendor/golang.org/x/tools/go/analysis/passes/buildtag/buildtag.go b/vendor/golang.org/x/tools/go/analysis/passes/buildtag/buildtag.go index 775e507a34..55bdad78b7 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/buildtag/buildtag.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/buildtag/buildtag.go @@ -25,6 +25,7 @@ const Doc = "check //go:build and // +build directives" var Analyzer = &analysis.Analyzer{ Name: "buildtag", Doc: Doc, + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/buildtag", Run: runBuildTag, } @@ -39,7 +40,7 @@ func runBuildTag(pass *analysis.Pass) (interface{}, error) { } for _, name := range pass.IgnoredFiles { if strings.HasSuffix(name, ".go") { - f, err := parser.ParseFile(pass.Fset, name, nil, parser.ParseComments) + f, err := parser.ParseFile(pass.Fset, name, nil, parser.ParseComments|parser.SkipObjectResolution) if err != nil { // Not valid Go source code - not our job to diagnose, so ignore. 
return nil, nil diff --git a/vendor/golang.org/x/tools/go/analysis/passes/cgocall/cgocall.go b/vendor/golang.org/x/tools/go/analysis/passes/cgocall/cgocall.go index b61ee5c3dc..4e86439757 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/cgocall/cgocall.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/cgocall/cgocall.go @@ -19,6 +19,7 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/internal/analysisutil" + "golang.org/x/tools/go/ast/astutil" ) const debug = false @@ -35,6 +36,7 @@ or slice to C, either directly, or via a pointer, array, or struct.` var Analyzer = &analysis.Analyzer{ Name: "cgocall", Doc: Doc, + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/cgocall", RunDespiteErrors: true, Run: run, } @@ -63,7 +65,7 @@ func checkCgo(fset *token.FileSet, f *ast.File, info *types.Info, reportf func(t // Is this a C.f() call? var name string - if sel, ok := analysisutil.Unparen(call.Fun).(*ast.SelectorExpr); ok { + if sel, ok := astutil.Unparen(call.Fun).(*ast.SelectorExpr); ok { if id, ok := sel.X.(*ast.Ident); ok && id.Name == "C" { name = sel.Sel.Name } @@ -179,7 +181,7 @@ func typeCheckCgoSourceFiles(fset *token.FileSet, pkg *types.Package, files []*a // If f is a cgo-generated file, Position reports // the original file, honoring //line directives. filename := fset.Position(raw.Pos()).Filename - f, err := parser.ParseFile(fset, filename, nil, parser.Mode(0)) + f, err := parser.ParseFile(fset, filename, nil, parser.SkipObjectResolution) if err != nil { return nil, nil, fmt.Errorf("can't parse raw cgo file: %v", err) } @@ -270,6 +272,7 @@ func typeCheckCgoSourceFiles(fset *token.FileSet, pkg *types.Package, files []*a Sizes: sizes, Error: func(error) {}, // ignore errors (e.g. unused import) } + setGoVersion(tc, pkg) // It's tempting to record the new types in the // existing pass.TypesInfo, but we don't own it. diff --git a/vendor/golang.org/x/tools/go/analysis/passes/cgocall/cgocall_go120.go b/vendor/golang.org/x/tools/go/analysis/passes/cgocall/cgocall_go120.go new file mode 100644 index 0000000000..06b54946d7 --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/cgocall/cgocall_go120.go @@ -0,0 +1,13 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.21 + +package cgocall + +import "go/types" + +func setGoVersion(tc *types.Config, pkg *types.Package) { + // no types.Package.GoVersion until Go 1.21 +} diff --git a/vendor/golang.org/x/tools/go/analysis/passes/cgocall/cgocall_go121.go b/vendor/golang.org/x/tools/go/analysis/passes/cgocall/cgocall_go121.go new file mode 100644 index 0000000000..2a3e1fad22 --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/cgocall/cgocall_go121.go @@ -0,0 +1,13 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build go1.21 + +package cgocall + +import "go/types" + +func setGoVersion(tc *types.Config, pkg *types.Package) { + tc.GoVersion = pkg.GoVersion() +} diff --git a/vendor/golang.org/x/tools/go/analysis/passes/composite/composite.go b/vendor/golang.org/x/tools/go/analysis/passes/composite/composite.go index 64e184d343..847063bb32 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/composite/composite.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/composite/composite.go @@ -37,6 +37,7 @@ should be replaced by: var Analyzer = &analysis.Analyzer{ Name: "composites", Doc: Doc, + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/composite", Requires: []*analysis.Analyzer{inspect.Analyzer}, RunDespiteErrors: true, Run: run, @@ -71,7 +72,7 @@ func run(pass *analysis.Pass) (interface{}, error) { } var structuralTypes []types.Type switch typ := typ.(type) { - case *typeparams.TypeParam: + case *types.TypeParam: terms, err := typeparams.StructuralTerms(typ) if err != nil { return // invalid type @@ -162,7 +163,7 @@ func isLocalType(pass *analysis.Pass, typ types.Type) bool { case *types.Named: // names in package foo are local to foo_test too return strings.TrimSuffix(x.Obj().Pkg().Path(), "_test") == strings.TrimSuffix(pass.Pkg.Path(), "_test") - case *typeparams.TypeParam: + case *types.TypeParam: return strings.TrimSuffix(x.Obj().Pkg().Path(), "_test") == strings.TrimSuffix(pass.Pkg.Path(), "_test") } return false diff --git a/vendor/golang.org/x/tools/go/analysis/passes/copylock/copylock.go b/vendor/golang.org/x/tools/go/analysis/passes/copylock/copylock.go index 8cc93e94dc..6cbbc7e814 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/copylock/copylock.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/copylock/copylock.go @@ -16,6 +16,7 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/analysis/passes/internal/analysisutil" + "golang.org/x/tools/go/ast/astutil" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/internal/typeparams" ) @@ -29,6 +30,7 @@ values should be referred to through a pointer.` var Analyzer = &analysis.Analyzer{ Name: "copylocks", Doc: Doc, + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/copylocks", Requires: []*analysis.Analyzer{inspect.Analyzer}, RunDespiteErrors: true, Run: run, @@ -222,6 +224,8 @@ func (path typePath) String() string { } func lockPathRhs(pass *analysis.Pass, x ast.Expr) typePath { + x = astutil.Unparen(x) // ignore parens on rhs + if _, ok := x.(*ast.CompositeLit); ok { return nil } @@ -230,7 +234,7 @@ func lockPathRhs(pass *analysis.Pass, x ast.Expr) typePath { return nil } if star, ok := x.(*ast.StarExpr); ok { - if _, ok := star.X.(*ast.CallExpr); ok { + if _, ok := astutil.Unparen(star.X).(*ast.CallExpr); ok { // A call may return a pointer to a zero value. return nil } @@ -241,29 +245,23 @@ func lockPathRhs(pass *analysis.Pass, x ast.Expr) typePath { // lockPath returns a typePath describing the location of a lock value // contained in typ. If there is no contained lock, it returns nil. // -// The seenTParams map is used to short-circuit infinite recursion via type -// parameters. -func lockPath(tpkg *types.Package, typ types.Type, seenTParams map[*typeparams.TypeParam]bool) typePath { - if typ == nil { +// The seen map is used to short-circuit infinite recursion due to type cycles. 
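As a reminder of the kind of mistake the copylocks analyzer reports, here is a minimal hypothetical case (type and function names invented for illustration, not taken from this diff):

    package example

    import "sync"

    type counter struct {
        mu sync.Mutex
        n  int
    }

    // byValue is reported by copylocks because the parameter copies c.mu;
    // the counter should be passed as *counter instead.
    func byValue(c counter) int {
        return c.n
    }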
+func lockPath(tpkg *types.Package, typ types.Type, seen map[types.Type]bool) typePath { + if typ == nil || seen[typ] { return nil } + if seen == nil { + seen = make(map[types.Type]bool) + } + seen[typ] = true - if tpar, ok := typ.(*typeparams.TypeParam); ok { - if seenTParams == nil { - // Lazily allocate seenTParams, since the common case will not involve - // any type parameters. - seenTParams = make(map[*typeparams.TypeParam]bool) - } - if seenTParams[tpar] { - return nil - } - seenTParams[tpar] = true + if tpar, ok := typ.(*types.TypeParam); ok { terms, err := typeparams.StructuralTerms(tpar) if err != nil { return nil // invalid type } for _, term := range terms { - subpath := lockPath(tpkg, term.Type(), seenTParams) + subpath := lockPath(tpkg, term.Type(), seen) if len(subpath) > 0 { if term.Tilde() { // Prepend a tilde to our lock path entry to clarify the resulting @@ -297,7 +295,7 @@ func lockPath(tpkg *types.Package, typ types.Type, seenTParams map[*typeparams.T ttyp, ok := typ.Underlying().(*types.Tuple) if ok { for i := 0; i < ttyp.Len(); i++ { - subpath := lockPath(tpkg, ttyp.At(i).Type(), seenTParams) + subpath := lockPath(tpkg, ttyp.At(i).Type(), seen) if subpath != nil { return append(subpath, typ.String()) } @@ -322,16 +320,14 @@ func lockPath(tpkg *types.Package, typ types.Type, seenTParams map[*typeparams.T // In go1.10, sync.noCopy did not implement Locker. // (The Unlock method was added only in CL 121876.) // TODO(adonovan): remove workaround when we drop go1.10. - if named, ok := typ.(*types.Named); ok && - named.Obj().Name() == "noCopy" && - named.Obj().Pkg().Path() == "sync" { + if analysisutil.IsNamedType(typ, "sync", "noCopy") { return []string{typ.String()} } nfields := styp.NumFields() for i := 0; i < nfields; i++ { ftyp := styp.Field(i).Type() - subpath := lockPath(tpkg, ftyp, seenTParams) + subpath := lockPath(tpkg, ftyp, seen) if subpath != nil { return append(subpath, typ.String()) } diff --git a/vendor/golang.org/x/tools/go/analysis/passes/ctrlflow/ctrlflow.go b/vendor/golang.org/x/tools/go/analysis/passes/ctrlflow/ctrlflow.go index 73746d6f04..d21adeee90 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/ctrlflow/ctrlflow.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/ctrlflow/ctrlflow.go @@ -24,6 +24,7 @@ import ( var Analyzer = &analysis.Analyzer{ Name: "ctrlflow", Doc: "build a control-flow graph", + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/ctrlflow", Run: run, ResultType: reflect.TypeOf(new(CFGs)), FactTypes: []analysis.Fact{new(noReturn)}, diff --git a/vendor/golang.org/x/tools/go/analysis/passes/deepequalerrors/deepequalerrors.go b/vendor/golang.org/x/tools/go/analysis/passes/deepequalerrors/deepequalerrors.go index 9ea137386b..1a83bddbce 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/deepequalerrors/deepequalerrors.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/deepequalerrors/deepequalerrors.go @@ -12,6 +12,7 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/types/typeutil" ) @@ -28,11 +29,16 @@ errors is discouraged.` var Analyzer = &analysis.Analyzer{ Name: "deepequalerrors", Doc: Doc, + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/deepequalerrors", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, } func run(pass *analysis.Pass) (interface{}, error) { + if !analysisutil.Imports(pass.Pkg, "reflect") 
{ + return nil, nil // doesn't directly import reflect + } + inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) nodeFilter := []ast.Node{ @@ -40,11 +46,8 @@ func run(pass *analysis.Pass) (interface{}, error) { } inspect.Preorder(nodeFilter, func(n ast.Node) { call := n.(*ast.CallExpr) - fn, ok := typeutil.Callee(pass.TypesInfo, call).(*types.Func) - if !ok { - return - } - if fn.FullName() == "reflect.DeepEqual" && hasError(pass, call.Args[0]) && hasError(pass, call.Args[1]) { + fn, _ := typeutil.Callee(pass.TypesInfo, call).(*types.Func) + if analysisutil.IsFunctionNamed(fn, "reflect", "DeepEqual") && hasError(pass, call.Args[0]) && hasError(pass, call.Args[1]) { pass.ReportRangef(call, "avoid using reflect.DeepEqual with errors") } }) diff --git a/vendor/golang.org/x/tools/go/analysis/passes/defers/defers.go b/vendor/golang.org/x/tools/go/analysis/passes/defers/defers.go new file mode 100644 index 0000000000..5e8e80a6a7 --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/defers/defers.go @@ -0,0 +1,59 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package defers + +import ( + _ "embed" + "go/ast" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/analysis/passes/internal/analysisutil" + "golang.org/x/tools/go/ast/inspector" + "golang.org/x/tools/go/types/typeutil" +) + +//go:embed doc.go +var doc string + +// Analyzer is the defers analyzer. +var Analyzer = &analysis.Analyzer{ + Name: "defers", + Requires: []*analysis.Analyzer{inspect.Analyzer}, + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/defers", + Doc: analysisutil.MustExtractDoc(doc, "defers"), + Run: run, +} + +func run(pass *analysis.Pass) (interface{}, error) { + if !analysisutil.Imports(pass.Pkg, "time") { + return nil, nil + } + + checkDeferCall := func(node ast.Node) bool { + switch v := node.(type) { + case *ast.CallExpr: + if analysisutil.IsFunctionNamed(typeutil.StaticCallee(pass.TypesInfo, v), "time", "Since") { + pass.Reportf(v.Pos(), "call to time.Since is not deferred") + } + case *ast.FuncLit: + return false // prune + } + return true + } + + inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + + nodeFilter := []ast.Node{ + (*ast.DeferStmt)(nil), + } + + inspect.Preorder(nodeFilter, func(n ast.Node) { + d := n.(*ast.DeferStmt) + ast.Inspect(d.Call, checkDeferCall) + }) + + return nil, nil +} diff --git a/vendor/golang.org/x/tools/go/analysis/passes/defers/doc.go b/vendor/golang.org/x/tools/go/analysis/passes/defers/doc.go new file mode 100644 index 0000000000..bdb1351628 --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/defers/doc.go @@ -0,0 +1,25 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package defers defines an Analyzer that checks for common mistakes in defer +// statements. +// +// # Analyzer defers +// +// defers: report common mistakes in defer statements +// +// The defers analyzer reports a diagnostic when a defer statement would +// result in a non-deferred call to time.Since, as experience has shown +// that this is nearly always a mistake. +// +// For example: +// +// start := time.Now() +// ... 
+// defer recordLatency(time.Since(start)) // error: call to time.Since is not deferred +// +// The correct code is: +// +// defer func() { recordLatency(time.Since(start)) }() +package defers diff --git a/vendor/golang.org/x/tools/go/analysis/passes/directive/directive.go b/vendor/golang.org/x/tools/go/analysis/passes/directive/directive.go new file mode 100644 index 0000000000..2691f189aa --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/directive/directive.go @@ -0,0 +1,209 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package directive defines an Analyzer that checks known Go toolchain directives. +package directive + +import ( + "go/ast" + "go/parser" + "go/token" + "strings" + "unicode" + "unicode/utf8" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/internal/analysisutil" +) + +const Doc = `check Go toolchain directives such as //go:debug + +This analyzer checks for problems with known Go toolchain directives +in all Go source files in a package directory, even those excluded by +//go:build constraints, and all non-Go source files too. + +For //go:debug (see https://go.dev/doc/godebug), the analyzer checks +that the directives are placed only in Go source files, only above the +package comment, and only in package main or *_test.go files. + +Support for other known directives may be added in the future. + +This analyzer does not check //go:build, which is handled by the +buildtag analyzer. +` + +var Analyzer = &analysis.Analyzer{ + Name: "directive", + Doc: Doc, + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/directive", + Run: runDirective, +} + +func runDirective(pass *analysis.Pass) (interface{}, error) { + for _, f := range pass.Files { + checkGoFile(pass, f) + } + for _, name := range pass.OtherFiles { + if err := checkOtherFile(pass, name); err != nil { + return nil, err + } + } + for _, name := range pass.IgnoredFiles { + if strings.HasSuffix(name, ".go") { + f, err := parser.ParseFile(pass.Fset, name, nil, parser.ParseComments) + if err != nil { + // Not valid Go source code - not our job to diagnose, so ignore. + continue + } + checkGoFile(pass, f) + } else { + if err := checkOtherFile(pass, name); err != nil { + return nil, err + } + } + } + return nil, nil +} + +func checkGoFile(pass *analysis.Pass, f *ast.File) { + check := newChecker(pass, pass.Fset.File(f.Package).Name(), f) + + for _, group := range f.Comments { + // A +build comment is ignored after or adjoining the package declaration. + if group.End()+1 >= f.Package { + check.inHeader = false + } + // A //go:build comment is ignored after the package declaration + // (but adjoining it is OK, in contrast to +build comments). + if group.Pos() >= f.Package { + check.inHeader = false + } + + // Check each line of a //-comment. + for _, c := range group.List { + check.comment(c.Slash, c.Text) + } + } +} + +func checkOtherFile(pass *analysis.Pass, filename string) error { + // We cannot use the Go parser, since is not a Go source file. + // Read the raw bytes instead. 
+ content, tf, err := analysisutil.ReadFile(pass.Fset, filename) + if err != nil { + return err + } + + check := newChecker(pass, filename, nil) + check.nonGoFile(token.Pos(tf.Base()), string(content)) + return nil +} + +type checker struct { + pass *analysis.Pass + filename string + file *ast.File // nil for non-Go file + inHeader bool // in file header (before package declaration) + inStar bool // currently in a /* */ comment +} + +func newChecker(pass *analysis.Pass, filename string, file *ast.File) *checker { + return &checker{ + pass: pass, + filename: filename, + file: file, + inHeader: true, + } +} + +func (check *checker) nonGoFile(pos token.Pos, fullText string) { + // Process each line. + text := fullText + inStar := false + for text != "" { + offset := len(fullText) - len(text) + var line string + line, text, _ = strings.Cut(text, "\n") + + if !inStar && strings.HasPrefix(line, "//") { + check.comment(pos+token.Pos(offset), line) + continue + } + + // Skip over, cut out any /* */ comments, + // to avoid being confused by a commented-out // comment. + for { + line = strings.TrimSpace(line) + if inStar { + var ok bool + _, line, ok = strings.Cut(line, "*/") + if !ok { + break + } + inStar = false + continue + } + line, inStar = stringsCutPrefix(line, "/*") + if !inStar { + break + } + } + if line != "" { + // Found non-comment non-blank line. + // Ends space for valid //go:build comments, + // but also ends the fraction of the file we can + // reliably parse. From this point on we might + // incorrectly flag "comments" inside multiline + // string constants or anything else (this might + // not even be a Go program). So stop. + break + } + } +} + +func (check *checker) comment(pos token.Pos, line string) { + if !strings.HasPrefix(line, "//go:") { + return + } + // testing hack: stop at // ERROR + if i := strings.Index(line, " // ERROR "); i >= 0 { + line = line[:i] + } + + verb := line + if i := strings.IndexFunc(verb, unicode.IsSpace); i >= 0 { + verb = verb[:i] + if line[i] != ' ' && line[i] != '\t' && line[i] != '\n' { + r, _ := utf8.DecodeRuneInString(line[i:]) + check.pass.Reportf(pos, "invalid space %#q in %s directive", r, verb) + } + } + + switch verb { + default: + // TODO: Use the go language version for the file. + // If that version is not newer than us, then we can + // report unknown directives. + + case "//go:build": + // Ignore. The buildtag analyzer reports misplaced comments. + + case "//go:debug": + if check.file == nil { + check.pass.Reportf(pos, "//go:debug directive only valid in Go source files") + } else if check.file.Name.Name != "main" && !strings.HasSuffix(check.filename, "_test.go") { + check.pass.Reportf(pos, "//go:debug directive only valid in package main or test") + } else if !check.inHeader { + check.pass.Reportf(pos, "//go:debug directive only valid before package declaration") + } + } +} + +// Go 1.20 strings.CutPrefix. 
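To illustrate the placement rules the checker above enforces, a //go:debug directive is accepted only in a Go source file, before the package clause, and only in package main or a *_test.go file; a minimal sketch (the specific setting is just an example from the godebug documentation, not something this diff requires):

    //go:debug panicnil=1

    package main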
+func stringsCutPrefix(s, prefix string) (after string, found bool) { + if !strings.HasPrefix(s, prefix) { + return s, false + } + return s[len(prefix):], true +} diff --git a/vendor/golang.org/x/tools/go/analysis/passes/errorsas/errorsas.go b/vendor/golang.org/x/tools/go/analysis/passes/errorsas/errorsas.go index 96adad3ee8..7f62ad4c82 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/errorsas/errorsas.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/errorsas/errorsas.go @@ -13,6 +13,7 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/types/typeutil" ) @@ -25,6 +26,7 @@ of the second argument is not a pointer to a type implementing error.` var Analyzer = &analysis.Analyzer{ Name: "errorsas", Doc: Doc, + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/errorsas", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, } @@ -37,6 +39,10 @@ func run(pass *analysis.Pass) (interface{}, error) { return nil, nil } + if !analysisutil.Imports(pass.Pkg, "errors") { + return nil, nil // doesn't directly import errors + } + inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) nodeFilter := []ast.Node{ @@ -45,15 +51,12 @@ func run(pass *analysis.Pass) (interface{}, error) { inspect.Preorder(nodeFilter, func(n ast.Node) { call := n.(*ast.CallExpr) fn := typeutil.StaticCallee(pass.TypesInfo, call) - if fn == nil { - return // not a static call + if !analysisutil.IsFunctionNamed(fn, "errors", "As") { + return } if len(call.Args) < 2 { return // not enough arguments, e.g. called with return values of another function } - if fn.FullName() != "errors.As" { - return - } if err := checkAsTarget(pass, call.Args[1]); err != nil { pass.ReportRangef(call, "%v", err) } @@ -63,9 +66,6 @@ func run(pass *analysis.Pass) (interface{}, error) { var errorType = types.Universe.Lookup("error").Type() -// pointerToInterfaceOrError reports whether the type of e is a pointer to an interface or a type implementing error, -// or is the empty interface. - // checkAsTarget reports an error if the second argument to errors.As is invalid. func checkAsTarget(pass *analysis.Pass, e ast.Expr) error { t := pass.TypesInfo.Types[e].Type diff --git a/vendor/golang.org/x/tools/go/analysis/passes/fieldalignment/fieldalignment.go b/vendor/golang.org/x/tools/go/analysis/passes/fieldalignment/fieldalignment.go index aff663046a..012e2ecd0c 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/fieldalignment/fieldalignment.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/fieldalignment/fieldalignment.go @@ -51,6 +51,7 @@ known as "false sharing" that slows down both goroutines. 
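As a concrete illustration of the kind of layout this analyzer optimizes, reordering fields can shrink a struct (sizes assume a 64-bit platform; the type names are invented):

    type loose struct { // 32 bytes: bool, 7 bytes padding, 16-byte string, bool, 7 bytes padding
        done bool
        name string
        ok   bool
    }

    type packed struct { // 24 bytes: 16-byte string, two bools, 6 bytes padding
        name string
        done bool
        ok   bool
    }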
var Analyzer = &analysis.Analyzer{ Name: "fieldalignment", Doc: Doc, + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/fieldalignment", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, } diff --git a/vendor/golang.org/x/tools/go/analysis/passes/findcall/findcall.go b/vendor/golang.org/x/tools/go/analysis/passes/findcall/findcall.go index 27b1b8400f..2671573d1f 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/findcall/findcall.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/findcall/findcall.go @@ -26,6 +26,7 @@ of a particular name.` var Analyzer = &analysis.Analyzer{ Name: "findcall", Doc: Doc, + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/findcall", Run: run, RunDespiteErrors: true, FactTypes: []analysis.Fact{new(foundFact)}, diff --git a/vendor/golang.org/x/tools/go/analysis/passes/framepointer/framepointer.go b/vendor/golang.org/x/tools/go/analysis/passes/framepointer/framepointer.go index 741492e477..0b3ded47ea 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/framepointer/framepointer.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/framepointer/framepointer.go @@ -20,6 +20,7 @@ const Doc = "report assembly that clobbers the frame pointer before saving it" var Analyzer = &analysis.Analyzer{ Name: "framepointer", Doc: Doc, + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/framepointer", Run: run, } diff --git a/vendor/golang.org/x/tools/go/analysis/passes/httpresponse/httpresponse.go b/vendor/golang.org/x/tools/go/analysis/passes/httpresponse/httpresponse.go index 3b9168c6c3..c6b6c81b42 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/httpresponse/httpresponse.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/httpresponse/httpresponse.go @@ -35,6 +35,7 @@ diagnostic for such mistakes.` var Analyzer = &analysis.Analyzer{ Name: "httpresponse", Doc: Doc, + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/httpresponse", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, } @@ -115,7 +116,7 @@ func isHTTPFuncOrMethodOnClient(info *types.Info, expr *ast.CallExpr) bool { if res.Len() != 2 { return false // the function called does not return two values. } - if ptr, ok := res.At(0).Type().(*types.Pointer); !ok || !isNamedType(ptr.Elem(), "net/http", "Response") { + if ptr, ok := res.At(0).Type().(*types.Pointer); !ok || !analysisutil.IsNamedType(ptr.Elem(), "net/http", "Response") { return false // the first return type is not *http.Response. } @@ -130,11 +131,11 @@ func isHTTPFuncOrMethodOnClient(info *types.Info, expr *ast.CallExpr) bool { return ok && id.Name == "http" // function in net/http package. } - if isNamedType(typ, "net/http", "Client") { + if analysisutil.IsNamedType(typ, "net/http", "Client") { return true // method on http.Client. } ptr, ok := typ.(*types.Pointer) - return ok && isNamedType(ptr.Elem(), "net/http", "Client") // method on *http.Client. + return ok && analysisutil.IsNamedType(ptr.Elem(), "net/http", "Client") // method on *http.Client. } // restOfBlock, given a traversal stack, finds the innermost containing @@ -170,13 +171,3 @@ func rootIdent(n ast.Node) *ast.Ident { return nil } } - -// isNamedType reports whether t is the named type path.name. 
-func isNamedType(t types.Type, path, name string) bool { - n, ok := t.(*types.Named) - if !ok { - return false - } - obj := n.Obj() - return obj.Name() == name && obj.Pkg() != nil && obj.Pkg().Path() == path -} diff --git a/vendor/golang.org/x/tools/go/analysis/passes/ifaceassert/doc.go b/vendor/golang.org/x/tools/go/analysis/passes/ifaceassert/doc.go new file mode 100644 index 0000000000..3d2b1a3dcb --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/ifaceassert/doc.go @@ -0,0 +1,24 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package ifaceassert defines an Analyzer that flags +// impossible interface-interface type assertions. +// +// # Analyzer ifaceassert +// +// ifaceassert: detect impossible interface-to-interface type assertions +// +// This checker flags type assertions v.(T) and corresponding type-switch cases +// in which the static type V of v is an interface that cannot possibly implement +// the target interface T. This occurs when V and T contain methods with the same +// name but different signatures. Example: +// +// var v interface { +// Read() +// } +// _ = v.(io.Reader) +// +// The Read method in v has a different signature than the Read method in +// io.Reader, so this assertion cannot succeed. +package ifaceassert diff --git a/vendor/golang.org/x/tools/go/analysis/passes/ifaceassert/ifaceassert.go b/vendor/golang.org/x/tools/go/analysis/passes/ifaceassert/ifaceassert.go index 30130f63ea..cd4a477626 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/ifaceassert/ifaceassert.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/ifaceassert/ifaceassert.go @@ -2,38 +2,26 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package ifaceassert defines an Analyzer that flags -// impossible interface-interface type assertions. package ifaceassert import ( + _ "embed" "go/ast" "go/types" "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" ) -const Doc = `detect impossible interface-to-interface type assertions - -This checker flags type assertions v.(T) and corresponding type-switch cases -in which the static type V of v is an interface that cannot possibly implement -the target interface T. This occurs when V and T contain methods with the same -name but different signatures. Example: - - var v interface { - Read() - } - _ = v.(io.Reader) - -The Read method in v has a different signature than the Read method in -io.Reader, so this assertion cannot succeed. 
-` +//go:embed doc.go +var doc string var Analyzer = &analysis.Analyzer{ Name: "ifaceassert", - Doc: Doc, + Doc: analysisutil.MustExtractDoc(doc, "ifaceassert"), + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/ifaceassert", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, } diff --git a/vendor/golang.org/x/tools/go/analysis/passes/ifaceassert/parameterized.go b/vendor/golang.org/x/tools/go/analysis/passes/ifaceassert/parameterized.go index b35f62dc73..12507f9967 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/ifaceassert/parameterized.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/ifaceassert/parameterized.go @@ -67,7 +67,7 @@ func (w *tpWalker) isParameterized(typ types.Type) (res bool) { // of a generic function type (or an interface method) that is // part of the type we're testing. We don't care about these type // parameters. - // Similarly, the receiver of a method may declare (rather then + // Similarly, the receiver of a method may declare (rather than // use) type parameters, we don't care about those either. // Thus, we only need to look at the input and result parameters. return w.isParameterized(t.Params()) || w.isParameterized(t.Results()) @@ -95,14 +95,14 @@ func (w *tpWalker) isParameterized(typ types.Type) (res bool) { return w.isParameterized(t.Elem()) case *types.Named: - list := typeparams.NamedTypeArgs(t) + list := t.TypeArgs() for i, n := 0, list.Len(); i < n; i++ { if w.isParameterized(list.At(i)) { return true } } - case *typeparams.TypeParam: + case *types.TypeParam: return true default: diff --git a/vendor/golang.org/x/tools/go/analysis/passes/inspect/inspect.go b/vendor/golang.org/x/tools/go/analysis/passes/inspect/inspect.go index 165c70cbd3..3b121cb0ce 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/inspect/inspect.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/inspect/inspect.go @@ -38,6 +38,7 @@ import ( var Analyzer = &analysis.Analyzer{ Name: "inspect", Doc: "optimize AST traversal for later passes", + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/inspect", Run: run, RunDespiteErrors: true, ResultType: reflect.TypeOf(new(inspector.Inspector)), diff --git a/vendor/golang.org/x/tools/go/analysis/passes/internal/analysisutil/util.go b/vendor/golang.org/x/tools/go/analysis/passes/internal/analysisutil/util.go index ac37e4784e..3f01b3b55d 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/internal/analysisutil/util.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/internal/analysisutil/util.go @@ -12,7 +12,9 @@ import ( "go/printer" "go/token" "go/types" - "io/ioutil" + "os" + + "golang.org/x/tools/internal/analysisinternal" ) // Format returns a string representation of the expression. @@ -55,21 +57,10 @@ func HasSideEffects(info *types.Info, e ast.Expr) bool { return !safe } -// Unparen returns e with any enclosing parentheses stripped. -func Unparen(e ast.Expr) ast.Expr { - for { - p, ok := e.(*ast.ParenExpr) - if !ok { - return e - } - e = p.X - } -} - // ReadFile reads a file and adds it to the FileSet // so that we can report errors against it using lineStart. func ReadFile(fset *token.FileSet, filename string) ([]byte, *token.File, error) { - content, err := ioutil.ReadFile(filename) + content, err := os.ReadFile(filename) if err != nil { return nil, nil, err } @@ -118,3 +109,48 @@ func Imports(pkg *types.Package, path string) bool { } return false } + +// IsNamedType reports whether t is the named type with the given package path +// and one of the given names. 
+// This function avoids allocating the concatenation of "pkg.Name", +// which is important for the performance of syntax matching. +func IsNamedType(t types.Type, pkgPath string, names ...string) bool { + n, ok := t.(*types.Named) + if !ok { + return false + } + obj := n.Obj() + if obj == nil || obj.Pkg() == nil || obj.Pkg().Path() != pkgPath { + return false + } + name := obj.Name() + for _, n := range names { + if name == n { + return true + } + } + return false +} + +// IsFunctionNamed reports whether f is a top-level function defined in the +// given package and has one of the given names. +// It returns false if f is nil or a method. +func IsFunctionNamed(f *types.Func, pkgPath string, names ...string) bool { + if f == nil { + return false + } + if f.Pkg() == nil || f.Pkg().Path() != pkgPath { + return false + } + if f.Type().(*types.Signature).Recv() != nil { + return false + } + for _, n := range names { + if f.Name() == n { + return true + } + } + return false +} + +var MustExtractDoc = analysisinternal.MustExtractDoc diff --git a/vendor/golang.org/x/tools/go/analysis/passes/loopclosure/doc.go b/vendor/golang.org/x/tools/go/analysis/passes/loopclosure/doc.go new file mode 100644 index 0000000000..c95b1c1c98 --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/loopclosure/doc.go @@ -0,0 +1,75 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package loopclosure defines an Analyzer that checks for references to +// enclosing loop variables from within nested functions. +// +// # Analyzer loopclosure +// +// loopclosure: check references to loop variables from within nested functions +// +// This analyzer reports places where a function literal references the +// iteration variable of an enclosing loop, and the loop calls the function +// in such a way (e.g. with go or defer) that it may outlive the loop +// iteration and possibly observe the wrong value of the variable. +// +// Note: An iteration variable can only outlive a loop iteration in Go versions <=1.21. +// In Go 1.22 and later, the loop variable lifetimes changed to create a new +// iteration variable per loop iteration. (See go.dev/issue/60078.) +// +// In this example, all the deferred functions run after the loop has +// completed, so all observe the final value of v [ 0 { + if tparams := fn.Type.TypeParams; tparams != nil && len(tparams.List) > 0 { pass.Reportf(fn.Pos(), "%s should not have type params", fnName) } @@ -474,7 +459,7 @@ func checkTest(pass *analysis.Pass, fn *ast.FuncDecl, prefix string) { return } - if tparams := typeparams.ForFuncType(fn.Type); tparams != nil && len(tparams.List) > 0 { + if tparams := fn.Type.TypeParams; tparams != nil && len(tparams.List) > 0 { // Note: cmd/go/internal/load also errors about TestXXX and BenchmarkXXX functions with type parameters. // We have currently decided to also warn before compilation/package loading. This can help users in IDEs. // TODO(adonovan): use ReportRangef(tparams). diff --git a/vendor/golang.org/x/tools/go/analysis/passes/timeformat/doc.go b/vendor/golang.org/x/tools/go/analysis/passes/timeformat/doc.go new file mode 100644 index 0000000000..5c665b298b --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/timeformat/doc.go @@ -0,0 +1,15 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Package timeformat defines an Analyzer that checks for the use +// of time.Format or time.Parse calls with a bad format. +// +// # Analyzer timeformat +// +// timeformat: check for calls of (time.Time).Format or time.Parse with 2006-02-01 +// +// The timeformat checker looks for time formats with the 2006-02-01 (yyyy-dd-mm) +// format. Internationally, "yyyy-dd-mm" does not occur in common calendar date +// standards, and so it is more likely that 2006-01-02 (yyyy-mm-dd) was intended. +package timeformat diff --git a/vendor/golang.org/x/tools/go/analysis/passes/timeformat/timeformat.go b/vendor/golang.org/x/tools/go/analysis/passes/timeformat/timeformat.go new file mode 100644 index 0000000000..eb84502bd9 --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/timeformat/timeformat.go @@ -0,0 +1,120 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package timeformat defines an Analyzer that checks for the use +// of time.Format or time.Parse calls with a bad format. +package timeformat + +import ( + _ "embed" + "go/ast" + "go/constant" + "go/token" + "go/types" + "strings" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/analysis/passes/internal/analysisutil" + "golang.org/x/tools/go/ast/inspector" + "golang.org/x/tools/go/types/typeutil" +) + +const badFormat = "2006-02-01" +const goodFormat = "2006-01-02" + +//go:embed doc.go +var doc string + +var Analyzer = &analysis.Analyzer{ + Name: "timeformat", + Doc: analysisutil.MustExtractDoc(doc, "timeformat"), + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/timeformat", + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Run: run, +} + +func run(pass *analysis.Pass) (interface{}, error) { + // Note: (time.Time).Format is a method and can be a typeutil.Callee + // without directly importing "time". So we cannot just skip this package + // when !analysisutil.Imports(pass.Pkg, "time"). + // TODO(taking): Consider using a prepass to collect typeutil.Callees. + + inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + + nodeFilter := []ast.Node{ + (*ast.CallExpr)(nil), + } + inspect.Preorder(nodeFilter, func(n ast.Node) { + call := n.(*ast.CallExpr) + fn, ok := typeutil.Callee(pass.TypesInfo, call).(*types.Func) + if !ok { + return + } + if !isTimeDotFormat(fn) && !isTimeDotParse(fn) { + return + } + if len(call.Args) > 0 { + arg := call.Args[0] + badAt := badFormatAt(pass.TypesInfo, arg) + + if badAt > -1 { + // Check if it's a literal string, otherwise we can't suggest a fix. + if _, ok := arg.(*ast.BasicLit); ok { + pos := int(arg.Pos()) + badAt + 1 // +1 to skip the " or ` + end := pos + len(badFormat) + + pass.Report(analysis.Diagnostic{ + Pos: token.Pos(pos), + End: token.Pos(end), + Message: badFormat + " should be " + goodFormat, + SuggestedFixes: []analysis.SuggestedFix{{ + Message: "Replace " + badFormat + " with " + goodFormat, + TextEdits: []analysis.TextEdit{{ + Pos: token.Pos(pos), + End: token.Pos(end), + NewText: []byte(goodFormat), + }}, + }}, + }) + } else { + pass.Reportf(arg.Pos(), badFormat+" should be "+goodFormat) + } + } + } + }) + return nil, nil +} + +func isTimeDotFormat(f *types.Func) bool { + if f.Name() != "Format" || f.Pkg() == nil || f.Pkg().Path() != "time" { + return false + } + // Verify that the receiver is time.Time. 
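For reference, a minimal sketch of the call pattern the timeformat pass reports (hypothetical code, not part of the vendored files):

    t := time.Now()
    _ = t.Format("2006-02-01") // reported: 2006-02-01 should be 2006-01-02
    _ = t.Format("2006-01-02") // ok: yyyy-mm-dd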
+ recv := f.Type().(*types.Signature).Recv() + return recv != nil && analysisutil.IsNamedType(recv.Type(), "time", "Time") +} + +func isTimeDotParse(f *types.Func) bool { + return analysisutil.IsFunctionNamed(f, "time", "Parse") +} + +// badFormatAt return the start of a bad format in e or -1 if no bad format is found. +func badFormatAt(info *types.Info, e ast.Expr) int { + tv, ok := info.Types[e] + if !ok { // no type info, assume good + return -1 + } + + t, ok := tv.Type.(*types.Basic) + if !ok || t.Info()&types.IsString == 0 { + return -1 + } + + if tv.Value == nil { + return -1 + } + + return strings.Index(constant.StringVal(tv.Value), badFormat) +} diff --git a/vendor/golang.org/x/tools/go/analysis/passes/unmarshal/doc.go b/vendor/golang.org/x/tools/go/analysis/passes/unmarshal/doc.go new file mode 100644 index 0000000000..5781bbd32d --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/unmarshal/doc.go @@ -0,0 +1,14 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// The unmarshal package defines an Analyzer that checks for passing +// non-pointer or non-interface types to unmarshal and decode functions. +// +// # Analyzer unmarshal +// +// unmarshal: report passing non-pointer or non-interface values to unmarshal +// +// The unmarshal analysis reports calls to functions such as json.Unmarshal +// in which the argument type is not a pointer or an interface. +package unmarshal diff --git a/vendor/golang.org/x/tools/go/analysis/passes/unmarshal/unmarshal.go b/vendor/golang.org/x/tools/go/analysis/passes/unmarshal/unmarshal.go index 5129048a07..f4e73528b4 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/unmarshal/unmarshal.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/unmarshal/unmarshal.go @@ -2,29 +2,27 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// The unmarshal package defines an Analyzer that checks for passing -// non-pointer or non-interface types to unmarshal and decode functions. package unmarshal import ( + _ "embed" "go/ast" "go/types" "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/go/types/typeutil" - "golang.org/x/tools/internal/typeparams" ) -const Doc = `report passing non-pointer or non-interface values to unmarshal - -The unmarshal analysis reports calls to functions such as json.Unmarshal -in which the argument type is not a pointer or an interface.` +//go:embed doc.go +var doc string var Analyzer = &analysis.Analyzer{ Name: "unmarshal", - Doc: Doc, + Doc: analysisutil.MustExtractDoc(doc, "unmarshal"), + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/unmarshal", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, } @@ -37,6 +35,12 @@ func run(pass *analysis.Pass) (interface{}, error) { return nil, nil } + // Note: (*"encoding/json".Decoder).Decode, (* "encoding/gob".Decoder).Decode + // and (* "encoding/xml".Decoder).Decode are methods and can be a typeutil.Callee + // without directly importing their packages. So we cannot just skip this package + // when !analysisutil.Imports(pass.Pkg, "encoding/..."). + // TODO(taking): Consider using a prepass to collect typeutil.Callees. 
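An illustrative sketch of the mistake the unmarshal analyzer above reports, passing a non-pointer value to json.Unmarshal; it is not part of the vendored sources.

package main

import (
	"encoding/json"
	"fmt"
)

type point struct{ X, Y int }

func main() {
	data := []byte(`{"X":1,"Y":2}`)

	var p point
	_ = json.Unmarshal(data, p)  // unmarshal: non-pointer passed as the second argument; p is not modified
	_ = json.Unmarshal(data, &p) // ok: pointer, fields are populated

	fmt.Println(p)
}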
+ inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) nodeFilter := []ast.Node{ @@ -51,6 +55,7 @@ func run(pass *analysis.Pass) (interface{}, error) { // Classify the callee (without allocating memory). argidx := -1 + recv := fn.Type().(*types.Signature).Recv() if fn.Name() == "Unmarshal" && recv == nil { // "encoding/json".Unmarshal @@ -86,7 +91,7 @@ func run(pass *analysis.Pass) (interface{}, error) { t := pass.TypesInfo.Types[call.Args[argidx]].Type switch t.Underlying().(type) { - case *types.Pointer, *types.Interface, *typeparams.TypeParam: + case *types.Pointer, *types.Interface, *types.TypeParam: return } diff --git a/vendor/golang.org/x/tools/go/analysis/passes/unreachable/doc.go b/vendor/golang.org/x/tools/go/analysis/passes/unreachable/doc.go new file mode 100644 index 0000000000..d17d0d9444 --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/unreachable/doc.go @@ -0,0 +1,14 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package unreachable defines an Analyzer that checks for unreachable code. +// +// # Analyzer unreachable +// +// unreachable: check for unreachable code +// +// The unreachable analyzer finds statements that execution can never reach +// because they are preceded by an return statement, a call to panic, an +// infinite loop, or similar constructs. +package unreachable diff --git a/vendor/golang.org/x/tools/go/analysis/passes/unreachable/unreachable.go b/vendor/golang.org/x/tools/go/analysis/passes/unreachable/unreachable.go index 90896dd1bb..b810db7ee9 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/unreachable/unreachable.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/unreachable/unreachable.go @@ -2,30 +2,29 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package unreachable defines an Analyzer that checks for unreachable code. package unreachable // TODO(adonovan): use the new cfg package, which is more precise. import ( + _ "embed" "go/ast" "go/token" "log" "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ast/inspector" ) -const Doc = `check for unreachable code - -The unreachable analyzer finds statements that execution can never reach -because they are preceded by an return statement, a call to panic, an -infinite loop, or similar constructs.` +//go:embed doc.go +var doc string var Analyzer = &analysis.Analyzer{ Name: "unreachable", - Doc: Doc, + Doc: analysisutil.MustExtractDoc(doc, "unreachable"), + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/unreachable", Requires: []*analysis.Analyzer{inspect.Analyzer}, RunDespiteErrors: true, Run: run, diff --git a/vendor/golang.org/x/tools/go/analysis/passes/unsafeptr/doc.go b/vendor/golang.org/x/tools/go/analysis/passes/unsafeptr/doc.go new file mode 100644 index 0000000000..de10804cb1 --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/unsafeptr/doc.go @@ -0,0 +1,17 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package unsafeptr defines an Analyzer that checks for invalid +// conversions of uintptr to unsafe.Pointer. 
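An illustrative sketch of code the unreachable analyzer above reports; it is not part of the vendored sources.

package main

import "fmt"

func describe(n int) string {
	if n > 0 {
		return "positive"
	}
	return "non-positive"
	fmt.Println("never printed") // unreachable: statement follows an unconditional return
}

func main() {
	fmt.Println(describe(1))
}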
+// +// # Analyzer unsafeptr +// +// unsafeptr: check for invalid conversions of uintptr to unsafe.Pointer +// +// The unsafeptr analyzer reports likely incorrect uses of unsafe.Pointer +// to convert integers to pointers. A conversion from uintptr to +// unsafe.Pointer is invalid if it implies that there is a uintptr-typed +// word in memory that holds a pointer value, because that word will be +// invisible to stack copying and to the garbage collector. +package unsafeptr diff --git a/vendor/golang.org/x/tools/go/analysis/passes/unsafeptr/unsafeptr.go b/vendor/golang.org/x/tools/go/analysis/passes/unsafeptr/unsafeptr.go index ed86e5ebf0..32e71ef979 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/unsafeptr/unsafeptr.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/unsafeptr/unsafeptr.go @@ -7,6 +7,7 @@ package unsafeptr import ( + _ "embed" "go/ast" "go/token" "go/types" @@ -14,20 +15,17 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/analysis/passes/internal/analysisutil" + "golang.org/x/tools/go/ast/astutil" "golang.org/x/tools/go/ast/inspector" ) -const Doc = `check for invalid conversions of uintptr to unsafe.Pointer - -The unsafeptr analyzer reports likely incorrect uses of unsafe.Pointer -to convert integers to pointers. A conversion from uintptr to -unsafe.Pointer is invalid if it implies that there is a uintptr-typed -word in memory that holds a pointer value, because that word will be -invisible to stack copying and to the garbage collector.` +//go:embed doc.go +var doc string var Analyzer = &analysis.Analyzer{ Name: "unsafeptr", - Doc: Doc, + Doc: analysisutil.MustExtractDoc(doc, "unsafeptr"), + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/unsafeptr", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, } @@ -71,7 +69,7 @@ func isSafeUintptr(info *types.Info, x ast.Expr) bool { // Check unsafe.Pointer safety rules according to // https://golang.org/pkg/unsafe/#Pointer. - switch x := analysisutil.Unparen(x).(type) { + switch x := astutil.Unparen(x).(type) { case *ast.SelectorExpr: // "(6) Conversion of a reflect.SliceHeader or // reflect.StringHeader Data field to or from Pointer." @@ -107,8 +105,7 @@ func isSafeUintptr(info *types.Info, x ast.Expr) bool { } switch sel.Sel.Name { case "Pointer", "UnsafeAddr": - t, ok := info.Types[sel.X].Type.(*types.Named) - if ok && t.Obj().Pkg().Path() == "reflect" && t.Obj().Name() == "Value" { + if analysisutil.IsNamedType(info.Types[sel.X].Type, "reflect", "Value") { return true } } @@ -121,7 +118,7 @@ func isSafeUintptr(info *types.Info, x ast.Expr) bool { // isSafeArith reports whether x is a pointer arithmetic expression that is safe // to convert to unsafe.Pointer. func isSafeArith(info *types.Info, x ast.Expr) bool { - switch x := analysisutil.Unparen(x).(type) { + switch x := astutil.Unparen(x).(type) { case *ast.CallExpr: // Base case: initial conversion from unsafe.Pointer to uintptr. return len(x.Args) == 1 && @@ -156,13 +153,5 @@ func hasBasicType(info *types.Info, x ast.Expr, kind types.BasicKind) bool { // isReflectHeader reports whether t is reflect.SliceHeader or reflect.StringHeader. 
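A hedged sketch contrasting the uintptr-to-unsafe.Pointer round trip that unsafeptr reports with the single-expression arithmetic form it accepts; illustrative only, not part of the vendored sources.

package main

import (
	"fmt"
	"unsafe"
)

func main() {
	xs := [4]int{10, 20, 30, 40}

	// Reported: the address is held in a plain uintptr variable, which the
	// garbage collector and stack copier do not track as a pointer.
	addr := uintptr(unsafe.Pointer(&xs[0]))
	p := unsafe.Pointer(addr + unsafe.Sizeof(xs[0])) // unsafeptr: possible misuse of unsafe.Pointer
	fmt.Println(*(*int)(p))

	// Accepted: conversion and arithmetic happen in a single expression,
	// following the rules at https://golang.org/pkg/unsafe/#Pointer.
	q := unsafe.Pointer(uintptr(unsafe.Pointer(&xs[0])) + unsafe.Sizeof(xs[0]))
	fmt.Println(*(*int)(q))
}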
func isReflectHeader(t types.Type) bool { - if named, ok := t.(*types.Named); ok { - if obj := named.Obj(); obj.Pkg() != nil && obj.Pkg().Path() == "reflect" { - switch obj.Name() { - case "SliceHeader", "StringHeader": - return true - } - } - } - return false + return analysisutil.IsNamedType(t, "reflect", "SliceHeader", "StringHeader") } diff --git a/vendor/golang.org/x/tools/go/analysis/passes/unusedresult/doc.go b/vendor/golang.org/x/tools/go/analysis/passes/unusedresult/doc.go new file mode 100644 index 0000000000..a1bf4cf940 --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/unusedresult/doc.go @@ -0,0 +1,19 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package unusedresult defines an analyzer that checks for unused +// results of calls to certain pure functions. +// +// # Analyzer unusedresult +// +// unusedresult: check for unused results of calls to some functions +// +// Some functions like fmt.Errorf return a result and have no side +// effects, so it is always a mistake to discard the result. Other +// functions may return an error that must not be ignored, or a cleanup +// operation that must be called. This analyzer reports calls to +// functions like these when the result of the call is ignored. +// +// The set of functions may be controlled using flags. +package unusedresult diff --git a/vendor/golang.org/x/tools/go/analysis/passes/unusedresult/unusedresult.go b/vendor/golang.org/x/tools/go/analysis/passes/unusedresult/unusedresult.go index 06747ba72b..76f42b052e 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/unusedresult/unusedresult.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/unusedresult/unusedresult.go @@ -3,10 +3,18 @@ // license that can be found in the LICENSE file. // Package unusedresult defines an analyzer that checks for unused -// results of calls to certain pure functions. +// results of calls to certain functions. package unusedresult +// It is tempting to make this analysis inductive: for each function +// that tail-calls one of the functions that we check, check those +// functions too. However, just because you must use the result of +// fmt.Sprintf doesn't mean you need to use the result of every +// function that returns a formatted string: it may have other results +// and effects. + import ( + _ "embed" "go/ast" "go/token" "go/types" @@ -16,25 +24,18 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/analysis/passes/internal/analysisutil" + "golang.org/x/tools/go/ast/astutil" "golang.org/x/tools/go/ast/inspector" - "golang.org/x/tools/internal/typeparams" + "golang.org/x/tools/go/types/typeutil" ) -// TODO(adonovan): make this analysis modular: export a mustUseResult -// fact for each function that tail-calls one of the functions that we -// check, and check those functions too. - -const Doc = `check for unused results of calls to some functions - -Some functions like fmt.Errorf return a result and have no side effects, -so it is always a mistake to discard the result. This analyzer reports -calls to certain functions in which the result of the call is ignored. 
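An illustrative sketch of what the unusedresult analyzer above reports, assuming its default function list (which includes fmt.Errorf); it is not part of the vendored sources.

package main

import (
	"errors"
	"fmt"
)

func main() {
	err := errors.New("boom")

	fmt.Errorf("wrap: %w", err)            // unusedresult: result of fmt.Errorf call not used
	wrapped := fmt.Errorf("wrap: %w", err) // ok: the result is assigned and used

	fmt.Println(wrapped)
}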
- -The set of functions may be controlled using flags.` +//go:embed doc.go +var doc string var Analyzer = &analysis.Analyzer{ Name: "unusedresult", - Doc: Doc, + Doc: analysisutil.MustExtractDoc(doc, "unusedresult"), + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/unusedresult", Requires: []*analysis.Analyzer{inspect.Analyzer}, Run: run, } @@ -43,9 +44,40 @@ var Analyzer = &analysis.Analyzer{ var funcs, stringMethods stringSetFlag func init() { - // TODO(adonovan): provide a comment syntax to allow users to - // add their functions to this set using facts. - funcs.Set("errors.New,fmt.Errorf,fmt.Sprintf,fmt.Sprint,sort.Reverse,context.WithValue,context.WithCancel,context.WithDeadline,context.WithTimeout") + // TODO(adonovan): provide a comment or declaration syntax to + // allow users to add their functions to this set using facts. + // For example: + // + // func ignoringTheErrorWouldBeVeryBad() error { + // type mustUseResult struct{} // enables vet unusedresult check + // ... + // } + // + // ignoringTheErrorWouldBeVeryBad() // oops + // + + // List standard library functions here. + // The context.With{Cancel,Deadline,Timeout} entries are + // effectively redundant wrt the lostcancel analyzer. + funcs = stringSetFlag{ + "context.WithCancel": true, + "context.WithDeadline": true, + "context.WithTimeout": true, + "context.WithValue": true, + "errors.New": true, + "fmt.Errorf": true, + "fmt.Sprint": true, + "fmt.Sprintf": true, + "slices.Clip": true, + "slices.Compact": true, + "slices.CompactFunc": true, + "slices.Delete": true, + "slices.DeleteFunc": true, + "slices.Grow": true, + "slices.Insert": true, + "slices.Replace": true, + "sort.Reverse": true, + } Analyzer.Flags.Var(&funcs, "funcs", "comma-separated list of functions whose results must be used") @@ -57,49 +89,41 @@ func init() { func run(pass *analysis.Pass) (interface{}, error) { inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + // Split functions into (pkg, name) pairs to save allocation later. + pkgFuncs := make(map[[2]string]bool, len(funcs)) + for s := range funcs { + if i := strings.LastIndexByte(s, '.'); i > 0 { + pkgFuncs[[2]string{s[:i], s[i+1:]}] = true + } + } + nodeFilter := []ast.Node{ (*ast.ExprStmt)(nil), } inspect.Preorder(nodeFilter, func(n ast.Node) { - call, ok := analysisutil.Unparen(n.(*ast.ExprStmt).X).(*ast.CallExpr) + call, ok := astutil.Unparen(n.(*ast.ExprStmt).X).(*ast.CallExpr) if !ok { return // not a call statement } - fun := analysisutil.Unparen(call.Fun) - - if pass.TypesInfo.Types[fun].IsType() { - return // a conversion, not a call - } - x, _, _, _ := typeparams.UnpackIndexExpr(fun) - if x != nil { - fun = x // If this is generic function or method call, skip the instantiation arguments - } - - selector, ok := fun.(*ast.SelectorExpr) + // Call to function or method? + fn, ok := typeutil.Callee(pass.TypesInfo, call).(*types.Func) if !ok { - return // neither a method call nor a qualified ident + return // e.g. var or builtin } - - sel, ok := pass.TypesInfo.Selections[selector] - if ok && sel.Kind() == types.MethodVal { + if sig := fn.Type().(*types.Signature); sig.Recv() != nil { // method (e.g. 
foo.String()) - obj := sel.Obj().(*types.Func) - sig := sel.Type().(*types.Signature) if types.Identical(sig, sigNoArgsStringResult) { - if stringMethods[obj.Name()] { + if stringMethods[fn.Name()] { pass.Reportf(call.Lparen, "result of (%s).%s call not used", - sig.Recv().Type(), obj.Name()) + sig.Recv().Type(), fn.Name()) } } - } else if !ok { - // package-qualified function (e.g. fmt.Errorf) - obj := pass.TypesInfo.Uses[selector.Sel] - if obj, ok := obj.(*types.Func); ok { - qname := obj.Pkg().Path() + "." + obj.Name() - if funcs[qname] { - pass.Reportf(call.Lparen, "result of %v call not used", qname) - } + } else { + // package-level function (e.g. fmt.Errorf) + if pkgFuncs[[2]string{fn.Pkg().Path(), fn.Name()}] { + pass.Reportf(call.Lparen, "result of %s.%s call not used", + fn.Pkg().Path(), fn.Name()) } } }) diff --git a/vendor/golang.org/x/tools/go/analysis/passes/unusedwrite/doc.go b/vendor/golang.org/x/tools/go/analysis/passes/unusedwrite/doc.go new file mode 100644 index 0000000000..de10dc8c8e --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/unusedwrite/doc.go @@ -0,0 +1,34 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package unusedwrite checks for unused writes to the elements of a struct or array object. +// +// # Analyzer unusedwrite +// +// unusedwrite: checks for unused writes +// +// The analyzer reports instances of writes to struct fields and +// arrays that are never read. Specifically, when a struct object +// or an array is copied, its elements are copied implicitly by +// the compiler, and any element write to this copy does nothing +// with the original object. +// +// For example: +// +// type T struct { x int } +// +// func f(input []T) { +// for i, v := range input { // v is a copy +// v.x = i // unused write to field x +// } +// } +// +// Another example is about non-pointer receiver: +// +// type T struct { x int } +// +// func (t T) f() { // t is a copy +// t.x = i // unused write to field x +// } +package unusedwrite diff --git a/vendor/golang.org/x/tools/go/analysis/passes/unusedwrite/unusedwrite.go b/vendor/golang.org/x/tools/go/analysis/passes/unusedwrite/unusedwrite.go index 9cc45e0a36..f5d0f116ca 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/unusedwrite/unusedwrite.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/unusedwrite/unusedwrite.go @@ -2,49 +2,28 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package unusedwrite checks for unused writes to the elements of a struct or array object. package unusedwrite import ( + _ "embed" "fmt" "go/types" "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/buildssa" + "golang.org/x/tools/go/analysis/passes/internal/analysisutil" "golang.org/x/tools/go/ssa" ) -// Doc is a documentation string. -const Doc = `checks for unused writes - -The analyzer reports instances of writes to struct fields and -arrays that are never read. Specifically, when a struct object -or an array is copied, its elements are copied implicitly by -the compiler, and any element write to this copy does nothing -with the original object. 
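A hedged sketch of the usual fixes for the writes unusedwrite reports, as described above: write through the slice index instead of a range copy, or use a pointer receiver; illustrative only, not part of the vendored sources.

package main

import "fmt"

type T struct{ x int }

func fill(input []T) {
	for i := range input {
		input[i].x = i // ok: writes to the original element, not a copy
	}
}

func (t *T) set(v int) {
	t.x = v // ok: pointer receiver, so the write is visible to callers
}

func main() {
	ts := make([]T, 3)
	fill(ts)

	var t T
	t.set(42)

	fmt.Println(ts, t)
}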
- -For example: - - type T struct { x int } - func f(input []T) { - for i, v := range input { // v is a copy - v.x = i // unused write to field x - } - } - -Another example is about non-pointer receiver: - - type T struct { x int } - func (t T) f() { // t is a copy - t.x = i // unused write to field x - } -` +//go:embed doc.go +var doc string // Analyzer reports instances of writes to struct fields and arrays // that are never read. var Analyzer = &analysis.Analyzer{ Name: "unusedwrite", - Doc: Doc, + Doc: analysisutil.MustExtractDoc(doc, "unusedwrite"), + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/unusedwrite", Requires: []*analysis.Analyzer{buildssa.Analyzer}, Run: run, } diff --git a/vendor/golang.org/x/tools/go/analysis/validate.go b/vendor/golang.org/x/tools/go/analysis/validate.go index 9da5692af5..4f2c404562 100644 --- a/vendor/golang.org/x/tools/go/analysis/validate.go +++ b/vendor/golang.org/x/tools/go/analysis/validate.go @@ -19,6 +19,8 @@ import ( // that the Requires graph is acyclic; // that analyzer fact types are unique; // that each fact type is a pointer. +// +// Analyzer names need not be unique, though this may be confusing. func Validate(analyzers []*Analyzer) error { // Map each fact type to its sole generating analyzer. factTypes := make(map[reflect.Type]*Analyzer) diff --git a/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go b/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go index 9fa5aa192c..2c4c4e2328 100644 --- a/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go +++ b/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go @@ -11,8 +11,6 @@ import ( "go/ast" "go/token" "sort" - - "golang.org/x/tools/internal/typeparams" ) // PathEnclosingInterval returns the node that encloses the source @@ -322,7 +320,7 @@ func childrenOf(n ast.Node) []ast.Node { children = append(children, n.Recv) } children = append(children, n.Name) - if tparams := typeparams.ForFuncType(n.Type); tparams != nil { + if tparams := n.Type.TypeParams; tparams != nil { children = append(children, tparams) } if n.Type.Params != nil { @@ -377,7 +375,7 @@ func childrenOf(n ast.Node) []ast.Node { tok(n.Lbrack, len("[")), tok(n.Rbrack, len("]"))) - case *typeparams.IndexListExpr: + case *ast.IndexListExpr: children = append(children, tok(n.Lbrack, len("[")), tok(n.Rbrack, len("]"))) @@ -588,7 +586,7 @@ func NodeDescription(n ast.Node) string { return "decrement statement" case *ast.IndexExpr: return "index expression" - case *typeparams.IndexListExpr: + case *ast.IndexListExpr: return "index list expression" case *ast.InterfaceType: return "interface type" diff --git a/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go b/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go index f430b21b9b..58934f7663 100644 --- a/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go +++ b/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go @@ -9,8 +9,6 @@ import ( "go/ast" "reflect" "sort" - - "golang.org/x/tools/internal/typeparams" ) // An ApplyFunc is invoked by Apply for each node n, even if n is nil, @@ -252,7 +250,7 @@ func (a *application) apply(parent ast.Node, name string, iter *iterator, n ast. a.apply(n, "X", nil, n.X) a.apply(n, "Index", nil, n.Index) - case *typeparams.IndexListExpr: + case *ast.IndexListExpr: a.apply(n, "X", nil, n.X) a.applyList(n, "Indices") @@ -293,7 +291,7 @@ func (a *application) apply(parent ast.Node, name string, iter *iterator, n ast. 
a.apply(n, "Fields", nil, n.Fields) case *ast.FuncType: - if tparams := typeparams.ForFuncType(n); tparams != nil { + if tparams := n.TypeParams; tparams != nil { a.apply(n, "TypeParams", nil, tparams) } a.apply(n, "Params", nil, n.Params) @@ -408,7 +406,7 @@ func (a *application) apply(parent ast.Node, name string, iter *iterator, n ast. case *ast.TypeSpec: a.apply(n, "Doc", nil, n.Doc) a.apply(n, "Name", nil, n.Name) - if tparams := typeparams.ForTypeSpec(n); tparams != nil { + if tparams := n.TypeParams; tparams != nil { a.apply(n, "TypeParams", nil, tparams) } a.apply(n, "Type", nil, n.Type) diff --git a/vendor/golang.org/x/tools/go/ast/inspector/inspector.go b/vendor/golang.org/x/tools/go/ast/inspector/inspector.go index 3fbfebf369..1fc1de0bd1 100644 --- a/vendor/golang.org/x/tools/go/ast/inspector/inspector.go +++ b/vendor/golang.org/x/tools/go/ast/inspector/inspector.go @@ -64,8 +64,9 @@ type event struct { // depth-first order. It calls f(n) for each node n before it visits // n's children. // +// The complete traversal sequence is determined by ast.Inspect. // The types argument, if non-empty, enables type-based filtering of -// events. The function f if is called only for nodes whose type +// events. The function f is called only for nodes whose type // matches an element of the types slice. func (in *Inspector) Preorder(types []ast.Node, f func(ast.Node)) { // Because it avoids postorder calls to f, and the pruning @@ -97,6 +98,7 @@ func (in *Inspector) Preorder(types []ast.Node, f func(ast.Node)) { // of the non-nil children of the node, followed by a call of // f(n, false). // +// The complete traversal sequence is determined by ast.Inspect. // The types argument, if non-empty, enables type-based filtering of // events. The function f if is called only for nodes whose type // matches an element of the types slice. 
diff --git a/vendor/golang.org/x/tools/go/ast/inspector/typeof.go b/vendor/golang.org/x/tools/go/ast/inspector/typeof.go index 703c813954..2a872f89d4 100644 --- a/vendor/golang.org/x/tools/go/ast/inspector/typeof.go +++ b/vendor/golang.org/x/tools/go/ast/inspector/typeof.go @@ -12,8 +12,6 @@ package inspector import ( "go/ast" "math" - - "golang.org/x/tools/internal/typeparams" ) const ( @@ -171,7 +169,7 @@ func typeOf(n ast.Node) uint64 { return 1 << nIncDecStmt case *ast.IndexExpr: return 1 << nIndexExpr - case *typeparams.IndexListExpr: + case *ast.IndexListExpr: return 1 << nIndexListExpr case *ast.InterfaceType: return 1 << nInterfaceType diff --git a/vendor/golang.org/x/tools/go/buildutil/fakecontext.go b/vendor/golang.org/x/tools/go/buildutil/fakecontext.go index 15025f645f..763d18809b 100644 --- a/vendor/golang.org/x/tools/go/buildutil/fakecontext.go +++ b/vendor/golang.org/x/tools/go/buildutil/fakecontext.go @@ -8,7 +8,6 @@ import ( "fmt" "go/build" "io" - "io/ioutil" "os" "path" "path/filepath" @@ -76,7 +75,7 @@ func FakeContext(pkgs map[string]map[string]string) *build.Context { if !ok { return nil, fmt.Errorf("file not found: %s", filename) } - return ioutil.NopCloser(strings.NewReader(content)), nil + return io.NopCloser(strings.NewReader(content)), nil } ctxt.IsAbsPath = func(path string) bool { path = filepath.ToSlash(path) diff --git a/vendor/golang.org/x/tools/go/buildutil/overlay.go b/vendor/golang.org/x/tools/go/buildutil/overlay.go index bdbfd93147..7e371658d9 100644 --- a/vendor/golang.org/x/tools/go/buildutil/overlay.go +++ b/vendor/golang.org/x/tools/go/buildutil/overlay.go @@ -10,7 +10,6 @@ import ( "fmt" "go/build" "io" - "io/ioutil" "path/filepath" "strconv" "strings" @@ -33,7 +32,7 @@ func OverlayContext(orig *build.Context, overlay map[string][]byte) *build.Conte // TODO(dominikh): Implement IsDir, HasSubdir and ReadDir rc := func(data []byte) (io.ReadCloser, error) { - return ioutil.NopCloser(bytes.NewBuffer(data)), nil + return io.NopCloser(bytes.NewBuffer(data)), nil } copy := *orig // make a copy diff --git a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go index 165ede0f8f..03543bd4bb 100644 --- a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go +++ b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go @@ -128,15 +128,14 @@ func Read(in io.Reader, fset *token.FileSet, imports map[string]*types.Package, // (from "version"). Select appropriate importer. 
if len(data) > 0 { switch data[0] { - case 'i': - _, pkg, err := gcimporter.IImportData(fset, imports, data[1:], path) - return pkg, err + case 'v', 'c', 'd': // binary, till go1.10 + return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0]) - case 'v', 'c', 'd': - _, pkg, err := gcimporter.BImportData(fset, imports, data, path) + case 'i': // indexed, till go1.19 + _, pkg, err := gcimporter.IImportData(fset, imports, data[1:], path) return pkg, err - case 'u': + case 'u': // unified, from go1.20 _, pkg, err := gcimporter.UImportData(fset, imports, data[1:], path) return pkg, err diff --git a/vendor/golang.org/x/tools/go/internal/cgo/cgo.go b/vendor/golang.org/x/tools/go/internal/cgo/cgo.go index 3fce480034..697974bb9b 100644 --- a/vendor/golang.org/x/tools/go/internal/cgo/cgo.go +++ b/vendor/golang.org/x/tools/go/internal/cgo/cgo.go @@ -57,20 +57,18 @@ import ( "go/build" "go/parser" "go/token" - "io/ioutil" "log" "os" + "os/exec" "path/filepath" "regexp" "strings" - - exec "golang.org/x/sys/execabs" ) // ProcessFiles invokes the cgo preprocessor on bp.CgoFiles, parses // the output and returns the resulting ASTs. func ProcessFiles(bp *build.Package, fset *token.FileSet, DisplayPath func(path string) string, mode parser.Mode) ([]*ast.File, error) { - tmpdir, err := ioutil.TempDir("", strings.Replace(bp.ImportPath, "/", "_", -1)+"_C") + tmpdir, err := os.MkdirTemp("", strings.Replace(bp.ImportPath, "/", "_", -1)+"_C") if err != nil { return nil, err } diff --git a/vendor/golang.org/x/tools/go/internal/cgo/cgo_pkgconfig.go b/vendor/golang.org/x/tools/go/internal/cgo/cgo_pkgconfig.go index 7d94bbc1e5..b5bb95a63e 100644 --- a/vendor/golang.org/x/tools/go/internal/cgo/cgo_pkgconfig.go +++ b/vendor/golang.org/x/tools/go/internal/cgo/cgo_pkgconfig.go @@ -8,7 +8,7 @@ import ( "errors" "fmt" "go/build" - exec "golang.org/x/sys/execabs" + "os/exec" "strings" ) diff --git a/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go b/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go index 18a002f82a..333676b7cf 100644 --- a/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go +++ b/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go @@ -8,42 +8,46 @@ package packagesdriver import ( "context" "fmt" - "go/types" "strings" "golang.org/x/tools/internal/gocommand" ) -var debug = false - -func GetSizesGolist(ctx context.Context, inv gocommand.Invocation, gocmdRunner *gocommand.Runner) (types.Sizes, error) { +func GetSizesForArgsGolist(ctx context.Context, inv gocommand.Invocation, gocmdRunner *gocommand.Runner) (string, string, error) { inv.Verb = "list" inv.Args = []string{"-f", "{{context.GOARCH}} {{context.Compiler}}", "--", "unsafe"} stdout, stderr, friendlyErr, rawErr := gocmdRunner.RunRaw(ctx, inv) var goarch, compiler string if rawErr != nil { - if rawErrMsg := rawErr.Error(); strings.Contains(rawErrMsg, "cannot find main module") || strings.Contains(rawErrMsg, "go.mod file not found") { - // User's running outside of a module. All bets are off. Get GOARCH and guess compiler is gc. + rawErrMsg := rawErr.Error() + if strings.Contains(rawErrMsg, "cannot find main module") || + strings.Contains(rawErrMsg, "go.mod file not found") { + // User's running outside of a module. + // All bets are off. Get GOARCH and guess compiler is gc. // TODO(matloob): Is this a problem in practice? 
inv.Verb = "env" inv.Args = []string{"GOARCH"} envout, enverr := gocmdRunner.Run(ctx, inv) if enverr != nil { - return nil, enverr + return "", "", enverr } goarch = strings.TrimSpace(envout.String()) compiler = "gc" + } else if friendlyErr != nil { + return "", "", friendlyErr } else { - return nil, friendlyErr + // This should be unreachable, but be defensive + // in case RunRaw's error results are inconsistent. + return "", "", rawErr } } else { fields := strings.Fields(stdout.String()) if len(fields) < 2 { - return nil, fmt.Errorf("could not parse GOARCH and Go compiler in format \" \":\nstdout: <<%s>>\nstderr: <<%s>>", + return "", "", fmt.Errorf("could not parse GOARCH and Go compiler in format \" \":\nstdout: <<%s>>\nstderr: <<%s>>", stdout.String(), stderr.String()) } goarch = fields[0] compiler = fields[1] } - return types.SizesFor(compiler, goarch), nil + return compiler, goarch, nil } diff --git a/vendor/golang.org/x/tools/go/loader/loader.go b/vendor/golang.org/x/tools/go/loader/loader.go index edf62c2cc0..013c0f505b 100644 --- a/vendor/golang.org/x/tools/go/loader/loader.go +++ b/vendor/golang.org/x/tools/go/loader/loader.go @@ -23,7 +23,7 @@ import ( "golang.org/x/tools/go/ast/astutil" "golang.org/x/tools/go/internal/cgo" - "golang.org/x/tools/internal/typeparams" + "golang.org/x/tools/internal/versions" ) var ignoreVendor build.ImportMode @@ -1033,13 +1033,14 @@ func (imp *importer) newPackageInfo(path, dir string) *PackageInfo { Defs: make(map[*ast.Ident]types.Object), Uses: make(map[*ast.Ident]types.Object), Implicits: make(map[ast.Node]types.Object), + Instances: make(map[*ast.Ident]types.Instance), Scopes: make(map[ast.Node]*types.Scope), Selections: make(map[*ast.SelectorExpr]*types.Selection), }, errorFunc: imp.conf.TypeChecker.Error, dir: dir, } - typeparams.InitInstanceInfo(&info.Info) + versions.InitFileVersions(&info.Info) // Copy the types.Config so we can vary it across PackageInfos. tc := imp.conf.TypeChecker diff --git a/vendor/golang.org/x/tools/go/packages/doc.go b/vendor/golang.org/x/tools/go/packages/doc.go index da4ab89fe6..b2a0b7c6a6 100644 --- a/vendor/golang.org/x/tools/go/packages/doc.go +++ b/vendor/golang.org/x/tools/go/packages/doc.go @@ -5,12 +5,32 @@ /* Package packages loads Go packages for inspection and analysis. -The Load function takes as input a list of patterns and return a list of Package -structs describing individual packages matched by those patterns. -The LoadMode controls the amount of detail in the loaded packages. - -Load passes most patterns directly to the underlying build tool, -but all patterns with the prefix "query=", where query is a +The [Load] function takes as input a list of patterns and returns a +list of [Package] values describing individual packages matched by those +patterns. +A [Config] specifies configuration options, the most important of which is +the [LoadMode], which controls the amount of detail in the loaded packages. + +Load passes most patterns directly to the underlying build tool. +The default build tool is the go command. +Its supported patterns are described at +https://pkg.go.dev/cmd/go#hdr-Package_lists_and_patterns. + +Load may be used in Go projects that use alternative build systems, by +installing an appropriate "driver" program for the build system and +specifying its location in the GOPACKAGESDRIVER environment variable. +For example, +https://github.com/bazelbuild/rules_go/wiki/Editor-and-tool-integration +explains how to use the driver for Bazel. 
+The driver program is responsible for interpreting patterns in its +preferred notation and reporting information about the packages that +they identify. +(See driverRequest and driverResponse types for the JSON +schema used by the protocol. +Though the protocol is supported, these types are currently unexported; +see #64608 for a proposal to publish them.) + +Regardless of driver, all patterns with the prefix "query=", where query is a non-empty string of letters from [a-z], are reserved and may be interpreted as query operators. @@ -35,7 +55,7 @@ The Package struct provides basic information about the package, including - Imports, a map from source import strings to the Packages they name; - Types, the type information for the package's exported symbols; - Syntax, the parsed syntax trees for the package's source code; and - - TypeInfo, the result of a complete type-check of the package syntax trees. + - TypesInfo, the result of a complete type-check of the package syntax trees. (See the documentation for type Package for the complete list of fields and more detailed descriptions.) @@ -64,7 +84,7 @@ reported about the loaded packages. See the documentation for type LoadMode for details. Most tools should pass their command-line arguments (after any flags) -uninterpreted to the loader, so that the loader can interpret them +uninterpreted to [Load], so that it can interpret them according to the conventions of the underlying build system. See the Example function for typical usage. */ diff --git a/vendor/golang.org/x/tools/go/packages/external.go b/vendor/golang.org/x/tools/go/packages/external.go index 7242a0a7d2..7db1d1293a 100644 --- a/vendor/golang.org/x/tools/go/packages/external.go +++ b/vendor/golang.org/x/tools/go/packages/external.go @@ -12,8 +12,8 @@ import ( "bytes" "encoding/json" "fmt" - exec "golang.org/x/sys/execabs" "os" + "os/exec" "strings" ) diff --git a/vendor/golang.org/x/tools/go/packages/golist.go b/vendor/golang.org/x/tools/go/packages/golist.go index 6bb7168d2e..cd375fbc3c 100644 --- a/vendor/golang.org/x/tools/go/packages/golist.go +++ b/vendor/golang.org/x/tools/go/packages/golist.go @@ -9,10 +9,9 @@ import ( "context" "encoding/json" "fmt" - "go/types" - "io/ioutil" "log" "os" + "os/exec" "path" "path/filepath" "reflect" @@ -22,7 +21,6 @@ import ( "sync" "unicode" - exec "golang.org/x/sys/execabs" "golang.org/x/tools/go/internal/packagesdriver" "golang.org/x/tools/internal/gocommand" "golang.org/x/tools/internal/packagesinternal" @@ -153,10 +151,10 @@ func goListDriver(cfg *Config, patterns ...string) (*driverResponse, error) { if cfg.Mode&NeedTypesSizes != 0 || cfg.Mode&NeedTypes != 0 { sizeswg.Add(1) go func() { - var sizes types.Sizes - sizes, sizeserr = packagesdriver.GetSizesGolist(ctx, state.cfgInvocation(), cfg.gocmdRunner) - // types.SizesFor always returns nil or a *types.StdSizes. - response.dr.Sizes, _ = sizes.(*types.StdSizes) + compiler, arch, err := packagesdriver.GetSizesForArgsGolist(ctx, state.cfgInvocation(), cfg.gocmdRunner) + sizeserr = err + response.dr.Compiler = compiler + response.dr.Arch = arch sizeswg.Done() }() } @@ -210,62 +208,6 @@ extractQueries: } } - // Only use go/packages' overlay processing if we're using a Go version - // below 1.16. Otherwise, go list handles it. 
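A hedged sketch of a typical go/packages Load call along the lines of the revised package documentation above; the pattern "./..." and the chosen mode bits are examples, not part of the vendored sources.

package main

import (
	"fmt"
	"log"

	"golang.org/x/tools/go/packages"
)

func main() {
	cfg := &packages.Config{
		// Request only what is needed; more detail costs more load time.
		Mode: packages.NeedName | packages.NeedFiles | packages.NeedTypes,
	}
	pkgs, err := packages.Load(cfg, "./...")
	if err != nil {
		log.Fatal(err)
	}
	if packages.PrintErrors(pkgs) > 0 {
		log.Fatal("packages contain errors")
	}
	for _, pkg := range pkgs {
		fmt.Println(pkg.ID, pkg.Name)
	}
}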
- if goVersion, err := state.getGoVersion(); err == nil && goVersion < 16 { - modifiedPkgs, needPkgs, err := state.processGolistOverlay(response) - if err != nil { - return nil, err - } - - var containsCandidates []string - if len(containFiles) > 0 { - containsCandidates = append(containsCandidates, modifiedPkgs...) - containsCandidates = append(containsCandidates, needPkgs...) - } - if err := state.addNeededOverlayPackages(response, needPkgs); err != nil { - return nil, err - } - // Check candidate packages for containFiles. - if len(containFiles) > 0 { - for _, id := range containsCandidates { - pkg, ok := response.seenPackages[id] - if !ok { - response.addPackage(&Package{ - ID: id, - Errors: []Error{{ - Kind: ListError, - Msg: fmt.Sprintf("package %s expected but not seen", id), - }}, - }) - continue - } - for _, f := range containFiles { - for _, g := range pkg.GoFiles { - if sameFile(f, g) { - response.addRoot(id) - } - } - } - } - } - // Add root for any package that matches a pattern. This applies only to - // packages that are modified by overlays, since they are not added as - // roots automatically. - for _, pattern := range restPatterns { - match := matchPattern(pattern) - for _, pkgID := range modifiedPkgs { - pkg, ok := response.seenPackages[pkgID] - if !ok { - continue - } - if match(pkg.PkgPath) { - response.addRoot(pkg.ID) - } - } - } - } - sizeswg.Wait() if sizeserr != nil { return nil, sizeserr @@ -273,24 +215,6 @@ extractQueries: return response.dr, nil } -func (state *golistState) addNeededOverlayPackages(response *responseDeduper, pkgs []string) error { - if len(pkgs) == 0 { - return nil - } - dr, err := state.createDriverResponse(pkgs...) - if err != nil { - return err - } - for _, pkg := range dr.Packages { - response.addPackage(pkg) - } - _, needPkgs, err := state.processGolistOverlay(response) - if err != nil { - return err - } - return state.addNeededOverlayPackages(response, needPkgs) -} - func (state *golistState) runContainsQueries(response *responseDeduper, queries []string) error { for _, query := range queries { // TODO(matloob): Do only one query per directory. @@ -625,7 +549,12 @@ func (state *golistState) createDriverResponse(words ...string) (*driverResponse } if pkg.PkgPath == "unsafe" { - pkg.GoFiles = nil // ignore fake unsafe.go file + pkg.CompiledGoFiles = nil // ignore fake unsafe.go file (#59929) + } else if len(pkg.CompiledGoFiles) == 0 { + // Work around for pre-go.1.11 versions of go list. + // TODO(matloob): they should be handled by the fallback. + // Can we delete this? + pkg.CompiledGoFiles = pkg.GoFiles } // Assume go list emits only absolute paths for Dir. @@ -663,16 +592,12 @@ func (state *golistState) createDriverResponse(words ...string) (*driverResponse response.Roots = append(response.Roots, pkg.ID) } - // Work around for pre-go.1.11 versions of go list. - // TODO(matloob): they should be handled by the fallback. - // Can we delete this? - if len(pkg.CompiledGoFiles) == 0 { - pkg.CompiledGoFiles = pkg.GoFiles - } - // Temporary work-around for golang/go#39986. Parse filenames out of // error messages. This happens if there are unrecoverable syntax // errors in the source, so we can't match on a specific error message. + // + // TODO(rfindley): remove this heuristic, in favor of considering + // InvalidGoFiles from the list driver. 
if err := p.Error; err != nil && state.shouldAddFilenameFromError(p) { addFilenameFromPos := func(pos string) bool { split := strings.Split(pos, ":") @@ -891,6 +816,15 @@ func golistargs(cfg *Config, words []string, goVersion int) []string { // probably because you'd just get the TestMain. fmt.Sprintf("-find=%t", !cfg.Tests && cfg.Mode&findFlags == 0 && !usesExportData(cfg)), } + + // golang/go#60456: with go1.21 and later, go list serves pgo variants, which + // can be costly to compute and may result in redundant processing for the + // caller. Disable these variants. If someone wants to add e.g. a NeedPGO + // mode flag, that should be a separate proposal. + if goVersion >= 21 { + fullargs = append(fullargs, "-pgo=off") + } + fullargs = append(fullargs, cfg.BuildFlags...) fullargs = append(fullargs, "--") fullargs = append(fullargs, words...) @@ -1100,7 +1034,7 @@ func (state *golistState) writeOverlays() (filename string, cleanup func(), err if len(state.cfg.Overlay) == 0 { return "", func() {}, nil } - dir, err := ioutil.TempDir("", "gopackages-*") + dir, err := os.MkdirTemp("", "gopackages-*") if err != nil { return "", nil, err } @@ -1119,7 +1053,7 @@ func (state *golistState) writeOverlays() (filename string, cleanup func(), err // Create a unique filename for the overlaid files, to avoid // creating nested directories. noSeparator := strings.Join(strings.Split(filepath.ToSlash(k), "/"), "") - f, err := ioutil.TempFile(dir, fmt.Sprintf("*-%s", noSeparator)) + f, err := os.CreateTemp(dir, fmt.Sprintf("*-%s", noSeparator)) if err != nil { return "", func() {}, err } @@ -1137,7 +1071,7 @@ func (state *golistState) writeOverlays() (filename string, cleanup func(), err } // Write out the overlay file that contains the filepath mappings. filename = filepath.Join(dir, "overlay.json") - if err := ioutil.WriteFile(filename, b, 0665); err != nil { + if err := os.WriteFile(filename, b, 0665); err != nil { return "", func() {}, err } return filename, cleanup, nil diff --git a/vendor/golang.org/x/tools/go/packages/golist_overlay.go b/vendor/golang.org/x/tools/go/packages/golist_overlay.go index 9576b472f9..d823c474ad 100644 --- a/vendor/golang.org/x/tools/go/packages/golist_overlay.go +++ b/vendor/golang.org/x/tools/go/packages/golist_overlay.go @@ -6,314 +6,11 @@ package packages import ( "encoding/json" - "fmt" - "go/parser" - "go/token" - "os" "path/filepath" - "regexp" - "sort" - "strconv" - "strings" "golang.org/x/tools/internal/gocommand" ) -// processGolistOverlay provides rudimentary support for adding -// files that don't exist on disk to an overlay. The results can be -// sometimes incorrect. -// TODO(matloob): Handle unsupported cases, including the following: -// - determining the correct package to add given a new import path -func (state *golistState) processGolistOverlay(response *responseDeduper) (modifiedPkgs, needPkgs []string, err error) { - havePkgs := make(map[string]string) // importPath -> non-test package ID - needPkgsSet := make(map[string]bool) - modifiedPkgsSet := make(map[string]bool) - - pkgOfDir := make(map[string][]*Package) - for _, pkg := range response.dr.Packages { - // This is an approximation of import path to id. This can be - // wrong for tests, vendored packages, and a number of other cases. - havePkgs[pkg.PkgPath] = pkg.ID - dir, err := commonDir(pkg.GoFiles) - if err != nil { - return nil, nil, err - } - if dir != "" { - pkgOfDir[dir] = append(pkgOfDir[dir], pkg) - } - } - - // If no new imports are added, it is safe to avoid loading any needPkgs. 
- // Otherwise, it's hard to tell which package is actually being loaded - // (due to vendoring) and whether any modified package will show up - // in the transitive set of dependencies (because new imports are added, - // potentially modifying the transitive set of dependencies). - var overlayAddsImports bool - - // If both a package and its test package are created by the overlay, we - // need the real package first. Process all non-test files before test - // files, and make the whole process deterministic while we're at it. - var overlayFiles []string - for opath := range state.cfg.Overlay { - overlayFiles = append(overlayFiles, opath) - } - sort.Slice(overlayFiles, func(i, j int) bool { - iTest := strings.HasSuffix(overlayFiles[i], "_test.go") - jTest := strings.HasSuffix(overlayFiles[j], "_test.go") - if iTest != jTest { - return !iTest // non-tests are before tests. - } - return overlayFiles[i] < overlayFiles[j] - }) - for _, opath := range overlayFiles { - contents := state.cfg.Overlay[opath] - base := filepath.Base(opath) - dir := filepath.Dir(opath) - var pkg *Package // if opath belongs to both a package and its test variant, this will be the test variant - var testVariantOf *Package // if opath is a test file, this is the package it is testing - var fileExists bool - isTestFile := strings.HasSuffix(opath, "_test.go") - pkgName, ok := extractPackageName(opath, contents) - if !ok { - // Don't bother adding a file that doesn't even have a parsable package statement - // to the overlay. - continue - } - // If all the overlay files belong to a different package, change the - // package name to that package. - maybeFixPackageName(pkgName, isTestFile, pkgOfDir[dir]) - nextPackage: - for _, p := range response.dr.Packages { - if pkgName != p.Name && p.ID != "command-line-arguments" { - continue - } - for _, f := range p.GoFiles { - if !sameFile(filepath.Dir(f), dir) { - continue - } - // Make sure to capture information on the package's test variant, if needed. - if isTestFile && !hasTestFiles(p) { - // TODO(matloob): Are there packages other than the 'production' variant - // of a package that this can match? This shouldn't match the test main package - // because the file is generated in another directory. - testVariantOf = p - continue nextPackage - } else if !isTestFile && hasTestFiles(p) { - // We're examining a test variant, but the overlaid file is - // a non-test file. Because the overlay implementation - // (currently) only adds a file to one package, skip this - // package, so that we can add the file to the production - // variant of the package. (https://golang.org/issue/36857 - // tracks handling overlays on both the production and test - // variant of a package). - continue nextPackage - } - if pkg != nil && p != pkg && pkg.PkgPath == p.PkgPath { - // We have already seen the production version of the - // for which p is a test variant. - if hasTestFiles(p) { - testVariantOf = pkg - } - } - pkg = p - if filepath.Base(f) == base { - fileExists = true - } - } - } - // The overlay could have included an entirely new package or an - // ad-hoc package. An ad-hoc package is one that we have manually - // constructed from inadequate `go list` results for a file= query. - // It will have the ID command-line-arguments. - if pkg == nil || pkg.ID == "command-line-arguments" { - // Try to find the module or gopath dir the file is contained in. - // Then for modules, add the module opath to the beginning. 
- pkgPath, ok, err := state.getPkgPath(dir) - if err != nil { - return nil, nil, err - } - if !ok { - break - } - var forTest string // only set for x tests - isXTest := strings.HasSuffix(pkgName, "_test") - if isXTest { - forTest = pkgPath - pkgPath += "_test" - } - id := pkgPath - if isTestFile { - if isXTest { - id = fmt.Sprintf("%s [%s.test]", pkgPath, forTest) - } else { - id = fmt.Sprintf("%s [%s.test]", pkgPath, pkgPath) - } - } - if pkg != nil { - // TODO(rstambler): We should change the package's path and ID - // here. The only issue is that this messes with the roots. - } else { - // Try to reclaim a package with the same ID, if it exists in the response. - for _, p := range response.dr.Packages { - if reclaimPackage(p, id, opath, contents) { - pkg = p - break - } - } - // Otherwise, create a new package. - if pkg == nil { - pkg = &Package{ - PkgPath: pkgPath, - ID: id, - Name: pkgName, - Imports: make(map[string]*Package), - } - response.addPackage(pkg) - havePkgs[pkg.PkgPath] = id - // Add the production package's sources for a test variant. - if isTestFile && !isXTest && testVariantOf != nil { - pkg.GoFiles = append(pkg.GoFiles, testVariantOf.GoFiles...) - pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, testVariantOf.CompiledGoFiles...) - // Add the package under test and its imports to the test variant. - pkg.forTest = testVariantOf.PkgPath - for k, v := range testVariantOf.Imports { - pkg.Imports[k] = &Package{ID: v.ID} - } - } - if isXTest { - pkg.forTest = forTest - } - } - } - } - if !fileExists { - pkg.GoFiles = append(pkg.GoFiles, opath) - // TODO(matloob): Adding the file to CompiledGoFiles can exhibit the wrong behavior - // if the file will be ignored due to its build tags. - pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, opath) - modifiedPkgsSet[pkg.ID] = true - } - imports, err := extractImports(opath, contents) - if err != nil { - // Let the parser or type checker report errors later. - continue - } - for _, imp := range imports { - // TODO(rstambler): If the package is an x test and the import has - // a test variant, make sure to replace it. - if _, found := pkg.Imports[imp]; found { - continue - } - overlayAddsImports = true - id, ok := havePkgs[imp] - if !ok { - var err error - id, err = state.resolveImport(dir, imp) - if err != nil { - return nil, nil, err - } - } - pkg.Imports[imp] = &Package{ID: id} - // Add dependencies to the non-test variant version of this package as well. - if testVariantOf != nil { - testVariantOf.Imports[imp] = &Package{ID: id} - } - } - } - - // toPkgPath guesses the package path given the id. - toPkgPath := func(sourceDir, id string) (string, error) { - if i := strings.IndexByte(id, ' '); i >= 0 { - return state.resolveImport(sourceDir, id[:i]) - } - return state.resolveImport(sourceDir, id) - } - - // Now that new packages have been created, do another pass to determine - // the new set of missing packages. 
- for _, pkg := range response.dr.Packages { - for _, imp := range pkg.Imports { - if len(pkg.GoFiles) == 0 { - return nil, nil, fmt.Errorf("cannot resolve imports for package %q with no Go files", pkg.PkgPath) - } - pkgPath, err := toPkgPath(filepath.Dir(pkg.GoFiles[0]), imp.ID) - if err != nil { - return nil, nil, err - } - if _, ok := havePkgs[pkgPath]; !ok { - needPkgsSet[pkgPath] = true - } - } - } - - if overlayAddsImports { - needPkgs = make([]string, 0, len(needPkgsSet)) - for pkg := range needPkgsSet { - needPkgs = append(needPkgs, pkg) - } - } - modifiedPkgs = make([]string, 0, len(modifiedPkgsSet)) - for pkg := range modifiedPkgsSet { - modifiedPkgs = append(modifiedPkgs, pkg) - } - return modifiedPkgs, needPkgs, err -} - -// resolveImport finds the ID of a package given its import path. -// In particular, it will find the right vendored copy when in GOPATH mode. -func (state *golistState) resolveImport(sourceDir, importPath string) (string, error) { - env, err := state.getEnv() - if err != nil { - return "", err - } - if env["GOMOD"] != "" { - return importPath, nil - } - - searchDir := sourceDir - for { - vendorDir := filepath.Join(searchDir, "vendor") - exists, ok := state.vendorDirs[vendorDir] - if !ok { - info, err := os.Stat(vendorDir) - exists = err == nil && info.IsDir() - state.vendorDirs[vendorDir] = exists - } - - if exists { - vendoredPath := filepath.Join(vendorDir, importPath) - if info, err := os.Stat(vendoredPath); err == nil && info.IsDir() { - // We should probably check for .go files here, but shame on anyone who fools us. - path, ok, err := state.getPkgPath(vendoredPath) - if err != nil { - return "", err - } - if ok { - return path, nil - } - } - } - - // We know we've hit the top of the filesystem when we Dir / and get /, - // or C:\ and get C:\, etc. - next := filepath.Dir(searchDir) - if next == searchDir { - break - } - searchDir = next - } - return importPath, nil -} - -func hasTestFiles(p *Package) bool { - for _, f := range p.GoFiles { - if strings.HasSuffix(f, "_test.go") { - return true - } - } - return false -} - // determineRootDirs returns a mapping from absolute directories that could // contain code to their corresponding import path prefixes. func (state *golistState) determineRootDirs() (map[string]string, error) { @@ -384,192 +81,3 @@ func (state *golistState) determineRootDirsGOPATH() (map[string]string, error) { } return m, nil } - -func extractImports(filename string, contents []byte) ([]string, error) { - f, err := parser.ParseFile(token.NewFileSet(), filename, contents, parser.ImportsOnly) // TODO(matloob): reuse fileset? - if err != nil { - return nil, err - } - var res []string - for _, imp := range f.Imports { - quotedPath := imp.Path.Value - path, err := strconv.Unquote(quotedPath) - if err != nil { - return nil, err - } - res = append(res, path) - } - return res, nil -} - -// reclaimPackage attempts to reuse a package that failed to load in an overlay. -// -// If the package has errors and has no Name, GoFiles, or Imports, -// then it's possible that it doesn't yet exist on disk. -func reclaimPackage(pkg *Package, id string, filename string, contents []byte) bool { - // TODO(rstambler): Check the message of the actual error? - // It differs between $GOPATH and module mode. 
- if pkg.ID != id { - return false - } - if len(pkg.Errors) != 1 { - return false - } - if pkg.Name != "" || pkg.ExportFile != "" { - return false - } - if len(pkg.GoFiles) > 0 || len(pkg.CompiledGoFiles) > 0 || len(pkg.OtherFiles) > 0 { - return false - } - if len(pkg.Imports) > 0 { - return false - } - pkgName, ok := extractPackageName(filename, contents) - if !ok { - return false - } - pkg.Name = pkgName - pkg.Errors = nil - return true -} - -func extractPackageName(filename string, contents []byte) (string, bool) { - // TODO(rstambler): Check the message of the actual error? - // It differs between $GOPATH and module mode. - f, err := parser.ParseFile(token.NewFileSet(), filename, contents, parser.PackageClauseOnly) // TODO(matloob): reuse fileset? - if err != nil { - return "", false - } - return f.Name.Name, true -} - -// commonDir returns the directory that all files are in, "" if files is empty, -// or an error if they aren't in the same directory. -func commonDir(files []string) (string, error) { - seen := make(map[string]bool) - for _, f := range files { - seen[filepath.Dir(f)] = true - } - if len(seen) > 1 { - return "", fmt.Errorf("files (%v) are in more than one directory: %v", files, seen) - } - for k := range seen { - // seen has only one element; return it. - return k, nil - } - return "", nil // no files -} - -// It is possible that the files in the disk directory dir have a different package -// name from newName, which is deduced from the overlays. If they all have a different -// package name, and they all have the same package name, then that name becomes -// the package name. -// It returns true if it changes the package name, false otherwise. -func maybeFixPackageName(newName string, isTestFile bool, pkgsOfDir []*Package) { - names := make(map[string]int) - for _, p := range pkgsOfDir { - names[p.Name]++ - } - if len(names) != 1 { - // some files are in different packages - return - } - var oldName string - for k := range names { - oldName = k - } - if newName == oldName { - return - } - // We might have a case where all of the package names in the directory are - // the same, but the overlay file is for an x test, which belongs to its - // own package. If the x test does not yet exist on disk, we may not yet - // have its package name on disk, but we should not rename the packages. - // - // We use a heuristic to determine if this file belongs to an x test: - // The test file should have a package name whose package name has a _test - // suffix or looks like "newName_test". - maybeXTest := strings.HasPrefix(oldName+"_test", newName) || strings.HasSuffix(newName, "_test") - if isTestFile && maybeXTest { - return - } - for _, p := range pkgsOfDir { - p.Name = newName - } -} - -// This function is copy-pasted from -// https://github.com/golang/go/blob/9706f510a5e2754595d716bd64be8375997311fb/src/cmd/go/internal/search/search.go#L360. -// It should be deleted when we remove support for overlays from go/packages. -// -// NOTE: This does not handle any ./... or ./ style queries, as this function -// doesn't know the working directory. -// -// matchPattern(pattern)(name) reports whether -// name matches pattern. Pattern is a limited glob -// pattern in which '...' means 'any string' and there -// is no other special syntax. -// Unfortunately, there are two special cases. Quoting "go help packages": -// -// First, /... at the end of the pattern can match an empty string, -// so that net/... matches both net and packages in its subdirectories, like net/http. 
-// Second, any slash-separated pattern element containing a wildcard never -// participates in a match of the "vendor" element in the path of a vendored -// package, so that ./... does not match packages in subdirectories of -// ./vendor or ./mycode/vendor, but ./vendor/... and ./mycode/vendor/... do. -// Note, however, that a directory named vendor that itself contains code -// is not a vendored package: cmd/vendor would be a command named vendor, -// and the pattern cmd/... matches it. -func matchPattern(pattern string) func(name string) bool { - // Convert pattern to regular expression. - // The strategy for the trailing /... is to nest it in an explicit ? expression. - // The strategy for the vendor exclusion is to change the unmatchable - // vendor strings to a disallowed code point (vendorChar) and to use - // "(anything but that codepoint)*" as the implementation of the ... wildcard. - // This is a bit complicated but the obvious alternative, - // namely a hand-written search like in most shell glob matchers, - // is too easy to make accidentally exponential. - // Using package regexp guarantees linear-time matching. - - const vendorChar = "\x00" - - if strings.Contains(pattern, vendorChar) { - return func(name string) bool { return false } - } - - re := regexp.QuoteMeta(pattern) - re = replaceVendor(re, vendorChar) - switch { - case strings.HasSuffix(re, `/`+vendorChar+`/\.\.\.`): - re = strings.TrimSuffix(re, `/`+vendorChar+`/\.\.\.`) + `(/vendor|/` + vendorChar + `/\.\.\.)` - case re == vendorChar+`/\.\.\.`: - re = `(/vendor|/` + vendorChar + `/\.\.\.)` - case strings.HasSuffix(re, `/\.\.\.`): - re = strings.TrimSuffix(re, `/\.\.\.`) + `(/\.\.\.)?` - } - re = strings.ReplaceAll(re, `\.\.\.`, `[^`+vendorChar+`]*`) - - reg := regexp.MustCompile(`^` + re + `$`) - - return func(name string) bool { - if strings.Contains(name, vendorChar) { - return false - } - return reg.MatchString(replaceVendor(name, vendorChar)) - } -} - -// replaceVendor returns the result of replacing -// non-trailing vendor path elements in x with repl. -func replaceVendor(x, repl string) string { - if !strings.Contains(x, "vendor") { - return x - } - elem := strings.Split(x, "/") - for i := 0; i < len(elem)-1; i++ { - if elem[i] == "vendor" { - elem[i] = repl - } - } - return strings.Join(elem, "/") -} diff --git a/vendor/golang.org/x/tools/go/packages/packages.go b/vendor/golang.org/x/tools/go/packages/packages.go index 0f1505b808..81e9e6a727 100644 --- a/vendor/golang.org/x/tools/go/packages/packages.go +++ b/vendor/golang.org/x/tools/go/packages/packages.go @@ -16,7 +16,6 @@ import ( "go/token" "go/types" "io" - "io/ioutil" "log" "os" "path/filepath" @@ -28,8 +27,8 @@ import ( "golang.org/x/tools/go/gcexportdata" "golang.org/x/tools/internal/gocommand" "golang.org/x/tools/internal/packagesinternal" - "golang.org/x/tools/internal/typeparams" "golang.org/x/tools/internal/typesinternal" + "golang.org/x/tools/internal/versions" ) // A LoadMode controls the amount of detail to return when loading. @@ -220,8 +219,10 @@ type driverResponse struct { // lists of multiple drivers, go/packages will fall back to the next driver. NotHandled bool - // Sizes, if not nil, is the types.Sizes to use when type checking. - Sizes *types.StdSizes + // Compiler and Arch are the arguments pass of types.SizesFor + // to get a types.Sizes to use when type checking. + Compiler string + Arch string // Roots is the set of package IDs that make up the root packages. 
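The driverResponse now carries Compiler and Arch instead of a serialized *types.StdSizes; the Load changes below reconstruct a types.Sizes with types.SizesFor and fall back to the host configuration when an external driver leaves the fields empty. A minimal standalone illustration of that call, independent of this patch:

```go
package main

import (
	"fmt"
	"go/types"
	"runtime"
)

func main() {
	// types.SizesFor returns nil for unknown (compiler, arch) pairs, which is
	// why Load falls back when an external driver omits Compiler/Arch.
	sizes := types.SizesFor("gc", runtime.GOARCH)
	if sizes == nil { // e.g. a gccgo-only architecture
		sizes = types.SizesFor("gc", "amd64")
	}
	fmt.Println(sizes.Sizeof(types.Typ[types.Int]), sizes.Alignof(types.Typ[types.Int64]))
}
```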
// We have to encode this separately because when we encode a single package @@ -257,31 +258,52 @@ type driverResponse struct { // proceeding with further analysis. The PrintErrors function is // provided for convenient display of all errors. func Load(cfg *Config, patterns ...string) ([]*Package, error) { - l := newLoader(cfg) - response, err := defaultDriver(&l.Config, patterns...) + ld := newLoader(cfg) + response, external, err := defaultDriver(&ld.Config, patterns...) if err != nil { return nil, err } - l.sizes = response.Sizes - return l.refine(response) + + ld.sizes = types.SizesFor(response.Compiler, response.Arch) + if ld.sizes == nil && ld.Config.Mode&(NeedTypes|NeedTypesSizes|NeedTypesInfo) != 0 { + // Type size information is needed but unavailable. + if external { + // An external driver may fail to populate the Compiler/GOARCH fields, + // especially since they are relatively new (see #63700). + // Provide a sensible fallback in this case. + ld.sizes = types.SizesFor("gc", runtime.GOARCH) + if ld.sizes == nil { // gccgo-only arch + ld.sizes = types.SizesFor("gc", "amd64") + } + } else { + // Go list should never fail to deliver accurate size information. + // Reject the whole Load since the error is the same for every package. + return nil, fmt.Errorf("can't determine type sizes for compiler %q on GOARCH %q", + response.Compiler, response.Arch) + } + } + + return ld.refine(response) } // defaultDriver is a driver that implements go/packages' fallback behavior. // It will try to request to an external driver, if one exists. If there's // no external driver, or the driver returns a response with NotHandled set, // defaultDriver will fall back to the go list driver. -func defaultDriver(cfg *Config, patterns ...string) (*driverResponse, error) { - driver := findExternalDriver(cfg) - if driver == nil { - driver = goListDriver - } - response, err := driver(cfg, patterns...) - if err != nil { - return response, err - } else if response.NotHandled { - return goListDriver(cfg, patterns...) +// The boolean result indicates that an external driver handled the request. +func defaultDriver(cfg *Config, patterns ...string) (*driverResponse, bool, error) { + if driver := findExternalDriver(cfg); driver != nil { + response, err := driver(cfg, patterns...) + if err != nil { + return nil, false, err + } else if !response.NotHandled { + return response, true, nil + } + // (fall through) } - return response, nil + + response, err := goListDriver(cfg, patterns...) + return response, false, err } // A Package describes a loaded Go package. @@ -308,6 +330,9 @@ type Package struct { TypeErrors []types.Error // GoFiles lists the absolute file paths of the package's Go source files. + // It may include files that should not be compiled, for example because + // they contain non-matching build tags, are documentary pseudo-files such as + // unsafe/unsafe.go or builtin/builtin.go, or are subject to cgo preprocessing. 
GoFiles []string // CompiledGoFiles lists the absolute file paths of the package's source @@ -407,12 +432,6 @@ func init() { packagesinternal.GetDepsErrors = func(p interface{}) []*packagesinternal.PackageError { return p.(*Package).depsErrors } - packagesinternal.GetGoCmdRunner = func(config interface{}) *gocommand.Runner { - return config.(*Config).gocmdRunner - } - packagesinternal.SetGoCmdRunner = func(config interface{}, runner *gocommand.Runner) { - config.(*Config).gocmdRunner = runner - } packagesinternal.SetModFile = func(config interface{}, value string) { config.(*Config).modFile = value } @@ -549,7 +568,7 @@ type loaderPackage struct { type loader struct { pkgs map[string]*loaderPackage Config - sizes types.Sizes + sizes types.Sizes // non-nil if needed by mode parseCache map[string]*parseValue parseCacheMu sync.Mutex exportMu sync.Mutex // enforces mutual exclusion of exportdata operations @@ -627,7 +646,7 @@ func newLoader(cfg *Config) *loader { return ld } -// refine connects the supplied packages into a graph and then adds type and +// refine connects the supplied packages into a graph and then adds type // and syntax information as requested by the LoadMode. func (ld *loader) refine(response *driverResponse) ([]*Package, error) { roots := response.Roots @@ -674,39 +693,38 @@ func (ld *loader) refine(response *driverResponse) ([]*Package, error) { } } - // Materialize the import graph. - - const ( - white = 0 // new - grey = 1 // in progress - black = 2 // complete - ) - - // visit traverses the import graph, depth-first, - // and materializes the graph as Packages.Imports. - // - // Valid imports are saved in the Packages.Import map. - // Invalid imports (cycles and missing nodes) are saved in the importErrors map. - // Thus, even in the presence of both kinds of errors, the Import graph remains a DAG. - // - // visit returns whether the package needs src or has a transitive - // dependency on a package that does. These are the only packages - // for which we load source code. - var stack []*loaderPackage - var visit func(lpkg *loaderPackage) bool - var srcPkgs []*loaderPackage - visit = func(lpkg *loaderPackage) bool { - switch lpkg.color { - case black: - return lpkg.needsrc - case grey: - panic("internal error: grey node") - } - lpkg.color = grey - stack = append(stack, lpkg) // push - stubs := lpkg.Imports // the structure form has only stubs with the ID in the Imports - // If NeedImports isn't set, the imports fields will all be zeroed out. - if ld.Mode&NeedImports != 0 { + if ld.Mode&NeedImports != 0 { + // Materialize the import graph. + + const ( + white = 0 // new + grey = 1 // in progress + black = 2 // complete + ) + + // visit traverses the import graph, depth-first, + // and materializes the graph as Packages.Imports. + // + // Valid imports are saved in the Packages.Import map. + // Invalid imports (cycles and missing nodes) are saved in the importErrors map. + // Thus, even in the presence of both kinds of errors, + // the Import graph remains a DAG. + // + // visit returns whether the package needs src or has a transitive + // dependency on a package that does. These are the only packages + // for which we load source code. 
+ var stack []*loaderPackage + var visit func(lpkg *loaderPackage) bool + visit = func(lpkg *loaderPackage) bool { + switch lpkg.color { + case black: + return lpkg.needsrc + case grey: + panic("internal error: grey node") + } + lpkg.color = grey + stack = append(stack, lpkg) // push + stubs := lpkg.Imports // the structure form has only stubs with the ID in the Imports lpkg.Imports = make(map[string]*Package, len(stubs)) for importPath, ipkg := range stubs { var importErr error @@ -730,40 +748,39 @@ func (ld *loader) refine(response *driverResponse) ([]*Package, error) { } lpkg.Imports[importPath] = imp.Package } - } - if lpkg.needsrc { - srcPkgs = append(srcPkgs, lpkg) - } - if ld.Mode&NeedTypesSizes != 0 { - lpkg.TypesSizes = ld.sizes - } - stack = stack[:len(stack)-1] // pop - lpkg.color = black - return lpkg.needsrc - } + // Complete type information is required for the + // immediate dependencies of each source package. + if lpkg.needsrc && ld.Mode&NeedTypes != 0 { + for _, ipkg := range lpkg.Imports { + ld.pkgs[ipkg.ID].needtypes = true + } + } - if ld.Mode&NeedImports == 0 { - // We do this to drop the stub import packages that we are not even going to try to resolve. - for _, lpkg := range initial { - lpkg.Imports = nil + // NeedTypeSizes causes TypeSizes to be set even + // on packages for which types aren't needed. + if ld.Mode&NeedTypesSizes != 0 { + lpkg.TypesSizes = ld.sizes + } + stack = stack[:len(stack)-1] // pop + lpkg.color = black + + return lpkg.needsrc } - } else { + // For each initial package, create its import DAG. for _, lpkg := range initial { visit(lpkg) } - } - if ld.Mode&NeedImports != 0 && ld.Mode&NeedTypes != 0 { - for _, lpkg := range srcPkgs { - // Complete type information is required for the - // immediate dependencies of each source package. - for _, ipkg := range lpkg.Imports { - imp := ld.pkgs[ipkg.ID] - imp.needtypes = true - } + + } else { + // !NeedImports: drop the stub (ID-only) import packages + // that we are not even going to try to resolve. + for _, lpkg := range initial { + lpkg.Imports = nil } } + // Load type data and syntax if needed, starting at // the initial packages (roots of the import DAG). 
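The visit function moved inside the NeedImports branch is an ordinary three-color depth-first traversal: grey marks packages currently on the stack, so cycles and missing nodes are recorded as import errors rather than edges, keeping the Imports graph a DAG. A toy sketch of the same coloring scheme, using hypothetical names rather than the loader's actual data structures:

```go
package main

import "fmt"

const (
	white = iota // unvisited
	grey         // on the stack: reaching a grey node means a cycle
	black        // fully processed
)

// visit walks deps depth-first, dropping back-edges so the emitted order is a
// valid topological order of the acyclic part of the graph.
func visit(node string, deps map[string][]string, color map[string]int, order *[]string) {
	switch color[node] {
	case black:
		return
	case grey:
		return // cycle: record an error instead of an edge
	}
	color[node] = grey
	for _, d := range deps[node] {
		visit(d, deps, color, order)
	}
	color[node] = black
	*order = append(*order, node)
}

func main() {
	deps := map[string][]string{"a": {"b", "c"}, "b": {"c"}, "c": {"a"}} // c -> a closes a cycle
	var order []string
	visit("a", deps, map[string]int{}, &order)
	fmt.Println(order) // [c b a]
}
```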
if ld.Mode&NeedTypes != 0 || ld.Mode&NeedSyntax != 0 { @@ -997,10 +1014,11 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) { Defs: make(map[*ast.Ident]types.Object), Uses: make(map[*ast.Ident]types.Object), Implicits: make(map[ast.Node]types.Object), + Instances: make(map[*ast.Ident]types.Instance), Scopes: make(map[ast.Node]*types.Scope), Selections: make(map[*ast.SelectorExpr]*types.Selection), } - typeparams.InitInstanceInfo(lpkg.TypesInfo) + versions.InitFileVersions(lpkg.TypesInfo) lpkg.TypesSizes = ld.sizes importer := importerFunc(func(path string) (*types.Package, error) { @@ -1038,7 +1056,10 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) { IgnoreFuncBodies: ld.Mode&NeedDeps == 0 && !lpkg.initial, Error: appendError, - Sizes: ld.sizes, + Sizes: ld.sizes, // may be nil + } + if lpkg.Module != nil && lpkg.Module.GoVersion != "" { + typesinternal.SetGoVersion(tc, "go"+lpkg.Module.GoVersion) } if (ld.Mode & typecheckCgo) != 0 { if !typesinternal.SetUsesCgo(tc) { @@ -1119,7 +1140,7 @@ func (ld *loader) parseFile(filename string) (*ast.File, error) { var err error if src == nil { ioLimit <- true // wait - src, err = ioutil.ReadFile(filename) + src, err = os.ReadFile(filename) <-ioLimit // signal } if err != nil { diff --git a/vendor/golang.org/x/tools/go/ssa/builder.go b/vendor/golang.org/x/tools/go/ssa/builder.go index be8d36a6ee..8622dfc53a 100644 --- a/vendor/golang.org/x/tools/go/ssa/builder.go +++ b/vendor/golang.org/x/tools/go/ssa/builder.go @@ -4,106 +4,73 @@ package ssa -// This file implements the BUILD phase of SSA construction. +// This file defines the builder, which builds SSA-form IR for function bodies. // -// SSA construction has two phases, CREATE and BUILD. In the CREATE phase -// (create.go), all packages are constructed and type-checked and -// definitions of all package members are created, method-sets are -// computed, and wrapper methods are synthesized. -// ssa.Packages are created in arbitrary order. +// SSA construction has two phases, "create" and "build". First, one +// or more packages are created in any order by a sequence of calls to +// CreatePackage, either from syntax or from mere type information. +// Each created package has a complete set of Members (const, var, +// type, func) that can be accessed through methods like +// Program.FuncValue. // -// In the BUILD phase (builder.go), the builder traverses the AST of -// each Go source function and generates SSA instructions for the -// function body. Initializer expressions for package-level variables -// are emitted to the package's init() function in the order specified -// by go/types.Info.InitOrder, then code for each function in the -// package is generated in lexical order. -// The BUILD phases for distinct packages are independent and are -// executed in parallel. +// It is not necessary to call CreatePackage for all dependencies of +// each syntax package, only for its direct imports. (In future +// perhaps even this restriction may be lifted.) // -// TODO(adonovan): indeed, building functions is now embarrassingly parallel. -// Audit for concurrency then benchmark using more goroutines. +// Second, packages created from syntax are built, by one or more +// calls to Package.Build, which may be concurrent; or by a call to +// Program.Build, which builds all packages in parallel. Building +// traverses the type-annotated syntax tree of each function body and +// creates SSA-form IR, a control-flow graph of instructions, +// populating fields such as Function.Body, .Params, and others. 
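The rewritten builder.go comment describes the create/build split from the client's point of view; in practice most callers drive both phases through go/packages and ssautil rather than calling CreatePackage by hand. A rough sketch of that typical usage (not part of this change):

```go
package main

import (
	"fmt"
	"log"

	"golang.org/x/tools/go/packages"
	"golang.org/x/tools/go/ssa"
	"golang.org/x/tools/go/ssa/ssautil"
)

func main() {
	cfg := &packages.Config{Mode: packages.LoadAllSyntax}
	initial, err := packages.Load(cfg, "fmt")
	if err != nil {
		log.Fatal(err)
	}
	// "Create" phase: one ssa.Package per well-typed loaded package.
	prog, ssaPkgs := ssautil.Packages(initial, ssa.SanityCheckFunctions)
	// "Build" phase: construct SSA bodies for all packages, in parallel.
	prog.Build()
	for _, p := range ssaPkgs {
		if p != nil {
			fmt.Println(p.Pkg.Path(), "built")
		}
	}
}
```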
// -// State: +// Building may create additional methods, including: +// - wrapper methods (e.g. for embeddding, or implicit &recv) +// - bound method closures (e.g. for use(recv.f)) +// - thunks (e.g. for use(I.f) or use(T.f)) +// - generic instances (e.g. to produce f[int] from f[any]). +// As these methods are created, they are added to the build queue, +// and then processed in turn, until a fixed point is reached, +// Since these methods might belong to packages that were not +// created (by a call to CreatePackage), their Pkg field is unset. // -// The Package's and Program's indices (maps) are populated and -// mutated during the CREATE phase, but during the BUILD phase they -// remain constant. The sole exception is Prog.methodSets and its -// related maps, which are protected by a dedicated mutex. +// Instances of generic functions may be either instantiated (f[int] +// is a copy of f[T] with substitutions) or wrapped (f[int] delegates +// to f[T]), depending on the availability of generic syntax and the +// InstantiateGenerics mode flag. // -// Generic functions declared in a package P can be instantiated from functions -// outside of P. This happens independently of the CREATE and BUILD phase of P. +// Each package has an initializer function named "init" that calls +// the initializer functions of each direct import, computes and +// assigns the initial value of each global variable, and calls each +// source-level function named "init". (These generate SSA functions +// named "init#1", "init#2", etc.) // -// Locks: +// Runtime types // -// Mutexes are currently acquired according to the following order: -// Prog.methodsMu ⊃ canonizer.mu ⊃ printMu -// where x ⊃ y denotes that y can be acquired while x is held -// and x cannot be acquired while y is held. +// Each MakeInterface operation is a conversion from a non-interface +// type to an interface type. The semantics of this operation requires +// a runtime type descriptor, which is the type portion of an +// interface, and the value abstracted by reflect.Type. // -// Synthetics: +// The program accumulates all non-parameterized types that are +// encountered as MakeInterface operands, along with all types that +// may be derived from them using reflection. This set is available as +// Program.RuntimeTypes, and the methods of these types may be +// reachable via interface calls or reflection even if they are never +// referenced from the SSA IR. (In practice, algorithms such as RTA +// that compute reachability from package main perform their own +// tracking of runtime types at a finer grain, so this feature is not +// very useful.) // -// During the BUILD phase new functions can be created and built. These include: -// - wrappers (wrappers, bounds, thunks) -// - generic function instantiations -// These functions do not belong to a specific Pkg (Pkg==nil). Instead the -// Package that led to them being CREATED is obligated to ensure these -// are BUILT during the BUILD phase of the Package. +// Function literals // -// Runtime types: +// Anonymous functions must be built as soon as they are encountered, +// as it may affect locals of the enclosing function, but they are not +// marked 'built' until the end of the outermost enclosing function. +// (Among other things, this causes them to be logged in top-down order.) // -// A concrete type is a type that is fully monomorphized with concrete types, -// i.e. it cannot reach a TypeParam type. -// Some concrete types require full runtime type information. 
Cases -// include checking whether a type implements an interface or -// interpretation by the reflect package. All such types that may require -// this information will have all of their method sets built and will be added to Prog.methodSets. -// A type T is considered to require runtime type information if it is -// a runtime type and has a non-empty method set and either: -// - T flows into a MakeInterface instructions, -// - T appears in a concrete exported member, or -// - T is a type reachable from a type S that has non-empty method set. -// For any such type T, method sets must be created before the BUILD -// phase of the package is done. -// -// Function literals: -// -// The BUILD phase of a function literal (anonymous function) is tied to the -// BUILD phase of the enclosing parent function. The FreeVars of an anonymous -// function are discovered by building the anonymous function. This in turn -// changes which variables must be bound in a MakeClosure instruction in the -// parent. Anonymous functions also track where they are referred to in their -// parent function. -// -// Happens-before: -// -// The above discussion leads to the following happens-before relation for -// the BUILD and CREATE phases. -// The happens-before relation (with X 0 { targs := fn.subst.types(instanceArgs(fn.info, e)) - callee = fn.Prog.needsInstance(callee, targs, b.created) + callee = callee.instance(targs, b.created) } return callee } // Local var. - return emitLoad(fn, fn.lookup(obj, false)) // var (address) + return emitLoad(fn, fn.lookup(obj.(*types.Var), false)) // var (address) case *ast.SelectorExpr: sel := fn.selection(e) @@ -823,7 +787,7 @@ func (b *builder) expr0(fn *Function, e ast.Expr, tv types.TypeAndValue) Value { case types.MethodExpr: // (*T).f or T.f, the method f from the method-set of type T. // The result is a "thunk". - thunk := makeThunk(fn.Prog, sel, b.created) + thunk := createThunk(fn.Prog, sel, b.created) return emitConv(fn, thunk, fn.typ(tv.Type)) case types.MethodVal: @@ -831,14 +795,14 @@ func (b *builder) expr0(fn *Function, e ast.Expr, tv types.TypeAndValue) Value { // The result is a "bound". obj := sel.obj.(*types.Func) rt := fn.typ(recvType(obj)) - wantAddr := isPointer(rt) + _, wantAddr := deref(rt) escaping := true v := b.receiver(fn, e.X, wantAddr, escaping, sel) if types.IsInterface(rt) { // If v may be an interface type I (after instantiating), // we must emit a check that v is non-nil. - if recv, ok := sel.recv.(*typeparams.TypeParam); ok { + if recv, ok := sel.recv.(*types.TypeParam); ok { // Emit a nil check if any possible instantiation of the // type parameter is an interface type. if typeSetOf(recv).Len() > 0 { @@ -858,7 +822,7 @@ func (b *builder) expr0(fn *Function, e ast.Expr, tv types.TypeAndValue) Value { } else { // non-type param interface // Emit nil check: typeassert v.(I). 
- emitTypeAssert(fn, v, rt, token.NoPos) + emitTypeAssert(fn, v, rt, e.Sel.Pos()) } } if targs := receiverTypeArgs(obj); len(targs) > 0 { @@ -866,7 +830,7 @@ func (b *builder) expr0(fn *Function, e ast.Expr, tv types.TypeAndValue) Value { obj = fn.Prog.canon.instantiateMethod(obj, fn.subst.types(targs), fn.Prog.ctxt) } c := &MakeClosure{ - Fn: makeBound(fn.Prog, obj, b.created), + Fn: createBound(fn.Prog, obj, b.created), Bindings: []Value{v}, } c.setPos(e.Sel.Pos()) @@ -884,7 +848,7 @@ func (b *builder) expr0(fn *Function, e ast.Expr, tv types.TypeAndValue) Value { panic("unexpected expression-relative selector") - case *typeparams.IndexListExpr: + case *ast.IndexListExpr: // f[X, Y] must be a generic function if !instance(fn.info, e.X) { panic("unexpected expression-could not match index list to instantiation") @@ -952,14 +916,14 @@ func (b *builder) stmtList(fn *Function, list []ast.Stmt) { // returns the effective receiver after applying the implicit field // selections of sel. // -// wantAddr requests that the result is an an address. If +// wantAddr requests that the result is an address. If // !sel.indirect, this may require that e be built in addr() mode; it // must thus be addressable. // // escaping is defined as per builder.addr(). func (b *builder) receiver(fn *Function, e ast.Expr, wantAddr, escaping bool, sel *selection) Value { var v Value - if wantAddr && !sel.indirect && !isPointer(fn.typeOf(e)) { + if _, eptr := deref(fn.typeOf(e)); wantAddr && !sel.indirect && !eptr { v = b.addr(fn, e, escaping).address(fn) } else { v = b.expr(fn, e) @@ -968,7 +932,10 @@ func (b *builder) receiver(fn *Function, e ast.Expr, wantAddr, escaping bool, se last := len(sel.index) - 1 // The position of implicit selection is the position of the inducing receiver expression. v = emitImplicitSelections(fn, v, sel.index[:last], e.Pos()) - if !wantAddr && isPointer(v.Type()) { + if types.IsInterface(v.Type()) { + // When v is an interface, sel.Kind()==MethodValue and v.f is invoked. + // So v is not loaded, even if v has a pointer core type. + } else if _, vptr := deref(v.Type()); !wantAddr && vptr { v = emitLoad(fn, v) } return v @@ -987,7 +954,7 @@ func (b *builder) setCallFunc(fn *Function, e *ast.CallExpr, c *CallCommon) { obj := sel.obj.(*types.Func) recv := recvType(obj) - wantAddr := isPointer(recv) + _, wantAddr := deref(recv) escaping := true v := b.receiver(fn, selector.X, wantAddr, escaping, sel) if types.IsInterface(recv) { @@ -996,11 +963,7 @@ func (b *builder) setCallFunc(fn *Function, e *ast.CallExpr, c *CallCommon) { c.Method = obj } else { // "Call"-mode call. - callee := fn.Prog.originFunc(obj) - if callee.typeparams.Len() > 0 { - callee = fn.Prog.needsInstance(callee, receiverTypeArgs(obj), b.created) - } - c.Value = callee + c.Value = fn.Prog.objectMethod(obj, b.created) c.Args = append(c.Args, v) } return @@ -1092,9 +1055,8 @@ func (b *builder) emitCallArgs(fn *Function, sig *types.Signature, e *ast.CallEx } else { // Replace a suffix of args with a slice containing it. 
at := types.NewArray(vt, int64(len(varargs))) - a := emitNew(fn, at, token.NoPos) + a := emitNew(fn, at, token.NoPos, "varargs") a.setPos(e.Rparen) - a.Comment = "varargs" for i, arg := range varargs { iaddr := &IndexAddr{ X: a, @@ -1141,7 +1103,7 @@ func (b *builder) localValueSpec(fn *Function, spec *ast.ValueSpec) { // 1:1 assignment for i, id := range spec.Names { if !isBlankIdent(id) { - fn.addLocalForIdent(id) + emitLocalVar(fn, identVar(fn, id)) } lval := b.addr(fn, id, false) // non-escaping b.assign(fn, lval, spec.Values[i], true, nil) @@ -1152,7 +1114,7 @@ func (b *builder) localValueSpec(fn *Function, spec *ast.ValueSpec) { // Locals are implicitly zero-initialized. for _, id := range spec.Names { if !isBlankIdent(id) { - lhs := fn.addLocalForIdent(id) + lhs := emitLocalVar(fn, identVar(fn, id)) if fn.debugInfo() { emitDebugRef(fn, id, lhs, true) } @@ -1164,7 +1126,7 @@ func (b *builder) localValueSpec(fn *Function, spec *ast.ValueSpec) { tuple := b.exprN(fn, spec.Values[0]) for i, id := range spec.Names { if !isBlankIdent(id) { - fn.addLocalForIdent(id) + emitLocalVar(fn, identVar(fn, id)) lhs := b.addr(fn, id, false) // non-escaping lhs.store(fn, emitExtract(fn, tuple, i)) } @@ -1184,8 +1146,8 @@ func (b *builder) assignStmt(fn *Function, lhss, rhss []ast.Expr, isDef bool) { var lval lvalue = blank{} if !isBlankIdent(lhs) { if isDef { - if obj := fn.info.Defs[lhs.(*ast.Ident)]; obj != nil { - fn.addNamedLocal(obj) + if obj, ok := fn.info.Defs[lhs.(*ast.Ident)].(*types.Var); ok { + emitLocalVar(fn, obj) isZero[i] = true } } @@ -1253,37 +1215,13 @@ func (b *builder) arrayLen(fn *Function, elts []ast.Expr) int64 { // literal has type *T behaves like &T{}. // In that case, addr must hold a T, not a *T. func (b *builder) compLit(fn *Function, addr Value, e *ast.CompositeLit, isZero bool, sb *storebuf) { - typ := deref(fn.typeOf(e)) // type with name [may be type param] - t := deref(typeparams.CoreType(typ)).Underlying() // core type for comp lit case - // Computing typ and t is subtle as these handle pointer types. - // For example, &T{...} is valid even for maps and slices. - // Also typ should refer to T (not *T) while t should be the core type of T. 
- // - // To show the ordering to take into account, consider the composite literal - // expressions `&T{f: 1}` and `{f: 1}` within the expression `[]S{{f: 1}}` here: - // type N struct{f int} - // func _[T N, S *N]() { - // _ = &T{f: 1} - // _ = []S{{f: 1}} - // } - // For `&T{f: 1}`, we compute `typ` and `t` as: - // typeOf(&T{f: 1}) == *T - // deref(*T) == T (typ) - // CoreType(T) == N - // deref(N) == N - // N.Underlying() == struct{f int} (t) - // For `{f: 1}` in `[]S{{f: 1}}`, we compute `typ` and `t` as: - // typeOf({f: 1}) == S - // deref(S) == S (typ) - // CoreType(S) == *N - // deref(*N) == N - // N.Underlying() == struct{f int} (t) - switch t := t.(type) { + typ, _ := deref(fn.typeOf(e)) // type with name [may be type param] + switch t := typeparams.CoreType(typ).(type) { case *types.Struct: if !isZero && len(e.Elts) != t.NumFields() { // memclear - sb.store(&address{addr, e.Lbrace, nil}, - zeroValue(fn, deref(addr.Type()))) + zt, _ := deref(addr.Type()) + sb.store(&address{addr, e.Lbrace, nil}, zeroConst(zt)) isZero = true } for i, e := range e.Elts { @@ -1318,17 +1256,15 @@ func (b *builder) compLit(fn *Function, addr Value, e *ast.CompositeLit, isZero switch t := t.(type) { case *types.Slice: at = types.NewArray(t.Elem(), b.arrayLen(fn, e.Elts)) - alloc := emitNew(fn, at, e.Lbrace) - alloc.Comment = "slicelit" - array = alloc + array = emitNew(fn, at, e.Lbrace, "slicelit") case *types.Array: at = t array = addr if !isZero && int64(len(e.Elts)) != at.Len() { // memclear - sb.store(&address{array, e.Lbrace, nil}, - zeroValue(fn, deref(array.Type()))) + zt, _ := deref(array.Type()) + sb.store(&address{array, e.Lbrace, nil}, zeroConst(zt)) } } @@ -1381,8 +1317,13 @@ func (b *builder) compLit(fn *Function, addr Value, e *ast.CompositeLit, isZero // map[*struct{}]bool{{}: true} // An &-operation may be implied: // map[*struct{}]bool{&struct{}{}: true} + wantAddr := false + if _, ok := unparen(e.Key).(*ast.CompositeLit); ok { + _, wantAddr = deref(t.Key()) + } + var key Value - if _, ok := unparen(e.Key).(*ast.CompositeLit); ok && isPointer(t.Key()) { + if wantAddr { // A CompositeLit never evaluates to a pointer, // so if the type of the location is a pointer, // an &-operation is implied. @@ -1409,7 +1350,7 @@ func (b *builder) compLit(fn *Function, addr Value, e *ast.CompositeLit, isZero sb.store(&address{addr: addr, pos: e.Lbrace, expr: e}, m) default: - panic("unexpected CompositeLit type: " + t.String()) + panic("unexpected CompositeLit type: " + typ.String()) } } @@ -1603,13 +1544,13 @@ func (b *builder) typeSwitchStmt(fn *Function, s *ast.TypeSwitchStmt, label *lbl } func (b *builder) typeCaseBody(fn *Function, cc *ast.CaseClause, x Value, done *BasicBlock) { - if obj := fn.info.Implicits[cc]; obj != nil { + if obj, ok := fn.info.Implicits[cc].(*types.Var); ok { // In a switch y := x.(type), each case clause // implicitly declares a distinct object y. // In a single-type case, y has that type. // In multi-type cases, 'case nil' and default, // y has the same type as the interface operand. 
- emitStore(fn, fn.addNamedLocal(obj), x, obj.Pos()) + emitStore(fn, emitLocalVar(fn, obj), x, obj.Pos()) } fn.targets = &targets{ tail: fn.targets, @@ -1758,7 +1699,7 @@ func (b *builder) selectStmt(fn *Function, s *ast.SelectStmt, label *lblock) { case *ast.AssignStmt: // x := <-states[state].Chan if comm.Tok == token.DEFINE { - fn.addLocalForIdent(comm.Lhs[0].(*ast.Ident)) + emitLocalVar(fn, identVar(fn, comm.Lhs[0].(*ast.Ident))) } x := b.addr(fn, comm.Lhs[0], false) // non-escaping v := emitExtract(fn, sel, r) @@ -1769,7 +1710,7 @@ func (b *builder) selectStmt(fn *Function, s *ast.SelectStmt, label *lblock) { if len(comm.Lhs) == 2 { // x, ok := ... if comm.Tok == token.DEFINE { - fn.addLocalForIdent(comm.Lhs[1].(*ast.Ident)) + emitLocalVar(fn, identVar(fn, comm.Lhs[1].(*ast.Ident))) } ok := b.addr(fn, comm.Lhs[1], false) // non-escaping ok.store(fn, emitExtract(fn, sel, 1)) @@ -1804,20 +1745,32 @@ func (b *builder) selectStmt(fn *Function, s *ast.SelectStmt, label *lblock) { // forStmt emits to fn code for the for statement s, optionally // labelled by label. func (b *builder) forStmt(fn *Function, s *ast.ForStmt, label *lblock) { - // ...init... - // jump loop + // Use forStmtGo122 instead if it applies. + if s.Init != nil { + if assign, ok := s.Init.(*ast.AssignStmt); ok && assign.Tok == token.DEFINE { + afterGo122 := versions.Compare(fn.goversion, "go1.21") > 0 + if afterGo122 { + b.forStmtGo122(fn, s, label) + return + } + } + } + + // ...init... + // jump loop // loop: - // if cond goto body else done + // if cond goto body else done // body: - // ...body... - // jump post - // post: (target of continue) - // ...post... - // jump loop + // ...body... + // jump post + // post: (target of continue) + // ...post... + // jump loop // done: (target of break) if s.Init != nil { b.stmt(fn, s.Init) } + body := fn.newBasicBlock("for.body") done := fn.newBasicBlock("for.done") // target of 'break' loop := body // target of back-edge @@ -1855,35 +1808,199 @@ func (b *builder) forStmt(fn *Function, s *ast.ForStmt, label *lblock) { fn.currentBlock = done } +// forStmtGo122 emits to fn code for the for statement s, optionally +// labelled by label. s must define its variables. +// +// This allocates once per loop iteration. This is only correct in +// GoVersions >= go1.22. +func (b *builder) forStmtGo122(fn *Function, s *ast.ForStmt, label *lblock) { + // i_outer = alloc[T] + // *i_outer = ...init... // under objects[i] = i_outer + // jump loop + // loop: + // i = phi [head: i_outer, loop: i_next] + // ...cond... // under objects[i] = i + // if cond goto body else done + // body: + // ...body... // under objects[i] = i (same as loop) + // jump post + // post: + // tmp = *i + // i_next = alloc[T] + // *i_next = tmp + // ...post... // under objects[i] = i_next + // goto loop + // done: + + init := s.Init.(*ast.AssignStmt) + startingBlocks := len(fn.Blocks) + + pre := fn.currentBlock // current block before starting + loop := fn.newBasicBlock("for.loop") // target of back-edge + body := fn.newBasicBlock("for.body") + post := fn.newBasicBlock("for.post") // target of 'continue' + done := fn.newBasicBlock("for.done") // target of 'break' + + // For each of the n loop variables, we create five SSA values, + // outer, phi, next, load, and store in pre, loop, and post. + // There is no limit on n. 
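forStmtGo122 implements the go1.22 change whereby a `:=` for statement declares fresh variables on every iteration, which is why the builder allocates a new cell (i_next) per iteration and later fuses the cells that never escape. The observable difference in ordinary Go code, assuming a module that selects go1.22 semantics:

```go
package main

import "fmt"

func main() {
	var fns []func() int
	for i := 0; i < 3; i++ {
		// go1.22 semantics: each iteration declares a fresh i, so the closures
		// capture 0, 1, 2. Pre-1.22 they would all share one i and return 3.
		fns = append(fns, func() int { return i })
	}
	for _, f := range fns {
		fmt.Println(f())
	}
}
```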
+ type loopVar struct { + obj *types.Var + outer *Alloc + phi *Phi + load *UnOp + next *Alloc + store *Store + } + vars := make([]loopVar, len(init.Lhs)) + for i, lhs := range init.Lhs { + v := identVar(fn, lhs.(*ast.Ident)) + typ := fn.typ(v.Type()) + + fn.currentBlock = pre + outer := emitLocal(fn, typ, v.Pos(), v.Name()) + + fn.currentBlock = loop + phi := &Phi{Comment: v.Name()} + phi.pos = v.Pos() + phi.typ = outer.Type() + fn.emit(phi) + + fn.currentBlock = post + // If next is is local, it reuses the address and zeroes the old value so + // load before allocating next. + load := emitLoad(fn, phi) + next := emitLocal(fn, typ, v.Pos(), v.Name()) + store := emitStore(fn, next, load, token.NoPos) + + phi.Edges = []Value{outer, next} // pre edge is emitted before post edge. + + vars[i] = loopVar{v, outer, phi, load, next, store} + } + + // ...init... under fn.objects[v] = i_outer + fn.currentBlock = pre + for _, v := range vars { + fn.vars[v.obj] = v.outer + } + const isDef = false // assign to already-allocated outers + b.assignStmt(fn, init.Lhs, init.Rhs, isDef) + if label != nil { + label._break = done + label._continue = post + } + emitJump(fn, loop) + + // ...cond... under fn.objects[v] = i + fn.currentBlock = loop + for _, v := range vars { + fn.vars[v.obj] = v.phi + } + if s.Cond != nil { + b.cond(fn, s.Cond, body, done) + } else { + emitJump(fn, body) + } + + // ...body... under fn.objects[v] = i + fn.currentBlock = body + fn.targets = &targets{ + tail: fn.targets, + _break: done, + _continue: post, + } + b.stmt(fn, s.Body) + fn.targets = fn.targets.tail + emitJump(fn, post) + + // ...post... under fn.objects[v] = i_next + for _, v := range vars { + fn.vars[v.obj] = v.next + } + fn.currentBlock = post + if s.Post != nil { + b.stmt(fn, s.Post) + } + emitJump(fn, loop) // back-edge + fn.currentBlock = done + + // For each loop variable that does not escape, + // (the common case), fuse its next cells into its + // (local) outer cell as they have disjoint live ranges. + // + // It is sufficient to test whether i_next escapes, + // because its Heap flag will be marked true if either + // the cond or post expression causes i to escape + // (because escape distributes over phi). + var nlocals int + for _, v := range vars { + if !v.next.Heap { + nlocals++ + } + } + if nlocals > 0 { + replace := make(map[Value]Value, 2*nlocals) + dead := make(map[Instruction]bool, 4*nlocals) + for _, v := range vars { + if !v.next.Heap { + replace[v.next] = v.outer + replace[v.phi] = v.outer + dead[v.phi], dead[v.next], dead[v.load], dead[v.store] = true, true, true, true + } + } + + // Replace all uses of i_next and phi with i_outer. + // Referrers have not been built for fn yet so only update Instruction operands. + // We need only look within the blocks added by the loop. + var operands []*Value // recycle storage + for _, b := range fn.Blocks[startingBlocks:] { + for _, instr := range b.Instrs { + operands = instr.Operands(operands[:0]) + for _, ptr := range operands { + k := *ptr + if v := replace[k]; v != nil { + *ptr = v + } + } + } + } + + // Remove instructions for phi, load, and store. + // lift() will remove the unused i_next *Alloc. + isDead := func(i Instruction) bool { return dead[i] } + loop.Instrs = removeInstrsIf(loop.Instrs, isDead) + post.Instrs = removeInstrsIf(post.Instrs, isDead) + } +} + // rangeIndexed emits to fn the header for an integer-indexed loop // over array, *array or slice value x. // The v result is defined only if tv is non-nil. 
// forPos is the position of the "for" token. func (b *builder) rangeIndexed(fn *Function, x Value, tv types.Type, pos token.Pos) (k, v Value, loop, done *BasicBlock) { // - // length = len(x) - // index = -1 - // loop: (target of continue) - // index++ - // if index < length goto body else done + // length = len(x) + // index = -1 + // loop: (target of continue) + // index++ + // if index < length goto body else done // body: - // k = index - // v = x[index] - // ...body... - // jump loop - // done: (target of break) + // k = index + // v = x[index] + // ...body... + // jump loop + // done: (target of break) // Determine number of iterations. var length Value - if arr, ok := deref(x.Type()).Underlying().(*types.Array); ok { + dt, _ := deref(x.Type()) + if arr, ok := typeparams.CoreType(dt).(*types.Array); ok { // For array or *array, the number of iterations is // known statically thanks to the type. We avoid a // data dependence upon x, permitting later dead-code // elimination if x is pure, static unrolling, etc. // Ranging over a nil *array may have >0 iterations. // We still generate code for x, in case it has effects. - // - // TypeParams do not have constant length. Use underlying instead of core type. length = intConst(arr.Len()) } else { // length = len(x). @@ -1894,7 +2011,7 @@ func (b *builder) rangeIndexed(fn *Function, x Value, tv types.Type, pos token.P length = fn.emit(&c) } - index := fn.addLocal(tInt, token.NoPos) + index := emitLocal(fn, tInt, token.NoPos, "rangeindex") emitStore(fn, index, intConst(-1), pos) loop = fn.newBasicBlock("rangeindex.loop") @@ -1957,16 +2074,16 @@ func (b *builder) rangeIndexed(fn *Function, x Value, tv types.Type, pos token.P // if the respective component is not wanted. func (b *builder) rangeIter(fn *Function, x Value, tk, tv types.Type, pos token.Pos) (k, v Value, loop, done *BasicBlock) { // - // it = range x + // it = range x // loop: (target of continue) - // okv = next it (ok, key, value) - // ok = extract okv #0 - // if ok goto body else done + // okv = next it (ok, key, value) + // ok = extract okv #0 + // if ok goto body else done // body: - // k = extract okv #1 - // v = extract okv #2 - // ...body... - // jump loop + // k = extract okv #1 + // v = extract okv #2 + // ...body... + // jump loop // done: (target of break) // @@ -2019,13 +2136,13 @@ func (b *builder) rangeIter(fn *Function, x Value, tk, tv types.Type, pos token. func (b *builder) rangeChan(fn *Function, x Value, tk types.Type, pos token.Pos) (k Value, loop, done *BasicBlock) { // // loop: (target of continue) - // ko = <-x (key, ok) - // ok = extract ko #1 - // if ok goto body else done + // ko = <-x (key, ok) + // ok = extract ko #1 + // if ok goto body else done // body: - // k = extract ko #0 - // ... - // goto loop + // k = extract ko #0 + // ...body... + // goto loop // done: (target of break) loop = fn.newBasicBlock("rangechan.loop") @@ -2052,6 +2169,57 @@ func (b *builder) rangeChan(fn *Function, x Value, tk types.Type, pos token.Pos) return } +// rangeInt emits to fn the header for a range loop with an integer operand. +// tk is the key value's type, or nil if the k result is not wanted. +// pos is the position of the "for" token. +func (b *builder) rangeInt(fn *Function, x Value, tk types.Type, pos token.Pos) (k Value, loop, done *BasicBlock) { + // + // iter = 0 + // if 0 < x goto body else done + // loop: (target of continue) + // iter++ + // if iter < x goto body else done + // body: + // k = x + // ...body... 
+ // jump loop + // done: (target of break) + + if isUntyped(x.Type()) { + x = emitConv(fn, x, tInt) + } + + T := x.Type() + iter := emitLocal(fn, T, token.NoPos, "rangeint.iter") + // x may be unsigned. Avoid initializing x to -1. + + body := fn.newBasicBlock("rangeint.body") + done = fn.newBasicBlock("rangeint.done") + emitIf(fn, emitCompare(fn, token.LSS, zeroConst(T), x, token.NoPos), body, done) + + loop = fn.newBasicBlock("rangeint.loop") + fn.currentBlock = loop + + incr := &BinOp{ + Op: token.ADD, + X: emitLoad(fn, iter), + Y: emitConv(fn, vOne, T), + } + incr.setType(T) + emitStore(fn, iter, fn.emit(incr), pos) + emitIf(fn, emitCompare(fn, token.LSS, incr, x, token.NoPos), body, done) + fn.currentBlock = body + + if tk != nil { + // Integer types (int, uint8, etc.) are named and + // we know that k is assignable to x when tk != nil. + // This implies tk and T are identical so no conversion is needed. + k = emitLoad(fn, iter) + } + + return +} + // rangeStmt emits to fn code for the range statement s, optionally // labelled by label. func (b *builder) rangeStmt(fn *Function, s *ast.RangeStmt, label *lblock) { @@ -2063,21 +2231,26 @@ func (b *builder) rangeStmt(fn *Function, s *ast.RangeStmt, label *lblock) { tv = fn.typeOf(s.Value) } - // If iteration variables are defined (:=), this - // occurs once outside the loop. - // - // Unlike a short variable declaration, a RangeStmt - // using := never redeclares an existing variable; it - // always creates a new one. - if s.Tok == token.DEFINE { + // create locals for s.Key and s.Value. + createVars := func() { + // Unlike a short variable declaration, a RangeStmt + // using := never redeclares an existing variable; it + // always creates a new one. if tk != nil { - fn.addLocalForIdent(s.Key.(*ast.Ident)) + emitLocalVar(fn, identVar(fn, s.Key.(*ast.Ident))) } if tv != nil { - fn.addLocalForIdent(s.Value.(*ast.Ident)) + emitLocalVar(fn, identVar(fn, s.Value.(*ast.Ident))) } } + afterGo122 := versions.Compare(fn.goversion, "go1.21") > 0 + if s.Tok == token.DEFINE && !afterGo122 { + // pre-go1.22: If iteration variables are defined (:=), this + // occurs once outside the loop. + createVars() + } + x := b.expr(fn, s.X) var k, v Value @@ -2089,13 +2262,30 @@ func (b *builder) rangeStmt(fn *Function, s *ast.RangeStmt, label *lblock) { case *types.Chan: k, loop, done = b.rangeChan(fn, x, tk, s.For) - case *types.Map, *types.Basic: // string + case *types.Map: k, v, loop, done = b.rangeIter(fn, x, tk, tv, s.For) + case *types.Basic: + switch { + case rt.Info()&types.IsString != 0: + k, v, loop, done = b.rangeIter(fn, x, tk, tv, s.For) + + case rt.Info()&types.IsInteger != 0: + k, loop, done = b.rangeInt(fn, x, tk, s.For) + + default: + panic("Cannot range over basic type: " + rt.String()) + } + default: panic("Cannot range over: " + rt.String()) } + if s.Tok == token.DEFINE && afterGo122 { + // go1.22: If iteration variables are defined (:=), this occurs inside the loop. + createVars() + } + // Evaluate both LHS expressions before we update either. var kl, vl lvalue if tk != nil { @@ -2319,73 +2509,71 @@ start: } } +// A buildFunc is a strategy for building the SSA body for a function. +type buildFunc = func(*builder, *Function) + +// iterate causes all created but unbuilt functions to be built. As +// this may create new methods, the process is iterated until it +// converges. 
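The rangeInt header and the rangeStmt changes above add lowering for go1.22's range-over-integer loops. The source form being compiled is simply the following (requires a go1.22 language version):

```go
package main

import "fmt"

func main() {
	// "range 5" yields k = 0, 1, 2, 3, 4; omitting k just runs the body 5 times.
	for k := range 5 {
		fmt.Println(k)
	}
}
```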
+func (b *builder) iterate() { + for ; b.finished < b.created.Len(); b.finished++ { + fn := b.created.At(b.finished) + b.buildFunction(fn) + } +} + // buildFunction builds SSA code for the body of function fn. Idempotent. func (b *builder) buildFunction(fn *Function) { - if !fn.built { + if fn.build != nil { assert(fn.parent == nil, "anonymous functions should not be built by buildFunction()") - b.buildFunctionBody(fn) + + if fn.Prog.mode&LogSource != 0 { + defer logStack("build %s @ %s", fn, fn.Prog.Fset.Position(fn.pos))() + } + fn.build(b, fn) fn.done() } } -// buildFunctionBody builds SSA code for the body of function fn. -// -// fn is not done building until fn.done() is called. -func (b *builder) buildFunctionBody(fn *Function) { - // TODO(taking): see if this check is reachable. - if fn.Blocks != nil { - return // building already started +// buildParamsOnly builds fn.Params from fn.Signature, but does not build fn.Body. +func (b *builder) buildParamsOnly(fn *Function) { + // For external (C, asm) functions or functions loaded from + // export data, we must set fn.Params even though there is no + // body code to reference them. + if recv := fn.Signature.Recv(); recv != nil { + fn.addParamVar(recv) } + params := fn.Signature.Params() + for i, n := 0, params.Len(); i < n; i++ { + fn.addParamVar(params.At(i)) + } +} - var recvField *ast.FieldList - var body *ast.BlockStmt - var functype *ast.FuncType - switch n := fn.syntax.(type) { - case nil: - if fn.Params != nil { - return // not a Go source function. (Synthetic, or from object file.) - } +// buildFromSyntax builds fn.Body from fn.syntax, which must be non-nil. +func (b *builder) buildFromSyntax(fn *Function) { + var ( + recvField *ast.FieldList + body *ast.BlockStmt + functype *ast.FuncType + ) + switch syntax := fn.syntax.(type) { case *ast.FuncDecl: - functype = n.Type - recvField = n.Recv - body = n.Body + functype = syntax.Type + recvField = syntax.Recv + body = syntax.Body + if body == nil { + b.buildParamsOnly(fn) // no body (non-Go function) + return + } case *ast.FuncLit: - functype = n.Type - body = n.Body + functype = syntax.Type + body = syntax.Body + case nil: + panic("no syntax") default: - panic(n) - } - - if body == nil { - // External function. - if fn.Params == nil { - // This condition ensures we add a non-empty - // params list once only, but we may attempt - // the degenerate empty case repeatedly. - // TODO(adonovan): opt: don't do that. - - // We set Function.Params even though there is no body - // code to reference them. This simplifies clients. - if recv := fn.Signature.Recv(); recv != nil { - fn.addParamObj(recv) - } - params := fn.Signature.Params() - for i, n := 0, params.Len(); i < n; i++ { - fn.addParamObj(params.At(i)) - } - } - return + panic(syntax) // unexpected syntax } - // Build instantiation wrapper around generic body? - if fn.topLevelOrigin != nil && fn.subst == nil { - buildInstantiationWrapper(fn) - return - } - - if fn.Prog.mode&LogSource != 0 { - defer logStack("build function %s @ %s", fn, fn.Prog.Fset.Position(fn.pos))() - } fn.startBody() fn.createSyntacticParams(recvField, functype) b.stmt(fn, body) @@ -2403,45 +2591,17 @@ func (b *builder) buildFunctionBody(fn *Function) { fn.finishBody() } -// buildCreated does the BUILD phase for each function created by builder that is not yet BUILT. -// Functions are built using buildFunction. +// addRuntimeType records t as a runtime type, +// along with all types derivable from it using reflection. 
// -// May add types that require runtime type information to builder. -func (b *builder) buildCreated() { - for ; b.finished < b.created.Len(); b.finished++ { - fn := b.created.At(b.finished) - b.buildFunction(fn) - } -} - -// Adds any needed runtime type information for the created functions. -// -// May add newly CREATEd functions that may need to be built or runtime type information. -// -// EXCLUSIVE_LOCKS_ACQUIRED(prog.methodsMu) -func (b *builder) needsRuntimeTypes() { - if b.created.Len() == 0 { - return - } - prog := b.created.At(0).Prog - - var rtypes []types.Type - for ; b.rtypes < b.finished; b.rtypes++ { - fn := b.created.At(b.rtypes) - rtypes = append(rtypes, mayNeedRuntimeTypes(fn)...) - } - - // Calling prog.needMethodsOf(T) on a basic type T is a no-op. - // Filter out the basic types to reduce acquiring prog.methodsMu. - rtypes = nonbasicTypes(rtypes) - - for _, T := range rtypes { - prog.needMethodsOf(T, b.created) - } -} - -func (b *builder) done() bool { - return b.rtypes >= b.created.Len() +// Acquires prog.runtimeTypesMu. +func addRuntimeType(prog *Program, t types.Type) { + prog.runtimeTypesMu.Lock() + defer prog.runtimeTypesMu.Unlock() + forEachReachable(&prog.MethodSets, t, func(t types.Type) bool { + prev, _ := prog.runtimeTypes.Set(t, true).(bool) + return !prev // already seen? + }) } // Build calls Package.Build for each package in prog. @@ -2469,9 +2629,11 @@ func (prog *Program) Build() { // Build builds SSA code for all functions and vars in package p. // -// Precondition: CreatePackage must have been called for all of p's -// direct imports (and hence its direct imports must have been -// error-free). +// CreatePackage must have been called for all of p's direct imports +// (and hence its direct imports must have been error-free). It is not +// necessary to call CreatePackage for indirect dependencies. +// Functions will be created for all necessary methods in those +// packages on demand. // // Build is idempotent and thread-safe. func (p *Package) Build() { p.buildOnce.Do(p.build) } @@ -2480,45 +2642,39 @@ func (p *Package) build() { if p.info == nil { return // synthetic package, e.g. "testmain" } - - // Ensure we have runtime type info for all exported members. - // Additionally filter for just concrete types that can be runtime types. - // - // TODO(adonovan): ideally belongs in memberFromObject, but - // that would require package creation in topological order. - for name, mem := range p.Members { - isGround := func(m Member) bool { - switch m := m.(type) { - case *Type: - named, _ := m.Type().(*types.Named) - return named == nil || typeparams.ForNamed(named) == nil - case *Function: - return m.typeparams.Len() == 0 - } - return true // *NamedConst, *Global - } - if ast.IsExported(name) && isGround(mem) { - p.Prog.needMethodsOf(mem.Type(), &p.created) - } - } if p.Prog.mode&LogSource != 0 { defer logStack("build %s", p)() } b := builder{created: &p.created} - init := p.init - init.startBody() + b.iterate() + + // We no longer need transient information: ASTs or go/types deductions. + p.info = nil + p.created = nil + p.files = nil + p.initVersion = nil + + if p.Prog.mode&SanityCheckFunctions != 0 { + sanityCheckPackage(p) + } +} + +// buildPackageInit builds fn.Body for the synthetic package initializer. +func (b *builder) buildPackageInit(fn *Function) { + p := fn.Pkg + fn.startBody() var done *BasicBlock if p.Prog.mode&BareInits == 0 { // Make init() skip if package is already initialized. 
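buildPackageInit synthesizes the package initializer sketched here: unless BareInits is set, it wraps the body in a guard so repeated calls are no-ops, then calls the init of each direct import, assigns globals in InitOrder, and finally invokes the declared init functions in source order. In ordinary Go terms the guard behaves roughly like this (illustrative only; the names are hypothetical):

```go
package main

import "fmt"

var initGuard bool // plays the role of the synthesized init$guard global

func pkgInit() {
	if initGuard {
		return // already initialized: jump straight to init.done
	}
	initGuard = true
	// ...call each imported package's init, assign globals in dependency
	// order, then call the declared init() functions in source order...
	fmt.Println("initialized once")
}

func main() {
	pkgInit()
	pkgInit() // second call is a no-op
}
```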
initguard := p.Var("init$guard") - doinit := init.newBasicBlock("init.start") - done = init.newBasicBlock("init.done") - emitIf(init, emitLoad(init, initguard), done, doinit) - init.currentBlock = doinit - emitStore(init, initguard, vTrue, token.NoPos) + doinit := fn.newBasicBlock("init.start") + done = fn.newBasicBlock("init.done") + emitIf(fn, emitLoad(fn, initguard), done, doinit) + fn.currentBlock = doinit + emitStore(fn, initguard, vTrue, token.NoPos) // Call the init() function of each package we import. for _, pkg := range p.Pkg.Imports() { @@ -2528,9 +2684,9 @@ func (p *Package) build() { } var v Call v.Call.Value = prereq.init - v.Call.pos = init.pos + v.Call.pos = fn.pos v.setType(types.NewTuple()) - init.emit(&v) + fn.emit(&v) } } @@ -2538,11 +2694,18 @@ func (p *Package) build() { if len(p.info.InitOrder) > 0 && len(p.files) == 0 { panic("no source files provided for package. cannot initialize globals") } + for _, varinit := range p.info.InitOrder { - if init.Prog.mode&LogSource != 0 { + if fn.Prog.mode&LogSource != 0 { fmt.Fprintf(os.Stderr, "build global initializer %v @ %s\n", varinit.Lhs, p.Prog.Fset.Position(varinit.Rhs.Pos())) } + // Initializers for global vars are evaluated in dependency + // order, but may come from arbitrary files of the package + // with different versions, so we transiently update + // fn.goversion for each one. (Since init is a synthetic + // function it has no syntax of its own that needs a version.) + fn.goversion = p.initVersion[varinit.Rhs] if len(varinit.Lhs) == 1 { // 1:1 initialization: var x, y = a(), b() var lval lvalue @@ -2551,28 +2714,33 @@ func (p *Package) build() { } else { lval = blank{} } - b.assign(init, lval, varinit.Rhs, true, nil) + b.assign(fn, lval, varinit.Rhs, true, nil) } else { // n:1 initialization: var x, y := f() - tuple := b.exprN(init, varinit.Rhs) + tuple := b.exprN(fn, varinit.Rhs) for i, v := range varinit.Lhs { if v.Name() == "_" { continue } - emitStore(init, p.objects[v].(*Global), emitExtract(init, tuple, i), v.Pos()) + emitStore(fn, p.objects[v].(*Global), emitExtract(fn, tuple, i), v.Pos()) } } } + // The rest of the init function is synthetic: + // no syntax, info, goversion. + fn.info = nil + fn.goversion = "" + // Call all of the declared init() functions in source order. for _, file := range p.files { for _, decl := range file.Decls { if decl, ok := decl.(*ast.FuncDecl); ok { id := decl.Name if !isBlankIdent(id) && id.Name == "init" && decl.Recv == nil { - fn := p.objects[p.info.Defs[id]].(*Function) + declaredInit := p.objects[p.info.Defs[id]].(*Function) var v Call - v.Call.Value = fn + v.Call.Value = declaredInit v.setType(types.NewTuple()) p.init.emit(&v) } @@ -2582,35 +2750,9 @@ func (p *Package) build() { // Finish up init(). if p.Prog.mode&BareInits == 0 { - emitJump(init, done) - init.currentBlock = done - } - init.emit(new(Return)) - init.finishBody() - init.done() - - // Build all CREATEd functions and add runtime types. - // These Functions include package-level functions, init functions, methods, and synthetic (including unreachable/blank ones). - // Builds any functions CREATEd while building this package. - // - // Initially the created functions for the package are: - // [init, decl0, ... , declN] - // Where decl0, ..., declN are declared functions in source order, but it's not significant. - // - // As these are built, more functions (function literals, wrappers, etc.) can be CREATEd. - // Iterate until we reach a fixed point. 
- // - // Wait for init() to be BUILT as that cannot be built by buildFunction(). - // - for !b.done() { - b.buildCreated() // build any CREATEd and not BUILT function. May add runtime types. - b.needsRuntimeTypes() // Add all of the runtime type information. May CREATE Functions. - } - - p.info = nil // We no longer need ASTs or go/types deductions. - p.created = nil // We no longer need created functions. - - if p.Prog.mode&SanityCheckFunctions != 0 { - sanityCheckPackage(p) + emitJump(fn, done) + fn.currentBlock = done } + fn.emit(new(Return)) + fn.finishBody() } diff --git a/vendor/golang.org/x/tools/go/ssa/const.go b/vendor/golang.org/x/tools/go/ssa/const.go index 4a51a2cb4b..2a6ac5882a 100644 --- a/vendor/golang.org/x/tools/go/ssa/const.go +++ b/vendor/golang.org/x/tools/go/ssa/const.go @@ -125,7 +125,7 @@ func zeroString(t types.Type, from *types.Package) string { components[i] = zeroString(t.At(i).Type(), from) } return "(" + strings.Join(components, ", ") + ")" - case *typeparams.TypeParam: + case *types.TypeParam: return "*new(" + relType(t, from) + ")" } panic(fmt.Sprint("zeroString: unexpected ", t)) diff --git a/vendor/golang.org/x/tools/go/ssa/coretype.go b/vendor/golang.org/x/tools/go/ssa/coretype.go index 128d61e426..88136b4384 100644 --- a/vendor/golang.org/x/tools/go/ssa/coretype.go +++ b/vendor/golang.org/x/tools/go/ssa/coretype.go @@ -40,19 +40,19 @@ func isBytestring(T types.Type) bool { } // termList is a list of types. -type termList []*typeparams.Term // type terms of the type set +type termList []*types.Term // type terms of the type set func (s termList) Len() int { return len(s) } func (s termList) At(i int) types.Type { return s[i].Type() } // typeSetOf returns the type set of typ. Returns an empty typeset on an error. func typeSetOf(typ types.Type) termList { // This is a adaptation of x/exp/typeparams.NormalTerms which x/tools cannot depend on. - var terms []*typeparams.Term + var terms []*types.Term var err error switch typ := typ.(type) { - case *typeparams.TypeParam: + case *types.TypeParam: terms, err = typeparams.StructuralTerms(typ) - case *typeparams.Union: + case *types.Union: terms, err = typeparams.UnionTermSet(typ) case *types.Interface: terms, err = typeparams.InterfaceTermSet(typ) @@ -60,7 +60,7 @@ func typeSetOf(typ types.Type) termList { // Common case. // Specializing the len=1 case to avoid a slice // had no measurable space/time benefit. - terms = []*typeparams.Term{typeparams.NewTerm(false, typ)} + terms = []*types.Term{types.NewTerm(false, typ)} } if err != nil { diff --git a/vendor/golang.org/x/tools/go/ssa/create.go b/vendor/golang.org/x/tools/go/ssa/create.go index ccb20e7968..c4da35d0b0 100644 --- a/vendor/golang.org/x/tools/go/ssa/create.go +++ b/vendor/golang.org/x/tools/go/ssa/create.go @@ -15,41 +15,43 @@ import ( "os" "sync" - "golang.org/x/tools/go/types/typeutil" - "golang.org/x/tools/internal/typeparams" + "golang.org/x/tools/internal/versions" ) // NewProgram returns a new SSA Program. // // mode controls diagnostics and checking during SSA construction. +// +// To construct an SSA program: +// +// - Call NewProgram to create an empty Program. +// - Call CreatePackage providing typed syntax for each package +// you want to build, and call it with types but not +// syntax for each of those package's direct dependencies. +// - Call [Package.Build] on each syntax package you wish to build, +// or [Program.Build] to build all of them. +// +// See the Example tests for simple examples. 
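The new NewProgram doc comment spells out the three steps: NewProgram, CreatePackage for each syntax package (plus types-only creation of its direct imports), then Build. A minimal sketch using that lower-level API directly on a single dependency-free package; the source string and setup are illustrative, not part of this change:

```go
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"

	"golang.org/x/tools/go/ssa"
)

const src = `package p
func Hello() string { return "hi" }
`

func main() {
	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	// Type-check, populating the Info maps that go/ssa consumes.
	info := &types.Info{
		Types:      make(map[ast.Expr]types.TypeAndValue),
		Defs:       make(map[*ast.Ident]types.Object),
		Uses:       make(map[*ast.Ident]types.Object),
		Implicits:  make(map[ast.Node]types.Object),
		Scopes:     make(map[ast.Node]*types.Scope),
		Selections: make(map[*ast.SelectorExpr]*types.Selection),
		Instances:  make(map[*ast.Ident]types.Instance),
	}
	var conf types.Config // no Importer needed: the package imports nothing
	pkg, err := conf.Check("p", fset, []*ast.File{file}, info)
	if err != nil {
		panic(err)
	}
	prog := ssa.NewProgram(fset, ssa.SanityCheckFunctions)              // step 1
	ssaPkg := prog.CreatePackage(pkg, []*ast.File{file}, info, false)   // step 2
	ssaPkg.Build()                                                      // step 3
	fmt.Println(ssaPkg.Func("Hello").Signature)
}
```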
func NewProgram(fset *token.FileSet, mode BuilderMode) *Program { - prog := &Program{ + return &Program{ Fset: fset, imported: make(map[string]*Package), packages: make(map[*types.Package]*Package), - thunks: make(map[selectionKey]*Function), - bounds: make(map[boundsKey]*Function), mode: mode, canon: newCanonizer(), - ctxt: typeparams.NewContext(), - instances: make(map[*Function]*instanceSet), + ctxt: types.NewContext(), parameterized: tpWalker{seen: make(map[types.Type]bool)}, } - - h := typeutil.MakeHasher() // protected by methodsMu, in effect - prog.methodSets.SetHasher(h) - prog.runtimeTypes.SetHasher(h) - - return prog } // memberFromObject populates package pkg with a member for the // typechecker object obj. // // For objects from Go source code, syntax is the associated syntax -// tree (for funcs and vars only); it will be used during the build +// tree (for funcs and vars only) and goversion defines the +// appropriate interpretation; they will be used during the build // phase. -func memberFromObject(pkg *Package, obj types.Object, syntax ast.Node) { +func memberFromObject(pkg *Package, obj types.Object, syntax ast.Node, goversion string) { name := obj.Name() switch obj := obj.(type) { case *types.Builtin: @@ -58,9 +60,11 @@ func memberFromObject(pkg *Package, obj types.Object, syntax ast.Node) { } case *types.TypeName: - pkg.Members[name] = &Type{ - object: obj, - pkg: pkg, + if name != "_" { + pkg.Members[name] = &Type{ + object: obj, + pkg: pkg, + } } case *types.Const: @@ -70,7 +74,9 @@ func memberFromObject(pkg *Package, obj types.Object, syntax ast.Node) { pkg: pkg, } pkg.objects[obj] = c - pkg.Members[name] = c + if name != "_" { + pkg.Members[name] = c + } case *types.Var: g := &Global{ @@ -81,7 +87,9 @@ func memberFromObject(pkg *Package, obj types.Object, syntax ast.Node) { pos: obj.Pos(), } pkg.objects[obj] = g - pkg.Members[name] = g + if name != "_" { + pkg.Members[name] = g + } case *types.Func: sig := obj.Type().(*types.Signature) @@ -89,36 +97,10 @@ func memberFromObject(pkg *Package, obj types.Object, syntax ast.Node) { pkg.ninit++ name = fmt.Sprintf("init#%d", pkg.ninit) } - - // Collect type parameters if this is a generic function/method. - var tparams *typeparams.TypeParamList - if rtparams := typeparams.RecvTypeParams(sig); rtparams.Len() > 0 { - tparams = rtparams - } else if sigparams := typeparams.ForSignature(sig); sigparams.Len() > 0 { - tparams = sigparams - } - - fn := &Function{ - name: name, - object: obj, - Signature: sig, - syntax: syntax, - pos: obj.Pos(), - Pkg: pkg, - Prog: pkg.Prog, - typeparams: tparams, - info: pkg.info, - } - pkg.created.Add(fn) - if syntax == nil { - fn.Synthetic = "loaded from gc object file" - } - if tparams.Len() > 0 { - fn.Prog.createInstanceSet(fn) - } - + fn := createFunction(pkg.Prog, obj, name, syntax, pkg.info, goversion, &pkg.created) + fn.Pkg = pkg pkg.objects[obj] = fn - if sig.Recv() == nil { + if name != "_" && sig.Recv() == nil { pkg.Members[name] = fn // package-level function } @@ -127,45 +109,79 @@ func memberFromObject(pkg *Package, obj types.Object, syntax ast.Node) { } } +// createFunction creates a function or method. It supports both +// CreatePackage (with or without syntax) and the on-demand creation +// of methods in non-created packages based on their types.Func. +func createFunction(prog *Program, obj *types.Func, name string, syntax ast.Node, info *types.Info, goversion string, cr *creator) *Function { + sig := obj.Type().(*types.Signature) + + // Collect type parameters. 
+ var tparams *types.TypeParamList + if rtparams := sig.RecvTypeParams(); rtparams.Len() > 0 { + tparams = rtparams // method of generic type + } else if sigparams := sig.TypeParams(); sigparams.Len() > 0 { + tparams = sigparams // generic function + } + + /* declared function/method (from syntax or export data) */ + fn := &Function{ + name: name, + object: obj, + Signature: sig, + build: (*builder).buildFromSyntax, + syntax: syntax, + info: info, + goversion: goversion, + pos: obj.Pos(), + Pkg: nil, // may be set by caller + Prog: prog, + typeparams: tparams, + } + if fn.syntax == nil { + fn.Synthetic = "from type information" + fn.build = (*builder).buildParamsOnly + } + if tparams.Len() > 0 { + fn.generic = new(generic) + } + cr.Add(fn) + return fn +} + // membersFromDecl populates package pkg with members for each // typechecker object (var, func, const or type) associated with the // specified decl. -func membersFromDecl(pkg *Package, decl ast.Decl) { +func membersFromDecl(pkg *Package, decl ast.Decl, goversion string) { switch decl := decl.(type) { case *ast.GenDecl: // import, const, type or var switch decl.Tok { case token.CONST: for _, spec := range decl.Specs { for _, id := range spec.(*ast.ValueSpec).Names { - if !isBlankIdent(id) { - memberFromObject(pkg, pkg.info.Defs[id], nil) - } + memberFromObject(pkg, pkg.info.Defs[id], nil, "") } } case token.VAR: for _, spec := range decl.Specs { + for _, rhs := range spec.(*ast.ValueSpec).Values { + pkg.initVersion[rhs] = goversion + } for _, id := range spec.(*ast.ValueSpec).Names { - if !isBlankIdent(id) { - memberFromObject(pkg, pkg.info.Defs[id], spec) - } + memberFromObject(pkg, pkg.info.Defs[id], spec, goversion) } } case token.TYPE: for _, spec := range decl.Specs { id := spec.(*ast.TypeSpec).Name - if !isBlankIdent(id) { - memberFromObject(pkg, pkg.info.Defs[id], nil) - } + memberFromObject(pkg, pkg.info.Defs[id], nil, "") } } case *ast.FuncDecl: id := decl.Name - if !isBlankIdent(id) { - memberFromObject(pkg, pkg.info.Defs[id], decl) - } + memberFromObject(pkg, pkg.info.Defs[id], decl, goversion) } } @@ -182,7 +198,7 @@ func (c *creator) Add(fn *Function) { func (c *creator) At(i int) *Function { return (*c)[i] } func (c *creator) Len() int { return len(*c) } -// CreatePackage constructs and returns an SSA Package from the +// CreatePackage creates and returns an SSA Package from the // specified type-checked, error-free file ASTs, and populates its // Members mapping. // @@ -190,36 +206,48 @@ func (c *creator) Len() int { return len(*c) } // subsequent call to ImportedPackage(pkg.Path()). // // The real work of building SSA form for each function is not done -// until a subsequent call to Package.Build(). +// until a subsequent call to Package.Build. +// +// CreatePackage should not be called after building any package in +// the program. func (prog *Program) CreatePackage(pkg *types.Package, files []*ast.File, info *types.Info, importable bool) *Package { + // TODO(adonovan): assert that no package has yet been built. + if pkg == nil { + panic("nil pkg") // otherwise pkg.Scope below returns types.Universe! + } p := &Package{ Prog: prog, Members: make(map[string]Member), objects: make(map[types.Object]Member), Pkg: pkg, - info: info, // transient (CREATE and BUILD phases) - files: files, // transient (CREATE and BUILD phases) + syntax: info != nil, + // transient values (cleared after Package.Build) + info: info, + files: files, + initVersion: make(map[ast.Expr]string), } - // Add init() function. 
+ /* synthesized package initializer */ p.init = &Function{ name: "init", Signature: new(types.Signature), Synthetic: "package initializer", Pkg: p, Prog: prog, + build: (*builder).buildPackageInit, info: p.info, + goversion: "", // See Package.build for details. } p.Members[p.init.name] = p.init p.created.Add(p.init) - // CREATE phase. // Allocate all package members: vars, funcs, consts and types. if len(files) > 0 { // Go source package. for _, file := range files { + goversion := versions.Lang(versions.FileVersions(p.info, file)) for _, decl := range file.Decls { - membersFromDecl(p, decl) + membersFromDecl(p, decl, goversion) } } } else { @@ -229,11 +257,11 @@ func (prog *Program) CreatePackage(pkg *types.Package, files []*ast.File, info * scope := p.Pkg.Scope() for _, name := range scope.Names() { obj := scope.Lookup(name) - memberFromObject(p, obj, nil) + memberFromObject(p, obj, nil, "") if obj, ok := obj.(*types.TypeName); ok { if named, ok := obj.Type().(*types.Named); ok { for i, n := 0, named.NumMethods(); i < n; i++ { - memberFromObject(p, named.Method(i), nil) + memberFromObject(p, named.Method(i), nil, "") } } } @@ -271,8 +299,8 @@ func (prog *Program) CreatePackage(pkg *types.Package, files []*ast.File, info * // printMu serializes printing of Packages/Functions to stdout. var printMu sync.Mutex -// AllPackages returns a new slice containing all packages in the -// program prog in unspecified order. +// AllPackages returns a new slice containing all packages created by +// prog.CreatePackage in in unspecified order. func (prog *Program) AllPackages() []*Package { pkgs := make([]*Package, 0, len(prog.packages)) for _, pkg := range prog.packages { @@ -294,6 +322,10 @@ func (prog *Program) AllPackages() []*Package { // false---yet this function remains very convenient. // Clients should use (*Program).Package instead where possible. // SSA doesn't really need a string-keyed map of packages. +// +// Furthermore, the graph of packages may contain multiple variants +// (e.g. "p" vs "p as compiled for q.test"), and each has a different +// view of its dependencies. func (prog *Program) ImportedPackage(path string) *Package { return prog.imported[path] } diff --git a/vendor/golang.org/x/tools/go/ssa/doc.go b/vendor/golang.org/x/tools/go/ssa/doc.go index afda476b36..3310b5509b 100644 --- a/vendor/golang.org/x/tools/go/ssa/doc.go +++ b/vendor/golang.org/x/tools/go/ssa/doc.go @@ -7,8 +7,6 @@ // static single-assignment (SSA) form intermediate representation // (IR) for the bodies of functions. // -// THIS INTERFACE IS EXPERIMENTAL AND IS LIKELY TO CHANGE. -// // For an introduction to SSA form, see // http://en.wikipedia.org/wiki/Static_single_assignment_form. // This page provides a broader reading list: @@ -21,15 +19,15 @@ // All looping, branching and switching constructs are replaced with // unstructured control flow. Higher-level control flow constructs // such as multi-way branch can be reconstructed as needed; see -// ssautil.Switches() for an example. +// [golang.org/x/tools/go/ssa/ssautil.Switches] for an example. // // The simplest way to create the SSA representation of a package is -// to load typed syntax trees using golang.org/x/tools/go/packages, then -// invoke the ssautil.Packages helper function. See Example_loadPackages -// and Example_loadWholeProgram for examples. 
-// The resulting ssa.Program contains all the packages and their +// to load typed syntax trees using [golang.org/x/tools/go/packages], then +// invoke the [golang.org/x/tools/go/ssa/ssautil.Packages] helper function. +// (See the package-level Examples named LoadPackages and LoadWholeProgram.) +// The resulting [ssa.Program] contains all the packages and their // members, but SSA code is not created for function bodies until a -// subsequent call to (*Package).Build or (*Program).Build. +// subsequent call to [Package.Build] or [Program.Build]. // // The builder initially builds a naive SSA form in which all local // variables are addresses of stack locations with explicit loads and @@ -41,13 +39,13 @@ // // The primary interfaces of this package are: // -// - Member: a named member of a Go package. -// - Value: an expression that yields a value. -// - Instruction: a statement that consumes values and performs computation. -// - Node: a Value or Instruction (emphasizing its membership in the SSA value graph) +// - [Member]: a named member of a Go package. +// - [Value]: an expression that yields a value. +// - [Instruction]: a statement that consumes values and performs computation. +// - [Node]: a [Value] or [Instruction] (emphasizing its membership in the SSA value graph) // -// A computation that yields a result implements both the Value and -// Instruction interfaces. The following table shows for each +// A computation that yields a result implements both the [Value] and +// [Instruction] interfaces. The following table shows for each // concrete type which of these interfaces it implements. // // Value? Instruction? Member? @@ -66,7 +64,6 @@ // *FieldAddr ✔ ✔ // *FreeVar ✔ // *Function ✔ ✔ (func) -// *GenericConvert ✔ ✔ // *Global ✔ ✔ (var) // *Go ✔ // *If ✔ @@ -80,6 +77,7 @@ // *MakeMap ✔ ✔ // *MakeSlice ✔ ✔ // *MapUpdate ✔ +// *MultiConvert ✔ ✔ // *NamedConst ✔ (const) // *Next ✔ ✔ // *Panic ✔ @@ -97,15 +95,15 @@ // *TypeAssert ✔ ✔ // *UnOp ✔ ✔ // -// Other key types in this package include: Program, Package, Function -// and BasicBlock. +// Other key types in this package include: [Program], [Package], [Function] +// and [BasicBlock]. // // The program representation constructed by this package is fully // resolved internally, i.e. it does not rely on the names of Values, // Packages, Functions, Types or BasicBlocks for the correct // interpretation of the program. Only the identities of objects and // the topology of the SSA and type graphs are semantically -// significant. (There is one exception: Ids, used to identify field +// significant. (There is one exception: [types.Id] values, which identify field // and method names, contain strings.) Avoidance of name-based // operations simplifies the implementation of subsequent passes and // can make them very efficient. Many objects are nonetheless named @@ -113,11 +111,9 @@ // either accurate or unambiguous. The public API exposes a number of // name-based maps for client convenience. // -// The ssa/ssautil package provides various utilities that depend only -// on the public API of this package. -// -// TODO(adonovan): Consider the exceptional control-flow implications -// of defer and recover(). +// The [golang.org/x/tools/go/ssa/ssautil] package provides various +// helper functions, for example to simplify loading a Go program into +// SSA form. 
// // TODO(adonovan): write a how-to document for all the various cases // of trying to determine corresponding elements across the four diff --git a/vendor/golang.org/x/tools/go/ssa/emit.go b/vendor/golang.org/x/tools/go/ssa/emit.go index 1731c79750..d77b4407a8 100644 --- a/vendor/golang.org/x/tools/go/ssa/emit.go +++ b/vendor/golang.org/x/tools/go/ssa/emit.go @@ -11,25 +11,60 @@ import ( "go/ast" "go/token" "go/types" - - "golang.org/x/tools/internal/typeparams" ) -// emitNew emits to f a new (heap Alloc) instruction allocating an -// object of type typ. pos is the optional source location. -func emitNew(f *Function, typ types.Type, pos token.Pos) *Alloc { - v := &Alloc{Heap: true} +// emitAlloc emits to f a new Alloc instruction allocating a variable +// of type typ. +// +// The caller must set Alloc.Heap=true (for an heap-allocated variable) +// or add the Alloc to f.Locals (for a frame-allocated variable). +// +// During building, a variable in f.Locals may have its Heap flag +// set when it is discovered that its address is taken. +// These Allocs are removed from f.Locals at the end. +// +// The builder should generally call one of the emit{New,Local,LocalVar} wrappers instead. +func emitAlloc(f *Function, typ types.Type, pos token.Pos, comment string) *Alloc { + v := &Alloc{Comment: comment} v.setType(types.NewPointer(typ)) v.setPos(pos) f.emit(v) return v } +// emitNew emits to f a new Alloc instruction heap-allocating a +// variable of type typ. pos is the optional source location. +func emitNew(f *Function, typ types.Type, pos token.Pos, comment string) *Alloc { + alloc := emitAlloc(f, typ, pos, comment) + alloc.Heap = true + return alloc +} + +// emitLocal creates a local var for (t, pos, comment) and +// emits an Alloc instruction for it. +// +// (Use this function or emitNew for synthetic variables; +// for source-level variables, use emitLocalVar.) +func emitLocal(f *Function, t types.Type, pos token.Pos, comment string) *Alloc { + local := emitAlloc(f, t, pos, comment) + f.Locals = append(f.Locals, local) + return local +} + +// emitLocalVar creates a local var for v and emits an Alloc instruction for it. +// Subsequent calls to f.lookup(v) return it. +// It applies the appropriate generic instantiation to the type. +func emitLocalVar(f *Function, v *types.Var) *Alloc { + alloc := emitLocal(f, f.typ(v.Type()), v.Pos(), v.Name()) + f.vars[v] = alloc + return alloc +} + // emitLoad emits to f an instruction to load the address addr into a // new temporary, and returns the value so defined. func emitLoad(f *Function, addr Value) *UnOp { v := &UnOp{Op: token.MUL, X: addr} - v.setType(deref(typeparams.CoreType(addr.Type()))) + v.setType(mustDeref(addr.Type())) f.emit(v) return v } @@ -103,7 +138,7 @@ func emitArith(f *Function, op token.Token, x, y Value, t types.Type, pos token. } // emitCompare emits to f code compute the boolean result of -// comparison comparison 'x op y'. +// comparison 'x op y'. func emitCompare(f *Function, op token.Token, x, y Value, pos token.Pos) Value { xt := x.Type().Underlying() yt := y.Type().Underlying() @@ -150,7 +185,7 @@ func emitCompare(f *Function, op token.Token, x, y Value, pos token.Pos) Value { // Precondition: neither argument is a named type. func isValuePreserving(ut_src, ut_dst types.Type) bool { // Identical underlying types? 
- if structTypesIdentical(ut_dst, ut_src) { + if types.IdenticalIgnoreTags(ut_dst, ut_src) { return true } @@ -208,6 +243,13 @@ func emitConv(f *Function, val Value, typ types.Type) Value { val = emitConv(f, val, types.Default(ut_src)) } + // Record the types of operands to MakeInterface, if + // non-parameterized, as they are the set of runtime types. + t := val.Type() + if f.typeparams.Len() == 0 || !f.Prog.parameterized.isParameterized(t) { + addRuntimeType(f.Prog, t) + } + mi := &MakeInterface{X: val} mi.setType(typ) return f.emit(mi) @@ -372,9 +414,10 @@ func emitTypeCoercion(f *Function, v Value, typ types.Type) Value { // emitStore emits to f an instruction to store value val at location // addr, applying implicit conversions as required by assignability rules. func emitStore(f *Function, addr, val Value, pos token.Pos) *Store { + typ := mustDeref(addr.Type()) s := &Store{ Addr: addr, - Val: emitConv(f, val, deref(addr.Type())), + Val: emitConv(f, val, typ), pos: pos, } f.emit(s) @@ -477,9 +520,8 @@ func emitTailCall(f *Function, call *Call) { // value of a field. func emitImplicitSelections(f *Function, v Value, indices []int, pos token.Pos) Value { for _, index := range indices { - fld := typeparams.CoreType(deref(v.Type())).(*types.Struct).Field(index) - - if isPointer(v.Type()) { + if st, vptr := deref(v.Type()); vptr { + fld := fieldOf(st, index) instr := &FieldAddr{ X: v, Field: index, @@ -488,10 +530,11 @@ func emitImplicitSelections(f *Function, v Value, indices []int, pos token.Pos) instr.setType(types.NewPointer(fld.Type())) v = f.emit(instr) // Load the field's value iff indirectly embedded. - if isPointer(fld.Type()) { + if _, fldptr := deref(fld.Type()); fldptr { v = emitLoad(f, v) } } else { + fld := fieldOf(v.Type(), index) instr := &Field{ X: v, Field: index, @@ -511,8 +554,8 @@ func emitImplicitSelections(f *Function, v Value, indices []int, pos token.Pos) // field's value. // Ident id is used for position and debug info. func emitFieldSelection(f *Function, v Value, index int, wantAddr bool, id *ast.Ident) Value { - fld := typeparams.CoreType(deref(v.Type())).(*types.Struct).Field(index) - if isPointer(v.Type()) { + if st, vptr := deref(v.Type()); vptr { + fld := fieldOf(st, index) instr := &FieldAddr{ X: v, Field: index, @@ -525,6 +568,7 @@ func emitFieldSelection(f *Function, v Value, index int, wantAddr bool, id *ast. v = emitLoad(f, v) } } else { + fld := fieldOf(v.Type(), index) instr := &Field{ X: v, Field: index, @@ -537,17 +581,6 @@ func emitFieldSelection(f *Function, v Value, index int, wantAddr bool, id *ast. return v } -// zeroValue emits to f code to produce a zero value of type t, -// and returns it. -func zeroValue(f *Function, t types.Type) Value { - switch t.Underlying().(type) { - case *types.Struct, *types.Array: - return emitLoad(f, f.addLocal(t, token.NoPos)) - default: - return zeroConst(t) - } -} - // createRecoverBlock emits to f a block of code to return after a // recovered panic, and sets f.Recover to it. // @@ -577,7 +610,7 @@ func createRecoverBlock(f *Function) { T := R.At(i).Type() // Return zero value of each result type. 
- results = append(results, zeroValue(f, T)) + results = append(results, zeroConst(T)) } } f.emit(&Return{Results: results}) diff --git a/vendor/golang.org/x/tools/go/ssa/func.go b/vendor/golang.org/x/tools/go/ssa/func.go index 57f5f718f7..22f878d4ed 100644 --- a/vendor/golang.org/x/tools/go/ssa/func.go +++ b/vendor/golang.org/x/tools/go/ssa/func.go @@ -10,13 +10,10 @@ import ( "bytes" "fmt" "go/ast" - "go/token" "go/types" "io" "os" "strings" - - "golang.org/x/tools/internal/typeparams" ) // Like ObjectOf, but panics instead of returning nil. @@ -46,7 +43,7 @@ func (f *Function) typ(T types.Type) types.Type { // If id is an Instance, returns info.Instances[id].Type. // Otherwise returns f.typeOf(id). func (f *Function) instanceType(id *ast.Ident) types.Type { - if t, ok := typeparams.GetInstances(f.info)[id]; ok { + if t, ok := f.info.Instances[id]; ok { return t.Type } return f.typeOf(id) @@ -108,52 +105,40 @@ type lblock struct { // labelledBlock returns the branch target associated with the // specified label, creating it if needed. func (f *Function) labelledBlock(label *ast.Ident) *lblock { - obj := f.objectOf(label) + obj := f.objectOf(label).(*types.Label) lb := f.lblocks[obj] if lb == nil { lb = &lblock{_goto: f.newBasicBlock(label.Name)} if f.lblocks == nil { - f.lblocks = make(map[types.Object]*lblock) + f.lblocks = make(map[*types.Label]*lblock) } f.lblocks[obj] = lb } return lb } -// addParam adds a (non-escaping) parameter to f.Params of the -// specified name, type and source position. -func (f *Function) addParam(name string, typ types.Type, pos token.Pos) *Parameter { - v := &Parameter{ - name: name, - typ: typ, - pos: pos, - parent: f, - } - f.Params = append(f.Params, v) - return v -} - -func (f *Function) addParamObj(obj types.Object) *Parameter { - name := obj.Name() +// addParamVar adds a parameter to f.Params. +func (f *Function) addParamVar(v *types.Var) *Parameter { + name := v.Name() if name == "" { name = fmt.Sprintf("arg%d", len(f.Params)) } - param := f.addParam(name, f.typ(obj.Type()), obj.Pos()) - param.object = obj + param := &Parameter{ + name: name, + object: v, + typ: f.typ(v.Type()), + parent: f, + } + f.Params = append(f.Params, param) return param } // addSpilledParam declares a parameter that is pre-spilled to the // stack; the function body will load/store the spilled location. // Subsequent lifting will eliminate spills where possible. -func (f *Function) addSpilledParam(obj types.Object) { - param := f.addParamObj(obj) - spill := &Alloc{Comment: obj.Name()} - spill.setType(types.NewPointer(param.Type())) - spill.setPos(obj.Pos()) - f.objects[obj] = spill - f.Locals = append(f.Locals, spill) - f.emit(spill) +func (f *Function) addSpilledParam(obj *types.Var) { + param := f.addParamVar(obj) + spill := emitLocalVar(f, obj) f.emit(&Store{Addr: spill, Val: param}) } @@ -161,7 +146,7 @@ func (f *Function) addSpilledParam(obj types.Object) { // Precondition: f.Type() already set. func (f *Function) startBody() { f.currentBlock = f.newBasicBlock("entry") - f.objects = make(map[types.Object]Value) // needed for some synthetics, e.g. init + f.vars = make(map[*types.Var]Value) // needed for some synthetics, e.g. 
init } // createSyntacticParams populates f.Params and generates code (spills @@ -177,11 +162,11 @@ func (f *Function) createSyntacticParams(recv *ast.FieldList, functype *ast.Func if recv != nil { for _, field := range recv.List { for _, n := range field.Names { - f.addSpilledParam(f.info.Defs[n]) + f.addSpilledParam(identVar(f, n)) } // Anonymous receiver? No need to spill. if field.Names == nil { - f.addParamObj(f.Signature.Recv()) + f.addParamVar(f.Signature.Recv()) } } } @@ -191,11 +176,11 @@ func (f *Function) createSyntacticParams(recv *ast.FieldList, functype *ast.Func n := len(f.Params) // 1 if has recv, 0 otherwise for _, field := range functype.Params.List { for _, n := range field.Names { - f.addSpilledParam(f.info.Defs[n]) + f.addSpilledParam(identVar(f, n)) } // Anonymous parameter? No need to spill. if field.Names == nil { - f.addParamObj(f.Signature.Params().At(len(f.Params) - n)) + f.addParamVar(f.Signature.Params().At(len(f.Params) - n)) } } } @@ -205,7 +190,8 @@ func (f *Function) createSyntacticParams(recv *ast.FieldList, functype *ast.Func for _, field := range functype.Results.List { // Implicit "var" decl of locals for named results. for _, n := range field.Names { - f.namedResults = append(f.namedResults, f.addLocalForIdent(n)) + namedResult := emitLocalVar(f, identVar(f, n)) + f.namedResults = append(f.namedResults, namedResult) } } } @@ -250,49 +236,14 @@ func buildReferrers(f *Function) { } } -// mayNeedRuntimeTypes returns all of the types in the body of fn that might need runtime types. -// -// EXCLUSIVE_LOCKS_ACQUIRED(meth.Prog.methodsMu) -func mayNeedRuntimeTypes(fn *Function) []types.Type { - // Collect all types that may need rtypes, i.e. those that flow into an interface. - var ts []types.Type - for _, bb := range fn.Blocks { - for _, instr := range bb.Instrs { - if mi, ok := instr.(*MakeInterface); ok { - ts = append(ts, mi.X.Type()) - } - } - } - - // Types that contain a parameterized type are considered to not be runtime types. - if fn.typeparams.Len() == 0 { - return ts // No potentially parameterized types. - } - // Filter parameterized types, in place. - fn.Prog.methodsMu.Lock() - defer fn.Prog.methodsMu.Unlock() - filtered := ts[:0] - for _, t := range ts { - if !fn.Prog.parameterized.isParameterized(t) { - filtered = append(filtered, t) - } - } - return filtered -} - // finishBody() finalizes the contents of the function after SSA code generation of its body. // // The function is not done being built until done() is called. func (f *Function) finishBody() { - f.objects = nil + f.vars = nil f.currentBlock = nil f.lblocks = nil - // Don't pin the AST in memory (except in debug mode). - if n := f.syntax; n != nil && !f.debugInfo() { - f.syntax = extentNode{n.Pos(), n.End()} - } - // Remove from f.Locals any Allocs that escape to the heap. j := 0 for _, l := range f.Locals { @@ -320,15 +271,15 @@ func (f *Function) finishBody() { lift(f) } - // clear remaining stateful variables + // clear remaining builder state f.namedResults = nil // (used by lifting) - f.info = nil f.subst = nil numberRegisters(f) // uses f.namedRegisters } -// After this, function is done with BUILD phase. +// done marks the building of f's SSA body complete, +// along with any nested functions, and optionally prints them. func (f *Function) done() { assert(f.parent == nil, "done called on an anonymous function") @@ -338,7 +289,7 @@ func (f *Function) done() { visit(anon) // anon is done building before f. 
} - f.built = true // function is done with BUILD phase + f.build = nil // function is built if f.Prog.mode&PrintFunctions != 0 { printMu.Lock() @@ -376,49 +327,35 @@ func (f *Function) removeNilBlocks() { // size of the instruction stream, and causes Functions to depend upon // the ASTs, potentially keeping them live in memory for longer. func (pkg *Package) SetDebugMode(debug bool) { - // TODO(adonovan): do we want ast.File granularity? pkg.debug = debug } // debugInfo reports whether debug info is wanted for this function. func (f *Function) debugInfo() bool { - return f.Pkg != nil && f.Pkg.debug -} - -// addNamedLocal creates a local variable, adds it to function f and -// returns it. Its name and type are taken from obj. Subsequent -// calls to f.lookup(obj) will return the same local. -func (f *Function) addNamedLocal(obj types.Object) *Alloc { - l := f.addLocal(obj.Type(), obj.Pos()) - l.Comment = obj.Name() - f.objects[obj] = l - return l -} - -func (f *Function) addLocalForIdent(id *ast.Ident) *Alloc { - return f.addNamedLocal(f.info.Defs[id]) -} - -// addLocal creates an anonymous local variable of type typ, adds it -// to function f and returns it. pos is the optional source location. -func (f *Function) addLocal(typ types.Type, pos token.Pos) *Alloc { - typ = f.typ(typ) - v := &Alloc{} - v.setType(types.NewPointer(typ)) - v.setPos(pos) - f.Locals = append(f.Locals, v) - f.emit(v) - return v + // debug info for instantiations follows the debug info of their origin. + p := f.declaredPackage() + return p != nil && p.debug } // lookup returns the address of the named variable identified by obj // that is local to function f or one of its enclosing functions. // If escaping, the reference comes from a potentially escaping pointer // expression and the referent must be heap-allocated. -func (f *Function) lookup(obj types.Object, escaping bool) Value { - if v, ok := f.objects[obj]; ok { - if alloc, ok := v.(*Alloc); ok && escaping { - alloc.Heap = true +// We assume the referent is a *Alloc or *Phi. +// (The only Phis at this stage are those created directly by go1.22 "for" loops.) +func (f *Function) lookup(obj *types.Var, escaping bool) Value { + if v, ok := f.vars[obj]; ok { + if escaping { + switch v := v.(type) { + case *Alloc: + v.Heap = true + case *Phi: + for _, edge := range v.Edges { + if alloc, ok := edge.(*Alloc); ok { + alloc.Heap = true + } + } + } } return v // function-local var (address) } @@ -436,7 +373,7 @@ func (f *Function) lookup(obj types.Object, escaping bool) Value { outer: outer, parent: f, } - f.objects[obj] = v + f.vars[obj] = v f.FreeVars = append(f.FreeVars, v) return v } @@ -514,15 +451,15 @@ func (f *Function) relMethod(from *types.Package, recv types.Type) string { } // writeSignature writes to buf the signature sig in declaration syntax. 
-func writeSignature(buf *bytes.Buffer, from *types.Package, name string, sig *types.Signature, params []*Parameter) { +func writeSignature(buf *bytes.Buffer, from *types.Package, name string, sig *types.Signature) { buf.WriteString("func ") if recv := sig.Recv(); recv != nil { buf.WriteString("(") - if n := params[0].Name(); n != "" { - buf.WriteString(n) + if name := recv.Name(); name != "" { + buf.WriteString(name) buf.WriteString(" ") } - types.WriteType(buf, params[0].Type(), types.RelativeTo(from)) + types.WriteType(buf, recv.Type(), types.RelativeTo(from)) buf.WriteString(") ") } buf.WriteString(name) @@ -534,7 +471,7 @@ func writeSignature(buf *bytes.Buffer, from *types.Package, name string, sig *ty func (fn *Function) declaredPackage() *Package { switch { case fn.Pkg != nil: - return fn.Pkg // non-generic function + return fn.Pkg // non-generic function (does that follow??) case fn.topLevelOrigin != nil: return fn.topLevelOrigin.Pkg // instance of a named generic function case fn.parent != nil: @@ -594,10 +531,10 @@ func WriteFunction(buf *bytes.Buffer, f *Function) { if len(f.Locals) > 0 { buf.WriteString("# Locals:\n") for i, l := range f.Locals { - fmt.Fprintf(buf, "# % 3d:\t%s %s\n", i, l.Name(), relType(deref(l.Type()), from)) + fmt.Fprintf(buf, "# % 3d:\t%s %s\n", i, l.Name(), relType(mustDeref(l.Type()), from)) } } - writeSignature(buf, from, f.Name(), f.Signature, f.Params) + writeSignature(buf, from, f.Name(), f.Signature) buf.WriteString(":\n") if f.Blocks == nil { @@ -687,17 +624,11 @@ func (prog *Program) NewFunction(name string, sig *types.Signature, provenance s return &Function{Prog: prog, name: name, Signature: sig, Synthetic: provenance} } -type extentNode [2]token.Pos - -func (n extentNode) Pos() token.Pos { return n[0] } -func (n extentNode) End() token.Pos { return n[1] } - -// Syntax returns an ast.Node whose Pos/End methods provide the -// lexical extent of the function if it was defined by Go source code -// (f.Synthetic==""), or nil otherwise. -// -// If f was built with debug information (see Package.SetDebugRef), -// the result is the *ast.FuncDecl or *ast.FuncLit that declared the -// function. Otherwise, it is an opaque Node providing only position -// information; this avoids pinning the AST in memory. +// Syntax returns the function's syntax (*ast.Func{Decl,Lit) +// if it was produced from syntax. func (f *Function) Syntax() ast.Node { return f.syntax } + +// identVar returns the variable defined by id. +func identVar(fn *Function, id *ast.Ident) *types.Var { + return fn.info.Defs[id].(*types.Var) +} diff --git a/vendor/golang.org/x/tools/go/ssa/identical.go b/vendor/golang.org/x/tools/go/ssa/identical.go deleted file mode 100644 index e8026967be..0000000000 --- a/vendor/golang.org/x/tools/go/ssa/identical.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.8 -// +build go1.8 - -package ssa - -import "go/types" - -var structTypesIdentical = types.IdenticalIgnoreTags diff --git a/vendor/golang.org/x/tools/go/ssa/identical_17.go b/vendor/golang.org/x/tools/go/ssa/identical_17.go deleted file mode 100644 index 575aa5dfc1..0000000000 --- a/vendor/golang.org/x/tools/go/ssa/identical_17.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build !go1.8 -// +build !go1.8 - -package ssa - -import "go/types" - -var structTypesIdentical = types.Identical diff --git a/vendor/golang.org/x/tools/go/ssa/instantiate.go b/vendor/golang.org/x/tools/go/ssa/instantiate.go index f6b2533f24..c155f6736a 100644 --- a/vendor/golang.org/x/tools/go/ssa/instantiate.go +++ b/vendor/golang.org/x/tools/go/ssa/instantiate.go @@ -6,130 +6,60 @@ package ssa import ( "fmt" - "go/ast" "go/types" + "sync" "golang.org/x/tools/internal/typeparams" ) -// _Instances returns all of the instances generated by runtime types for this function in an unspecified order. -// -// Thread-safe. -// -// This is an experimental interface! It may change without warning. -func (prog *Program) _Instances(fn *Function) []*Function { - if fn.typeparams.Len() == 0 || len(fn.typeargs) > 0 { - return nil - } - - prog.methodsMu.Lock() - defer prog.methodsMu.Unlock() - return prog.instances[fn].list() -} - -// A set of instantiations of a generic function fn. -type instanceSet struct { - fn *Function // fn.typeparams.Len() > 0 and len(fn.typeargs) == 0. - instances map[*typeList]*Function // canonical type arguments to an instance. - syntax *ast.FuncDecl // fn.syntax copy for instantiating after fn is done. nil on synthetic packages. - info *types.Info // fn.pkg.info copy for building after fn is done.. nil on synthetic packages. - - // TODO(taking): Consider ways to allow for clearing syntax and info when done building. - // May require a public API change as MethodValue can request these be built after prog.Build() is done. -} - -func (insts *instanceSet) list() []*Function { - if insts == nil { - return nil - } - - fns := make([]*Function, 0, len(insts.instances)) - for _, fn := range insts.instances { - fns = append(fns, fn) - } - return fns +// A generic records information about a generic origin function, +// including a cache of existing instantiations. +type generic struct { + instancesMu sync.Mutex + instances map[*typeList]*Function // canonical type arguments to an instance. } -// createInstanceSet adds a new instanceSet for a generic function fn if one does not exist. +// instance returns a Function that is the instantiation of generic +// origin function fn with the type arguments targs. // -// Precondition: fn is a package level declaration (function or method). +// Any created instance is added to cr. // -// EXCLUSIVE_LOCKS_ACQUIRED(prog.methodMu) -func (prog *Program) createInstanceSet(fn *Function) { - assert(fn.typeparams.Len() > 0 && len(fn.typeargs) == 0, "Can only create instance sets for generic functions") - - prog.methodsMu.Lock() - defer prog.methodsMu.Unlock() - - syntax, _ := fn.syntax.(*ast.FuncDecl) - assert((syntax == nil) == (fn.syntax == nil), "fn.syntax is either nil or a *ast.FuncDecl") - - if _, ok := prog.instances[fn]; !ok { - prog.instances[fn] = &instanceSet{ - fn: fn, - syntax: syntax, - info: fn.info, +// Acquires fn.generic.instancesMu. +func (fn *Function) instance(targs []types.Type, cr *creator) *Function { + key := fn.Prog.canon.List(targs) + + gen := fn.generic + + gen.instancesMu.Lock() + defer gen.instancesMu.Unlock() + inst, ok := gen.instances[key] + if !ok { + inst = createInstance(fn, targs, cr) + if gen.instances == nil { + gen.instances = make(map[*typeList]*Function) } + gen.instances[key] = inst } + return inst } -// needsInstance returns a Function that is the instantiation of fn with the type arguments targs. -// -// Any CREATEd instance is added to cr. 
-// -// EXCLUSIVE_LOCKS_ACQUIRED(prog.methodMu) -func (prog *Program) needsInstance(fn *Function, targs []types.Type, cr *creator) *Function { - prog.methodsMu.Lock() - defer prog.methodsMu.Unlock() - - return prog.lookupOrCreateInstance(fn, targs, cr) -} - -// lookupOrCreateInstance returns a Function that is the instantiation of fn with the type arguments targs. -// -// Any CREATEd instance is added to cr. -// -// EXCLUSIVE_LOCKS_REQUIRED(prog.methodMu) -func (prog *Program) lookupOrCreateInstance(fn *Function, targs []types.Type, cr *creator) *Function { - return prog.instances[fn].lookupOrCreate(targs, &prog.parameterized, cr) -} - -// lookupOrCreate returns the instantiation of insts.fn using targs. +// createInstance returns the instantiation of generic function fn using targs. // If the instantiation is created, this is added to cr. -func (insts *instanceSet) lookupOrCreate(targs []types.Type, parameterized *tpWalker, cr *creator) *Function { - if insts.instances == nil { - insts.instances = make(map[*typeList]*Function) - } - - fn := insts.fn +// +// Requires fn.generic.instancesMu. +func createInstance(fn *Function, targs []types.Type, cr *creator) *Function { prog := fn.Prog - // canonicalize on a tuple of targs. Sig is not unique. - // - // func A[T any]() { - // var x T - // fmt.Println("%T", x) - // } - key := prog.canon.List(targs) - if inst, ok := insts.instances[key]; ok { - return inst - } - - // CREATE instance/instantiation wrapper - var syntax ast.Node - if insts.syntax != nil { - syntax = insts.syntax - } - + // Compute signature. var sig *types.Signature var obj *types.Func if recv := fn.Signature.Recv(); recv != nil { // method - m := fn.object.(*types.Func) - obj = prog.canon.instantiateMethod(m, targs, prog.ctxt) + obj = prog.canon.instantiateMethod(fn.object, targs, prog.ctxt) sig = obj.Type().(*types.Signature) } else { - instSig, err := typeparams.Instantiate(prog.ctxt, fn.Signature, targs, false) + // function + instSig, err := types.Instantiate(prog.ctxt, fn.Signature, targs, false) if err != nil { panic(err) } @@ -137,40 +67,48 @@ func (insts *instanceSet) lookupOrCreate(targs []types.Type, parameterized *tpWa if !ok { panic("Instantiate of a Signature returned a non-signature") } - obj = fn.object.(*types.Func) // instantiation does not exist yet + obj = fn.object // instantiation does not exist yet sig = prog.canon.Type(instance).(*types.Signature) } - var synthetic string - var subst *subster - - concrete := !parameterized.anyParameterized(targs) - - if prog.mode&InstantiateGenerics != 0 && concrete { + // Choose strategy (instance or wrapper). 
+ var ( + synthetic string + subst *subster + build buildFunc + ) + if prog.mode&InstantiateGenerics != 0 && !prog.parameterized.anyParameterized(targs) { synthetic = fmt.Sprintf("instance of %s", fn.Name()) - subst = makeSubster(prog.ctxt, fn.typeparams, targs, false) + if fn.syntax != nil { + scope := typeparams.OriginMethod(obj).Scope() + subst = makeSubster(prog.ctxt, scope, fn.typeparams, targs, false) + build = (*builder).buildFromSyntax + } else { + build = (*builder).buildParamsOnly + } } else { synthetic = fmt.Sprintf("instantiation wrapper of %s", fn.Name()) + build = (*builder).buildInstantiationWrapper } - name := fmt.Sprintf("%s%s", fn.Name(), targs) // may not be unique + /* generic instance or instantiation wrapper */ instance := &Function{ - name: name, + name: fmt.Sprintf("%s%s", fn.Name(), targs), // may not be unique object: obj, Signature: sig, Synthetic: synthetic, - syntax: syntax, + syntax: fn.syntax, // \ + info: fn.info, // } empty for non-created packages + goversion: fn.goversion, // / + build: build, topLevelOrigin: fn, pos: obj.Pos(), Pkg: nil, Prog: fn.Prog, typeparams: fn.typeparams, // share with origin typeargs: targs, - info: insts.info, // on synthetic packages info is nil. subst: subst, } - cr.Add(instance) - insts.instances[key] = instance return instance } diff --git a/vendor/golang.org/x/tools/go/ssa/lift.go b/vendor/golang.org/x/tools/go/ssa/lift.go index 945536bbbf..da49fe9f17 100644 --- a/vendor/golang.org/x/tools/go/ssa/lift.go +++ b/vendor/golang.org/x/tools/go/ssa/lift.go @@ -41,11 +41,8 @@ package ssa import ( "fmt" "go/token" - "go/types" "math/big" "os" - - "golang.org/x/tools/internal/typeparams" ) // If true, show diagnostic information at each step of lifting. @@ -106,9 +103,14 @@ func buildDomFrontier(fn *Function) domFrontier { } func removeInstr(refs []Instruction, instr Instruction) []Instruction { + return removeInstrsIf(refs, func(i Instruction) bool { return i == instr }) +} + +func removeInstrsIf(refs []Instruction, p func(Instruction) bool) []Instruction { + // TODO(taking): replace with go1.22 slices.DeleteFunc. i := 0 for _, ref := range refs { - if ref == instr { + if p(ref) { continue } refs[i] = ref @@ -383,12 +385,6 @@ type newPhiMap map[*BasicBlock][]newPhi // // fresh is a source of fresh ids for phi nodes. func liftAlloc(df domFrontier, alloc *Alloc, newPhis newPhiMap, fresh *int) bool { - // TODO(taking): zero constants of aggregated types can now be lifted. - switch deref(alloc.Type()).Underlying().(type) { - case *types.Array, *types.Struct, *typeparams.TypeParam: - return false - } - // Don't lift named return values in functions that defer // calls that may recover from panic. 
if fn := alloc.Parent(); fn.Recover != nil { @@ -469,7 +465,7 @@ func liftAlloc(df domFrontier, alloc *Alloc, newPhis newPhiMap, fresh *int) bool *fresh++ phi.pos = alloc.Pos() - phi.setType(deref(alloc.Type())) + phi.setType(mustDeref(alloc.Type())) phi.block = v if debugLifting { fmt.Fprintf(os.Stderr, "\tplace %s = %s at block %s\n", phi.Name(), phi, v) @@ -514,7 +510,7 @@ func replaceAll(x, y Value) { func renamed(renaming []Value, alloc *Alloc) Value { v := renaming[alloc.index] if v == nil { - v = zeroConst(deref(alloc.Type())) + v = zeroConst(mustDeref(alloc.Type())) renaming[alloc.index] = v } return v diff --git a/vendor/golang.org/x/tools/go/ssa/lvalue.go b/vendor/golang.org/x/tools/go/ssa/lvalue.go index 51122b8e85..186cfcae70 100644 --- a/vendor/golang.org/x/tools/go/ssa/lvalue.go +++ b/vendor/golang.org/x/tools/go/ssa/lvalue.go @@ -25,7 +25,7 @@ type lvalue interface { // An address is an lvalue represented by a true pointer. type address struct { - addr Value + addr Value // must have a pointer core type. pos token.Pos // source position expr ast.Expr // source syntax of the value (not address) [debug mode] } @@ -52,7 +52,7 @@ func (a *address) address(fn *Function) Value { } func (a *address) typ() types.Type { - return deref(a.addr.Type()) + return mustDeref(a.addr.Type()) } // An element is an lvalue represented by m[k], the location of an diff --git a/vendor/golang.org/x/tools/go/ssa/methods.go b/vendor/golang.org/x/tools/go/ssa/methods.go index 4185618cdd..4797b39286 100644 --- a/vendor/golang.org/x/tools/go/ssa/methods.go +++ b/vendor/golang.org/x/tools/go/ssa/methods.go @@ -10,54 +10,124 @@ import ( "fmt" "go/types" + "golang.org/x/tools/go/types/typeutil" "golang.org/x/tools/internal/typeparams" ) // MethodValue returns the Function implementing method sel, building -// wrapper methods on demand. It returns nil if sel denotes an -// abstract (interface or parameterized) method. +// wrapper methods on demand. It returns nil if sel denotes an +// interface or generic method. // // Precondition: sel.Kind() == MethodVal. // // Thread-safe. // -// EXCLUSIVE_LOCKS_ACQUIRED(prog.methodsMu) +// Acquires prog.methodsMu. func (prog *Program) MethodValue(sel *types.Selection) *Function { if sel.Kind() != types.MethodVal { panic(fmt.Sprintf("MethodValue(%s) kind != MethodVal", sel)) } T := sel.Recv() if types.IsInterface(T) { - return nil // abstract method (interface, possibly type param) + return nil // interface method or type parameter } + + if prog.parameterized.isParameterized(T) { + return nil // generic method + } + if prog.mode&LogSource != 0 { defer logStack("MethodValue %s %v", T, sel)() } - var m *Function - b := builder{created: &creator{}} + var cr creator - prog.methodsMu.Lock() - // Checks whether a type param is reachable from T. - // This is an expensive check. May need to be optimized later. - if !prog.parameterized.isParameterized(T) { - m = prog.addMethod(prog.createMethodSet(T), sel, b.created) + m := func() *Function { + prog.methodsMu.Lock() + defer prog.methodsMu.Unlock() + + // Get or create SSA method set. + mset, ok := prog.methodSets.At(T).(*methodSet) + if !ok { + mset = &methodSet{mapping: make(map[string]*Function)} + prog.methodSets.Set(T, mset) + } + + // Get or create SSA method. 
+ id := sel.Obj().Id() + fn, ok := mset.mapping[id] + if !ok { + obj := sel.Obj().(*types.Func) + _, ptrObj := deptr(recvType(obj)) + _, ptrRecv := deptr(T) + needsPromotion := len(sel.Index()) > 1 + needsIndirection := !ptrObj && ptrRecv + if needsPromotion || needsIndirection { + fn = createWrapper(prog, toSelection(sel), &cr) + } else { + fn = prog.objectMethod(obj, &cr) + } + if fn.Signature.Recv() == nil { + panic(fn) + } + mset.mapping[id] = fn + } + + return fn + }() + + b := builder{created: &cr} + b.iterate() + + return m +} + +// objectMethod returns the Function for a given method symbol. +// The symbol may be an instance of a generic function. It need not +// belong to an existing SSA package created by a call to +// prog.CreatePackage. +// +// objectMethod panics if the function is not a method. +// +// Acquires prog.objectMethodsMu. +func (prog *Program) objectMethod(obj *types.Func, cr *creator) *Function { + sig := obj.Type().(*types.Signature) + if sig.Recv() == nil { + panic("not a method: " + obj.String()) } - prog.methodsMu.Unlock() - if m == nil { - return nil // abstract method (generic) + // Belongs to a created package? + if fn := prog.FuncValue(obj); fn != nil { + return fn } - for !b.done() { - b.buildCreated() - b.needsRuntimeTypes() + + // Instantiation of generic? + if originObj := typeparams.OriginMethod(obj); originObj != obj { + origin := prog.objectMethod(originObj, cr) + assert(origin.typeparams.Len() > 0, "origin is not generic") + targs := receiverTypeArgs(obj) + return origin.instance(targs, cr) } - return m + + // Consult/update cache of methods created from types.Func. + prog.objectMethodsMu.Lock() + defer prog.objectMethodsMu.Unlock() + fn, ok := prog.objectMethods[obj] + if !ok { + fn = createFunction(prog, obj, obj.Name(), nil, nil, "", cr) + fn.Synthetic = "from type information (on demand)" + + if prog.objectMethods == nil { + prog.objectMethods = make(map[*types.Func]*Function) + } + prog.objectMethods[obj] = fn + } + return fn } // LookupMethod returns the implementation of the method of type T // identified by (pkg, name). It returns nil if the method exists but -// is abstract, and panics if T has no such method. +// is an interface method or generic method, and panics if T has no such method. func (prog *Program) LookupMethod(T types.Type, pkg *types.Package, name string) *Function { sel := prog.MethodSets.MethodSet(T).Lookup(pkg, name) if sel == nil { @@ -68,205 +138,136 @@ func (prog *Program) LookupMethod(T types.Type, pkg *types.Package, name string) // methodSet contains the (concrete) methods of a concrete type (non-interface, non-parameterized). type methodSet struct { - mapping map[string]*Function // populated lazily - complete bool // mapping contains all methods -} - -// Precondition: T is a concrete type, e.g. !isInterface(T) and not parameterized. -// EXCLUSIVE_LOCKS_REQUIRED(prog.methodsMu) -func (prog *Program) createMethodSet(T types.Type) *methodSet { - if prog.mode&SanityCheckFunctions != 0 { - if types.IsInterface(T) || prog.parameterized.isParameterized(T) { - panic("type is interface or parameterized") - } - } - mset, ok := prog.methodSets.At(T).(*methodSet) - if !ok { - mset = &methodSet{mapping: make(map[string]*Function)} - prog.methodSets.Set(T, mset) - } - return mset -} - -// Adds any created functions to cr. -// Precondition: T is a concrete type, e.g. !isInterface(T) and not parameterized. 
-// EXCLUSIVE_LOCKS_REQUIRED(prog.methodsMu) -func (prog *Program) addMethod(mset *methodSet, sel *types.Selection, cr *creator) *Function { - if sel.Kind() == types.MethodExpr { - panic(sel) - } - id := sel.Obj().Id() - fn := mset.mapping[id] - if fn == nil { - sel := toSelection(sel) - obj := sel.obj.(*types.Func) - - needsPromotion := len(sel.index) > 1 - needsIndirection := !isPointer(recvType(obj)) && isPointer(sel.recv) - if needsPromotion || needsIndirection { - fn = makeWrapper(prog, sel, cr) - } else { - fn = prog.originFunc(obj) - if fn.typeparams.Len() > 0 { // instantiate - targs := receiverTypeArgs(obj) - fn = prog.lookupOrCreateInstance(fn, targs, cr) - } - } - if fn.Signature.Recv() == nil { - panic(fn) // missing receiver - } - mset.mapping[id] = fn - } - return fn + mapping map[string]*Function // populated lazily } -// RuntimeTypes returns a new unordered slice containing all -// concrete types in the program for which a complete (non-empty) -// method set is required at run-time. +// RuntimeTypes returns a new unordered slice containing all types in +// the program for which a runtime type is required. +// +// A runtime type is required for any non-parameterized, non-interface +// type that is converted to an interface, or for any type (including +// interface types) derivable from one through reflection. +// +// The methods of such types may be reachable through reflection or +// interface calls even if they are never called directly. // // Thread-safe. // -// EXCLUSIVE_LOCKS_ACQUIRED(prog.methodsMu) +// Acquires prog.runtimeTypesMu. func (prog *Program) RuntimeTypes() []types.Type { - prog.methodsMu.Lock() - defer prog.methodsMu.Unlock() - - var res []types.Type - prog.methodSets.Iterate(func(T types.Type, v interface{}) { - if v.(*methodSet).complete { - res = append(res, T) - } - }) - return res -} - -// declaredFunc returns the concrete function/method denoted by obj. -// Panic ensues if there is none. -func (prog *Program) declaredFunc(obj *types.Func) *Function { - if v := prog.packageLevelMember(obj); v != nil { - return v.(*Function) - } - panic("no concrete method: " + obj.String()) + prog.runtimeTypesMu.Lock() + defer prog.runtimeTypesMu.Unlock() + return prog.runtimeTypes.Keys() } -// needMethodsOf ensures that runtime type information (including the -// complete method set) is available for the specified type T and all -// its subcomponents. -// -// needMethodsOf must be called for at least every type that is an -// operand of some MakeInterface instruction, and for the type of -// every exported package member. -// -// Adds any created functions to cr. -// -// Precondition: T is not a method signature (*Signature with Recv()!=nil). -// Precondition: T is not parameterized. -// -// Thread-safe. (Called via Package.build from multiple builder goroutines.) +// forEachReachable calls f for type T and each type reachable from +// its type through reflection. // -// TODO(adonovan): make this faster. It accounts for 20% of SSA build time. +// The function f must use memoization to break cycles and +// return false when the type has already been visited. // -// EXCLUSIVE_LOCKS_ACQUIRED(prog.methodsMu) -func (prog *Program) needMethodsOf(T types.Type, cr *creator) { - prog.methodsMu.Lock() - prog.needMethods(T, false, cr) - prog.methodsMu.Unlock() -} - -// Precondition: T is not a method signature (*Signature with Recv()!=nil). -// Precondition: T is not parameterized. -// Recursive case: skip => don't create methods for T. 
-// -// EXCLUSIVE_LOCKS_REQUIRED(prog.methodsMu) -func (prog *Program) needMethods(T types.Type, skip bool, cr *creator) { - // Each package maintains its own set of types it has visited. - if prevSkip, ok := prog.runtimeTypes.At(T).(bool); ok { - // needMethods(T) was previously called - if !prevSkip || skip { - return // already seen, with same or false 'skip' value - } - } - prog.runtimeTypes.Set(T, skip) - - tmset := prog.MethodSets.MethodSet(T) - - if !skip && !types.IsInterface(T) && tmset.Len() > 0 { - // Create methods of T. - mset := prog.createMethodSet(T) - if !mset.complete { - mset.complete = true - n := tmset.Len() - for i := 0; i < n; i++ { - prog.addMethod(mset, tmset.At(i), cr) +// TODO(adonovan): publish in typeutil and share with go/callgraph/rta. +func forEachReachable(msets *typeutil.MethodSetCache, T types.Type, f func(types.Type) bool) { + var visit func(T types.Type, skip bool) + visit = func(T types.Type, skip bool) { + if !skip { + if !f(T) { + return } } - } - - // Recursion over signatures of each method. - for i := 0; i < tmset.Len(); i++ { - sig := tmset.At(i).Type().(*types.Signature) - prog.needMethods(sig.Params(), false, cr) - prog.needMethods(sig.Results(), false, cr) - } - switch t := T.(type) { - case *types.Basic: - // nop + // Recursion over signatures of each method. + tmset := msets.MethodSet(T) + for i := 0; i < tmset.Len(); i++ { + sig := tmset.At(i).Type().(*types.Signature) + // It is tempting to call visit(sig, false) + // but, as noted in golang.org/cl/65450043, + // the Signature.Recv field is ignored by + // types.Identical and typeutil.Map, which + // is confusing at best. + // + // More importantly, the true signature rtype + // reachable from a method using reflection + // has no receiver but an extra ordinary parameter. + // For the Read method of io.Reader we want: + // func(Reader, []byte) (int, error) + // but here sig is: + // func([]byte) (int, error) + // with .Recv = Reader (though it is hard to + // notice because it doesn't affect Signature.String + // or types.Identical). + // + // TODO(adonovan): construct and visit the correct + // non-method signature with an extra parameter + // (though since unnamed func types have no methods + // there is essentially no actual demand for this). + // + // TODO(adonovan): document whether or not it is + // safe to skip non-exported methods (as RTA does). + visit(sig.Params(), true) // skip the Tuple + visit(sig.Results(), true) // skip the Tuple + } - case *types.Interface: - // nop---handled by recursion over method set. + switch T := T.(type) { + case *types.Basic: + // nop - case *types.Pointer: - prog.needMethods(t.Elem(), false, cr) + case *types.Interface: + // nop---handled by recursion over method set. - case *types.Slice: - prog.needMethods(t.Elem(), false, cr) + case *types.Pointer: + visit(T.Elem(), false) - case *types.Chan: - prog.needMethods(t.Elem(), false, cr) + case *types.Slice: + visit(T.Elem(), false) - case *types.Map: - prog.needMethods(t.Key(), false, cr) - prog.needMethods(t.Elem(), false, cr) + case *types.Chan: + visit(T.Elem(), false) - case *types.Signature: - if t.Recv() != nil { - panic(fmt.Sprintf("Signature %s has Recv %s", t, t.Recv())) - } - prog.needMethods(t.Params(), false, cr) - prog.needMethods(t.Results(), false, cr) - - case *types.Named: - // A pointer-to-named type can be derived from a named - // type via reflection. It may have methods too. - prog.needMethods(types.NewPointer(T), false, cr) - - // Consider 'type T struct{S}' where S has methods. 
- // Reflection provides no way to get from T to struct{S}, - // only to S, so the method set of struct{S} is unwanted, - // so set 'skip' flag during recursion. - prog.needMethods(t.Underlying(), true, cr) - - case *types.Array: - prog.needMethods(t.Elem(), false, cr) - - case *types.Struct: - for i, n := 0, t.NumFields(); i < n; i++ { - prog.needMethods(t.Field(i).Type(), false, cr) - } + case *types.Map: + visit(T.Key(), false) + visit(T.Elem(), false) - case *types.Tuple: - for i, n := 0, t.Len(); i < n; i++ { - prog.needMethods(t.At(i).Type(), false, cr) - } + case *types.Signature: + if T.Recv() != nil { + panic(fmt.Sprintf("Signature %s has Recv %s", T, T.Recv())) + } + visit(T.Params(), true) // skip the Tuple + visit(T.Results(), true) // skip the Tuple + + case *types.Named: + // A pointer-to-named type can be derived from a named + // type via reflection. It may have methods too. + visit(types.NewPointer(T), false) + + // Consider 'type T struct{S}' where S has methods. + // Reflection provides no way to get from T to struct{S}, + // only to S, so the method set of struct{S} is unwanted, + // so set 'skip' flag during recursion. + visit(T.Underlying(), true) // skip the unnamed type + + case *types.Array: + visit(T.Elem(), false) + + case *types.Struct: + for i, n := 0, T.NumFields(); i < n; i++ { + // TODO(adonovan): document whether or not + // it is safe to skip non-exported fields. + visit(T.Field(i).Type(), false) + } - case *typeparams.TypeParam: - panic(T) // type parameters are always abstract. + case *types.Tuple: + for i, n := 0, T.Len(); i < n; i++ { + visit(T.At(i).Type(), false) + } - case *typeparams.Union: - // nop + case *types.TypeParam, *types.Union: + // forEachReachable must not be called on parameterized types. + panic(T) - default: - panic(T) + default: + panic(T) + } } + visit(T, false) } diff --git a/vendor/golang.org/x/tools/go/ssa/parameterized.go b/vendor/golang.org/x/tools/go/ssa/parameterized.go index b11413c818..84db49d392 100644 --- a/vendor/golang.org/x/tools/go/ssa/parameterized.go +++ b/vendor/golang.org/x/tools/go/ssa/parameterized.go @@ -6,6 +6,7 @@ package ssa import ( "go/types" + "sync" "golang.org/x/tools/internal/typeparams" ) @@ -14,11 +15,24 @@ import ( // // NOTE: Adapted from go/types/infer.go. If that is exported in a future release remove this copy. type tpWalker struct { + mu sync.Mutex seen map[types.Type]bool } -// isParameterized returns true when typ contains any type parameters. -func (w *tpWalker) isParameterized(typ types.Type) (res bool) { +// isParameterized reports whether t recursively contains a type parameter. +// Thread-safe. +func (w *tpWalker) isParameterized(t types.Type) bool { + // TODO(adonovan): profile. If this operation is expensive, + // handle the most common but shallow cases such as T, pkg.T, + // *T without consulting the cache under the lock. + + w.mu.Lock() + defer w.mu.Unlock() + return w.isParameterizedLocked(t) +} + +// Requires w.mu. +func (w *tpWalker) isParameterizedLocked(typ types.Type) (res bool) { // NOTE: Adapted from go/types/infer.go. Try to keep in sync. 
// detect cycles @@ -35,25 +49,25 @@ func (w *tpWalker) isParameterized(typ types.Type) (res bool) { break case *types.Array: - return w.isParameterized(t.Elem()) + return w.isParameterizedLocked(t.Elem()) case *types.Slice: - return w.isParameterized(t.Elem()) + return w.isParameterizedLocked(t.Elem()) case *types.Struct: for i, n := 0, t.NumFields(); i < n; i++ { - if w.isParameterized(t.Field(i).Type()) { + if w.isParameterizedLocked(t.Field(i).Type()) { return true } } case *types.Pointer: - return w.isParameterized(t.Elem()) + return w.isParameterizedLocked(t.Elem()) case *types.Tuple: n := t.Len() for i := 0; i < n; i++ { - if w.isParameterized(t.At(i).Type()) { + if w.isParameterizedLocked(t.At(i).Type()) { return true } } @@ -63,14 +77,14 @@ func (w *tpWalker) isParameterized(typ types.Type) (res bool) { // of a generic function type (or an interface method) that is // part of the type we're testing. We don't care about these type // parameters. - // Similarly, the receiver of a method may declare (rather then + // Similarly, the receiver of a method may declare (rather than // use) type parameters, we don't care about those either. // Thus, we only need to look at the input and result parameters. - return w.isParameterized(t.Params()) || w.isParameterized(t.Results()) + return w.isParameterizedLocked(t.Params()) || w.isParameterizedLocked(t.Results()) case *types.Interface: for i, n := 0, t.NumMethods(); i < n; i++ { - if w.isParameterized(t.Method(i).Type()) { + if w.isParameterizedLocked(t.Method(i).Type()) { return true } } @@ -79,30 +93,31 @@ func (w *tpWalker) isParameterized(typ types.Type) (res bool) { panic(err) } for _, term := range terms { - if w.isParameterized(term.Type()) { + if w.isParameterizedLocked(term.Type()) { return true } } case *types.Map: - return w.isParameterized(t.Key()) || w.isParameterized(t.Elem()) + return w.isParameterizedLocked(t.Key()) || w.isParameterizedLocked(t.Elem()) case *types.Chan: - return w.isParameterized(t.Elem()) + return w.isParameterizedLocked(t.Elem()) case *types.Named: - args := typeparams.NamedTypeArgs(t) + args := t.TypeArgs() // TODO(taking): this does not match go/types/infer.go. Check with rfindley. - if params := typeparams.ForNamed(t); params.Len() > args.Len() { + if params := t.TypeParams(); params.Len() > args.Len() { return true } for i, n := 0, args.Len(); i < n; i++ { - if w.isParameterized(args.At(i)) { + if w.isParameterizedLocked(args.At(i)) { return true } } + return w.isParameterizedLocked(t.Underlying()) // recurse for types local to parameterized functions - case *typeparams.TypeParam: + case *types.TypeParam: return true default: @@ -112,9 +127,13 @@ func (w *tpWalker) isParameterized(typ types.Type) (res bool) { return false } +// anyParameterized reports whether any element of ts is parameterized. +// Thread-safe. func (w *tpWalker) anyParameterized(ts []types.Type) bool { + w.mu.Lock() + defer w.mu.Unlock() for _, t := range ts { - if w.isParameterized(t) { + if w.isParameterizedLocked(t) { return true } } diff --git a/vendor/golang.org/x/tools/go/ssa/print.go b/vendor/golang.org/x/tools/go/ssa/print.go index 8b783196e4..727a735026 100644 --- a/vendor/golang.org/x/tools/go/ssa/print.go +++ b/vendor/golang.org/x/tools/go/ssa/print.go @@ -17,7 +17,6 @@ import ( "strings" "golang.org/x/tools/go/types/typeutil" - "golang.org/x/tools/internal/typeparams" ) // relName returns the name of v relative to i. 
@@ -51,7 +50,7 @@ func relType(t types.Type, from *types.Package) string { return s } -func relTerm(term *typeparams.Term, from *types.Package) string { +func relTerm(term *types.Term, from *types.Package) string { s := relType(term.Type(), from) if term.Tilde() { return "~" + s @@ -95,7 +94,7 @@ func (v *Alloc) String() string { op = "new" } from := v.Parent().relPkg() - return fmt.Sprintf("%s %s (%s)", op, relType(deref(v.Type()), from), v.Comment) + return fmt.Sprintf("%s %s (%s)", op, relType(mustDeref(v.Type()), from), v.Comment) } func (v *Phi) String() string { @@ -259,21 +258,19 @@ func (v *MakeChan) String() string { } func (v *FieldAddr) String() string { - st := typeparams.CoreType(deref(v.X.Type())).(*types.Struct) // Be robust against a bad index. name := "?" - if 0 <= v.Field && v.Field < st.NumFields() { - name = st.Field(v.Field).Name() + if fld := fieldOf(mustDeref(v.X.Type()), v.Field); fld != nil { + name = fld.Name() } return fmt.Sprintf("&%s.%s [#%d]", relName(v.X, v), name, v.Field) } func (v *Field) String() string { - st := typeparams.CoreType(v.X.Type()).(*types.Struct) // Be robust against a bad index. name := "?" - if 0 <= v.Field && v.Field < st.NumFields() { - name = st.Field(v.Field).Name() + if fld := fieldOf(v.X.Type(), v.Field); fld != nil { + name = fld.Name() } return fmt.Sprintf("%s.%s [#%d]", relName(v.X, v), name, v.Field) } @@ -452,7 +449,7 @@ func WritePackage(buf *bytes.Buffer, p *Package) { case *Global: fmt.Fprintf(buf, " var %-*s %s\n", - maxname, name, relType(mem.Type().(*types.Pointer).Elem(), from)) + maxname, name, relType(mustDeref(mem.Type()), from)) } } diff --git a/vendor/golang.org/x/tools/go/ssa/sanity.go b/vendor/golang.org/x/tools/go/ssa/sanity.go index 88ad374ded..22a3c6bc3d 100644 --- a/vendor/golang.org/x/tools/go/ssa/sanity.go +++ b/vendor/golang.org/x/tools/go/ssa/sanity.go @@ -8,6 +8,7 @@ package ssa // Currently it checks CFG invariants but little at the instruction level. import ( + "bytes" "fmt" "go/types" "io" @@ -131,6 +132,11 @@ func (s *sanity) checkInstr(idx int, instr Instruction) { case *BinOp: case *Call: + if common := instr.Call; common.IsInvoke() { + if !types.IsInterface(common.Value.Type()) { + s.errorf("invoke on %s (%s) which is not an interface type (or type param)", common.Value, common.Value.Type()) + } + } case *ChangeInterface: case *ChangeType: case *SliceToArrayPointer: @@ -412,14 +418,17 @@ func (s *sanity) checkFunction(fn *Function) bool { s.errorf("nil Prog") } + var buf bytes.Buffer _ = fn.String() // must not crash _ = fn.RelString(fn.relPkg()) // must not crash + WriteFunction(&buf, fn) // must not crash // All functions have a package, except delegates (which are // shared across packages, or duplicated as weak symbols in a // separate-compilation model), and error.Error. if fn.Pkg == nil { - if strings.HasPrefix(fn.Synthetic, "wrapper ") || + if strings.HasPrefix(fn.Synthetic, "from type information (on demand)") || + strings.HasPrefix(fn.Synthetic, "wrapper ") || strings.HasPrefix(fn.Synthetic, "bound ") || strings.HasPrefix(fn.Synthetic, "thunk ") || strings.HasSuffix(fn.name, "Error") || diff --git a/vendor/golang.org/x/tools/go/ssa/source.go b/vendor/golang.org/x/tools/go/ssa/source.go index b9a08363ec..6700305bd9 100644 --- a/vendor/golang.org/x/tools/go/ssa/source.go +++ b/vendor/golang.org/x/tools/go/ssa/source.go @@ -121,7 +121,9 @@ func findNamedFunc(pkg *Package, pos token.Pos) *Function { // Don't call Program.Method: avoid creating wrappers. 
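The new sanity check above enforces the CallCommon invariant that "invoke" mode is only used when the receiver operand has an interface type (possibly a type parameter). A hedged sketch of how a client typically branches on the two call modes; describeCall is a hypothetical helper, not part of the package:

package client

import (
	"fmt"

	"golang.org/x/tools/go/ssa"
)

// describeCall reports whether a call site uses dynamic interface
// dispatch ("invoke" mode) or an ordinary function value ("call" mode).
func describeCall(call *ssa.Call) {
	common := call.Common()
	if common.IsInvoke() {
		// invoke mode: common.Method is the interface method and
		// common.Value is the interface (or type-parameter) operand.
		fmt.Printf("invoke %s on value of type %s\n",
			common.Method.Name(), common.Value.Type())
		return
	}
	// call mode: common.Value is the function value being called.
	if callee := common.StaticCallee(); callee != nil {
		fmt.Println("static call to", callee)
	} else {
		fmt.Println("dynamic call through a value of type", common.Value.Type())
	}
}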
obj := mset.At(i).Obj().(*types.Func) if obj.Pos() == pos { - return pkg.objects[obj].(*Function) + // obj from MethodSet may not be the origin type. + m := typeparams.OriginMethod(obj) + return pkg.objects[m].(*Function) } } } @@ -170,16 +172,19 @@ func (f *Function) ValueForExpr(e ast.Expr) (value Value, isAddr bool) { // --- Lookup functions for source-level named entities (types.Objects) --- // Package returns the SSA Package corresponding to the specified -// type-checker package object. -// It returns nil if no such SSA package has been created. -func (prog *Program) Package(obj *types.Package) *Package { - return prog.packages[obj] +// type-checker package. It returns nil if no such Package was +// created by a prior call to prog.CreatePackage. +func (prog *Program) Package(pkg *types.Package) *Package { + return prog.packages[pkg] } -// packageLevelMember returns the package-level member corresponding to -// the specified named object, which may be a package-level const -// (*NamedConst), var (*Global) or func (*Function) of some package in -// prog. It returns nil if the object is not found. +// packageLevelMember returns the package-level member corresponding +// to the specified symbol, which may be a package-level const +// (*NamedConst), var (*Global) or func/method (*Function) of some +// package in prog. +// +// It returns nil if the object belongs to a package that has not been +// created by prog.CreatePackage. func (prog *Program) packageLevelMember(obj types.Object) Member { if pkg, ok := prog.packages[obj.Pkg()]; ok { return pkg.objects[obj] @@ -187,24 +192,16 @@ func (prog *Program) packageLevelMember(obj types.Object) Member { return nil } -// originFunc returns the package-level generic function that is the -// origin of obj. If returns nil if the generic function is not found. -func (prog *Program) originFunc(obj *types.Func) *Function { - return prog.declaredFunc(typeparams.OriginMethod(obj)) -} - -// FuncValue returns the concrete Function denoted by the source-level -// named function obj, or nil if obj denotes an interface method. -// -// TODO(adonovan): check the invariant that obj.Type() matches the -// result's Signature, both in the params/results and in the receiver. +// FuncValue returns the SSA function or (non-interface) method +// denoted by the specified func symbol. It returns nil id the symbol +// denotes an interface method, or belongs to a package that was not +// created by prog.CreatePackage. func (prog *Program) FuncValue(obj *types.Func) *Function { fn, _ := prog.packageLevelMember(obj).(*Function) return fn } -// ConstValue returns the SSA Value denoted by the source-level named -// constant obj. +// ConstValue returns the SSA constant denoted by the specified const symbol. func (prog *Program) ConstValue(obj *types.Const) *Const { // TODO(adonovan): opt: share (don't reallocate) // Consts for const objects and constant ast.Exprs. @@ -221,7 +218,7 @@ func (prog *Program) ConstValue(obj *types.Const) *Const { } // VarValue returns the SSA Value that corresponds to a specific -// identifier denoting the source-level named variable obj. +// identifier denoting the specified var symbol. 
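Per the reworded comments above, FuncValue and Package return nil unless the symbol's package was materialized by a prior CreatePackage call. A small hedged sketch (lookupDecl is a hypothetical helper):

package client

import (
	"go/types"

	"golang.org/x/tools/go/ssa"
)

// lookupDecl maps a func symbol to its SSA function, if available.
// It returns nil for interface methods and for symbols whose package
// was never passed to prog.CreatePackage.
func lookupDecl(prog *ssa.Program, obj *types.Func) *ssa.Function {
	if prog.Package(obj.Pkg()) == nil {
		return nil // package not created; FuncValue would also return nil
	}
	return prog.FuncValue(obj)
}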
// // VarValue returns nil if a local variable was not found, perhaps // because its package was not built, the debug information was not diff --git a/vendor/golang.org/x/tools/go/ssa/ssa.go b/vendor/golang.org/x/tools/go/ssa/ssa.go index 5904b817b3..30bf4bc677 100644 --- a/vendor/golang.org/x/tools/go/ssa/ssa.go +++ b/vendor/golang.org/x/tools/go/ssa/ssa.go @@ -23,20 +23,25 @@ import ( type Program struct { Fset *token.FileSet // position information for the files of this Program imported map[string]*Package // all importable Packages, keyed by import path - packages map[*types.Package]*Package // all loaded Packages, keyed by object + packages map[*types.Package]*Package // all created Packages mode BuilderMode // set of mode bits for SSA construction MethodSets typeutil.MethodSetCache // cache of type-checker's method-sets - canon *canonizer // type canonicalization map - ctxt *typeparams.Context // cache for type checking instantiations + canon *canonizer // type canonicalization map + ctxt *types.Context // cache for type checking instantiations - methodsMu sync.Mutex // guards the following maps: - methodSets typeutil.Map // maps type to its concrete methodSet - runtimeTypes typeutil.Map // types for which rtypes are needed - bounds map[boundsKey]*Function // bounds for curried x.Method closures - thunks map[selectionKey]*Function // thunks for T.Method expressions - instances map[*Function]*instanceSet // instances of generic functions - parameterized tpWalker // determines whether a type is parameterized. + methodsMu sync.Mutex + methodSets typeutil.Map // maps type to its concrete *methodSet + + parameterized tpWalker // memoization of whether a type refers to type parameters + + runtimeTypesMu sync.Mutex + runtimeTypes typeutil.Map // set of runtime types (from MakeInterface) + + // objectMethods is a memoization of objectMethod + // to avoid creation of duplicate methods from type information. + objectMethodsMu sync.Mutex + objectMethods map[*types.Func]*Function } // A Package is a single analyzed Go package containing Members for @@ -51,17 +56,19 @@ type Package struct { Prog *Program // the owning program Pkg *types.Package // the corresponding go/types.Package Members map[string]Member // all package members keyed by name (incl. init and init#%d) - objects map[types.Object]Member // mapping of package objects to members (incl. methods). Contains *NamedConst, *Global, *Function. + objects map[types.Object]Member // mapping of package objects to members (incl. methods). Contains *NamedConst, *Global, *Function (values but not types) init *Function // Func("init"); the package's init function debug bool // include full debug info in this package + syntax bool // package was loaded from syntax // The following fields are set transiently, then cleared // after building. 
- buildOnce sync.Once // ensures package building occurs once - ninit int32 // number of init functions - info *types.Info // package type information - files []*ast.File // package ASTs - created creator // members created as a result of building this package (includes declared functions, wrappers) + buildOnce sync.Once // ensures package building occurs once + ninit int32 // number of init functions + info *types.Info // package type information + files []*ast.File // package ASTs + created creator // members created as a result of building this package (includes declared functions, wrappers) + initVersion map[ast.Expr]string // goversion to use for each global var init expr } // A Member is a member of a Go package, implemented by *NamedConst, @@ -258,8 +265,8 @@ type Node interface { // or method. // // If Blocks is nil, this indicates an external function for which no -// Go source code is available. In this case, FreeVars and Locals -// are nil too. Clients performing whole-program analysis must +// Go source code is available. In this case, FreeVars, Locals, and +// Params are nil too. Clients performing whole-program analysis must // handle external functions specially. // // Blocks contains the function's control-flow graph (CFG). @@ -296,8 +303,8 @@ type Node interface { // // A generic function is a function or method that has uninstantiated type // parameters (TypeParams() != nil). Consider a hypothetical generic -// method, (*Map[K,V]).Get. It may be instantiated with all ground -// (non-parameterized) types as (*Map[string,int]).Get or with +// method, (*Map[K,V]).Get. It may be instantiated with all +// non-parameterized types as (*Map[string,int]).Get or with // parameterized types as (*Map[string,U]).Get, where U is a type parameter. // In both instantiations, Origin() refers to the instantiated generic // method, (*Map[K,V]).Get, TypeParams() refers to the parameters [K,V] of @@ -305,39 +312,45 @@ type Node interface { // respectively, and is nil in the generic method. type Function struct { name string - object types.Object // a declared *types.Func or one of its wrappers - method *selection // info about provenance of synthetic methods; thunk => non-nil + object *types.Func // symbol for declared function (nil for FuncLit or synthetic init) + method *selection // info about provenance of synthetic methods; thunk => non-nil Signature *types.Signature pos token.Pos - Synthetic string // provenance of synthetic function; "" for true source functions - syntax ast.Node // *ast.Func{Decl,Lit}; replaced with simple ast.Node after build, unless debug mode - parent *Function // enclosing function if anon; nil if global - Pkg *Package // enclosing package; nil for shared funcs (wrappers and error.Error) - Prog *Program // enclosing program + // source information + Synthetic string // provenance of synthetic function; "" for true source functions + syntax ast.Node // *ast.Func{Decl,Lit}, if from syntax (incl. 
generic instances) + info *types.Info // type annotations (iff syntax != nil) + goversion string // Go version of syntax (NB: init is special) + + build buildFunc // algorithm to build function body (nil => built) + parent *Function // enclosing function if anon; nil if global + Pkg *Package // enclosing package; nil for shared funcs (wrappers and error.Error) + Prog *Program // enclosing program + + // These fields are populated only when the function body is built: + Params []*Parameter // function parameters; for methods, includes receiver FreeVars []*FreeVar // free variables whose values must be supplied by closure - Locals []*Alloc // local variables of this function + Locals []*Alloc // frame-allocated variables of this function Blocks []*BasicBlock // basic blocks of the function; nil => external Recover *BasicBlock // optional; control transfers here after recovered panic AnonFuncs []*Function // anonymous functions directly beneath this one referrers []Instruction // referring instructions (iff Parent() != nil) - built bool // function has completed both CREATE and BUILD phase. anonIdx int32 // position of a nested function in parent's AnonFuncs. fn.Parent()!=nil => fn.Parent().AnonFunc[fn.anonIdx] == fn. - typeparams *typeparams.TypeParamList // type parameters of this function. typeparams.Len() > 0 => generic or instance of generic function - typeargs []types.Type // type arguments that instantiated typeparams. len(typeargs) > 0 => instance of generic function - topLevelOrigin *Function // the origin function if this is an instance of a source function. nil if Parent()!=nil. + typeparams *types.TypeParamList // type parameters of this function. typeparams.Len() > 0 => generic or instance of generic function + typeargs []types.Type // type arguments that instantiated typeparams. len(typeargs) > 0 => instance of generic function + topLevelOrigin *Function // the origin function if this is an instance of a source function. nil if Parent()!=nil. + generic *generic // instances of this function, if generic - // The following fields are set transiently during building, - // then cleared. + // The following fields are cleared after building. currentBlock *BasicBlock // where to emit code - objects map[types.Object]Value // addresses of local variables + vars map[*types.Var]Value // addresses of local variables namedResults []*Alloc // tuple of named results targets *targets // linked stack of branch targets - lblocks map[types.Object]*lblock // labelled blocks - info *types.Info // *types.Info to build from. nil for wrappers. - subst *subster // non-nil => expand generic body using this type substitution of ground types + lblocks map[*types.Label]*lblock // labelled blocks + subst *subster // type parameter substitutions (if non-nil) } // BasicBlock represents an SSA basic block. @@ -402,9 +415,8 @@ type FreeVar struct { // A Parameter represents an input parameter of a function. type Parameter struct { name string - object types.Object // a *types.Var; nil for non-source locals + object *types.Var // non-nil typ types.Type - pos token.Pos parent *Function referrers []Instruction } @@ -482,15 +494,12 @@ type Builtin struct { // type of the allocated variable is actually // Type().Underlying().(*types.Pointer).Elem(). // -// If Heap is false, Alloc allocates space in the function's -// activation record (frame); we refer to an Alloc(Heap=false) as a -// "local" alloc. 
Each local Alloc returns the same address each time -// it is executed within the same activation; the space is -// re-initialized to zero. +// If Heap is false, Alloc zero-initializes the same local variable in +// the call frame and returns its address; in this case the Alloc must +// be present in Function.Locals. We call this a "local" alloc. // -// If Heap is true, Alloc allocates space in the heap; we -// refer to an Alloc(Heap=true) as a "new" alloc. Each new Alloc -// returns a different address each time it is executed. +// If Heap is true, Alloc allocates a new zero-initialized variable +// each time the instruction is executed. We call this a "new" alloc. // // When Alloc is applied to a channel, map or slice type, it returns // the address of an uninitialized (nil) reference of that kind; store @@ -681,8 +690,8 @@ type Convert struct { type MultiConvert struct { register X Value - from []*typeparams.Term - to []*typeparams.Term + from []*types.Term + to []*types.Term } // ChangeInterface constructs a value of one interface type from a @@ -865,7 +874,7 @@ type Slice struct { type FieldAddr struct { register X Value // *struct - Field int // field is typeparams.CoreType(X.Type().Underlying().(*types.Pointer).Elem()).(*types.Struct).Field(Field) + Field int // index into CoreType(CoreType(X.Type()).(*types.Pointer).Elem()).(*types.Struct).Fields } // The Field instruction yields the Field of struct X. @@ -884,7 +893,7 @@ type FieldAddr struct { type Field struct { register X Value // struct - Field int // index into typeparams.CoreType(X.Type()).(*types.Struct).Fields + Field int // index into CoreType(X.Type()).(*types.Struct).Fields } // The IndexAddr instruction yields the address of the element at @@ -1068,11 +1077,12 @@ type Next struct { // Type() reflects the actual type of the result, possibly a // 2-types.Tuple; AssertedType is the asserted type. // -// Pos() returns the ast.CallExpr.Lparen if the instruction arose from -// an explicit T(e) conversion; the ast.TypeAssertExpr.Lparen if the -// instruction arose from an explicit e.(T) operation; or the -// ast.CaseClause.Case if the instruction arose from a case of a -// type-switch statement. +// Depending on the TypeAssert's purpose, Pos may return: +// - the ast.CallExpr.Lparen of an explicit T(e) conversion; +// - the ast.TypeAssertExpr.Lparen of an explicit e.(T) operation; +// - the ast.CaseClause.Case of a case of a type-switch statement; +// - the Ident(m).NamePos of an interface method value i.m +// (for which TypeAssert may be used to effect the nil check). // // Example printed form: // @@ -1390,7 +1400,7 @@ type anInstruction struct { // represents a dynamically dispatched call to an interface method. // In this mode, Value is the interface value and Method is the // interface's abstract method. The interface value may be a type -// parameter. Note: an abstract method may be shared by multiple +// parameter. Note: an interface method may be shared by multiple // interfaces due to embedding; Value.Type() provides the specific // interface used for this call. // @@ -1408,7 +1418,7 @@ type anInstruction struct { // the last element of Args is a slice. 
type CallCommon struct { Value Value // receiver (invoke mode) or func value (call mode) - Method *types.Func // abstract method (invoke mode) + Method *types.Func // interface method (invoke mode) Args []Value // actual parameters (in static method call, includes receiver) pos token.Pos // position of CallExpr.Lparen, iff explicit in source } @@ -1507,14 +1517,19 @@ func (v *Global) String() string { return v.RelString(nil) func (v *Global) Package() *Package { return v.Pkg } func (v *Global) RelString(from *types.Package) string { return relString(v, from) } -func (v *Function) Name() string { return v.name } -func (v *Function) Type() types.Type { return v.Signature } -func (v *Function) Pos() token.Pos { return v.pos } -func (v *Function) Token() token.Token { return token.FUNC } -func (v *Function) Object() types.Object { return v.object } -func (v *Function) String() string { return v.RelString(nil) } -func (v *Function) Package() *Package { return v.Pkg } -func (v *Function) Parent() *Function { return v.parent } +func (v *Function) Name() string { return v.name } +func (v *Function) Type() types.Type { return v.Signature } +func (v *Function) Pos() token.Pos { return v.pos } +func (v *Function) Token() token.Token { return token.FUNC } +func (v *Function) Object() types.Object { + if v.object != nil { + return types.Object(v.object) + } + return nil +} +func (v *Function) String() string { return v.RelString(nil) } +func (v *Function) Package() *Package { return v.Pkg } +func (v *Function) Parent() *Function { return v.parent } func (v *Function) Referrers() *[]Instruction { if v.parent != nil { return &v.referrers @@ -1524,10 +1539,7 @@ func (v *Function) Referrers() *[]Instruction { // TypeParams are the function's type parameters if generic or the // type parameters that were instantiated if fn is an instantiation. -// -// TODO(taking): declare result type as *types.TypeParamList -// after we drop support for go1.17. -func (fn *Function) TypeParams() *typeparams.TypeParamList { +func (fn *Function) TypeParams() *types.TypeParamList { return fn.typeparams } @@ -1535,12 +1547,25 @@ func (fn *Function) TypeParams() *typeparams.TypeParamList { // from fn.Origin(). func (fn *Function) TypeArgs() []types.Type { return fn.typeargs } -// Origin is the function fn is an instantiation of. Returns nil if fn is not -// an instantiation. +// Origin returns the generic function from which fn was instantiated, +// or nil if fn is not an instantiation. func (fn *Function) Origin() *Function { if fn.parent != nil && len(fn.typeargs) > 0 { - // Nested functions are BUILT at a different time than there instances. - return fn.parent.Origin().AnonFuncs[fn.anonIdx] + // Nested functions are BUILT at a different time than their instances. + // Build declared package if not yet BUILT. This is not an expected use + // case, but is simple and robust. + fn.declaredPackage().Build() + } + return origin(fn) +} + +// origin is the function that fn is an instantiation of. Returns nil if fn is +// not an instantiation. +// +// Precondition: fn and the origin function are done building. 
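Combining the generic-related accessors documented above: TypeParams is non-empty for both a generic function and its instantiations, TypeArgs is non-empty only for instantiations, and Origin maps an instantiation back to the generic it came from. A hedged sketch (classify is a hypothetical helper):

package client

import "golang.org/x/tools/go/ssa"

// classify distinguishes ordinary, generic, and instantiated functions
// using Function.TypeParams, Function.TypeArgs, and Function.Origin.
func classify(fn *ssa.Function) string {
	switch {
	case len(fn.TypeArgs()) > 0:
		return "instantiation of " + fn.Origin().Name()
	case fn.TypeParams().Len() > 0:
		return "generic (uninstantiated)"
	default:
		return "ordinary"
	}
}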
+func origin(fn *Function) *Function { + if fn.parent != nil && len(fn.typeargs) > 0 { + return origin(fn.parent).AnonFuncs[fn.anonIdx] } return fn.topLevelOrigin } @@ -1549,7 +1574,7 @@ func (v *Parameter) Type() types.Type { return v.typ } func (v *Parameter) Name() string { return v.name } func (v *Parameter) Object() types.Object { return v.object } func (v *Parameter) Referrers() *[]Instruction { return &v.referrers } -func (v *Parameter) Pos() token.Pos { return v.pos } +func (v *Parameter) Pos() token.Pos { return v.object.Pos() } func (v *Parameter) Parent() *Function { return v.parent } func (v *Alloc) Type() types.Type { return v.typ } diff --git a/vendor/golang.org/x/tools/go/ssa/ssautil/load.go b/vendor/golang.org/x/tools/go/ssa/ssautil/load.go index 96d69a20a1..3daa67a07e 100644 --- a/vendor/golang.org/x/tools/go/ssa/ssautil/load.go +++ b/vendor/golang.org/x/tools/go/ssa/ssautil/load.go @@ -14,14 +14,14 @@ import ( "golang.org/x/tools/go/loader" "golang.org/x/tools/go/packages" "golang.org/x/tools/go/ssa" - "golang.org/x/tools/internal/typeparams" + "golang.org/x/tools/internal/versions" ) // Packages creates an SSA program for a set of packages. // // The packages must have been loaded from source syntax using the -// golang.org/x/tools/go/packages.Load function in LoadSyntax or -// LoadAllSyntax mode. +// [packages.Load] function in [packages.LoadSyntax] or +// [packages.LoadAllSyntax] mode. // // Packages creates an SSA package for each well-typed package in the // initial list, plus all their dependencies. The resulting list of @@ -29,12 +29,30 @@ import ( // a nil if SSA code could not be constructed for the corresponding initial // package due to type errors. // -// Code for bodies of functions is not built until Build is called on -// the resulting Program. SSA code is constructed only for the initial -// packages with well-typed syntax trees. +// Code for bodies of functions is not built until [Program.Build] is +// called on the resulting Program. SSA code is constructed only for +// the initial packages with well-typed syntax trees. // // The mode parameter controls diagnostics and checking during SSA construction. func Packages(initial []*packages.Package, mode ssa.BuilderMode) (*ssa.Program, []*ssa.Package) { + // TODO(adonovan): opt: this calls CreatePackage far more than + // necessary: for all dependencies, not just the (non-initial) + // direct dependencies of the initial packages. + // + // But can it reasonably be changed without breaking the + // spirit and/or letter of the law above? Clients may notice + // if we call CreatePackage less, as methods like + // Program.FuncValue will return nil. Or must we provide a new + // function (and perhaps deprecate this one)? Is it worth it? + // + // Tim King makes the interesting point that it would be + // possible to entirely alleviate the client from the burden + // of calling CreatePackage for non-syntax packages, if we + // were to treat vars and funcs lazily in the same way we now + // treat methods. (In essence, try to move away from the + // notion of ssa.Packages, and make the Program answer + // all reasonable questions about any types.Object.) + return doPackages(initial, mode, false) } @@ -42,7 +60,7 @@ func Packages(initial []*packages.Package, mode ssa.BuilderMode) (*ssa.Program, // their dependencies. // // The packages must have been loaded from source syntax using the -// golang.org/x/tools/go/packages.Load function in LoadAllSyntax mode. +// [packages.Load] function in [packages.LoadAllSyntax] mode. 
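The Load-then-Packages workflow described in the comments above, as a minimal end-to-end sketch (the "./..." pattern, mode bits, and error handling are illustrative only):

package main

import (
	"log"

	"golang.org/x/tools/go/packages"
	"golang.org/x/tools/go/ssa"
	"golang.org/x/tools/go/ssa/ssautil"
)

func main() {
	// Load syntax and type information for the initial packages.
	cfg := &packages.Config{Mode: packages.LoadSyntax}
	initial, err := packages.Load(cfg, "./...")
	if err != nil {
		log.Fatal(err)
	}
	// Create SSA packages; function bodies are not built yet.
	prog, pkgs := ssautil.Packages(initial, ssa.SanityCheckFunctions)
	prog.Build() // build bodies for all created packages
	for _, p := range pkgs {
		if p != nil { // nil for initial packages with type errors
			log.Println("built", p.Pkg.Path())
		}
	}
}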
// // AllPackages creates an SSA package for each well-typed package in the // initial list, plus all their dependencies. The resulting list of @@ -102,7 +120,7 @@ func doPackages(initial []*packages.Package, mode ssa.BuilderMode, deps bool) (* // // The mode parameter controls diagnostics and checking during SSA construction. // -// Deprecated: Use golang.org/x/tools/go/packages and the Packages +// Deprecated: Use [golang.org/x/tools/go/packages] and the [Packages] // function instead; see ssa.Example_loadPackages. func CreateProgram(lprog *loader.Program, mode ssa.BuilderMode) *ssa.Program { prog := ssa.NewProgram(lprog.Fset, mode) @@ -116,16 +134,17 @@ func CreateProgram(lprog *loader.Program, mode ssa.BuilderMode) *ssa.Program { return prog } -// BuildPackage builds an SSA program with IR for a single package. +// BuildPackage builds an SSA program with SSA intermediate +// representation (IR) for all functions of a single package. // -// It populates pkg by type-checking the specified file ASTs. All +// It populates pkg by type-checking the specified file syntax trees. All // dependencies are loaded using the importer specified by tc, which // typically loads compiler export data; SSA code cannot be built for -// those packages. BuildPackage then constructs an ssa.Program with all +// those packages. BuildPackage then constructs an [ssa.Program] with all // dependency packages created, and builds and returns the SSA package // corresponding to pkg. // -// The caller must have set pkg.Path() to the import path. +// The caller must have set pkg.Path to the import path. // // The operation fails if there were any type-checking or import errors. // @@ -143,10 +162,11 @@ func BuildPackage(tc *types.Config, fset *token.FileSet, pkg *types.Package, fil Defs: make(map[*ast.Ident]types.Object), Uses: make(map[*ast.Ident]types.Object), Implicits: make(map[ast.Node]types.Object), + Instances: make(map[*ast.Ident]types.Instance), Scopes: make(map[ast.Node]*types.Scope), Selections: make(map[*ast.SelectorExpr]*types.Selection), } - typeparams.InitInstanceInfo(info) + versions.InitFileVersions(info) if err := types.NewChecker(tc, fset, pkg, info).Files(files); err != nil { return nil, nil, err } @@ -168,6 +188,25 @@ func BuildPackage(tc *types.Config, fset *token.FileSet, pkg *types.Package, fil } createAll(pkg.Imports()) + // TODO(adonovan): we could replace createAll with just: + // + // // Create SSA packages for all imports. + // for _, p := range pkg.Imports() { + // prog.CreatePackage(p, nil, nil, true) + // } + // + // (with minor changes to changes to ../builder_test.go as + // shown in CL 511715 PS 10.) But this would strictly violate + // the letter of the doc comment above, which says "all + // dependencies created". + // + // Tim makes the good point with some extra work we could + // remove the need for any CreatePackage calls except the + // ones with syntax (i.e. primary packages). Of course + // You wouldn't have ssa.Packages and Members for as + // many things but no-one really uses that anyway. + // I wish I had done this from the outset. + // Create and build the primary package. 
ssapkg := prog.CreatePackage(pkg, files, info, false) ssapkg.Build() diff --git a/vendor/golang.org/x/tools/go/ssa/ssautil/visit.go b/vendor/golang.org/x/tools/go/ssa/ssautil/visit.go index 5f27050b02..b4feb42cb3 100644 --- a/vendor/golang.org/x/tools/go/ssa/ssautil/visit.go +++ b/vendor/golang.org/x/tools/go/ssa/ssautil/visit.go @@ -4,7 +4,14 @@ package ssautil // import "golang.org/x/tools/go/ssa/ssautil" -import "golang.org/x/tools/go/ssa" +import ( + "go/ast" + "go/types" + + "golang.org/x/tools/go/ssa" + + _ "unsafe" // for linkname hack +) // This file defines utilities for visiting the SSA representation of // a Program. @@ -18,50 +25,113 @@ import "golang.org/x/tools/go/ssa" // synthetic wrappers. // // Precondition: all packages are built. +// +// TODO(adonovan): this function is underspecified. It doesn't +// actually work like a linker, which computes reachability from main +// using something like go/callgraph/cha (without materializing the +// call graph). In fact, it treats all public functions and all +// methods of public non-parameterized types as roots, even though +// they may be unreachable--but only in packages created from syntax. +// +// I think we should deprecate AllFunctions function in favor of two +// clearly defined ones: +// +// 1. The first would efficiently compute CHA reachability from a set +// of main packages, making it suitable for a whole-program +// analysis context with InstantiateGenerics, in conjunction with +// Program.Build. +// +// 2. The second would return only the set of functions corresponding +// to source Func{Decl,Lit} syntax, like SrcFunctions in +// go/analysis/passes/buildssa; this is suitable for +// package-at-a-time (or handful of packages) context. +// ssa.Package could easily expose it as a field. +// +// We could add them unexported for now and use them via the linkname hack. func AllFunctions(prog *ssa.Program) map[*ssa.Function]bool { - visit := visitor{ - prog: prog, - seen: make(map[*ssa.Function]bool), + seen := make(map[*ssa.Function]bool) + + var function func(fn *ssa.Function) + function = func(fn *ssa.Function) { + if !seen[fn] { + seen[fn] = true + var buf [10]*ssa.Value // avoid alloc in common case + for _, b := range fn.Blocks { + for _, instr := range b.Instrs { + for _, op := range instr.Operands(buf[:0]) { + if fn, ok := (*op).(*ssa.Function); ok { + function(fn) + } + } + } + } + } } - visit.program() - return visit.seen -} -type visitor struct { - prog *ssa.Program - seen map[*ssa.Function]bool -} + // TODO(adonovan): opt: provide a way to share a builder + // across a sequence of MethodValue calls. -func (visit *visitor) program() { - for _, pkg := range visit.prog.AllPackages() { - for _, mem := range pkg.Members { - if fn, ok := mem.(*ssa.Function); ok { - visit.function(fn) + methodsOf := func(T types.Type) { + if !types.IsInterface(T) { + mset := prog.MethodSets.MethodSet(T) + for i := 0; i < mset.Len(); i++ { + function(prog.MethodValue(mset.At(i))) } } } - for _, T := range visit.prog.RuntimeTypes() { - mset := visit.prog.MethodSets.MethodSet(T) - for i, n := 0, mset.Len(); i < n; i++ { - visit.function(visit.prog.MethodValue(mset.At(i))) + + // Historically, Program.RuntimeTypes used to include the type + // of any exported member of a package loaded from syntax that + // has a non-parameterized type, plus all types + // reachable from that type using reflection, even though + // these runtime types may not be required for them. 
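For the rewritten AllFunctions above, a hedged usage sketch; countSrc is a hypothetical helper that separates source functions from synthetic wrappers and instantiations using the exported Synthetic field:

package client

import (
	"golang.org/x/tools/go/ssa"
	"golang.org/x/tools/go/ssa/ssautil"
)

// countSrc tallies the functions returned by AllFunctions, splitting
// true source functions (Synthetic == "") from synthetic ones.
func countSrc(prog *ssa.Program) (src, synthetic int) {
	for fn := range ssautil.AllFunctions(prog) {
		if fn.Synthetic == "" {
			src++
		} else {
			synthetic++
		}
	}
	return
}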
+ // + // Rather than break existing programs that rely on + // AllFunctions visiting extra methods that are unreferenced + // by IR and unreachable via reflection, we moved the logic + // here, unprincipled though it is. + // (See doc comment for better ideas.) + // + // Nonetheless, after the move, we no longer visit every + // method of any type recursively reachable from T, only the + // methods of T and *T themselves, and we only apply this to + // named types T, and not to the type of every exported + // package member. + exportedTypeHack := func(t *ssa.Type) { + if isSyntactic(t.Package()) && + ast.IsExported(t.Name()) && + !types.IsInterface(t.Type()) { + // Consider only named types. + // (Ignore aliases and unsafe.Pointer.) + if named, ok := t.Type().(*types.Named); ok { + if named.TypeParams() == nil { + methodsOf(named) // T + methodsOf(types.NewPointer(named)) // *T + } + } } } -} -func (visit *visitor) function(fn *ssa.Function) { - if !visit.seen[fn] { - visit.seen[fn] = true - var buf [10]*ssa.Value // avoid alloc in common case - for _, b := range fn.Blocks { - for _, instr := range b.Instrs { - for _, op := range instr.Operands(buf[:0]) { - if fn, ok := (*op).(*ssa.Function); ok { - visit.function(fn) - } - } + for _, pkg := range prog.AllPackages() { + for _, mem := range pkg.Members { + switch mem := mem.(type) { + case *ssa.Function: + // Visit all package-level declared functions. + function(mem) + + case *ssa.Type: + exportedTypeHack(mem) } } } + + // Visit all methods of types for which runtime types were + // materialized, as they are reachable through reflection. + for _, T := range prog.RuntimeTypes() { + methodsOf(T) + } + + return seen } // MainPackages returns the subset of the specified packages @@ -76,3 +146,12 @@ func MainPackages(pkgs []*ssa.Package) []*ssa.Package { } return mains } + +// TODO(adonovan): propose a principled API for this. One possibility +// is a new field, Package.SrcFunctions []*Function, which would +// contain the list of SrcFunctions described in point 2 of the +// AllFunctions doc comment, or nil if the package is not from syntax. +// But perhaps overloading nil vs empty slice is too subtle. +// +//go:linkname isSyntactic golang.org/x/tools/go/ssa.isSyntactic +func isSyntactic(pkg *ssa.Package) bool diff --git a/vendor/golang.org/x/tools/go/ssa/subst.go b/vendor/golang.org/x/tools/go/ssa/subst.go index d7f8ae4a70..a9a6d41e81 100644 --- a/vendor/golang.org/x/tools/go/ssa/subst.go +++ b/vendor/golang.org/x/tools/go/ssa/subst.go @@ -5,10 +5,7 @@ package ssa import ( - "fmt" "go/types" - - "golang.org/x/tools/internal/typeparams" ) // Type substituter for a fixed set of replacement types. @@ -19,41 +16,42 @@ import ( // // Not concurrency-safe. type subster struct { + replacements map[*types.TypeParam]types.Type // values should contain no type params + cache map[types.Type]types.Type // cache of subst results + ctxt *types.Context // cache for instantiation + scope *types.Scope // *types.Named declared within this scope can be substituted (optional) + debug bool // perform extra debugging checks + // TODO(taking): consider adding Pos // TODO(zpavlinovic): replacements can contain type params // when generating instances inside of a generic function body. 
- replacements map[*typeparams.TypeParam]types.Type // values should contain no type params - cache map[types.Type]types.Type // cache of subst results - ctxt *typeparams.Context - debug bool // perform extra debugging checks - // TODO(taking): consider adding Pos } // Returns a subster that replaces tparams[i] with targs[i]. Uses ctxt as a cache. // targs should not contain any types in tparams. -func makeSubster(ctxt *typeparams.Context, tparams *typeparams.TypeParamList, targs []types.Type, debug bool) *subster { +// scope is the (optional) lexical block of the generic function for which we are substituting. +func makeSubster(ctxt *types.Context, scope *types.Scope, tparams *types.TypeParamList, targs []types.Type, debug bool) *subster { assert(tparams.Len() == len(targs), "makeSubster argument count must match") subst := &subster{ - replacements: make(map[*typeparams.TypeParam]types.Type, tparams.Len()), + replacements: make(map[*types.TypeParam]types.Type, tparams.Len()), cache: make(map[types.Type]types.Type), ctxt: ctxt, + scope: scope, debug: debug, } for i := 0; i < tparams.Len(); i++ { subst.replacements[tparams.At(i)] = targs[i] } if subst.debug { - if err := subst.wellFormed(); err != nil { - panic(err) - } + subst.wellFormed() } return subst } -// wellFormed returns an error if subst was not properly initialized. -func (subst *subster) wellFormed() error { - if subst == nil || len(subst.replacements) == 0 { - return nil +// wellFormed asserts that subst was properly initialized. +func (subst *subster) wellFormed() { + if subst == nil { + return } // Check that all of the type params do not appear in the arguments. s := make(map[types.Type]bool, len(subst.replacements)) @@ -62,10 +60,9 @@ func (subst *subster) wellFormed() error { } for _, r := range subst.replacements { if reaches(r, s) { - return fmt.Errorf("\n‰r %s s %v replacements %v\n", r, s, subst.replacements) + panic(subst) } } - return nil } // typ returns the type of t with the type parameter tparams[i] substituted @@ -83,7 +80,7 @@ func (subst *subster) typ(t types.Type) (res types.Type) { // fall through if result r will be identical to t, types.Identical(r, t). switch t := t.(type) { - case *typeparams.TypeParam: + case *types.TypeParam: r := subst.replacements[t] assert(r != nil, "type param without replacement encountered") return r @@ -132,7 +129,7 @@ func (subst *subster) typ(t types.Type) (res types.Type) { case *types.Signature: return subst.signature(t) - case *typeparams.Union: + case *types.Union: return subst.union(t) case *types.Interface: @@ -221,25 +218,25 @@ func (subst *subster) var_(v *types.Var) *types.Var { return v } -func (subst *subster) union(u *typeparams.Union) *typeparams.Union { - var out []*typeparams.Term // nil => no updates +func (subst *subster) union(u *types.Union) *types.Union { + var out []*types.Term // nil => no updates for i, n := 0, u.Len(); i < n; i++ { t := u.Term(i) r := subst.typ(t.Type()) if r != t.Type() && out == nil { - out = make([]*typeparams.Term, n) + out = make([]*types.Term, n) for j := 0; j < i; j++ { out[j] = u.Term(j) } } if out != nil { - out[i] = typeparams.NewTerm(t.Tilde(), r) + out[i] = types.NewTerm(t.Tilde(), r) } } if out != nil { - return typeparams.NewUnion(out) + return types.NewUnion(out) } return u } @@ -250,7 +247,7 @@ func (subst *subster) interface_(iface *types.Interface) *types.Interface { } // methods for the interface. Initially nil if there is no known change needed. - // Signatures for the method where recv is nil. 
NewInterfaceType fills in the recievers. + // Signatures for the method where recv is nil. NewInterfaceType fills in the receivers. var methods []*types.Func initMethods := func(n int) { // copy first n explicit methods methods = make([]*types.Func, iface.NumExplicitMethods()) @@ -263,7 +260,7 @@ func (subst *subster) interface_(iface *types.Interface) *types.Interface { for i := 0; i < iface.NumExplicitMethods(); i++ { f := iface.ExplicitMethod(i) // On interfaces, we need to cycle break on anonymous interface types - // being in a cycle with their signatures being in cycles with their recievers + // being in a cycle with their signatures being in cycles with their receivers // that do not go through a Named. norecv := changeRecv(f.Type().(*types.Signature), nil) sig := subst.typ(norecv) @@ -306,29 +303,56 @@ func (subst *subster) interface_(iface *types.Interface) *types.Interface { } func (subst *subster) named(t *types.Named) types.Type { - // A name type may be: - // (1) ordinary (no type parameters, no type arguments), - // (2) generic (type parameters but no type arguments), or - // (3) instantiated (type parameters and type arguments). - tparams := typeparams.ForNamed(t) + // A named type may be: + // (1) ordinary named type (non-local scope, no type parameters, no type arguments), + // (2) locally scoped type, + // (3) generic (type parameters but no type arguments), or + // (4) instantiated (type parameters and type arguments). + tparams := t.TypeParams() if tparams.Len() == 0 { - // case (1) ordinary + if subst.scope != nil && !subst.scope.Contains(t.Obj().Pos()) { + // Outside the current function scope? + return t // case (1) ordinary + } - // Note: If Go allows for local type declarations in generic - // functions we may need to descend into underlying as well. - return t + // case (2) locally scoped type. + // Create a new named type to represent this instantiation. + // We assume that local types of distinct instantiations of a + // generic function are distinct, even if they don't refer to + // type parameters, but the spec is unclear; see golang/go#58573. + // + // Subtle: We short circuit substitution and use a newly created type in + // subst, i.e. cache[t]=n, to pre-emptively replace t with n in recursive + // types during traversal. This both breaks infinite cycles and allows for + // constructing types with the replacement applied in subst.typ(under). + // + // Example: + // func foo[T any]() { + // type linkedlist struct { + // next *linkedlist + // val T + // } + // } + // + // When the field `next *linkedlist` is visited during subst.typ(under), + // we want the substituted type for the field `next` to be `*n`. + n := types.NewNamed(t.Obj(), nil, nil) + subst.cache[t] = n + subst.cache[n] = n + n.SetUnderlying(subst.typ(t.Underlying())) + return n } - targs := typeparams.NamedTypeArgs(t) + targs := t.TypeArgs() // insts are arguments to instantiate using. insts := make([]types.Type, tparams.Len()) - // case (2) generic ==> targs.Len() == 0 + // case (3) generic ==> targs.Len() == 0 // Instantiating a generic with no type arguments should be unreachable. // Please report a bug if you encounter this. assert(targs.Len() != 0, "substition into a generic Named type is currently unsupported") - // case (3) instantiated. + // case (4) instantiated. 
// Substitute into the type arguments and instantiate the replacements/ // Example: // type N[A any] func() A @@ -341,13 +365,13 @@ func (subst *subster) named(t *types.Named) types.Type { inst := subst.typ(targs.At(i)) // TODO(generic): Check with rfindley for mutual recursion insts[i] = inst } - r, err := typeparams.Instantiate(subst.ctxt, typeparams.NamedTypeOrigin(t), insts, false) + r, err := types.Instantiate(subst.ctxt, t.Origin(), insts, false) assert(err == nil, "failed to Instantiate Named type") return r } func (subst *subster) signature(t *types.Signature) types.Type { - tparams := typeparams.ForSignature(t) + tparams := t.TypeParams() // We are choosing not to support tparams.Len() > 0 until a need has been observed in practice. // @@ -362,7 +386,7 @@ func (subst *subster) signature(t *types.Signature) types.Type { // no type params to substitute // (2)generic method and recv needs to be substituted. - // Recievers can be either: + // Receivers can be either: // named // pointer to named // interface @@ -372,25 +396,32 @@ func (subst *subster) signature(t *types.Signature) types.Type { params := subst.tuple(t.Params()) results := subst.tuple(t.Results()) if recv != t.Recv() || params != t.Params() || results != t.Results() { - return typeparams.NewSignatureType(recv, nil, nil, params, results, t.Variadic()) + return types.NewSignatureType(recv, nil, nil, params, results, t.Variadic()) } return t } // reaches returns true if a type t reaches any type t' s.t. c[t'] == true. -// Updates c to cache results. +// It updates c to cache results. +// +// reaches is currently only part of the wellFormed debug logic, and +// in practice c is initially only type parameters. It is not currently +// relied on in production. func reaches(t types.Type, c map[types.Type]bool) (res bool) { if c, ok := c[t]; ok { return c } - c[t] = false // prevent cycles + + // c is populated with temporary false entries as types are visited. + // This avoids repeat visits and break cycles. + c[t] = false defer func() { c[t] = res }() switch t := t.(type) { - case *typeparams.TypeParam, *types.Basic: - // no-op => c == false + case *types.TypeParam, *types.Basic: + return false case *types.Array: return reaches(t.Elem(), c) case *types.Slice: @@ -418,7 +449,7 @@ func reaches(t types.Type, c map[types.Type]bool) (res bool) { return true } return reaches(t.Params(), c) || reaches(t.Results(), c) - case *typeparams.Union: + case *types.Union: for i := 0; i < t.Len(); i++ { if reaches(t.Term(i).Type(), c) { return true diff --git a/vendor/golang.org/x/tools/go/ssa/util.go b/vendor/golang.org/x/tools/go/ssa/util.go index db53aebee4..6e9f1282b1 100644 --- a/vendor/golang.org/x/tools/go/ssa/util.go +++ b/vendor/golang.org/x/tools/go/ssa/util.go @@ -43,12 +43,6 @@ func isBlankIdent(e ast.Expr) bool { //// Type utilities. Some of these belong in go/types. -// isPointer returns true for types whose underlying type is a pointer. -func isPointer(typ types.Type) bool { - _, ok := typ.Underlying().(*types.Pointer) - return ok -} - // isNonTypeParamInterface reports whether t is an interface type but not a type parameter. func isNonTypeParamInterface(t types.Type) bool { return !typeparams.IsTypeParam(t) && types.IsInterface(t) @@ -100,12 +94,33 @@ func isBasicConvTypes(tset termList) bool { return all && basics >= 1 && tset.Len()-basics <= 1 } -// deref returns a pointer's element type; otherwise it returns typ. 
-func deref(typ types.Type) types.Type { +// deptr returns a pointer's element type and true; otherwise it returns (typ, false). +// This function is oblivious to core types and is not suitable for generics. +// +// TODO: Deprecate this function once all usages have been audited. +func deptr(typ types.Type) (types.Type, bool) { if p, ok := typ.Underlying().(*types.Pointer); ok { - return p.Elem() + return p.Elem(), true } - return typ + return typ, false +} + +// deref returns the element type of a type with a pointer core type and true; +// otherwise it returns (typ, false). +func deref(typ types.Type) (types.Type, bool) { + if p, ok := typeparams.CoreType(typ).(*types.Pointer); ok { + return p.Elem(), true + } + return typ, false +} + +// mustDeref returns the element type of a type with a pointer core type. +// Panics on failure. +func mustDeref(typ types.Type) types.Type { + if et, ok := deref(typ); ok { + return et + } + panic("cannot dereference type " + typ.String()) } // recvType returns the receiver type of method obj. @@ -113,6 +128,17 @@ func recvType(obj *types.Func) types.Type { return obj.Type().(*types.Signature).Recv().Type() } +// fieldOf returns the index'th field of the (core type of) a struct type; +// otherwise returns nil. +func fieldOf(typ types.Type, index int) *types.Var { + if st, ok := typeparams.CoreType(typ).(*types.Struct); ok { + if 0 <= index && index < st.NumFields() { + return st.Field(index) + } + } + return nil +} + // isUntyped returns true for types that are untyped. func isUntyped(typ types.Type) bool { b, ok := typ.(*types.Basic) @@ -154,39 +180,19 @@ func makeLen(T types.Type) *Builtin { } } -// nonbasicTypes returns a list containing all of the types T in ts that are non-basic. -func nonbasicTypes(ts []types.Type) []types.Type { - if len(ts) == 0 { - return nil - } - added := make(map[types.Type]bool) // additionally filter duplicates - var filtered []types.Type - for _, T := range ts { - if !isBasic(T) { - if !added[T] { - added[T] = true - filtered = append(filtered, T) - } - } - } - return filtered -} - -// receiverTypeArgs returns the type arguments to a function's reciever. -// Returns an empty list if obj does not have a reciever or its reciever does not have type arguments. +// receiverTypeArgs returns the type arguments to a function's receiver. +// Returns an empty list if obj does not have a receiver or its receiver does not have type arguments. func receiverTypeArgs(obj *types.Func) []types.Type { rtype := recvType(obj) if rtype == nil { return nil } - if isPointer(rtype) { - rtype = rtype.(*types.Pointer).Elem() - } + rtype, _ = deptr(rtype) named, ok := rtype.(*types.Named) if !ok { return nil } - ts := typeparams.NamedTypeArgs(named) + ts := named.TypeArgs() if ts.Len() == 0 { return nil } @@ -205,7 +211,7 @@ func recvAsFirstArg(sig *types.Signature) *types.Signature { for i := 0; i < sig.Params().Len(); i++ { params = append(params, sig.Params().At(i)) } - return typeparams.NewSignatureType(nil, nil, nil, types.NewTuple(params...), sig.Results(), sig.Variadic()) + return types.NewSignatureType(nil, nil, nil, types.NewTuple(params...), sig.Results(), sig.Variadic()) } // instance returns whether an expression is a simple or qualified identifier @@ -222,13 +228,13 @@ func instance(info *types.Info, expr ast.Expr) bool { default: return false } - _, ok := typeparams.GetInstances(info)[id] + _, ok := info.Instances[id] return ok } // instanceArgs returns the Instance[id].TypeArgs as a slice. 
func instanceArgs(info *types.Info, id *ast.Ident) []types.Type { - targList := typeparams.GetInstances(info)[id].TypeArgs + targList := info.Instances[id].TypeArgs if targList == nil { return nil } @@ -280,7 +286,7 @@ func (c *canonizer) Type(T types.Type) types.Type { return T } -// A type for representating an canonized list of types. +// A type for representing a canonized list of types. type typeList []types.Type func (l *typeList) identical(ts []types.Type) bool { @@ -346,13 +352,13 @@ func (m *typeListMap) hash(ts []types.Type) uint32 { } // instantiateMethod instantiates m with targs and returns a canonical representative for this method. -func (canon *canonizer) instantiateMethod(m *types.Func, targs []types.Type, ctxt *typeparams.Context) *types.Func { +func (canon *canonizer) instantiateMethod(m *types.Func, targs []types.Type, ctxt *types.Context) *types.Func { recv := recvType(m) if p, ok := recv.(*types.Pointer); ok { recv = p.Elem() } named := recv.(*types.Named) - inst, err := typeparams.Instantiate(ctxt, typeparams.NamedTypeOrigin(named), targs, false) + inst, err := types.Instantiate(ctxt, named.Origin(), targs, false) if err != nil { panic(err) } @@ -360,3 +366,16 @@ func (canon *canonizer) instantiateMethod(m *types.Func, targs []types.Type, ctx obj, _, _ := types.LookupFieldOrMethod(rep, true, m.Pkg(), m.Name()) return obj.(*types.Func) } + +// Exposed to ssautil using the linkname hack. +func isSyntactic(pkg *Package) bool { return pkg.syntax } + +// mapValues returns a new unordered array of map values. +func mapValues[K comparable, V any](m map[K]V) []V { + vals := make([]V, 0, len(m)) + for _, fn := range m { + vals = append(vals, fn) + } + return vals + +} diff --git a/vendor/golang.org/x/tools/go/ssa/wrappers.go b/vendor/golang.org/x/tools/go/ssa/wrappers.go index 228daf6158..7c7ee4099e 100644 --- a/vendor/golang.org/x/tools/go/ssa/wrappers.go +++ b/vendor/golang.org/x/tools/go/ssa/wrappers.go @@ -28,7 +28,7 @@ import ( // -- wrappers ----------------------------------------------------------- -// makeWrapper returns a synthetic method that delegates to the +// createWrapper returns a synthetic method that delegates to the // declared method denoted by meth.Obj(), first performing any // necessary pointer indirections or field selections implied by meth. // @@ -40,21 +40,17 @@ import ( // - optional implicit field selections // - meth.Obj() may denote a concrete or an interface method // - the result may be a thunk or a wrapper. 
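instantiateMethod above now calls types.Instantiate directly rather than going through the internal typeparams shims. A small self-contained sketch of that API on a freshly type-checked generic type (the source string and names are illustrative):

package main

import (
	"fmt"
	"go/ast"
	"go/importer"
	"go/parser"
	"go/token"
	"go/types"
	"log"
)

const src = `package p; type List[T any] struct { next *List[T]; val T }`

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		log.Fatal(err)
	}
	conf := types.Config{Importer: importer.Default()}
	pkg, err := conf.Check("p", fset, []*ast.File{f}, nil)
	if err != nil {
		log.Fatal(err)
	}
	// Instantiate the generic named type List with [int], mirroring the
	// types.Instantiate call used by instantiateMethod above.
	list := pkg.Scope().Lookup("List").Type().(*types.Named)
	inst, err := types.Instantiate(types.NewContext(), list, []types.Type{types.Typ[types.Int]}, false)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(inst) // p.List[int]
}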
-// -// EXCLUSIVE_LOCKS_REQUIRED(prog.methodsMu) -func makeWrapper(prog *Program, sel *selection, cr *creator) *Function { +func createWrapper(prog *Program, sel *selection, cr *creator) *Function { obj := sel.obj.(*types.Func) // the declared function sig := sel.typ.(*types.Signature) // type of this wrapper var recv *types.Var // wrapper's receiver or thunk's params[0] name := obj.Name() var description string - var start int // first regular param if sel.kind == types.MethodExpr { name += "$thunk" description = "thunk" recv = sig.Params().At(0) - start = 1 } else { description = "wrapper" recv = sig.Recv() @@ -62,8 +58,9 @@ func makeWrapper(prog *Program, sel *selection, cr *creator) *Function { description = fmt.Sprintf("%s for %s", description, sel.obj) if prog.mode&LogSource != 0 { - defer logStack("make %s to (%s)", description, recv.Type())() + defer logStack("create %s to (%s)", description, recv.Type())() } + /* method wrapper */ fn := &Function{ name: name, method: sel, @@ -72,33 +69,53 @@ func makeWrapper(prog *Program, sel *selection, cr *creator) *Function { Synthetic: description, Prog: prog, pos: obj.Pos(), - info: nil, // info is not set on wrappers. + // wrappers have no syntax + build: (*builder).buildWrapper, + syntax: nil, + info: nil, + goversion: "", } cr.Add(fn) + return fn +} + +// buildWrapper builds fn.Body for a method wrapper. +func (b *builder) buildWrapper(fn *Function) { + var recv *types.Var // wrapper's receiver or thunk's params[0] + var start int // first regular param + if fn.method.kind == types.MethodExpr { + recv = fn.Signature.Params().At(0) + start = 1 + } else { + recv = fn.Signature.Recv() + } + fn.startBody() fn.addSpilledParam(recv) createParams(fn, start) - indices := sel.index + indices := fn.method.index var v Value = fn.Locals[0] // spilled receiver - if isPointer(sel.recv) { + srdt, ptrRecv := deptr(fn.method.recv) + if ptrRecv { v = emitLoad(fn, v) // For simple indirection wrappers, perform an informative nil-check: // "value method (T).f called using nil *T pointer" - if len(indices) == 1 && !isPointer(recvType(obj)) { + _, ptrObj := deptr(recvType(fn.object)) + if len(indices) == 1 && !ptrObj { var c Call c.Call.Value = &Builtin{ name: "ssa:wrapnilchk", sig: types.NewSignature(nil, - types.NewTuple(anonVar(sel.recv), anonVar(tString), anonVar(tString)), - types.NewTuple(anonVar(sel.recv)), false), + types.NewTuple(anonVar(fn.method.recv), anonVar(tString), anonVar(tString)), + types.NewTuple(anonVar(fn.method.recv)), false), } c.Call.Args = []Value{ v, - stringConst(deref(sel.recv).String()), - stringConst(sel.obj.Name()), + stringConst(srdt.String()), + stringConst(fn.method.obj.Name()), } c.setType(v.Type()) v = fn.emit(&c) @@ -120,18 +137,14 @@ func makeWrapper(prog *Program, sel *selection, cr *creator) *Function { // address of implicit C field. 
var c Call - if r := recvType(obj); !types.IsInterface(r) { // concrete method - if !isPointer(r) { + if r := recvType(fn.object); !types.IsInterface(r) { // concrete method + if _, ptrObj := deptr(r); !ptrObj { v = emitLoad(fn, v) } - callee := prog.originFunc(obj) - if callee.typeparams.Len() > 0 { - callee = prog.lookupOrCreateInstance(callee, receiverTypeArgs(obj), cr) - } - c.Call.Value = callee + c.Call.Value = fn.Prog.objectMethod(fn.object, b.created) c.Call.Args = append(c.Call.Args, v) } else { - c.Call.Method = obj + c.Call.Method = fn.object c.Call.Value = emitLoad(fn, v) // interface (possibly a typeparam) } for _, arg := range fn.Params[1:] { @@ -139,8 +152,6 @@ func makeWrapper(prog *Program, sel *selection, cr *creator) *Function { } emitTailCall(fn, &c) fn.finishBody() - fn.done() - return fn } // createParams creates parameters for wrapper method fn based on its @@ -149,13 +160,13 @@ func makeWrapper(prog *Program, sel *selection, cr *creator) *Function { func createParams(fn *Function, start int) { tparams := fn.Signature.Params() for i, n := start, tparams.Len(); i < n; i++ { - fn.addParamObj(tparams.At(i)) + fn.addParamVar(tparams.At(i)) } } // -- bounds ----------------------------------------------------------- -// makeBound returns a bound method wrapper (or "bound"), a synthetic +// createBound returns a bound method wrapper (or "bound"), a synthetic // function that delegates to a concrete or interface method denoted // by obj. The resulting function has no receiver, but has one free // variable which will be used as the method's receiver in the @@ -174,66 +185,57 @@ func createParams(fn *Function, start int) { // // f := func() { return t.meth() } // -// Unlike makeWrapper, makeBound need perform no indirection or field +// Unlike createWrapper, createBound need perform no indirection or field // selections because that can be done before the closure is // constructed. -// -// EXCLUSIVE_LOCKS_ACQUIRED(meth.Prog.methodsMu) -func makeBound(prog *Program, obj *types.Func, cr *creator) *Function { - targs := receiverTypeArgs(obj) - key := boundsKey{obj, prog.canon.List(targs)} - - prog.methodsMu.Lock() - defer prog.methodsMu.Unlock() - fn, ok := prog.bounds[key] - if !ok { - description := fmt.Sprintf("bound method wrapper for %s", obj) - if prog.mode&LogSource != 0 { - defer logStack("%s", description)() - } - fn = &Function{ - name: obj.Name() + "$bound", - object: obj, - Signature: changeRecv(obj.Type().(*types.Signature), nil), // drop receiver - Synthetic: description, - Prog: prog, - pos: obj.Pos(), - info: nil, // info is not set on wrappers. 
- } - cr.Add(fn) - - fv := &FreeVar{name: "recv", typ: recvType(obj), parent: fn} - fn.FreeVars = []*FreeVar{fv} - fn.startBody() - createParams(fn, 0) - var c Call - - if !types.IsInterface(recvType(obj)) { // concrete - callee := prog.originFunc(obj) - if callee.typeparams.Len() > 0 { - callee = prog.lookupOrCreateInstance(callee, targs, cr) - } - c.Call.Value = callee - c.Call.Args = []Value{fv} - } else { - c.Call.Method = obj - c.Call.Value = fv // interface (possibly a typeparam) - } - for _, arg := range fn.Params { - c.Call.Args = append(c.Call.Args, arg) - } - emitTailCall(fn, &c) - fn.finishBody() - fn.done() - - prog.bounds[key] = fn +func createBound(prog *Program, obj *types.Func, cr *creator) *Function { + description := fmt.Sprintf("bound method wrapper for %s", obj) + if prog.mode&LogSource != 0 { + defer logStack("%s", description)() + } + /* bound method wrapper */ + fn := &Function{ + name: obj.Name() + "$bound", + object: obj, + Signature: changeRecv(obj.Type().(*types.Signature), nil), // drop receiver + Synthetic: description, + Prog: prog, + pos: obj.Pos(), + // wrappers have no syntax + build: (*builder).buildBound, + syntax: nil, + info: nil, + goversion: "", } + fn.FreeVars = []*FreeVar{{name: "recv", typ: recvType(obj), parent: fn}} // (cyclic) + cr.Add(fn) return fn } +// buildBound builds fn.Body for a bound method closure. +func (b *builder) buildBound(fn *Function) { + fn.startBody() + createParams(fn, 0) + var c Call + + recv := fn.FreeVars[0] + if !types.IsInterface(recvType(fn.object)) { // concrete + c.Call.Value = fn.Prog.objectMethod(fn.object, b.created) + c.Call.Args = []Value{recv} + } else { + c.Call.Method = fn.object + c.Call.Value = recv // interface (possibly a typeparam) + } + for _, arg := range fn.Params { + c.Call.Args = append(c.Call.Args, arg) + } + emitTailCall(fn, &c) + fn.finishBody() +} + // -- thunks ----------------------------------------------------------- -// makeThunk returns a thunk, a synthetic function that delegates to a +// createThunk returns a thunk, a synthetic function that delegates to a // concrete or interface method denoted by sel.obj. The resulting // function has no receiver, but has an additional (first) regular // parameter. @@ -249,38 +251,16 @@ func makeBound(prog *Program, obj *types.Func, cr *creator) *Function { // f is a synthetic wrapper defined as if by: // // f := func(t T) { return t.meth() } -// -// TODO(adonovan): opt: currently the stub is created even when used -// directly in a function call: C.f(i, 0). This is less efficient -// than inlining the stub. -// -// EXCLUSIVE_LOCKS_ACQUIRED(meth.Prog.methodsMu) -func makeThunk(prog *Program, sel *selection, cr *creator) *Function { +func createThunk(prog *Program, sel *selection, cr *creator) *Function { if sel.kind != types.MethodExpr { panic(sel) } - // Canonicalize sel.recv to avoid constructing duplicate thunks. 
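For orientation, a small self-contained sketch (not part of the patch) of the source-level constructs that createBound and createThunk synthesize functions for: a method value is modeled by a "$bound" closure that captures its receiver as a free variable, while a method expression is modeled by a "$thunk" that takes the receiver as its first regular parameter. The type and method names below are illustrative only.

	// bound_vs_thunk.go
	package main

	import "fmt"

	type Counter struct{ n int }

	func (c Counter) Get() int { return c.n }

	func main() {
		c := Counter{n: 3}

		bound := c.Get       // method value: receiver captured now ("$bound" wrapper)
		thunk := Counter.Get // method expression: receiver becomes param 0 ("$thunk")

		fmt.Println(bound())  // 3
		fmt.Println(thunk(c)) // 3
	}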
- canonRecv := prog.canon.Type(sel.recv) - key := selectionKey{ - kind: sel.kind, - recv: canonRecv, - obj: sel.obj, - index: fmt.Sprint(sel.index), - indirect: sel.indirect, + fn := createWrapper(prog, sel, cr) + if fn.Signature.Recv() != nil { + panic(fn) // unexpected receiver } - prog.methodsMu.Lock() - defer prog.methodsMu.Unlock() - - fn, ok := prog.thunks[key] - if !ok { - fn = makeWrapper(prog, sel, cr) - if fn.Signature.Recv() != nil { - panic(fn) // unexpected receiver - } - prog.thunks[key] = fn - } return fn } @@ -288,21 +268,6 @@ func changeRecv(s *types.Signature, recv *types.Var) *types.Signature { return types.NewSignature(recv, s.Params(), s.Results(), s.Variadic()) } -// selectionKey is like types.Selection but a usable map key. -type selectionKey struct { - kind types.SelectionKind - recv types.Type // canonicalized via Program.canon - obj types.Object - index string - indirect bool -} - -// boundsKey is a unique for the object and a type instantiation. -type boundsKey struct { - obj types.Object // t.meth - inst *typeList // canonical type instantiation list. -} - // A local version of *types.Selection. // Needed for some additional control, such as creating a MethodExpr for an instantiation. type selection struct { @@ -327,16 +292,16 @@ func toSelection(sel *types.Selection) *selection { // -- instantiations -------------------------------------------------- -// buildInstantiationWrapper creates a body for an instantiation +// buildInstantiationWrapper builds the body of an instantiation // wrapper fn. The body calls the original generic function, // bracketed by ChangeType conversions on its arguments and results. -func buildInstantiationWrapper(fn *Function) { +func (b *builder) buildInstantiationWrapper(fn *Function) { orig := fn.topLevelOrigin sig := fn.Signature fn.startBody() if sig.Recv() != nil { - fn.addParamObj(sig.Recv()) + fn.addParamVar(sig.Recv()) } createParams(fn, 0) diff --git a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go index c160acb686..11d5c8c3ad 100644 --- a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go +++ b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go @@ -26,7 +26,6 @@ package objectpath import ( "fmt" "go/types" - "sort" "strconv" "strings" @@ -111,7 +110,20 @@ const ( opObj = 'O' // .Obj() (Named, TypeParam) ) -// The For function returns the path to an object relative to its package, +// For is equivalent to new(Encoder).For(obj). +// +// It may be more efficient to reuse a single Encoder across several calls. +func For(obj types.Object) (Path, error) { + return new(Encoder).For(obj) +} + +// An Encoder amortizes the cost of encoding the paths of multiple objects. +// The zero value of an Encoder is ready to use. +type Encoder struct { + scopeMemo map[*types.Scope][]types.Object // memoization of scopeObjects +} + +// For returns the path to an object relative to its package, // or an error if the object is not accessible from the package's Scope. // // The For function guarantees to return a path only for the following objects: @@ -123,6 +135,17 @@ const ( // These objects are sufficient to define the API of their package. // The objects described by a package's export data are drawn from this set. 
// +// The set of objects accessible from a package's Scope depends on +// whether the package was produced by type-checking syntax, or +// reading export data; the latter may have a smaller Scope since +// export data trims objects that are not reachable from an exported +// declaration. For example, the For function will return a path for +// an exported method of an unexported type that is not reachable +// from any public declaration; this path will cause the Object +// function to fail if called on a package loaded from export data. +// TODO(adonovan): is this a bug or feature? Should this package +// compute accessibility in the same way? +// // For does not return a path for predeclared names, imported package // names, local names, and unexported package-level names (except // types). @@ -143,7 +166,7 @@ const ( // .Type().Field(0) (field Var X) // // where p is the package (*types.Package) to which X belongs. -func For(obj types.Object) (Path, error) { +func (enc *Encoder) For(obj types.Object) (Path, error) { pkg := obj.Pkg() // This table lists the cases of interest. @@ -200,7 +223,7 @@ func For(obj types.Object) (Path, error) { // Reject obviously non-viable cases. switch obj := obj.(type) { case *types.TypeName: - if _, ok := obj.Type().(*typeparams.TypeParam); !ok { + if _, ok := obj.Type().(*types.TypeParam); !ok { // With the exception of type parameters, only package-level type names // have a path. return "", fmt.Errorf("no path for %v", obj) @@ -225,7 +248,7 @@ func For(obj types.Object) (Path, error) { return "", fmt.Errorf("func is not a method: %v", obj) } - if path, ok := concreteMethod(obj); ok { + if path, ok := enc.concreteMethod(obj); ok { // Fast path for concrete methods that avoids looping over scope. return path, nil } @@ -241,15 +264,14 @@ func For(obj types.Object) (Path, error) { // the best paths because non-types may // refer to types, but not the reverse. empty := make([]byte, 0, 48) // initial space - names := scope.Names() - for _, name := range names { - o := scope.Lookup(name) + objs := enc.scopeObjects(scope) + for _, o := range objs { tname, ok := o.(*types.TypeName) if !ok { continue // handle non-types in second pass } - path := append(empty, name...) + path := append(empty, o.Name()...) path = append(path, opType) T := o.Type() @@ -261,7 +283,7 @@ func For(obj types.Object) (Path, error) { } } else { if named, _ := T.(*types.Named); named != nil { - if r := findTypeParam(obj, typeparams.ForNamed(named), path, nil); r != nil { + if r := findTypeParam(obj, named.TypeParams(), path, nil); r != nil { // generic named type return Path(r), nil } @@ -275,9 +297,8 @@ func For(obj types.Object) (Path, error) { // Then inspect everything else: // non-types, and declared methods of defined types. - for _, name := range names { - o := scope.Lookup(name) - path := append(empty, name...) + for _, o := range objs { + path := append(empty, o.Name()...) if _, ok := o.(*types.TypeName); !ok { if o.Exported() { // exported non-type (const, var, func) @@ -291,12 +312,12 @@ func For(obj types.Object) (Path, error) { // Inspect declared methods of defined types. if T, ok := o.Type().(*types.Named); ok { path = append(path, opType) - // Note that method index here is always with respect - // to canonical ordering of methods, regardless of how - // they appear in the underlying type. 
- canonical := canonicalize(T) - for i := 0; i < len(canonical); i++ { - m := canonical[i] + // The method index here is always with respect + // to the underlying go/types data structures, + // which ultimately derives from source order + // and must be preserved by export data. + for i := 0; i < T.NumMethods(); i++ { + m := T.Method(i) path2 := appendOpArg(path, opMethod, i) if m == obj { return Path(path2), nil // found declared method @@ -324,7 +345,7 @@ func appendOpArg(path []byte, op byte, arg int) []byte { // This function is just an optimization that avoids the general scope walking // approach. You are expected to fall back to the general approach if this // function fails. -func concreteMethod(meth *types.Func) (Path, bool) { +func (enc *Encoder) concreteMethod(meth *types.Func) (Path, bool) { // Concrete methods can only be declared on package-scoped named types. For // that reason we can skip the expensive walk over the package scope: the // path will always be package -> named type -> method. We can trivially get @@ -397,15 +418,24 @@ func concreteMethod(meth *types.Func) (Path, bool) { path := make([]byte, 0, len(name)+8) path = append(path, name...) path = append(path, opType) - canonical := canonicalize(named) - for i, m := range canonical { - if m == meth { + + // Method indices are w.r.t. the go/types data structures, + // ultimately deriving from source order, + // which is preserved by export data. + for i := 0; i < named.NumMethods(); i++ { + if named.Method(i) == meth { path = appendOpArg(path, opMethod, i) return Path(path), true } } - panic(fmt.Sprintf("couldn't find method %s on type %s", meth, named)) + // Due to golang/go#59944, go/types fails to associate the receiver with + // certain methods on cgo types. + // + // TODO(rfindley): replace this panic once golang/go#59944 is fixed in all Go + // versions gopls supports. + return "", false + // panic(fmt.Sprintf("couldn't find method %s on type %s; methods: %#v", meth, named, enc.namedMethods(named))) } // find finds obj within type T, returning the path to it, or nil if not found. @@ -432,7 +462,7 @@ func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName] } return find(obj, T.Elem(), append(path, opElem), seen) case *types.Signature: - if r := findTypeParam(obj, typeparams.ForSignature(T), path, seen); r != nil { + if r := findTypeParam(obj, T.TypeParams(), path, seen); r != nil { return r } if r := find(obj, T.Params(), append(path, opParams), seen); r != nil { @@ -475,7 +505,7 @@ func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName] } } return nil - case *typeparams.TypeParam: + case *types.TypeParam: name := T.Obj() if name == obj { return append(path, opObj) @@ -495,7 +525,7 @@ func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName] panic(T) } -func findTypeParam(obj types.Object, list *typeparams.TypeParamList, path []byte, seen map[*types.TypeName]bool) []byte { +func findTypeParam(obj types.Object, list *types.TypeParamList, path []byte, seen map[*types.TypeName]bool) []byte { for i := 0; i < list.Len(); i++ { tparam := list.At(i) path2 := appendOpArg(path, opTypeParam, i) @@ -508,11 +538,11 @@ func findTypeParam(obj types.Object, list *typeparams.TypeParamList, path []byte // Object returns the object denoted by path p within the package pkg. 
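As a quick illustration of the For/Object round trip described above (a sketch, not part of the vendored change; it assumes export data for "fmt" is available via go/importer):

	// objectpath_roundtrip.go
	package main

	import (
		"fmt"
		"go/importer"
		"go/types"

		"golang.org/x/tools/go/types/objectpath"
	)

	func main() {
		pkg, err := importer.Default().Import("fmt")
		if err != nil {
			panic(err)
		}
		obj := pkg.Scope().Lookup("Stringer").(*types.TypeName)

		var enc objectpath.Encoder // zero value is ready to use; reuse amortizes encoding cost
		path, err := enc.For(obj)
		if err != nil {
			panic(err)
		}

		back, err := objectpath.Object(pkg, path)
		if err != nil {
			panic(err)
		}
		fmt.Println(path, back == obj) // e.g. Stringer true
	}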
func Object(pkg *types.Package, p Path) (types.Object, error) { - if p == "" { + pathstr := string(p) + if pathstr == "" { return nil, fmt.Errorf("empty path") } - pathstr := string(p) var pkgobj, suffix string if dot := strings.IndexByte(pathstr, opType); dot < 0 { pkgobj = pathstr @@ -532,7 +562,7 @@ func Object(pkg *types.Package, p Path) (types.Object, error) { } // abstraction of *types.{Named,Signature} type hasTypeParams interface { - TypeParams() *typeparams.TypeParamList + TypeParams() *types.TypeParamList } // abstraction of *types.{Named,TypeParam} type hasObj interface { @@ -634,7 +664,7 @@ func Object(pkg *types.Package, p Path) (types.Object, error) { t = tparams.At(index) case opConstraint: - tparam, ok := t.(*typeparams.TypeParam) + tparam, ok := t.(*types.TypeParam) if !ok { return nil, fmt.Errorf("cannot apply %q to %s (got %T, want type parameter)", code, t, t) } @@ -663,15 +693,22 @@ func Object(pkg *types.Package, p Path) (types.Object, error) { t = nil case opMethod: - hasMethods, ok := t.(hasMethods) // Interface or Named - if !ok { + switch t := t.(type) { + case *types.Interface: + if index >= t.NumMethods() { + return nil, fmt.Errorf("method index %d out of range [0-%d)", index, t.NumMethods()) + } + obj = t.Method(index) // Id-ordered + + case *types.Named: + if index >= t.NumMethods() { + return nil, fmt.Errorf("method index %d out of range [0-%d)", index, t.NumMethods()) + } + obj = t.Method(index) + + default: return nil, fmt.Errorf("cannot apply %q to %s (got %T, want interface or named)", code, t, t) } - canonical := canonicalize(hasMethods) - if n := len(canonical); index >= n { - return nil, fmt.Errorf("method index %d out of range [0-%d)", index, n) - } - obj = canonical[index] t = nil case opObj: @@ -694,27 +731,22 @@ func Object(pkg *types.Package, p Path) (types.Object, error) { return obj, nil // success } -// hasMethods is an abstraction of *types.{Interface,Named}. This is pulled up -// because it is used by methodOrdering, which is in turn used by both encoding -// and decoding. -type hasMethods interface { - Method(int) *types.Func - NumMethods() int -} - -// canonicalize returns a canonical order for the methods in a hasMethod. -func canonicalize(hm hasMethods) []*types.Func { - count := hm.NumMethods() - if count <= 0 { - return nil +// scopeObjects is a memoization of scope objects. +// Callers must not modify the result. +func (enc *Encoder) scopeObjects(scope *types.Scope) []types.Object { + m := enc.scopeMemo + if m == nil { + m = make(map[*types.Scope][]types.Object) + enc.scopeMemo = m } - canon := make([]*types.Func, count) - for i := 0; i < count; i++ { - canon[i] = hm.Method(i) - } - less := func(i, j int) bool { - return canon[i].Id() < canon[j].Id() + objs, ok := m[scope] + if !ok { + names := scope.Names() // allocates and sorts + objs = make([]types.Object, len(names)) + for i, name := range names { + objs[i] = scope.Lookup(name) + } + m[scope] = objs } - sort.Slice(canon, less) - return canon + return objs } diff --git a/vendor/golang.org/x/tools/go/types/typeutil/callee.go b/vendor/golang.org/x/tools/go/types/typeutil/callee.go index 90b3ab0e21..90dc541adf 100644 --- a/vendor/golang.org/x/tools/go/types/typeutil/callee.go +++ b/vendor/golang.org/x/tools/go/types/typeutil/callee.go @@ -22,7 +22,7 @@ func Callee(info *types.Info, call *ast.CallExpr) types.Object { // Look through type instantiation if necessary. 
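To illustrate the Callee change just above (looking through an *ast.IndexExpr or *ast.IndexListExpr when the callee is an explicitly instantiated generic function), here is a self-contained sketch; the package source in the string literal is made up for the example.

	// callee_sketch.go
	package main

	import (
		"fmt"
		"go/ast"
		"go/parser"
		"go/token"
		"go/types"

		"golang.org/x/tools/go/types/typeutil"
	)

	const src = `package p

	func first[T any](xs ...T) T { return xs[0] }

	var x = first[int](1, 2)
	`

	func main() {
		fset := token.NewFileSet()
		f, err := parser.ParseFile(fset, "p.go", src, 0)
		if err != nil {
			panic(err)
		}

		info := &types.Info{Uses: map[*ast.Ident]types.Object{}}
		var conf types.Config
		if _, err := conf.Check("p", fset, []*ast.File{f}, info); err != nil {
			panic(err)
		}

		ast.Inspect(f, func(n ast.Node) bool {
			if call, ok := n.(*ast.CallExpr); ok {
				// The callee first[int] is an *ast.IndexExpr; Callee looks
				// through the instantiation and reports the declared func.
				fmt.Println(typeutil.Callee(info, call)) // the generic func p.first
			}
			return true
		})
	}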
isInstance := false switch fun.(type) { - case *ast.IndexExpr, *typeparams.IndexListExpr: + case *ast.IndexExpr, *ast.IndexListExpr: // When extracting the callee from an *IndexExpr, we need to check that // it is a *types.Func and not a *types.Var. // Example: Don't match a slice m within the expression `m[0]()`. diff --git a/vendor/golang.org/x/tools/go/types/typeutil/map.go b/vendor/golang.org/x/tools/go/types/typeutil/map.go index 7bd2fdb38b..544246dac1 100644 --- a/vendor/golang.org/x/tools/go/types/typeutil/map.go +++ b/vendor/golang.org/x/tools/go/types/typeutil/map.go @@ -219,7 +219,7 @@ type Hasher struct { // generic types or functions, and instantiated signatures do not have type // parameter lists, we should never encounter a second non-empty type // parameter list when hashing a generic signature. - sigTParams *typeparams.TypeParamList + sigTParams *types.TypeParamList } // MakeHasher returns a new Hasher instance. @@ -297,7 +297,7 @@ func (h Hasher) hashFor(t types.Type) uint32 { // We should never encounter a generic signature while hashing another // generic signature, but defensively set sigTParams only if h.mask is // unset. - tparams := typeparams.ForSignature(t) + tparams := t.TypeParams() if h.sigTParams == nil && tparams.Len() != 0 { h = Hasher{ // There may be something more efficient than discarding the existing @@ -318,7 +318,7 @@ func (h Hasher) hashFor(t types.Type) uint32 { return hash + 3*h.hashTuple(t.Params()) + 5*h.hashTuple(t.Results()) - case *typeparams.Union: + case *types.Union: return h.hashUnion(t) case *types.Interface: @@ -354,14 +354,14 @@ func (h Hasher) hashFor(t types.Type) uint32 { case *types.Named: hash := h.hashPtr(t.Obj()) - targs := typeparams.NamedTypeArgs(t) + targs := t.TypeArgs() for i := 0; i < targs.Len(); i++ { targ := targs.At(i) hash += 2 * h.Hash(targ) } return hash - case *typeparams.TypeParam: + case *types.TypeParam: return h.hashTypeParam(t) case *types.Tuple: @@ -381,7 +381,7 @@ func (h Hasher) hashTuple(tuple *types.Tuple) uint32 { return hash } -func (h Hasher) hashUnion(t *typeparams.Union) uint32 { +func (h Hasher) hashUnion(t *types.Union) uint32 { // Hash type restrictions. terms, err := typeparams.UnionTermSet(t) // if err != nil t has invalid type restrictions. Fall back on a non-zero @@ -392,7 +392,7 @@ func (h Hasher) hashUnion(t *typeparams.Union) uint32 { return h.hashTermSet(terms) } -func (h Hasher) hashTermSet(terms []*typeparams.Term) uint32 { +func (h Hasher) hashTermSet(terms []*types.Term) uint32 { hash := 9157 + 2*uint32(len(terms)) for _, term := range terms { // term order is not significant. @@ -416,7 +416,7 @@ func (h Hasher) hashTermSet(terms []*typeparams.Term) uint32 { // are not identical. // // Otherwise the hash of t depends only on t's pointer identity. 
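The Hasher updates above keep typeutil.Map working for generics-era types (unions, type parameters, instantiated named types). As a quick illustration of the Map/Hasher contract, where structurally identical types share one entry, a small sketch (illustrative only):

	// typeutil_map_sketch.go
	package main

	import (
		"fmt"
		"go/types"

		"golang.org/x/tools/go/types/typeutil"
	)

	func main() {
		// Two distinct values describing the identical type []*int.
		t1 := types.NewSlice(types.NewPointer(types.Typ[types.Int]))
		t2 := types.NewSlice(types.NewPointer(types.Typ[types.Int]))

		var m typeutil.Map // zero value is ready to use
		m.Set(t1, "hello")

		fmt.Println(m.At(t2)) // hello: identical types hash to the same entry
		fmt.Println(m.Len())  // 1
	}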
-func (h Hasher) hashTypeParam(t *typeparams.TypeParam) uint32 { +func (h Hasher) hashTypeParam(t *types.TypeParam) uint32 { if h.sigTParams != nil { i := t.Index() if i >= 0 && i < h.sigTParams.Len() && t == h.sigTParams.At(i) { @@ -489,7 +489,7 @@ func (h Hasher) shallowHash(t types.Type) uint32 { case *types.Pointer: return 4393139 - case *typeparams.Union: + case *types.Union: return 562448657 case *types.Interface: @@ -504,7 +504,7 @@ func (h Hasher) shallowHash(t types.Type) uint32 { case *types.Named: return h.hashPtr(t.Obj()) - case *typeparams.TypeParam: + case *types.TypeParam: return h.hashPtr(t.Obj()) } panic(fmt.Sprintf("shallowHash: %T: %v", t, t)) diff --git a/vendor/golang.org/x/tools/imports/forward.go b/vendor/golang.org/x/tools/imports/forward.go index d2547c7433..cb6db8893f 100644 --- a/vendor/golang.org/x/tools/imports/forward.go +++ b/vendor/golang.org/x/tools/imports/forward.go @@ -7,8 +7,8 @@ package imports // import "golang.org/x/tools/imports" import ( - "io/ioutil" "log" + "os" "golang.org/x/tools/internal/gocommand" intimp "golang.org/x/tools/internal/imports" @@ -44,7 +44,7 @@ var LocalPrefix string func Process(filename string, src []byte, opt *Options) ([]byte, error) { var err error if src == nil { - src, err = ioutil.ReadFile(filename) + src, err = os.ReadFile(filename) if err != nil { return nil, err } diff --git a/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go b/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go index d15f0eb7ab..2b29168047 100644 --- a/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go +++ b/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go @@ -15,10 +15,6 @@ import ( "strconv" ) -// DiagnoseFuzzTests controls whether the 'tests' analyzer diagnoses fuzz tests -// in Go 1.18+. -var DiagnoseFuzzTests bool = false - func TypeErrorEndPos(fset *token.FileSet, src []byte, start token.Pos) token.Pos { // Get the end position for the type error. offset, end := fset.PositionFor(start, false).Offset, start @@ -46,7 +42,7 @@ func ZeroValue(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr { case u.Info()&types.IsString != 0: return &ast.BasicLit{Kind: token.STRING, Value: `""`} default: - panic("unknown basic type") + panic(fmt.Sprintf("unknown basic type %v", u)) } case *types.Chan, *types.Interface, *types.Map, *types.Pointer, *types.Signature, *types.Slice, *types.Array: return ast.NewIdent("nil") diff --git a/vendor/golang.org/x/tools/internal/analysisinternal/extractdoc.go b/vendor/golang.org/x/tools/internal/analysisinternal/extractdoc.go new file mode 100644 index 0000000000..39507723d3 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/analysisinternal/extractdoc.go @@ -0,0 +1,113 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package analysisinternal + +import ( + "fmt" + "go/parser" + "go/token" + "strings" +) + +// MustExtractDoc is like [ExtractDoc] but it panics on error. +// +// To use, define a doc.go file such as: +// +// // Package halting defines an analyzer of program termination. +// // +// // # Analyzer halting +// // +// // halting: reports whether execution will halt. +// // +// // The halting analyzer reports a diagnostic for functions +// // that run forever. To suppress the diagnostics, try inserting +// // a 'break' statement into each loop. 
+// package halting +// +// import _ "embed" +// +// //go:embed doc.go +// var doc string +// +// And declare your analyzer as: +// +// var Analyzer = &analysis.Analyzer{ +// Name: "halting", +// Doc: analysisutil.MustExtractDoc(doc, "halting"), +// ... +// } +func MustExtractDoc(content, name string) string { + doc, err := ExtractDoc(content, name) + if err != nil { + panic(err) + } + return doc +} + +// ExtractDoc extracts a section of a package doc comment from the +// provided contents of an analyzer package's doc.go file. +// +// A section is a portion of the comment between one heading and +// the next, using this form: +// +// # Analyzer NAME +// +// NAME: SUMMARY +// +// Full description... +// +// where NAME matches the name argument, and SUMMARY is a brief +// verb-phrase that describes the analyzer. The following lines, up +// until the next heading or the end of the comment, contain the full +// description. ExtractDoc returns the portion following the colon, +// which is the form expected by Analyzer.Doc. +// +// Example: +// +// # Analyzer printf +// +// printf: checks consistency of calls to printf +// +// The printf analyzer checks consistency of calls to printf. +// Here is the complete description... +// +// This notation allows a single doc comment to provide documentation +// for multiple analyzers, each in its own section. +// The HTML anchors generated for each heading are predictable. +// +// It returns an error if the content was not a valid Go source file +// containing a package doc comment with a heading of the required +// form. +// +// This machinery enables the package documentation (typically +// accessible via the web at https://pkg.go.dev/) and the command +// documentation (typically printed to a terminal) to be derived from +// the same source and formatted appropriately. +func ExtractDoc(content, name string) (string, error) { + if content == "" { + return "", fmt.Errorf("empty Go source file") + } + fset := token.NewFileSet() + f, err := parser.ParseFile(fset, "", content, parser.ParseComments|parser.PackageClauseOnly) + if err != nil { + return "", fmt.Errorf("not a Go source file") + } + if f.Doc == nil { + return "", fmt.Errorf("Go source file has no package doc comment") + } + for _, section := range strings.Split(f.Doc.Text(), "\n# ") { + if body := strings.TrimPrefix(section, "Analyzer "+name); body != section && + body != "" && + body[0] == '\r' || body[0] == '\n' { + body = strings.TrimSpace(body) + rest := strings.TrimPrefix(body, name+":") + if rest == body { + return "", fmt.Errorf("'Analyzer %s' heading not followed by '%s: summary...' line", name, name) + } + return strings.TrimSpace(rest), nil + } + } + return "", fmt.Errorf("package doc comment contains no 'Analyzer %s' heading", name) +} diff --git a/vendor/golang.org/x/tools/internal/event/keys/util.go b/vendor/golang.org/x/tools/internal/event/keys/util.go new file mode 100644 index 0000000000..c0e8e731c9 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/event/keys/util.go @@ -0,0 +1,21 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package keys + +import ( + "sort" + "strings" +) + +// Join returns a canonical join of the keys in S: +// a sorted comma-separated string list. 
+func Join[S ~[]T, T ~string](s S) string { + strs := make([]string, 0, len(s)) + for _, v := range s { + strs = append(strs, string(v)) + } + sort.Strings(strs) + return strings.Join(strs, ",") +} diff --git a/vendor/golang.org/x/tools/internal/event/tag/tag.go b/vendor/golang.org/x/tools/internal/event/tag/tag.go new file mode 100644 index 0000000000..581b26c204 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/event/tag/tag.go @@ -0,0 +1,59 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package tag provides the labels used for telemetry throughout gopls. +package tag + +import ( + "golang.org/x/tools/internal/event/keys" +) + +var ( + // create the label keys we use + Method = keys.NewString("method", "") + StatusCode = keys.NewString("status.code", "") + StatusMessage = keys.NewString("status.message", "") + RPCID = keys.NewString("id", "") + RPCDirection = keys.NewString("direction", "") + File = keys.NewString("file", "") + Directory = keys.New("directory", "") + URI = keys.New("URI", "") + Package = keys.NewString("package", "") // sorted comma-separated list of Package IDs + PackagePath = keys.NewString("package_path", "") + Query = keys.New("query", "") + Snapshot = keys.NewUInt64("snapshot", "") + Operation = keys.NewString("operation", "") + + Position = keys.New("position", "") + Category = keys.NewString("category", "") + PackageCount = keys.NewInt("packages", "") + Files = keys.New("files", "") + Port = keys.NewInt("port", "") + Type = keys.New("type", "") + HoverKind = keys.NewString("hoverkind", "") + + NewServer = keys.NewString("new_server", "A new server was added") + EndServer = keys.NewString("end_server", "A server was shut down") + + ServerID = keys.NewString("server", "The server ID an event is related to") + Logfile = keys.NewString("logfile", "") + DebugAddress = keys.NewString("debug_address", "") + GoplsPath = keys.NewString("gopls_path", "") + ClientID = keys.NewString("client_id", "") + + Level = keys.NewInt("level", "The logging level") +) + +var ( + // create the stats we measure + Started = keys.NewInt64("started", "Count of started RPCs.") + ReceivedBytes = keys.NewInt64("received_bytes", "Bytes received.") //, unit.Bytes) + SentBytes = keys.NewInt64("sent_bytes", "Bytes sent.") //, unit.Bytes) + Latency = keys.NewFloat64("latency_ms", "Elapsed time in milliseconds") //, unit.Milliseconds) +) + +const ( + Inbound = "in" + Outbound = "out" +) diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk.go deleted file mode 100644 index 798fe599be..0000000000 --- a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk.go +++ /dev/null @@ -1,196 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package fastwalk provides a faster version of filepath.Walk for file system -// scanning tools. -package fastwalk - -import ( - "errors" - "os" - "path/filepath" - "runtime" - "sync" -) - -// ErrTraverseLink is used as a return value from WalkFuncs to indicate that the -// symlink named in the call may be traversed. 
-var ErrTraverseLink = errors.New("fastwalk: traverse symlink, assuming target is a directory") - -// ErrSkipFiles is a used as a return value from WalkFuncs to indicate that the -// callback should not be called for any other files in the current directory. -// Child directories will still be traversed. -var ErrSkipFiles = errors.New("fastwalk: skip remaining files in directory") - -// Walk is a faster implementation of filepath.Walk. -// -// filepath.Walk's design necessarily calls os.Lstat on each file, -// even if the caller needs less info. -// Many tools need only the type of each file. -// On some platforms, this information is provided directly by the readdir -// system call, avoiding the need to stat each file individually. -// fastwalk_unix.go contains a fork of the syscall routines. -// -// See golang.org/issue/16399 -// -// Walk walks the file tree rooted at root, calling walkFn for -// each file or directory in the tree, including root. -// -// If fastWalk returns filepath.SkipDir, the directory is skipped. -// -// Unlike filepath.Walk: -// - file stat calls must be done by the user. -// The only provided metadata is the file type, which does not include -// any permission bits. -// - multiple goroutines stat the filesystem concurrently. The provided -// walkFn must be safe for concurrent use. -// - fastWalk can follow symlinks if walkFn returns the TraverseLink -// sentinel error. It is the walkFn's responsibility to prevent -// fastWalk from going into symlink cycles. -func Walk(root string, walkFn func(path string, typ os.FileMode) error) error { - // TODO(bradfitz): make numWorkers configurable? We used a - // minimum of 4 to give the kernel more info about multiple - // things we want, in hopes its I/O scheduling can take - // advantage of that. Hopefully most are in cache. Maybe 4 is - // even too low of a minimum. Profile more. - numWorkers := 4 - if n := runtime.NumCPU(); n > numWorkers { - numWorkers = n - } - - // Make sure to wait for all workers to finish, otherwise - // walkFn could still be called after returning. This Wait call - // runs after close(e.donec) below. - var wg sync.WaitGroup - defer wg.Wait() - - w := &walker{ - fn: walkFn, - enqueuec: make(chan walkItem, numWorkers), // buffered for performance - workc: make(chan walkItem, numWorkers), // buffered for performance - donec: make(chan struct{}), - - // buffered for correctness & not leaking goroutines: - resc: make(chan error, numWorkers), - } - defer close(w.donec) - - for i := 0; i < numWorkers; i++ { - wg.Add(1) - go w.doWork(&wg) - } - todo := []walkItem{{dir: root}} - out := 0 - for { - workc := w.workc - var workItem walkItem - if len(todo) == 0 { - workc = nil - } else { - workItem = todo[len(todo)-1] - } - select { - case workc <- workItem: - todo = todo[:len(todo)-1] - out++ - case it := <-w.enqueuec: - todo = append(todo, it) - case err := <-w.resc: - out-- - if err != nil { - return err - } - if out == 0 && len(todo) == 0 { - // It's safe to quit here, as long as the buffered - // enqueue channel isn't also readable, which might - // happen if the worker sends both another unit of - // work and its result before the other select was - // scheduled and both w.resc and w.enqueuec were - // readable. - select { - case it := <-w.enqueuec: - todo = append(todo, it) - default: - return nil - } - } - } - } -} - -// doWork reads directories as instructed (via workc) and runs the -// user's callback function. 
-func (w *walker) doWork(wg *sync.WaitGroup) { - defer wg.Done() - for { - select { - case <-w.donec: - return - case it := <-w.workc: - select { - case <-w.donec: - return - case w.resc <- w.walk(it.dir, !it.callbackDone): - } - } - } -} - -type walker struct { - fn func(path string, typ os.FileMode) error - - donec chan struct{} // closed on fastWalk's return - workc chan walkItem // to workers - enqueuec chan walkItem // from workers - resc chan error // from workers -} - -type walkItem struct { - dir string - callbackDone bool // callback already called; don't do it again -} - -func (w *walker) enqueue(it walkItem) { - select { - case w.enqueuec <- it: - case <-w.donec: - } -} - -func (w *walker) onDirEnt(dirName, baseName string, typ os.FileMode) error { - joined := dirName + string(os.PathSeparator) + baseName - if typ == os.ModeDir { - w.enqueue(walkItem{dir: joined}) - return nil - } - - err := w.fn(joined, typ) - if typ == os.ModeSymlink { - if err == ErrTraverseLink { - // Set callbackDone so we don't call it twice for both the - // symlink-as-symlink and the symlink-as-directory later: - w.enqueue(walkItem{dir: joined, callbackDone: true}) - return nil - } - if err == filepath.SkipDir { - // Permit SkipDir on symlinks too. - return nil - } - } - return err -} - -func (w *walker) walk(root string, runUserCallback bool) error { - if runUserCallback { - err := w.fn(root, os.ModeDir) - if err == filepath.SkipDir { - return nil - } - if err != nil { - return err - } - } - - return readDir(root, w.onDirEnt) -} diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_darwin.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_darwin.go deleted file mode 100644 index 0ca55e0d56..0000000000 --- a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_darwin.go +++ /dev/null @@ -1,119 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build darwin && cgo -// +build darwin,cgo - -package fastwalk - -/* -#include - -// fastwalk_readdir_r wraps readdir_r so that we don't have to pass a dirent** -// result pointer which triggers CGO's "Go pointer to Go pointer" check unless -// we allocat the result dirent* with malloc. -// -// fastwalk_readdir_r returns 0 on success, -1 upon reaching the end of the -// directory, or a positive error number to indicate failure. 
-static int fastwalk_readdir_r(DIR *fd, struct dirent *entry) { - struct dirent *result; - int ret = readdir_r(fd, entry, &result); - if (ret == 0 && result == NULL) { - ret = -1; // EOF - } - return ret; -} -*/ -import "C" - -import ( - "os" - "syscall" - "unsafe" -) - -func readDir(dirName string, fn func(dirName, entName string, typ os.FileMode) error) error { - fd, err := openDir(dirName) - if err != nil { - return &os.PathError{Op: "opendir", Path: dirName, Err: err} - } - defer C.closedir(fd) - - skipFiles := false - var dirent syscall.Dirent - for { - ret := int(C.fastwalk_readdir_r(fd, (*C.struct_dirent)(unsafe.Pointer(&dirent)))) - if ret != 0 { - if ret == -1 { - break // EOF - } - if ret == int(syscall.EINTR) { - continue - } - return &os.PathError{Op: "readdir", Path: dirName, Err: syscall.Errno(ret)} - } - if dirent.Ino == 0 { - continue - } - typ := dtToType(dirent.Type) - if skipFiles && typ.IsRegular() { - continue - } - name := (*[len(syscall.Dirent{}.Name)]byte)(unsafe.Pointer(&dirent.Name))[:] - name = name[:dirent.Namlen] - for i, c := range name { - if c == 0 { - name = name[:i] - break - } - } - // Check for useless names before allocating a string. - if string(name) == "." || string(name) == ".." { - continue - } - if err := fn(dirName, string(name), typ); err != nil { - if err != ErrSkipFiles { - return err - } - skipFiles = true - } - } - - return nil -} - -func dtToType(typ uint8) os.FileMode { - switch typ { - case syscall.DT_BLK: - return os.ModeDevice - case syscall.DT_CHR: - return os.ModeDevice | os.ModeCharDevice - case syscall.DT_DIR: - return os.ModeDir - case syscall.DT_FIFO: - return os.ModeNamedPipe - case syscall.DT_LNK: - return os.ModeSymlink - case syscall.DT_REG: - return 0 - case syscall.DT_SOCK: - return os.ModeSocket - } - return ^os.FileMode(0) -} - -// openDir wraps opendir(3) and handles any EINTR errors. The returned *DIR -// needs to be closed with closedir(3). -func openDir(path string) (*C.DIR, error) { - name, err := syscall.BytePtrFromString(path) - if err != nil { - return nil, err - } - for { - fd, err := C.opendir((*C.char)(unsafe.Pointer(name))) - if err != syscall.EINTR { - return fd, err - } - } -} diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_fileno.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_fileno.go deleted file mode 100644 index d58595dbd3..0000000000 --- a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_fileno.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build freebsd || openbsd || netbsd -// +build freebsd openbsd netbsd - -package fastwalk - -import "syscall" - -func direntInode(dirent *syscall.Dirent) uint64 { - return uint64(dirent.Fileno) -} diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_ino.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_ino.go deleted file mode 100644 index d3922890b0..0000000000 --- a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_ino.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build (linux || (darwin && !cgo)) && !appengine -// +build linux darwin,!cgo -// +build !appengine - -package fastwalk - -import "syscall" - -func direntInode(dirent *syscall.Dirent) uint64 { - return dirent.Ino -} diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_bsd.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_bsd.go deleted file mode 100644 index 38a4db6af3..0000000000 --- a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_bsd.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build (darwin && !cgo) || freebsd || openbsd || netbsd -// +build darwin,!cgo freebsd openbsd netbsd - -package fastwalk - -import "syscall" - -func direntNamlen(dirent *syscall.Dirent) uint64 { - return uint64(dirent.Namlen) -} diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_linux.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_linux.go deleted file mode 100644 index c82e57df85..0000000000 --- a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_linux.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build linux && !appengine -// +build linux,!appengine - -package fastwalk - -import ( - "bytes" - "syscall" - "unsafe" -) - -func direntNamlen(dirent *syscall.Dirent) uint64 { - const fixedHdr = uint16(unsafe.Offsetof(syscall.Dirent{}.Name)) - nameBuf := (*[unsafe.Sizeof(dirent.Name)]byte)(unsafe.Pointer(&dirent.Name[0])) - const nameBufLen = uint16(len(nameBuf)) - limit := dirent.Reclen - fixedHdr - if limit > nameBufLen { - limit = nameBufLen - } - nameLen := bytes.IndexByte(nameBuf[:limit], 0) - if nameLen < 0 { - panic("failed to find terminating 0 byte in dirent") - } - return uint64(nameLen) -} diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_portable.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_portable.go deleted file mode 100644 index 085d311600..0000000000 --- a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_portable.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build appengine || (!linux && !darwin && !freebsd && !openbsd && !netbsd) -// +build appengine !linux,!darwin,!freebsd,!openbsd,!netbsd - -package fastwalk - -import ( - "io/ioutil" - "os" -) - -// readDir calls fn for each directory entry in dirName. -// It does not descend into directories or follow symlinks. -// If fn returns a non-nil error, readDir returns with that error -// immediately. 
-func readDir(dirName string, fn func(dirName, entName string, typ os.FileMode) error) error { - fis, err := ioutil.ReadDir(dirName) - if err != nil { - return err - } - skipFiles := false - for _, fi := range fis { - if fi.Mode().IsRegular() && skipFiles { - continue - } - if err := fn(dirName, fi.Name(), fi.Mode()&os.ModeType); err != nil { - if err == ErrSkipFiles { - skipFiles = true - continue - } - return err - } - } - return nil -} diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_unix.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_unix.go deleted file mode 100644 index f12f1a734c..0000000000 --- a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_unix.go +++ /dev/null @@ -1,153 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build (linux || freebsd || openbsd || netbsd || (darwin && !cgo)) && !appengine -// +build linux freebsd openbsd netbsd darwin,!cgo -// +build !appengine - -package fastwalk - -import ( - "fmt" - "os" - "syscall" - "unsafe" -) - -const blockSize = 8 << 10 - -// unknownFileMode is a sentinel (and bogus) os.FileMode -// value used to represent a syscall.DT_UNKNOWN Dirent.Type. -const unknownFileMode os.FileMode = os.ModeNamedPipe | os.ModeSocket | os.ModeDevice - -func readDir(dirName string, fn func(dirName, entName string, typ os.FileMode) error) error { - fd, err := open(dirName, 0, 0) - if err != nil { - return &os.PathError{Op: "open", Path: dirName, Err: err} - } - defer syscall.Close(fd) - - // The buffer must be at least a block long. - buf := make([]byte, blockSize) // stack-allocated; doesn't escape - bufp := 0 // starting read position in buf - nbuf := 0 // end valid data in buf - skipFiles := false - for { - if bufp >= nbuf { - bufp = 0 - nbuf, err = readDirent(fd, buf) - if err != nil { - return os.NewSyscallError("readdirent", err) - } - if nbuf <= 0 { - return nil - } - } - consumed, name, typ := parseDirEnt(buf[bufp:nbuf]) - bufp += consumed - if name == "" || name == "." || name == ".." { - continue - } - // Fallback for filesystems (like old XFS) that don't - // support Dirent.Type and have DT_UNKNOWN (0) there - // instead. - if typ == unknownFileMode { - fi, err := os.Lstat(dirName + "/" + name) - if err != nil { - // It got deleted in the meantime. - if os.IsNotExist(err) { - continue - } - return err - } - typ = fi.Mode() & os.ModeType - } - if skipFiles && typ.IsRegular() { - continue - } - if err := fn(dirName, name, typ); err != nil { - if err == ErrSkipFiles { - skipFiles = true - continue - } - return err - } - } -} - -func parseDirEnt(buf []byte) (consumed int, name string, typ os.FileMode) { - // golang.org/issue/37269 - dirent := &syscall.Dirent{} - copy((*[unsafe.Sizeof(syscall.Dirent{})]byte)(unsafe.Pointer(dirent))[:], buf) - if v := unsafe.Offsetof(dirent.Reclen) + unsafe.Sizeof(dirent.Reclen); uintptr(len(buf)) < v { - panic(fmt.Sprintf("buf size of %d smaller than dirent header size %d", len(buf), v)) - } - if len(buf) < int(dirent.Reclen) { - panic(fmt.Sprintf("buf size %d < record length %d", len(buf), dirent.Reclen)) - } - consumed = int(dirent.Reclen) - if direntInode(dirent) == 0 { // File absent in directory. 
- return - } - switch dirent.Type { - case syscall.DT_REG: - typ = 0 - case syscall.DT_DIR: - typ = os.ModeDir - case syscall.DT_LNK: - typ = os.ModeSymlink - case syscall.DT_BLK: - typ = os.ModeDevice - case syscall.DT_FIFO: - typ = os.ModeNamedPipe - case syscall.DT_SOCK: - typ = os.ModeSocket - case syscall.DT_UNKNOWN: - typ = unknownFileMode - default: - // Skip weird things. - // It's probably a DT_WHT (http://lwn.net/Articles/325369/) - // or something. Revisit if/when this package is moved outside - // of goimports. goimports only cares about regular files, - // symlinks, and directories. - return - } - - nameBuf := (*[unsafe.Sizeof(dirent.Name)]byte)(unsafe.Pointer(&dirent.Name[0])) - nameLen := direntNamlen(dirent) - - // Special cases for common things: - if nameLen == 1 && nameBuf[0] == '.' { - name = "." - } else if nameLen == 2 && nameBuf[0] == '.' && nameBuf[1] == '.' { - name = ".." - } else { - name = string(nameBuf[:nameLen]) - } - return -} - -// According to https://golang.org/doc/go1.14#runtime -// A consequence of the implementation of preemption is that on Unix systems, including Linux and macOS -// systems, programs built with Go 1.14 will receive more signals than programs built with earlier releases. -// -// This causes syscall.Open and syscall.ReadDirent sometimes fail with EINTR errors. -// We need to retry in this case. -func open(path string, mode int, perm uint32) (fd int, err error) { - for { - fd, err := syscall.Open(path, mode, perm) - if err != syscall.EINTR { - return fd, err - } - } -} - -func readDirent(fd int, buf []byte) (n int, err error) { - for { - nbuf, err := syscall.ReadDirent(fd, buf) - if err != syscall.EINTR { - return nbuf, err - } - } -} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/bexport.go b/vendor/golang.org/x/tools/internal/gcimporter/bexport.go deleted file mode 100644 index 30582ed6d3..0000000000 --- a/vendor/golang.org/x/tools/internal/gcimporter/bexport.go +++ /dev/null @@ -1,852 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Binary package export. -// This file was derived from $GOROOT/src/cmd/compile/internal/gc/bexport.go; -// see that file for specification of the format. - -package gcimporter - -import ( - "bytes" - "encoding/binary" - "fmt" - "go/constant" - "go/token" - "go/types" - "math" - "math/big" - "sort" - "strings" -) - -// If debugFormat is set, each integer and string value is preceded by a marker -// and position information in the encoding. This mechanism permits an importer -// to recognize immediately when it is out of sync. The importer recognizes this -// mode automatically (i.e., it can import export data produced with debugging -// support even if debugFormat is not set at the time of import). This mode will -// lead to massively larger export data (by a factor of 2 to 3) and should only -// be enabled during development and debugging. -// -// NOTE: This flag is the first flag to enable if importing dies because of -// (suspected) format errors, and whenever a change is made to the format. -const debugFormat = false // default: false - -// Current export format version. Increase with each format change. -// -// Note: The latest binary (non-indexed) export format is at version 6. -// This exporter is still at level 4, but it doesn't matter since -// the binary importer can handle older versions just fine. 
-// -// 6: package height (CL 105038) -- NOT IMPLEMENTED HERE -// 5: improved position encoding efficiency (issue 20080, CL 41619) -- NOT IMPLEMENTED HERE -// 4: type name objects support type aliases, uses aliasTag -// 3: Go1.8 encoding (same as version 2, aliasTag defined but never used) -// 2: removed unused bool in ODCL export (compiler only) -// 1: header format change (more regular), export package for _ struct fields -// 0: Go1.7 encoding -const exportVersion = 4 - -// trackAllTypes enables cycle tracking for all types, not just named -// types. The existing compiler invariants assume that unnamed types -// that are not completely set up are not used, or else there are spurious -// errors. -// If disabled, only named types are tracked, possibly leading to slightly -// less efficient encoding in rare cases. It also prevents the export of -// some corner-case type declarations (but those are not handled correctly -// with with the textual export format either). -// TODO(gri) enable and remove once issues caused by it are fixed -const trackAllTypes = false - -type exporter struct { - fset *token.FileSet - out bytes.Buffer - - // object -> index maps, indexed in order of serialization - strIndex map[string]int - pkgIndex map[*types.Package]int - typIndex map[types.Type]int - - // position encoding - posInfoFormat bool - prevFile string - prevLine int - - // debugging support - written int // bytes written - indent int // for trace -} - -// internalError represents an error generated inside this package. -type internalError string - -func (e internalError) Error() string { return "gcimporter: " + string(e) } - -func internalErrorf(format string, args ...interface{}) error { - return internalError(fmt.Sprintf(format, args...)) -} - -// BExportData returns binary export data for pkg. -// If no file set is provided, position info will be missing. -func BExportData(fset *token.FileSet, pkg *types.Package) (b []byte, err error) { - if !debug { - defer func() { - if e := recover(); e != nil { - if ierr, ok := e.(internalError); ok { - err = ierr - return - } - // Not an internal error; panic again. - panic(e) - } - }() - } - - p := exporter{ - fset: fset, - strIndex: map[string]int{"": 0}, // empty string is mapped to 0 - pkgIndex: make(map[*types.Package]int), - typIndex: make(map[types.Type]int), - posInfoFormat: true, // TODO(gri) might become a flag, eventually - } - - // write version info - // The version string must start with "version %d" where %d is the version - // number. Additional debugging information may follow after a blank; that - // text is ignored by the importer. 
- p.rawStringln(fmt.Sprintf("version %d", exportVersion)) - var debug string - if debugFormat { - debug = "debug" - } - p.rawStringln(debug) // cannot use p.bool since it's affected by debugFormat; also want to see this clearly - p.bool(trackAllTypes) - p.bool(p.posInfoFormat) - - // --- generic export data --- - - // populate type map with predeclared "known" types - for index, typ := range predeclared() { - p.typIndex[typ] = index - } - if len(p.typIndex) != len(predeclared()) { - return nil, internalError("duplicate entries in type map?") - } - - // write package data - p.pkg(pkg, true) - if trace { - p.tracef("\n") - } - - // write objects - objcount := 0 - scope := pkg.Scope() - for _, name := range scope.Names() { - if !token.IsExported(name) { - continue - } - if trace { - p.tracef("\n") - } - p.obj(scope.Lookup(name)) - objcount++ - } - - // indicate end of list - if trace { - p.tracef("\n") - } - p.tag(endTag) - - // for self-verification only (redundant) - p.int(objcount) - - if trace { - p.tracef("\n") - } - - // --- end of export data --- - - return p.out.Bytes(), nil -} - -func (p *exporter) pkg(pkg *types.Package, emptypath bool) { - if pkg == nil { - panic(internalError("unexpected nil pkg")) - } - - // if we saw the package before, write its index (>= 0) - if i, ok := p.pkgIndex[pkg]; ok { - p.index('P', i) - return - } - - // otherwise, remember the package, write the package tag (< 0) and package data - if trace { - p.tracef("P%d = { ", len(p.pkgIndex)) - defer p.tracef("} ") - } - p.pkgIndex[pkg] = len(p.pkgIndex) - - p.tag(packageTag) - p.string(pkg.Name()) - if emptypath { - p.string("") - } else { - p.string(pkg.Path()) - } -} - -func (p *exporter) obj(obj types.Object) { - switch obj := obj.(type) { - case *types.Const: - p.tag(constTag) - p.pos(obj) - p.qualifiedName(obj) - p.typ(obj.Type()) - p.value(obj.Val()) - - case *types.TypeName: - if obj.IsAlias() { - p.tag(aliasTag) - p.pos(obj) - p.qualifiedName(obj) - } else { - p.tag(typeTag) - } - p.typ(obj.Type()) - - case *types.Var: - p.tag(varTag) - p.pos(obj) - p.qualifiedName(obj) - p.typ(obj.Type()) - - case *types.Func: - p.tag(funcTag) - p.pos(obj) - p.qualifiedName(obj) - sig := obj.Type().(*types.Signature) - p.paramList(sig.Params(), sig.Variadic()) - p.paramList(sig.Results(), false) - - default: - panic(internalErrorf("unexpected object %v (%T)", obj, obj)) - } -} - -func (p *exporter) pos(obj types.Object) { - if !p.posInfoFormat { - return - } - - file, line := p.fileLine(obj) - if file == p.prevFile { - // common case: write line delta - // delta == 0 means different file or no line change - delta := line - p.prevLine - p.int(delta) - if delta == 0 { - p.int(-1) // -1 means no file change - } - } else { - // different file - p.int(0) - // Encode filename as length of common prefix with previous - // filename, followed by (possibly empty) suffix. Filenames - // frequently share path prefixes, so this can save a lot - // of space and make export data size less dependent on file - // path length. The suffix is unlikely to be empty because - // file names tend to end in ".go". 
- n := commonPrefixLen(p.prevFile, file) - p.int(n) // n >= 0 - p.string(file[n:]) // write suffix only - p.prevFile = file - p.int(line) - } - p.prevLine = line -} - -func (p *exporter) fileLine(obj types.Object) (file string, line int) { - if p.fset != nil { - pos := p.fset.Position(obj.Pos()) - file = pos.Filename - line = pos.Line - } - return -} - -func commonPrefixLen(a, b string) int { - if len(a) > len(b) { - a, b = b, a - } - // len(a) <= len(b) - i := 0 - for i < len(a) && a[i] == b[i] { - i++ - } - return i -} - -func (p *exporter) qualifiedName(obj types.Object) { - p.string(obj.Name()) - p.pkg(obj.Pkg(), false) -} - -func (p *exporter) typ(t types.Type) { - if t == nil { - panic(internalError("nil type")) - } - - // Possible optimization: Anonymous pointer types *T where - // T is a named type are common. We could canonicalize all - // such types *T to a single type PT = *T. This would lead - // to at most one *T entry in typIndex, and all future *T's - // would be encoded as the respective index directly. Would - // save 1 byte (pointerTag) per *T and reduce the typIndex - // size (at the cost of a canonicalization map). We can do - // this later, without encoding format change. - - // if we saw the type before, write its index (>= 0) - if i, ok := p.typIndex[t]; ok { - p.index('T', i) - return - } - - // otherwise, remember the type, write the type tag (< 0) and type data - if trackAllTypes { - if trace { - p.tracef("T%d = {>\n", len(p.typIndex)) - defer p.tracef("<\n} ") - } - p.typIndex[t] = len(p.typIndex) - } - - switch t := t.(type) { - case *types.Named: - if !trackAllTypes { - // if we don't track all types, track named types now - p.typIndex[t] = len(p.typIndex) - } - - p.tag(namedTag) - p.pos(t.Obj()) - p.qualifiedName(t.Obj()) - p.typ(t.Underlying()) - if !types.IsInterface(t) { - p.assocMethods(t) - } - - case *types.Array: - p.tag(arrayTag) - p.int64(t.Len()) - p.typ(t.Elem()) - - case *types.Slice: - p.tag(sliceTag) - p.typ(t.Elem()) - - case *dddSlice: - p.tag(dddTag) - p.typ(t.elem) - - case *types.Struct: - p.tag(structTag) - p.fieldList(t) - - case *types.Pointer: - p.tag(pointerTag) - p.typ(t.Elem()) - - case *types.Signature: - p.tag(signatureTag) - p.paramList(t.Params(), t.Variadic()) - p.paramList(t.Results(), false) - - case *types.Interface: - p.tag(interfaceTag) - p.iface(t) - - case *types.Map: - p.tag(mapTag) - p.typ(t.Key()) - p.typ(t.Elem()) - - case *types.Chan: - p.tag(chanTag) - p.int(int(3 - t.Dir())) // hack - p.typ(t.Elem()) - - default: - panic(internalErrorf("unexpected type %T: %s", t, t)) - } -} - -func (p *exporter) assocMethods(named *types.Named) { - // Sort methods (for determinism). 
- var methods []*types.Func - for i := 0; i < named.NumMethods(); i++ { - methods = append(methods, named.Method(i)) - } - sort.Sort(methodsByName(methods)) - - p.int(len(methods)) - - if trace && methods != nil { - p.tracef("associated methods {>\n") - } - - for i, m := range methods { - if trace && i > 0 { - p.tracef("\n") - } - - p.pos(m) - name := m.Name() - p.string(name) - if !exported(name) { - p.pkg(m.Pkg(), false) - } - - sig := m.Type().(*types.Signature) - p.paramList(types.NewTuple(sig.Recv()), false) - p.paramList(sig.Params(), sig.Variadic()) - p.paramList(sig.Results(), false) - p.int(0) // dummy value for go:nointerface pragma - ignored by importer - } - - if trace && methods != nil { - p.tracef("<\n} ") - } -} - -type methodsByName []*types.Func - -func (x methodsByName) Len() int { return len(x) } -func (x methodsByName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } -func (x methodsByName) Less(i, j int) bool { return x[i].Name() < x[j].Name() } - -func (p *exporter) fieldList(t *types.Struct) { - if trace && t.NumFields() > 0 { - p.tracef("fields {>\n") - defer p.tracef("<\n} ") - } - - p.int(t.NumFields()) - for i := 0; i < t.NumFields(); i++ { - if trace && i > 0 { - p.tracef("\n") - } - p.field(t.Field(i)) - p.string(t.Tag(i)) - } -} - -func (p *exporter) field(f *types.Var) { - if !f.IsField() { - panic(internalError("field expected")) - } - - p.pos(f) - p.fieldName(f) - p.typ(f.Type()) -} - -func (p *exporter) iface(t *types.Interface) { - // TODO(gri): enable importer to load embedded interfaces, - // then emit Embeddeds and ExplicitMethods separately here. - p.int(0) - - n := t.NumMethods() - if trace && n > 0 { - p.tracef("methods {>\n") - defer p.tracef("<\n} ") - } - p.int(n) - for i := 0; i < n; i++ { - if trace && i > 0 { - p.tracef("\n") - } - p.method(t.Method(i)) - } -} - -func (p *exporter) method(m *types.Func) { - sig := m.Type().(*types.Signature) - if sig.Recv() == nil { - panic(internalError("method expected")) - } - - p.pos(m) - p.string(m.Name()) - if m.Name() != "_" && !token.IsExported(m.Name()) { - p.pkg(m.Pkg(), false) - } - - // interface method; no need to encode receiver. - p.paramList(sig.Params(), sig.Variadic()) - p.paramList(sig.Results(), false) -} - -func (p *exporter) fieldName(f *types.Var) { - name := f.Name() - - if f.Anonymous() { - // anonymous field - we distinguish between 3 cases: - // 1) field name matches base type name and is exported - // 2) field name matches base type name and is not exported - // 3) field name doesn't match base type name (alias name) - bname := basetypeName(f.Type()) - if name == bname { - if token.IsExported(name) { - name = "" // 1) we don't need to know the field name or package - } else { - name = "?" // 2) use unexported name "?" 
to force package export - } - } else { - // 3) indicate alias and export name as is - // (this requires an extra "@" but this is a rare case) - p.string("@") - } - } - - p.string(name) - if name != "" && !token.IsExported(name) { - p.pkg(f.Pkg(), false) - } -} - -func basetypeName(typ types.Type) string { - switch typ := deref(typ).(type) { - case *types.Basic: - return typ.Name() - case *types.Named: - return typ.Obj().Name() - default: - return "" // unnamed type - } -} - -func (p *exporter) paramList(params *types.Tuple, variadic bool) { - // use negative length to indicate unnamed parameters - // (look at the first parameter only since either all - // names are present or all are absent) - n := params.Len() - if n > 0 && params.At(0).Name() == "" { - n = -n - } - p.int(n) - for i := 0; i < params.Len(); i++ { - q := params.At(i) - t := q.Type() - if variadic && i == params.Len()-1 { - t = &dddSlice{t.(*types.Slice).Elem()} - } - p.typ(t) - if n > 0 { - name := q.Name() - p.string(name) - if name != "_" { - p.pkg(q.Pkg(), false) - } - } - p.string("") // no compiler-specific info - } -} - -func (p *exporter) value(x constant.Value) { - if trace { - p.tracef("= ") - } - - switch x.Kind() { - case constant.Bool: - tag := falseTag - if constant.BoolVal(x) { - tag = trueTag - } - p.tag(tag) - - case constant.Int: - if v, exact := constant.Int64Val(x); exact { - // common case: x fits into an int64 - use compact encoding - p.tag(int64Tag) - p.int64(v) - return - } - // uncommon case: large x - use float encoding - // (powers of 2 will be encoded efficiently with exponent) - p.tag(floatTag) - p.float(constant.ToFloat(x)) - - case constant.Float: - p.tag(floatTag) - p.float(x) - - case constant.Complex: - p.tag(complexTag) - p.float(constant.Real(x)) - p.float(constant.Imag(x)) - - case constant.String: - p.tag(stringTag) - p.string(constant.StringVal(x)) - - case constant.Unknown: - // package contains type errors - p.tag(unknownTag) - - default: - panic(internalErrorf("unexpected value %v (%T)", x, x)) - } -} - -func (p *exporter) float(x constant.Value) { - if x.Kind() != constant.Float { - panic(internalErrorf("unexpected constant %v, want float", x)) - } - // extract sign (there is no -0) - sign := constant.Sign(x) - if sign == 0 { - // x == 0 - p.int(0) - return - } - // x != 0 - - var f big.Float - if v, exact := constant.Float64Val(x); exact { - // float64 - f.SetFloat64(v) - } else if num, denom := constant.Num(x), constant.Denom(x); num.Kind() == constant.Int { - // TODO(gri): add big.Rat accessor to constant.Value. - r := valueToRat(num) - f.SetRat(r.Quo(r, valueToRat(denom))) - } else { - // Value too large to represent as a fraction => inaccessible. - // TODO(gri): add big.Float accessor to constant.Value. - f.SetFloat64(math.MaxFloat64) // FIXME - } - - // extract exponent such that 0.5 <= m < 1.0 - var m big.Float - exp := f.MantExp(&m) - - // extract mantissa as *big.Int - // - set exponent large enough so mant satisfies mant.IsInt() - // - get *big.Int from mant - m.SetMantExp(&m, int(m.MinPrec())) - mant, acc := m.Int(nil) - if acc != big.Exact { - panic(internalError("internal error")) - } - - p.int(sign) - p.int(exp) - p.string(string(mant.Bytes())) -} - -func valueToRat(x constant.Value) *big.Rat { - // Convert little-endian to big-endian. - // I can't believe this is necessary. 
- bytes := constant.Bytes(x) - for i := 0; i < len(bytes)/2; i++ { - bytes[i], bytes[len(bytes)-1-i] = bytes[len(bytes)-1-i], bytes[i] - } - return new(big.Rat).SetInt(new(big.Int).SetBytes(bytes)) -} - -func (p *exporter) bool(b bool) bool { - if trace { - p.tracef("[") - defer p.tracef("= %v] ", b) - } - - x := 0 - if b { - x = 1 - } - p.int(x) - return b -} - -// ---------------------------------------------------------------------------- -// Low-level encoders - -func (p *exporter) index(marker byte, index int) { - if index < 0 { - panic(internalError("invalid index < 0")) - } - if debugFormat { - p.marker('t') - } - if trace { - p.tracef("%c%d ", marker, index) - } - p.rawInt64(int64(index)) -} - -func (p *exporter) tag(tag int) { - if tag >= 0 { - panic(internalError("invalid tag >= 0")) - } - if debugFormat { - p.marker('t') - } - if trace { - p.tracef("%s ", tagString[-tag]) - } - p.rawInt64(int64(tag)) -} - -func (p *exporter) int(x int) { - p.int64(int64(x)) -} - -func (p *exporter) int64(x int64) { - if debugFormat { - p.marker('i') - } - if trace { - p.tracef("%d ", x) - } - p.rawInt64(x) -} - -func (p *exporter) string(s string) { - if debugFormat { - p.marker('s') - } - if trace { - p.tracef("%q ", s) - } - // if we saw the string before, write its index (>= 0) - // (the empty string is mapped to 0) - if i, ok := p.strIndex[s]; ok { - p.rawInt64(int64(i)) - return - } - // otherwise, remember string and write its negative length and bytes - p.strIndex[s] = len(p.strIndex) - p.rawInt64(-int64(len(s))) - for i := 0; i < len(s); i++ { - p.rawByte(s[i]) - } -} - -// marker emits a marker byte and position information which makes -// it easy for a reader to detect if it is "out of sync". Used for -// debugFormat format only. -func (p *exporter) marker(m byte) { - p.rawByte(m) - // Enable this for help tracking down the location - // of an incorrect marker when running in debugFormat. - if false && trace { - p.tracef("#%d ", p.written) - } - p.rawInt64(int64(p.written)) -} - -// rawInt64 should only be used by low-level encoders. -func (p *exporter) rawInt64(x int64) { - var tmp [binary.MaxVarintLen64]byte - n := binary.PutVarint(tmp[:], x) - for i := 0; i < n; i++ { - p.rawByte(tmp[i]) - } -} - -// rawStringln should only be used to emit the initial version string. -func (p *exporter) rawStringln(s string) { - for i := 0; i < len(s); i++ { - p.rawByte(s[i]) - } - p.rawByte('\n') -} - -// rawByte is the bottleneck interface to write to p.out. -// rawByte escapes b as follows (any encoding does that -// hides '$'): -// -// '$' => '|' 'S' -// '|' => '|' '|' -// -// Necessary so other tools can find the end of the -// export data by searching for "$$". -// rawByte should only be used by low-level encoders. -func (p *exporter) rawByte(b byte) { - switch b { - case '$': - // write '$' as '|' 'S' - b = 'S' - fallthrough - case '|': - // write '|' as '|' '|' - p.out.WriteByte('|') - p.written++ - } - p.out.WriteByte(b) - p.written++ -} - -// tracef is like fmt.Printf but it rewrites the format string -// to take care of indentation. -func (p *exporter) tracef(format string, args ...interface{}) { - if strings.ContainsAny(format, "<>\n") { - var buf bytes.Buffer - for i := 0; i < len(format); i++ { - // no need to deal with runes - ch := format[i] - switch ch { - case '>': - p.indent++ - continue - case '<': - p.indent-- - continue - } - buf.WriteByte(ch) - if ch == '\n' { - for j := p.indent; j > 0; j-- { - buf.WriteString(". 
") - } - } - } - format = buf.String() - } - fmt.Printf(format, args...) -} - -// Debugging support. -// (tagString is only used when tracing is enabled) -var tagString = [...]string{ - // Packages - -packageTag: "package", - - // Types - -namedTag: "named type", - -arrayTag: "array", - -sliceTag: "slice", - -dddTag: "ddd", - -structTag: "struct", - -pointerTag: "pointer", - -signatureTag: "signature", - -interfaceTag: "interface", - -mapTag: "map", - -chanTag: "chan", - - // Values - -falseTag: "false", - -trueTag: "true", - -int64Tag: "int64", - -floatTag: "float", - -fractionTag: "fraction", - -complexTag: "complex", - -stringTag: "string", - -unknownTag: "unknown", - - // Type aliases - -aliasTag: "alias", -} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/bimport.go b/vendor/golang.org/x/tools/internal/gcimporter/bimport.go index b85de01470..d98b0db2a9 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/bimport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/bimport.go @@ -2,340 +2,24 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// This file is a copy of $GOROOT/src/go/internal/gcimporter/bimport.go. +// This file contains the remaining vestiges of +// $GOROOT/src/go/internal/gcimporter/bimport.go. package gcimporter import ( - "encoding/binary" "fmt" - "go/constant" "go/token" "go/types" - "sort" - "strconv" - "strings" "sync" - "unicode" - "unicode/utf8" ) -type importer struct { - imports map[string]*types.Package - data []byte - importpath string - buf []byte // for reading strings - version int // export format version - - // object lists - strList []string // in order of appearance - pathList []string // in order of appearance - pkgList []*types.Package // in order of appearance - typList []types.Type // in order of appearance - interfaceList []*types.Interface // for delayed completion only - trackAllTypes bool - - // position encoding - posInfoFormat bool - prevFile string - prevLine int - fake fakeFileSet - - // debugging support - debugFormat bool - read int // bytes read -} - -// BImportData imports a package from the serialized package data -// and returns the number of bytes consumed and a reference to the package. -// If the export data version is not recognized or the format is otherwise -// compromised, an error is returned. -func BImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) { - // catch panics and return them as errors - const currentVersion = 6 - version := -1 // unknown version - defer func() { - if e := recover(); e != nil { - // Return a (possibly nil or incomplete) package unchanged (see #16088). - if version > currentVersion { - err = fmt.Errorf("cannot import %q (%v), export data is newer version - update tool", path, e) - } else { - err = fmt.Errorf("cannot import %q (%v), possibly version skew - reinstall package", path, e) - } - } - }() - - p := importer{ - imports: imports, - data: data, - importpath: path, - version: version, - strList: []string{""}, // empty string is mapped to 0 - pathList: []string{""}, // empty string is mapped to 0 - fake: fakeFileSet{ - fset: fset, - files: make(map[string]*fileInfo), - }, - } - defer p.fake.setLines() // set lines for files in fset - - // read version info - var versionstr string - if b := p.rawByte(); b == 'c' || b == 'd' { - // Go1.7 encoding; first byte encodes low-level - // encoding format (compact vs debug). 
- // For backward-compatibility only (avoid problems with - // old installed packages). Newly compiled packages use - // the extensible format string. - // TODO(gri) Remove this support eventually; after Go1.8. - if b == 'd' { - p.debugFormat = true - } - p.trackAllTypes = p.rawByte() == 'a' - p.posInfoFormat = p.int() != 0 - versionstr = p.string() - if versionstr == "v1" { - version = 0 - } - } else { - // Go1.8 extensible encoding - // read version string and extract version number (ignore anything after the version number) - versionstr = p.rawStringln(b) - if s := strings.SplitN(versionstr, " ", 3); len(s) >= 2 && s[0] == "version" { - if v, err := strconv.Atoi(s[1]); err == nil && v > 0 { - version = v - } - } - } - p.version = version - - // read version specific flags - extend as necessary - switch p.version { - // case currentVersion: - // ... - // fallthrough - case currentVersion, 5, 4, 3, 2, 1: - p.debugFormat = p.rawStringln(p.rawByte()) == "debug" - p.trackAllTypes = p.int() != 0 - p.posInfoFormat = p.int() != 0 - case 0: - // Go1.7 encoding format - nothing to do here - default: - errorf("unknown bexport format version %d (%q)", p.version, versionstr) - } - - // --- generic export data --- - - // populate typList with predeclared "known" types - p.typList = append(p.typList, predeclared()...) - - // read package data - pkg = p.pkg() - - // read objects of phase 1 only (see cmd/compile/internal/gc/bexport.go) - objcount := 0 - for { - tag := p.tagOrIndex() - if tag == endTag { - break - } - p.obj(tag) - objcount++ - } - - // self-verification - if count := p.int(); count != objcount { - errorf("got %d objects; want %d", objcount, count) - } - - // ignore compiler-specific import data - - // complete interfaces - // TODO(gri) re-investigate if we still need to do this in a delayed fashion - for _, typ := range p.interfaceList { - typ.Complete() - } - - // record all referenced packages as imports - list := append(([]*types.Package)(nil), p.pkgList[1:]...) 
- sort.Sort(byPath(list)) - pkg.SetImports(list) - - // package was imported completely and without errors - pkg.MarkComplete() - - return p.read, pkg, nil -} - func errorf(format string, args ...interface{}) { panic(fmt.Sprintf(format, args...)) } -func (p *importer) pkg() *types.Package { - // if the package was seen before, i is its index (>= 0) - i := p.tagOrIndex() - if i >= 0 { - return p.pkgList[i] - } - - // otherwise, i is the package tag (< 0) - if i != packageTag { - errorf("unexpected package tag %d version %d", i, p.version) - } - - // read package data - name := p.string() - var path string - if p.version >= 5 { - path = p.path() - } else { - path = p.string() - } - if p.version >= 6 { - p.int() // package height; unused by go/types - } - - // we should never see an empty package name - if name == "" { - errorf("empty package name in import") - } - - // an empty path denotes the package we are currently importing; - // it must be the first package we see - if (path == "") != (len(p.pkgList) == 0) { - errorf("package path %q for pkg index %d", path, len(p.pkgList)) - } - - // if the package was imported before, use that one; otherwise create a new one - if path == "" { - path = p.importpath - } - pkg := p.imports[path] - if pkg == nil { - pkg = types.NewPackage(path, name) - p.imports[path] = pkg - } else if pkg.Name() != name { - errorf("conflicting names %s and %s for package %q", pkg.Name(), name, path) - } - p.pkgList = append(p.pkgList, pkg) - - return pkg -} - -// objTag returns the tag value for each object kind. -func objTag(obj types.Object) int { - switch obj.(type) { - case *types.Const: - return constTag - case *types.TypeName: - return typeTag - case *types.Var: - return varTag - case *types.Func: - return funcTag - default: - errorf("unexpected object: %v (%T)", obj, obj) // panics - panic("unreachable") - } -} - -func sameObj(a, b types.Object) bool { - // Because unnamed types are not canonicalized, we cannot simply compare types for - // (pointer) identity. - // Ideally we'd check equality of constant values as well, but this is good enough. - return objTag(a) == objTag(b) && types.Identical(a.Type(), b.Type()) -} - -func (p *importer) declare(obj types.Object) { - pkg := obj.Pkg() - if alt := pkg.Scope().Insert(obj); alt != nil { - // This can only trigger if we import a (non-type) object a second time. - // Excluding type aliases, this cannot happen because 1) we only import a package - // once; and b) we ignore compiler-specific export data which may contain - // functions whose inlined function bodies refer to other functions that - // were already imported. - // However, type aliases require reexporting the original type, so we need - // to allow it (see also the comment in cmd/compile/internal/gc/bimport.go, - // method importer.obj, switch case importing functions). - // TODO(gri) review/update this comment once the gc compiler handles type aliases. 
- if !sameObj(obj, alt) { - errorf("inconsistent import:\n\t%v\npreviously imported as:\n\t%v\n", obj, alt) - } - } -} - -func (p *importer) obj(tag int) { - switch tag { - case constTag: - pos := p.pos() - pkg, name := p.qualifiedName() - typ := p.typ(nil, nil) - val := p.value() - p.declare(types.NewConst(pos, pkg, name, typ, val)) - - case aliasTag: - // TODO(gri) verify type alias hookup is correct - pos := p.pos() - pkg, name := p.qualifiedName() - typ := p.typ(nil, nil) - p.declare(types.NewTypeName(pos, pkg, name, typ)) - - case typeTag: - p.typ(nil, nil) - - case varTag: - pos := p.pos() - pkg, name := p.qualifiedName() - typ := p.typ(nil, nil) - p.declare(types.NewVar(pos, pkg, name, typ)) - - case funcTag: - pos := p.pos() - pkg, name := p.qualifiedName() - params, isddd := p.paramList() - result, _ := p.paramList() - sig := types.NewSignature(nil, params, result, isddd) - p.declare(types.NewFunc(pos, pkg, name, sig)) - - default: - errorf("unexpected object tag %d", tag) - } -} - const deltaNewFile = -64 // see cmd/compile/internal/gc/bexport.go -func (p *importer) pos() token.Pos { - if !p.posInfoFormat { - return token.NoPos - } - - file := p.prevFile - line := p.prevLine - delta := p.int() - line += delta - if p.version >= 5 { - if delta == deltaNewFile { - if n := p.int(); n >= 0 { - // file changed - file = p.path() - line = n - } - } - } else { - if delta == 0 { - if n := p.int(); n >= 0 { - // file changed - file = p.prevFile[:n] + p.string() - line = p.int() - } - } - } - p.prevFile = file - p.prevLine = line - - return p.fake.pos(file, line, 0) -} - // Synthesize a token.Pos type fakeFileSet struct { fset *token.FileSet @@ -389,205 +73,6 @@ var ( fakeLinesOnce sync.Once ) -func (p *importer) qualifiedName() (pkg *types.Package, name string) { - name = p.string() - pkg = p.pkg() - return -} - -func (p *importer) record(t types.Type) { - p.typList = append(p.typList, t) -} - -// A dddSlice is a types.Type representing ...T parameters. -// It only appears for parameter types and does not escape -// the importer. -type dddSlice struct { - elem types.Type -} - -func (t *dddSlice) Underlying() types.Type { return t } -func (t *dddSlice) String() string { return "..." + t.elem.String() } - -// parent is the package which declared the type; parent == nil means -// the package currently imported. The parent package is needed for -// exported struct fields and interface methods which don't contain -// explicit package information in the export data. -// -// A non-nil tname is used as the "owner" of the result type; i.e., -// the result type is the underlying type of tname. tname is used -// to give interface methods a named receiver type where possible. 
-func (p *importer) typ(parent *types.Package, tname *types.Named) types.Type { - // if the type was seen before, i is its index (>= 0) - i := p.tagOrIndex() - if i >= 0 { - return p.typList[i] - } - - // otherwise, i is the type tag (< 0) - switch i { - case namedTag: - // read type object - pos := p.pos() - parent, name := p.qualifiedName() - scope := parent.Scope() - obj := scope.Lookup(name) - - // if the object doesn't exist yet, create and insert it - if obj == nil { - obj = types.NewTypeName(pos, parent, name, nil) - scope.Insert(obj) - } - - if _, ok := obj.(*types.TypeName); !ok { - errorf("pkg = %s, name = %s => %s", parent, name, obj) - } - - // associate new named type with obj if it doesn't exist yet - t0 := types.NewNamed(obj.(*types.TypeName), nil, nil) - - // but record the existing type, if any - tname := obj.Type().(*types.Named) // tname is either t0 or the existing type - p.record(tname) - - // read underlying type - t0.SetUnderlying(p.typ(parent, t0)) - - // interfaces don't have associated methods - if types.IsInterface(t0) { - return tname - } - - // read associated methods - for i := p.int(); i > 0; i-- { - // TODO(gri) replace this with something closer to fieldName - pos := p.pos() - name := p.string() - if !exported(name) { - p.pkg() - } - - recv, _ := p.paramList() // TODO(gri) do we need a full param list for the receiver? - params, isddd := p.paramList() - result, _ := p.paramList() - p.int() // go:nointerface pragma - discarded - - sig := types.NewSignature(recv.At(0), params, result, isddd) - t0.AddMethod(types.NewFunc(pos, parent, name, sig)) - } - - return tname - - case arrayTag: - t := new(types.Array) - if p.trackAllTypes { - p.record(t) - } - - n := p.int64() - *t = *types.NewArray(p.typ(parent, nil), n) - return t - - case sliceTag: - t := new(types.Slice) - if p.trackAllTypes { - p.record(t) - } - - *t = *types.NewSlice(p.typ(parent, nil)) - return t - - case dddTag: - t := new(dddSlice) - if p.trackAllTypes { - p.record(t) - } - - t.elem = p.typ(parent, nil) - return t - - case structTag: - t := new(types.Struct) - if p.trackAllTypes { - p.record(t) - } - - *t = *types.NewStruct(p.fieldList(parent)) - return t - - case pointerTag: - t := new(types.Pointer) - if p.trackAllTypes { - p.record(t) - } - - *t = *types.NewPointer(p.typ(parent, nil)) - return t - - case signatureTag: - t := new(types.Signature) - if p.trackAllTypes { - p.record(t) - } - - params, isddd := p.paramList() - result, _ := p.paramList() - *t = *types.NewSignature(nil, params, result, isddd) - return t - - case interfaceTag: - // Create a dummy entry in the type list. This is safe because we - // cannot expect the interface type to appear in a cycle, as any - // such cycle must contain a named type which would have been - // first defined earlier. - // TODO(gri) Is this still true now that we have type aliases? - // See issue #23225. 
- n := len(p.typList) - if p.trackAllTypes { - p.record(nil) - } - - var embeddeds []types.Type - for n := p.int(); n > 0; n-- { - p.pos() - embeddeds = append(embeddeds, p.typ(parent, nil)) - } - - t := newInterface(p.methodList(parent, tname), embeddeds) - p.interfaceList = append(p.interfaceList, t) - if p.trackAllTypes { - p.typList[n] = t - } - return t - - case mapTag: - t := new(types.Map) - if p.trackAllTypes { - p.record(t) - } - - key := p.typ(parent, nil) - val := p.typ(parent, nil) - *t = *types.NewMap(key, val) - return t - - case chanTag: - t := new(types.Chan) - if p.trackAllTypes { - p.record(t) - } - - dir := chanDir(p.int()) - val := p.typ(parent, nil) - *t = *types.NewChan(dir, val) - return t - - default: - errorf("unexpected type tag %d", i) // panics - panic("unreachable") - } -} - func chanDir(d int) types.ChanDir { // tag values must match the constants in cmd/compile/internal/gc/go.go switch d { @@ -603,394 +88,6 @@ func chanDir(d int) types.ChanDir { } } -func (p *importer) fieldList(parent *types.Package) (fields []*types.Var, tags []string) { - if n := p.int(); n > 0 { - fields = make([]*types.Var, n) - tags = make([]string, n) - for i := range fields { - fields[i], tags[i] = p.field(parent) - } - } - return -} - -func (p *importer) field(parent *types.Package) (*types.Var, string) { - pos := p.pos() - pkg, name, alias := p.fieldName(parent) - typ := p.typ(parent, nil) - tag := p.string() - - anonymous := false - if name == "" { - // anonymous field - typ must be T or *T and T must be a type name - switch typ := deref(typ).(type) { - case *types.Basic: // basic types are named types - pkg = nil // // objects defined in Universe scope have no package - name = typ.Name() - case *types.Named: - name = typ.Obj().Name() - default: - errorf("named base type expected") - } - anonymous = true - } else if alias { - // anonymous field: we have an explicit name because it's an alias - anonymous = true - } - - return types.NewField(pos, pkg, name, typ, anonymous), tag -} - -func (p *importer) methodList(parent *types.Package, baseType *types.Named) (methods []*types.Func) { - if n := p.int(); n > 0 { - methods = make([]*types.Func, n) - for i := range methods { - methods[i] = p.method(parent, baseType) - } - } - return -} - -func (p *importer) method(parent *types.Package, baseType *types.Named) *types.Func { - pos := p.pos() - pkg, name, _ := p.fieldName(parent) - // If we don't have a baseType, use a nil receiver. - // A receiver using the actual interface type (which - // we don't know yet) will be filled in when we call - // types.Interface.Complete. 
- var recv *types.Var - if baseType != nil { - recv = types.NewVar(token.NoPos, parent, "", baseType) - } - params, isddd := p.paramList() - result, _ := p.paramList() - sig := types.NewSignature(recv, params, result, isddd) - return types.NewFunc(pos, pkg, name, sig) -} - -func (p *importer) fieldName(parent *types.Package) (pkg *types.Package, name string, alias bool) { - name = p.string() - pkg = parent - if pkg == nil { - // use the imported package instead - pkg = p.pkgList[0] - } - if p.version == 0 && name == "_" { - // version 0 didn't export a package for _ fields - return - } - switch name { - case "": - // 1) field name matches base type name and is exported: nothing to do - case "?": - // 2) field name matches base type name and is not exported: need package - name = "" - pkg = p.pkg() - case "@": - // 3) field name doesn't match type name (alias) - name = p.string() - alias = true - fallthrough - default: - if !exported(name) { - pkg = p.pkg() - } - } - return -} - -func (p *importer) paramList() (*types.Tuple, bool) { - n := p.int() - if n == 0 { - return nil, false - } - // negative length indicates unnamed parameters - named := true - if n < 0 { - n = -n - named = false - } - // n > 0 - params := make([]*types.Var, n) - isddd := false - for i := range params { - params[i], isddd = p.param(named) - } - return types.NewTuple(params...), isddd -} - -func (p *importer) param(named bool) (*types.Var, bool) { - t := p.typ(nil, nil) - td, isddd := t.(*dddSlice) - if isddd { - t = types.NewSlice(td.elem) - } - - var pkg *types.Package - var name string - if named { - name = p.string() - if name == "" { - errorf("expected named parameter") - } - if name != "_" { - pkg = p.pkg() - } - if i := strings.Index(name, "·"); i > 0 { - name = name[:i] // cut off gc-specific parameter numbering - } - } - - // read and discard compiler-specific info - p.string() - - return types.NewVar(token.NoPos, pkg, name, t), isddd -} - -func exported(name string) bool { - ch, _ := utf8.DecodeRuneInString(name) - return unicode.IsUpper(ch) -} - -func (p *importer) value() constant.Value { - switch tag := p.tagOrIndex(); tag { - case falseTag: - return constant.MakeBool(false) - case trueTag: - return constant.MakeBool(true) - case int64Tag: - return constant.MakeInt64(p.int64()) - case floatTag: - return p.float() - case complexTag: - re := p.float() - im := p.float() - return constant.BinaryOp(re, token.ADD, constant.MakeImag(im)) - case stringTag: - return constant.MakeString(p.string()) - case unknownTag: - return constant.MakeUnknown() - default: - errorf("unexpected value tag %d", tag) // panics - panic("unreachable") - } -} - -func (p *importer) float() constant.Value { - sign := p.int() - if sign == 0 { - return constant.MakeInt64(0) - } - - exp := p.int() - mant := []byte(p.string()) // big endian - - // remove leading 0's if any - for len(mant) > 0 && mant[0] == 0 { - mant = mant[1:] - } - - // convert to little endian - // TODO(gri) go/constant should have a more direct conversion function - // (e.g., once it supports a big.Float based implementation) - for i, j := 0, len(mant)-1; i < j; i, j = i+1, j-1 { - mant[i], mant[j] = mant[j], mant[i] - } - - // adjust exponent (constant.MakeFromBytes creates an integer value, - // but mant represents the mantissa bits such that 0.5 <= mant < 1.0) - exp -= len(mant) << 3 - if len(mant) > 0 { - for msd := mant[len(mant)-1]; msd&0x80 == 0; msd <<= 1 { - exp++ - } - } - - x := constant.MakeFromBytes(mant) - switch { - case exp < 0: - d := 
constant.Shift(constant.MakeInt64(1), token.SHL, uint(-exp)) - x = constant.BinaryOp(x, token.QUO, d) - case exp > 0: - x = constant.Shift(x, token.SHL, uint(exp)) - } - - if sign < 0 { - x = constant.UnaryOp(token.SUB, x, 0) - } - return x -} - -// ---------------------------------------------------------------------------- -// Low-level decoders - -func (p *importer) tagOrIndex() int { - if p.debugFormat { - p.marker('t') - } - - return int(p.rawInt64()) -} - -func (p *importer) int() int { - x := p.int64() - if int64(int(x)) != x { - errorf("exported integer too large") - } - return int(x) -} - -func (p *importer) int64() int64 { - if p.debugFormat { - p.marker('i') - } - - return p.rawInt64() -} - -func (p *importer) path() string { - if p.debugFormat { - p.marker('p') - } - // if the path was seen before, i is its index (>= 0) - // (the empty string is at index 0) - i := p.rawInt64() - if i >= 0 { - return p.pathList[i] - } - // otherwise, i is the negative path length (< 0) - a := make([]string, -i) - for n := range a { - a[n] = p.string() - } - s := strings.Join(a, "/") - p.pathList = append(p.pathList, s) - return s -} - -func (p *importer) string() string { - if p.debugFormat { - p.marker('s') - } - // if the string was seen before, i is its index (>= 0) - // (the empty string is at index 0) - i := p.rawInt64() - if i >= 0 { - return p.strList[i] - } - // otherwise, i is the negative string length (< 0) - if n := int(-i); n <= cap(p.buf) { - p.buf = p.buf[:n] - } else { - p.buf = make([]byte, n) - } - for i := range p.buf { - p.buf[i] = p.rawByte() - } - s := string(p.buf) - p.strList = append(p.strList, s) - return s -} - -func (p *importer) marker(want byte) { - if got := p.rawByte(); got != want { - errorf("incorrect marker: got %c; want %c (pos = %d)", got, want, p.read) - } - - pos := p.read - if n := int(p.rawInt64()); n != pos { - errorf("incorrect position: got %d; want %d", n, pos) - } -} - -// rawInt64 should only be used by low-level decoders. -func (p *importer) rawInt64() int64 { - i, err := binary.ReadVarint(p) - if err != nil { - errorf("read error: %v", err) - } - return i -} - -// rawStringln should only be used to read the initial version string. -func (p *importer) rawStringln(b byte) string { - p.buf = p.buf[:0] - for b != '\n' { - p.buf = append(p.buf, b) - b = p.rawByte() - } - return string(p.buf) -} - -// needed for binary.ReadVarint in rawInt64 -func (p *importer) ReadByte() (byte, error) { - return p.rawByte(), nil -} - -// byte is the bottleneck interface for reading p.data. -// It unescapes '|' 'S' to '$' and '|' '|' to '|'. -// rawByte should only be used by low-level decoders. -func (p *importer) rawByte() byte { - b := p.data[0] - r := 1 - if b == '|' { - b = p.data[1] - r = 2 - switch b { - case 'S': - b = '$' - case '|': - // nothing to do - default: - errorf("unexpected escape sequence in export data") - } - } - p.data = p.data[r:] - p.read += r - return b - -} - -// ---------------------------------------------------------------------------- -// Export format - -// Tags. Must be < 0. 
-const ( - // Objects - packageTag = -(iota + 1) - constTag - typeTag - varTag - funcTag - endTag - - // Types - namedTag - arrayTag - sliceTag - dddTag - structTag - pointerTag - signatureTag - interfaceTag - mapTag - chanTag - - // Values - falseTag - trueTag - int64Tag - floatTag - fractionTag // not used by gc - complexTag - stringTag - nilTag // only used by gc (appears in exported inlined function bodies) - unknownTag // not used by gc (only appears in packages with errors) - - // Type aliases - aliasTag -) - var predeclOnce sync.Once var predecl []types.Type // initialized lazily diff --git a/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go index 0372fb3a64..2d078ccb19 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go @@ -7,6 +7,18 @@ // Package gcimporter provides various functions for reading // gc-generated object files that can be used to implement the // Importer interface defined by the Go 1.5 standard library package. +// +// The encoding is deterministic: if the encoder is applied twice to +// the same types.Package data structure, both encodings are equal. +// This property may be important to avoid spurious changes in +// applications such as build systems. +// +// However, the encoder is not necessarily idempotent. Importing an +// exported package may yield a types.Package that, while it +// represents the same set of Go types as the original, may differ in +// the details of its internal representation. Because of these +// differences, re-encoding the imported package may yield a +// different, but equally valid, encoding of the package. package gcimporter // import "golang.org/x/tools/internal/gcimporter" import ( @@ -17,7 +29,6 @@ import ( "go/token" "go/types" "io" - "io/ioutil" "os" "os/exec" "path/filepath" @@ -209,7 +220,7 @@ func Import(packages map[string]*types.Package, path, srcDir string, lookup func switch hdr { case "$$B\n": var data []byte - data, err = ioutil.ReadAll(buf) + data, err = io.ReadAll(buf) if err != nil { break } @@ -218,20 +229,17 @@ func Import(packages map[string]*types.Package, path, srcDir string, lookup func // Or, define a new standard go/types/gcexportdata package. fset := token.NewFileSet() - // The indexed export format starts with an 'i'; the older - // binary export format starts with a 'c', 'd', or 'v' - // (from "version"). Select appropriate importer. + // Select appropriate importer. 
if len(data) > 0 { switch data[0] { - case 'i': - _, pkg, err := IImportData(fset, packages, data[1:], id) - return pkg, err + case 'v', 'c', 'd': // binary, till go1.10 + return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0]) - case 'v', 'c', 'd': - _, pkg, err := BImportData(fset, packages, data, id) + case 'i': // indexed, till go1.19 + _, pkg, err := IImportData(fset, packages, data[1:], id) return pkg, err - case 'u': + case 'u': // unified, from go1.20 _, pkg, err := UImportData(fset, packages, data[1:size], id) return pkg, err diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go index ba53cdcdd1..2ee8c70164 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go @@ -22,17 +22,22 @@ import ( "strconv" "strings" + "golang.org/x/tools/go/types/objectpath" "golang.org/x/tools/internal/tokeninternal" - "golang.org/x/tools/internal/typeparams" ) // IExportShallow encodes "shallow" export data for the specified package. // -// No promises are made about the encoding other than that it can be -// decoded by the same version of IIExportShallow. If you plan to save -// export data in the file system, be sure to include a cryptographic -// digest of the executable in the key to avoid version skew. -func IExportShallow(fset *token.FileSet, pkg *types.Package) ([]byte, error) { +// No promises are made about the encoding other than that it can be decoded by +// the same version of IIExportShallow. If you plan to save export data in the +// file system, be sure to include a cryptographic digest of the executable in +// the key to avoid version skew. +// +// If the provided reportf func is non-nil, it will be used for reporting bugs +// encountered during export. +// TODO(rfindley): remove reportf when we are confident enough in the new +// objectpath encoding. +func IExportShallow(fset *token.FileSet, pkg *types.Package, reportf ReportFunc) ([]byte, error) { // In principle this operation can only fail if out.Write fails, // but that's impossible for bytes.Buffer---and as a matter of // fact iexportCommon doesn't even check for I/O errors. @@ -44,22 +49,30 @@ func IExportShallow(fset *token.FileSet, pkg *types.Package) ([]byte, error) { return out.Bytes(), err } -// IImportShallow decodes "shallow" types.Package data encoded by IExportShallow -// in the same executable. This function cannot import data from +// IImportShallow decodes "shallow" types.Package data encoded by +// IExportShallow in the same executable. This function cannot import data from // cmd/compile or gcexportdata.Write. -func IImportShallow(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string, insert InsertType) (*types.Package, error) { +// +// The importer calls getPackages to obtain package symbols for all +// packages mentioned in the export data, including the one being +// decoded. +// +// If the provided reportf func is non-nil, it will be used for reporting bugs +// encountered during import. +// TODO(rfindley): remove reportf when we are confident enough in the new +// objectpath encoding. 
+func IImportShallow(fset *token.FileSet, getPackages GetPackagesFunc, data []byte, path string, reportf ReportFunc) (*types.Package, error) { const bundle = false - pkgs, err := iimportCommon(fset, imports, data, bundle, path, insert) + const shallow = true + pkgs, err := iimportCommon(fset, getPackages, data, bundle, path, shallow, reportf) if err != nil { return nil, err } return pkgs[0], nil } -// InsertType is the type of a function that creates a types.TypeName -// object for a named type and inserts it into the scope of the -// specified Package. -type InsertType = func(pkg *types.Package, name string) +// ReportFunc is the type of a function used to report formatted bugs. +type ReportFunc = func(string, ...interface{}) // Current bundled export format version. Increase with each format change. // 0: initial implementation @@ -313,8 +326,9 @@ type iexporter struct { out *bytes.Buffer version int - shallow bool // don't put types from other packages in the index - localpkg *types.Package // (nil in bundle mode) + shallow bool // don't put types from other packages in the index + objEncoder *objectpath.Encoder // encodes objects from other packages in shallow mode; lazily allocated + localpkg *types.Package // (nil in bundle mode) // allPkgs tracks all packages that have been referenced by // the export data, so we can ensure to include them in the @@ -354,6 +368,17 @@ func (p *iexporter) trace(format string, args ...interface{}) { fmt.Printf(strings.Repeat("..", p.indent)+format+"\n", args...) } +// objectpathEncoder returns the lazily allocated objectpath.Encoder to use +// when encoding objects in other packages during shallow export. +// +// Using a shared Encoder amortizes some of cost of objectpath search. +func (p *iexporter) objectpathEncoder() *objectpath.Encoder { + if p.objEncoder == nil { + p.objEncoder = new(objectpath.Encoder) + } + return p.objEncoder +} + // stringOff returns the offset of s within the string section. // If not already present, it's added to the end. func (p *iexporter) stringOff(s string) uint64 { @@ -413,7 +438,6 @@ type exportWriter struct { p *iexporter data intWriter - currPkg *types.Package prevFile string prevLine int64 prevColumn int64 @@ -436,7 +460,6 @@ func (p *iexporter) doDecl(obj types.Object) { }() } w := p.newWriter() - w.setPkg(obj.Pkg(), false) switch obj := obj.(type) { case *types.Var: @@ -457,7 +480,7 @@ func (p *iexporter) doDecl(obj types.Object) { } // Function. - if typeparams.ForSignature(sig).Len() == 0 { + if sig.TypeParams().Len() == 0 { w.tag('F') } else { w.tag('G') @@ -470,7 +493,7 @@ func (p *iexporter) doDecl(obj types.Object) { // // While importing the type parameters, tparamList computes and records // their export name, so that it can be later used when writing the index. 
- if tparams := typeparams.ForSignature(sig); tparams.Len() > 0 { + if tparams := sig.TypeParams(); tparams.Len() > 0 { w.tparamList(obj.Name(), tparams, obj.Pkg()) } w.signature(sig) @@ -483,14 +506,14 @@ func (p *iexporter) doDecl(obj types.Object) { case *types.TypeName: t := obj.Type() - if tparam, ok := t.(*typeparams.TypeParam); ok { + if tparam, ok := t.(*types.TypeParam); ok { w.tag('P') w.pos(obj.Pos()) constraint := tparam.Constraint() if p.version >= iexportVersionGo1_18 { implicit := false if iface, _ := constraint.(*types.Interface); iface != nil { - implicit = typeparams.IsImplicit(iface) + implicit = iface.IsImplicit() } w.bool(implicit) } @@ -511,17 +534,17 @@ func (p *iexporter) doDecl(obj types.Object) { panic(internalErrorf("%s is not a defined type", t)) } - if typeparams.ForNamed(named).Len() == 0 { + if named.TypeParams().Len() == 0 { w.tag('T') } else { w.tag('U') } w.pos(obj.Pos()) - if typeparams.ForNamed(named).Len() > 0 { + if named.TypeParams().Len() > 0 { // While importing the type parameters, tparamList computes and records // their export name, so that it can be later used when writing the index. - w.tparamList(obj.Name(), typeparams.ForNamed(named), obj.Pkg()) + w.tparamList(obj.Name(), named.TypeParams(), obj.Pkg()) } underlying := obj.Type().Underlying() @@ -541,7 +564,7 @@ func (p *iexporter) doDecl(obj types.Object) { // Receiver type parameters are type arguments of the receiver type, so // their name must be qualified before exporting recv. - if rparams := typeparams.RecvTypeParams(sig); rparams.Len() > 0 { + if rparams := sig.RecvTypeParams(); rparams.Len() > 0 { prefix := obj.Name() + "." + m.Name() for i := 0; i < rparams.Len(); i++ { rparam := rparams.At(i) @@ -673,6 +696,9 @@ func (w *exportWriter) qualifiedType(obj *types.TypeName) { w.pkg(obj.Pkg()) } +// TODO(rfindley): what does 'pkg' even mean here? It would be better to pass +// it in explicitly into signatures and structs that may use it for +// constructing fields. func (w *exportWriter) typ(t types.Type, pkg *types.Package) { w.data.uint64(w.p.typOff(t, pkg)) } @@ -713,19 +739,19 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { } switch t := t.(type) { case *types.Named: - if targs := typeparams.NamedTypeArgs(t); targs.Len() > 0 { + if targs := t.TypeArgs(); targs.Len() > 0 { w.startType(instanceType) // TODO(rfindley): investigate if this position is correct, and if it // matters. w.pos(t.Obj().Pos()) w.typeList(targs, pkg) - w.typ(typeparams.NamedTypeOrigin(t), pkg) + w.typ(t.Origin(), pkg) return } w.startType(definedType) w.qualifiedType(t.Obj()) - case *typeparams.TypeParam: + case *types.TypeParam: w.startType(typeParamType) w.qualifiedType(t.Obj()) @@ -764,30 +790,53 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { case *types.Signature: w.startType(signatureType) - w.setPkg(pkg, true) + w.pkg(pkg) w.signature(t) case *types.Struct: w.startType(structType) n := t.NumFields() + // Even for struct{} we must emit some qualifying package, because that's + // what the compiler does, and thus that's what the importer expects. + fieldPkg := pkg if n > 0 { - w.setPkg(t.Field(0).Pkg(), true) // qualifying package for field objects - } else { - w.setPkg(pkg, true) + fieldPkg = t.Field(0).Pkg() + } + if fieldPkg == nil { + // TODO(rfindley): improve this very hacky logic. + // + // The importer expects a package to be set for all struct types, even + // those with no fields. A better encoding might be to set NumFields + // before pkg. 
setPkg panics with a nil package, which may be possible + // to reach with invalid packages (and perhaps valid packages, too?), so + // (arbitrarily) set the localpkg if available. + // + // Alternatively, we may be able to simply guarantee that pkg != nil, by + // reconsidering the encoding of constant values. + if w.p.shallow { + fieldPkg = w.p.localpkg + } else { + panic(internalErrorf("no package to set for empty struct")) + } } + w.pkg(fieldPkg) w.uint64(uint64(n)) + for i := 0; i < n; i++ { f := t.Field(i) + if w.p.shallow { + w.objectPath(f) + } w.pos(f.Pos()) w.string(f.Name()) // unexported fields implicitly qualified by prior setPkg - w.typ(f.Type(), pkg) + w.typ(f.Type(), fieldPkg) w.bool(f.Anonymous()) w.string(t.Tag(i)) // note (or tag) } case *types.Interface: w.startType(interfaceType) - w.setPkg(pkg, true) + w.pkg(pkg) n := t.NumEmbeddeds() w.uint64(uint64(n)) @@ -802,17 +851,23 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { w.typ(ft, tPkg) } + // See comment for struct fields. In shallow mode we change the encoding + // for interface methods that are promoted from other packages. + n = t.NumExplicitMethods() w.uint64(uint64(n)) for i := 0; i < n; i++ { m := t.ExplicitMethod(i) + if w.p.shallow { + w.objectPath(m) + } w.pos(m.Pos()) w.string(m.Name()) sig, _ := m.Type().(*types.Signature) w.signature(sig) } - case *typeparams.Union: + case *types.Union: w.startType(unionType) nt := t.Len() w.uint64(uint64(nt)) @@ -827,12 +882,61 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { } } -func (w *exportWriter) setPkg(pkg *types.Package, write bool) { - if write { - w.pkg(pkg) +// objectPath writes the package and objectPath to use to look up obj in a +// different package, when encoding in "shallow" mode. +// +// When doing a shallow import, the importer creates only the local package, +// and requests package symbols for dependencies from the client. +// However, certain types defined in the local package may hold objects defined +// (perhaps deeply) within another package. +// +// For example, consider the following: +// +// package a +// func F() chan * map[string] struct { X int } +// +// package b +// import "a" +// var B = a.F() +// +// In this example, the type of b.B holds fields defined in package a. +// In order to have the correct canonical objects for the field defined in the +// type of B, they are encoded as objectPaths and later looked up in the +// importer. The same problem applies to interface methods. +func (w *exportWriter) objectPath(obj types.Object) { + if obj.Pkg() == nil || obj.Pkg() == w.p.localpkg { + // obj.Pkg() may be nil for the builtin error.Error. + // In this case, or if obj is declared in the local package, no need to + // encode. + w.string("") + return } - - w.currPkg = pkg + objectPath, err := w.p.objectpathEncoder().For(obj) + if err != nil { + // Fall back to the empty string, which will cause the importer to create a + // new object, which matches earlier behavior. Creating a new object is + // sufficient for many purposes (such as type checking), but causes certain + // references algorithms to fail (golang/go#60819). However, we didn't + // notice this problem during months of gopls@v0.12.0 testing. 
+ // + // TODO(golang/go#61674): this workaround is insufficient, as in the case + // where the field forwarded from an instantiated type that may not appear + // in the export data of the original package: + // + // // package a + // type A[P any] struct{ F P } + // + // // package b + // type B a.A[int] + // + // We need to update references algorithms not to depend on this + // de-duplication, at which point we may want to simply remove the + // workaround here. + w.string("") + return + } + w.string(string(objectPath)) + w.pkg(obj.Pkg()) } func (w *exportWriter) signature(sig *types.Signature) { @@ -843,14 +947,14 @@ func (w *exportWriter) signature(sig *types.Signature) { } } -func (w *exportWriter) typeList(ts *typeparams.TypeList, pkg *types.Package) { +func (w *exportWriter) typeList(ts *types.TypeList, pkg *types.Package) { w.uint64(uint64(ts.Len())) for i := 0; i < ts.Len(); i++ { w.typ(ts.At(i), pkg) } } -func (w *exportWriter) tparamList(prefix string, list *typeparams.TypeParamList, pkg *types.Package) { +func (w *exportWriter) tparamList(prefix string, list *types.TypeParamList, pkg *types.Package) { ll := uint64(list.Len()) w.uint64(ll) for i := 0; i < list.Len(); i++ { @@ -868,7 +972,7 @@ const blankMarker = "$" // differs from its actual object name: it is prefixed with a qualifier, and // blank type parameter names are disambiguated by their index in the type // parameter list. -func tparamExportName(prefix string, tparam *typeparams.TypeParam) string { +func tparamExportName(prefix string, tparam *types.TypeParam) string { assert(prefix != "") name := tparam.Obj().Name() if name == "_" { @@ -913,6 +1017,17 @@ func (w *exportWriter) value(typ types.Type, v constant.Value) { w.int64(int64(v.Kind())) } + if v.Kind() == constant.Unknown { + // golang/go#60605: treat unknown constant values as if they have invalid type + // + // This loses some fidelity over the package type-checked from source, but that + // is acceptable. + // + // TODO(rfindley): we should switch on the recorded constant kind rather + // than the constant type + return + } + switch b := typ.Underlying().(*types.Basic); b.Info() & types.IsConstType { case types.IsBoolean: w.bool(constant.BoolVal(v)) @@ -969,6 +1084,16 @@ func constantToFloat(x constant.Value) *big.Float { return &f } +func valueToRat(x constant.Value) *big.Rat { + // Convert little-endian to big-endian. + // I can't believe this is necessary. + bytes := constant.Bytes(x) + for i := 0; i < len(bytes)/2; i++ { + bytes[i], bytes[len(bytes)-1-i] = bytes[len(bytes)-1-i], bytes[i] + } + return new(big.Rat).SetInt(new(big.Int).SetBytes(bytes)) +} + // mpint exports a multi-precision integer. // // For unsigned types, small values are written out as a single @@ -1178,3 +1303,19 @@ func (q *objQueue) popHead() types.Object { q.head++ return obj } + +// internalError represents an error generated inside this package. +type internalError string + +func (e internalError) Error() string { return "gcimporter: " + string(e) } + +// TODO(adonovan): make this call panic, so that it's symmetric with errorf. +// Otherwise it's easy to forget to do anything with the error. +// +// TODO(adonovan): also, consider switching the names "errorf" and +// "internalErrorf" as the former is used for bugs, whose cause is +// internal inconsistency, whereas the latter is used for ordinary +// situations like bad input, whose cause is external. 
+func internalErrorf(format string, args ...interface{}) error { + return internalError(fmt.Sprintf(format, args...)) +} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go index 448f903e86..9bde15e3bc 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go @@ -21,7 +21,7 @@ import ( "sort" "strings" - "golang.org/x/tools/internal/typeparams" + "golang.org/x/tools/go/types/objectpath" ) type intReader struct { @@ -85,7 +85,7 @@ const ( // If the export data version is not recognized or the format is otherwise // compromised, an error is returned. func IImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (int, *types.Package, error) { - pkgs, err := iimportCommon(fset, imports, data, false, path, nil) + pkgs, err := iimportCommon(fset, GetPackagesFromMap(imports), data, false, path, false, nil) if err != nil { return 0, nil, err } @@ -94,10 +94,49 @@ func IImportData(fset *token.FileSet, imports map[string]*types.Package, data [] // IImportBundle imports a set of packages from the serialized package bundle. func IImportBundle(fset *token.FileSet, imports map[string]*types.Package, data []byte) ([]*types.Package, error) { - return iimportCommon(fset, imports, data, true, "", nil) + return iimportCommon(fset, GetPackagesFromMap(imports), data, true, "", false, nil) } -func iimportCommon(fset *token.FileSet, imports map[string]*types.Package, data []byte, bundle bool, path string, insert InsertType) (pkgs []*types.Package, err error) { +// A GetPackagesFunc function obtains the non-nil symbols for a set of +// packages, creating and recursively importing them as needed. An +// implementation should store each package symbol is in the Pkg +// field of the items array. +// +// Any error causes importing to fail. This can be used to quickly read +// the import manifest of an export data file without fully decoding it. +type GetPackagesFunc = func(items []GetPackagesItem) error + +// A GetPackagesItem is a request from the importer for the package +// symbol of the specified name and path. +type GetPackagesItem struct { + Name, Path string + Pkg *types.Package // to be filled in by GetPackagesFunc call + + // private importer state + pathOffset uint64 + nameIndex map[string]uint64 +} + +// GetPackagesFromMap returns a GetPackagesFunc that retrieves +// packages from the given map of package path to package. +// +// The returned function may mutate m: each requested package that is not +// found is created with types.NewPackage and inserted into m. 
+func GetPackagesFromMap(m map[string]*types.Package) GetPackagesFunc { + return func(items []GetPackagesItem) error { + for i, item := range items { + pkg, ok := m[item.Path] + if !ok { + pkg = types.NewPackage(item.Path, item.Name) + m[item.Path] = pkg + } + items[i].Pkg = pkg + } + return nil + } +} + +func iimportCommon(fset *token.FileSet, getPackages GetPackagesFunc, data []byte, bundle bool, path string, shallow bool, reportf ReportFunc) (pkgs []*types.Package, err error) { const currentVersion = iexportVersionCurrent version := int64(-1) if !debug { @@ -108,7 +147,7 @@ func iimportCommon(fset *token.FileSet, imports map[string]*types.Package, data } else if version > currentVersion { err = fmt.Errorf("cannot import %q (%v), export data is newer version - update tool", path, e) } else { - err = fmt.Errorf("cannot import %q (%v), possibly version skew - reinstall package", path, e) + err = fmt.Errorf("internal error while importing %q (%v); please report an issue", path, e) } } }() @@ -117,11 +156,8 @@ func iimportCommon(fset *token.FileSet, imports map[string]*types.Package, data r := &intReader{bytes.NewReader(data), path} if bundle { - bundleVersion := r.uint64() - switch bundleVersion { - case bundleVersion: - default: - errorf("unknown bundle format version %d", bundleVersion) + if v := r.uint64(); v != bundleVersion { + errorf("unknown bundle format version %d", v) } } @@ -139,7 +175,7 @@ func iimportCommon(fset *token.FileSet, imports map[string]*types.Package, data sLen := int64(r.uint64()) var fLen int64 var fileOffset []uint64 - if insert != nil { + if shallow { // Shallow mode uses a different position encoding. fLen = int64(r.uint64()) fileOffset = make([]uint64, r.uint64()) @@ -158,7 +194,8 @@ func iimportCommon(fset *token.FileSet, imports map[string]*types.Package, data p := iimporter{ version: int(version), ipath: path, - insert: insert, + shallow: shallow, + reportf: reportf, stringData: stringData, stringCache: make(map[uint64]string), @@ -185,8 +222,9 @@ func iimportCommon(fset *token.FileSet, imports map[string]*types.Package, data p.typCache[uint64(i)] = pt } - pkgList := make([]*types.Package, r.uint64()) - for i := range pkgList { + // Gather the relevant packages from the manifest. + items := make([]GetPackagesItem, r.uint64()) + for i := range items { pkgPathOff := r.uint64() pkgPath := p.stringAt(pkgPathOff) pkgName := p.stringAt(r.uint64()) @@ -195,30 +233,42 @@ func iimportCommon(fset *token.FileSet, imports map[string]*types.Package, data if pkgPath == "" { pkgPath = path } - pkg := imports[pkgPath] - if pkg == nil { - pkg = types.NewPackage(pkgPath, pkgName) - imports[pkgPath] = pkg - } else if pkg.Name() != pkgName { - errorf("conflicting names %s and %s for package %q", pkg.Name(), pkgName, path) - } - if i == 0 && !bundle { - p.localpkg = pkg - } - - p.pkgCache[pkgPathOff] = pkg + items[i].Name = pkgName + items[i].Path = pkgPath + items[i].pathOffset = pkgPathOff // Read index for package. nameIndex := make(map[string]uint64) nSyms := r.uint64() - // In shallow mode we don't expect an index for other packages. - assert(nSyms == 0 || p.localpkg == pkg || p.insert == nil) + // In shallow mode, only the current package (i=0) has an index. + assert(!(shallow && i > 0 && nSyms != 0)) for ; nSyms > 0; nSyms-- { name := p.stringAt(r.uint64()) nameIndex[name] = r.uint64() } - p.pkgIndex[pkg] = nameIndex + items[i].nameIndex = nameIndex + } + + // Request packages all at once from the client, + // enabling a parallel implementation. 
+ if err := getPackages(items); err != nil { + return nil, err // don't wrap this error + } + + // Check the results and complete the index. + pkgList := make([]*types.Package, len(items)) + for i, item := range items { + pkg := item.Pkg + if pkg == nil { + errorf("internal error: getPackages returned nil package for %q", item.Path) + } else if pkg.Path() != item.Path { + errorf("internal error: getPackages returned wrong path %q, want %q", pkg.Path(), item.Path) + } else if pkg.Name() != item.Name { + errorf("internal error: getPackages returned wrong name %s for package %q, want %s", pkg.Name(), item.Path, item.Name) + } + p.pkgCache[item.pathOffset] = pkg + p.pkgIndex[pkg] = item.nameIndex pkgList[i] = pkg } @@ -270,18 +320,25 @@ func iimportCommon(fset *token.FileSet, imports map[string]*types.Package, data // Therefore, we defer calling SetConstraint there, and call it here instead // after all types are complete. for _, d := range p.later { - typeparams.SetTypeParamConstraint(d.t, d.constraint) + d.t.SetConstraint(d.constraint) } for _, typ := range p.interfaceList { typ.Complete() } + // Workaround for golang/go#61561. See the doc for instanceList for details. + for _, typ := range p.instanceList { + if iface, _ := typ.Underlying().(*types.Interface); iface != nil { + iface.Complete() + } + } + return pkgs, nil } type setConstraintArgs struct { - t *typeparams.TypeParam + t *types.TypeParam constraint types.Type } @@ -289,8 +346,8 @@ type iimporter struct { version int ipath string - localpkg *types.Package - insert func(pkg *types.Package, name string) // "shallow" mode only + shallow bool + reportf ReportFunc // if non-nil, used to report bugs stringData []byte stringCache map[uint64]string @@ -307,6 +364,12 @@ type iimporter struct { fake fakeFileSet interfaceList []*types.Interface + // Workaround for the go/types bug golang/go#61561: instances produced during + // instantiation may contain incomplete interfaces. Here we only complete the + // underlying type of the instance, which is the most common case but doesn't + // handle parameterized interface literals defined deeper in the type. + instanceList []types.Type // instances for later completion (see golang/go#61561) + // Arguments for calls to SetConstraint that are deferred due to recursive types later []setConstraintArgs @@ -338,13 +401,9 @@ func (p *iimporter) doDecl(pkg *types.Package, name string) { off, ok := p.pkgIndex[pkg][name] if !ok { - // In "shallow" mode, call back to the application to - // find the object and insert it into the package scope. - if p.insert != nil { - assert(pkg != p.localpkg) - p.insert(pkg, name) // "can't fail" - return - } + // In deep mode, the index should be complete. In shallow + // mode, we should have already recursively loaded necessary + // dependencies so the above Lookup succeeds. errorf("%v.%v not in index", pkg, name) } @@ -489,7 +548,7 @@ func (r *importReader) obj(name string) { r.declare(types.NewConst(pos, r.currPkg, name, typ, val)) case 'F', 'G': - var tparams []*typeparams.TypeParam + var tparams []*types.TypeParam if tag == 'G' { tparams = r.tparamList() } @@ -506,7 +565,7 @@ func (r *importReader) obj(name string) { r.declare(obj) if tag == 'U' { tparams := r.tparamList() - typeparams.SetForNamed(named, tparams) + named.SetTypeParams(tparams) } underlying := r.p.typAt(r.uint64(), named).Underlying() @@ -523,12 +582,12 @@ func (r *importReader) obj(name string) { // typeparams being used in the method sig/body). 
base := baseType(recv.Type()) assert(base != nil) - targs := typeparams.NamedTypeArgs(base) - var rparams []*typeparams.TypeParam + targs := base.TypeArgs() + var rparams []*types.TypeParam if targs.Len() > 0 { - rparams = make([]*typeparams.TypeParam, targs.Len()) + rparams = make([]*types.TypeParam, targs.Len()) for i := range rparams { - rparams[i] = targs.At(i).(*typeparams.TypeParam) + rparams[i] = targs.At(i).(*types.TypeParam) } } msig := r.signature(recv, rparams, nil) @@ -546,7 +605,7 @@ func (r *importReader) obj(name string) { } name0 := tparamName(name) tn := types.NewTypeName(pos, r.currPkg, name0, nil) - t := typeparams.NewTypeParam(tn, nil) + t := types.NewTypeParam(tn, nil) // To handle recursive references to the typeparam within its // bound, save the partial type in tparamIndex before reading the bounds. @@ -562,7 +621,7 @@ func (r *importReader) obj(name string) { if iface == nil { errorf("non-interface constraint marked implicit") } - typeparams.MarkImplicit(iface) + iface.MarkImplicit() } // The constraint type may not be complete, if we // are in the middle of a type recursion involving type @@ -711,7 +770,8 @@ func (r *importReader) qualifiedIdent() (*types.Package, string) { } func (r *importReader) pos() token.Pos { - if r.p.insert != nil { // shallow mode + if r.p.shallow { + // precise offsets are encoded only in shallow mode return r.posv2() } if r.p.version >= iexportVersionPosCol { @@ -812,13 +872,28 @@ func (r *importReader) doType(base *types.Named) (res types.Type) { fields := make([]*types.Var, r.uint64()) tags := make([]string, len(fields)) for i := range fields { + var field *types.Var + if r.p.shallow { + field, _ = r.objectPathObject().(*types.Var) + } + fpos := r.pos() fname := r.ident() ftyp := r.typ() emb := r.bool() tag := r.string() - fields[i] = types.NewField(fpos, r.currPkg, fname, ftyp, emb) + // Either this is not a shallow import, the field is local, or the + // encoded objectPath failed to produce an object (a bug). + // + // Even in this last, buggy case, fall back on creating a new field. As + // discussed in iexport.go, this is not correct, but mostly works and is + // preferable to failing (for now at least). + if field == nil { + field = types.NewField(fpos, r.currPkg, fname, ftyp, emb) + } + + fields[i] = field tags[i] = tag } return types.NewStruct(fields, tags) @@ -834,6 +909,11 @@ func (r *importReader) doType(base *types.Named) (res types.Type) { methods := make([]*types.Func, r.uint64()) for i := range methods { + var method *types.Func + if r.p.shallow { + method, _ = r.objectPathObject().(*types.Func) + } + mpos := r.pos() mname := r.ident() @@ -843,9 +923,12 @@ func (r *importReader) doType(base *types.Named) (res types.Type) { if base != nil { recv = types.NewVar(token.NoPos, r.currPkg, "", base) } - msig := r.signature(recv, nil, nil) - methods[i] = types.NewFunc(mpos, r.currPkg, mname, msig) + + if method == nil { + method = types.NewFunc(mpos, r.currPkg, mname, msig) + } + methods[i] = method } typ := newInterface(methods, embeddeds) @@ -882,18 +965,21 @@ func (r *importReader) doType(base *types.Named) (res types.Type) { // The imported instantiated type doesn't include any methods, so // we must always use the methods of the base (orig) type. // TODO provide a non-nil *Environment - t, _ := typeparams.Instantiate(nil, baseType, targs, false) + t, _ := types.Instantiate(nil, baseType, targs, false) + + // Workaround for golang/go#61561. See the doc for instanceList for details. 
+ r.p.instanceList = append(r.p.instanceList, t) return t case unionType: if r.p.version < iexportVersionGenerics { errorf("unexpected instantiation type") } - terms := make([]*typeparams.Term, r.uint64()) + terms := make([]*types.Term, r.uint64()) for i := range terms { - terms[i] = typeparams.NewTerm(r.bool(), r.typ()) + terms[i] = types.NewTerm(r.bool(), r.typ()) } - return typeparams.NewUnion(terms) + return types.NewUnion(terms) } } @@ -901,23 +987,43 @@ func (r *importReader) kind() itag { return itag(r.uint64()) } -func (r *importReader) signature(recv *types.Var, rparams []*typeparams.TypeParam, tparams []*typeparams.TypeParam) *types.Signature { +// objectPathObject is the inverse of exportWriter.objectPath. +// +// In shallow mode, certain fields and methods may need to be looked up in an +// imported package. See the doc for exportWriter.objectPath for a full +// explanation. +func (r *importReader) objectPathObject() types.Object { + objPath := objectpath.Path(r.string()) + if objPath == "" { + return nil + } + pkg := r.pkg() + obj, err := objectpath.Object(pkg, objPath) + if err != nil { + if r.p.reportf != nil { + r.p.reportf("failed to find object for objectPath %q: %v", objPath, err) + } + } + return obj +} + +func (r *importReader) signature(recv *types.Var, rparams []*types.TypeParam, tparams []*types.TypeParam) *types.Signature { params := r.paramList() results := r.paramList() variadic := params.Len() > 0 && r.bool() - return typeparams.NewSignatureType(recv, rparams, tparams, params, results, variadic) + return types.NewSignatureType(recv, rparams, tparams, params, results, variadic) } -func (r *importReader) tparamList() []*typeparams.TypeParam { +func (r *importReader) tparamList() []*types.TypeParam { n := r.uint64() if n == 0 { return nil } - xs := make([]*typeparams.TypeParam, n) + xs := make([]*types.TypeParam, n) for i := range xs { // Note: the standard library importer is tolerant of nil types here, // though would panic in SetTypeParams. - xs[i] = r.typ().(*typeparams.TypeParam) + xs[i] = r.typ().(*types.TypeParam) } return xs } diff --git a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go index b285a11ce2..b977435f62 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go @@ -10,8 +10,10 @@ package gcimporter import ( + "fmt" "go/token" "go/types" + "sort" "strings" "golang.org/x/tools/internal/pkgbits" @@ -62,6 +64,14 @@ type typeInfo struct { } func UImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) { + if !debug { + defer func() { + if x := recover(); x != nil { + err = fmt.Errorf("internal error in importing %q (%v); please report an issue", path, x) + } + }() + } + s := string(data) s = s[:strings.LastIndex(s, "\n$$\n")] input := pkgbits.NewPkgDecoder(path, s) @@ -121,6 +131,16 @@ func readUnifiedPackage(fset *token.FileSet, ctxt *types.Context, imports map[st iface.Complete() } + // Imports() of pkg are all of the transitive packages that were loaded. 
+ var imps []*types.Package + for _, imp := range pr.pkgs { + if imp != nil && imp != pkg { + imps = append(imps, imp) + } + } + sort.Sort(byPath(imps)) + pkg.SetImports(imps) + pkg.MarkComplete() return pkg } @@ -260,39 +280,9 @@ func (r *reader) doPkg() *types.Package { pkg := types.NewPackage(path, name) r.p.imports[path] = pkg - imports := make([]*types.Package, r.Len()) - for i := range imports { - imports[i] = r.pkg() - } - pkg.SetImports(flattenImports(imports)) - return pkg } -// flattenImports returns the transitive closure of all imported -// packages rooted from pkgs. -func flattenImports(pkgs []*types.Package) []*types.Package { - var res []*types.Package - seen := make(map[*types.Package]struct{}) - for _, pkg := range pkgs { - if _, ok := seen[pkg]; ok { - continue - } - seen[pkg] = struct{}{} - res = append(res, pkg) - - // pkg.Imports() is already flattened. - for _, pkg := range pkg.Imports() { - if _, ok := seen[pkg]; ok { - continue - } - seen[pkg] = struct{}{} - res = append(res, pkg) - } - } - return res -} - // @@@ Types func (r *reader) typ() types.Type { diff --git a/vendor/golang.org/x/tools/internal/gocommand/invoke.go b/vendor/golang.org/x/tools/internal/gocommand/invoke.go index d50551693f..55312522dc 100644 --- a/vendor/golang.org/x/tools/internal/gocommand/invoke.go +++ b/vendor/golang.org/x/tools/internal/gocommand/invoke.go @@ -8,10 +8,13 @@ package gocommand import ( "bytes" "context" + "errors" "fmt" "io" "log" "os" + "os/exec" + "reflect" "regexp" "runtime" "strconv" @@ -19,9 +22,10 @@ import ( "sync" "time" - exec "golang.org/x/sys/execabs" - "golang.org/x/tools/internal/event" + "golang.org/x/tools/internal/event/keys" + "golang.org/x/tools/internal/event/label" + "golang.org/x/tools/internal/event/tag" ) // An Runner will run go command invocations and serialize @@ -51,9 +55,19 @@ func (runner *Runner) initialize() { // 1.14: go: updating go.mod: existing contents have changed since last read var modConcurrencyError = regexp.MustCompile(`go:.*go.mod.*contents have changed`) +// verb is an event label for the go command verb. +var verb = keys.NewString("verb", "go command verb") + +func invLabels(inv Invocation) []label.Label { + return []label.Label{verb.Of(inv.Verb), tag.Directory.Of(inv.WorkingDir)} +} + // Run is a convenience wrapper around RunRaw. // It returns only stdout and a "friendly" error. func (runner *Runner) Run(ctx context.Context, inv Invocation) (*bytes.Buffer, error) { + ctx, done := event.Start(ctx, "gocommand.Runner.Run", invLabels(inv)...) + defer done() + stdout, _, friendly, _ := runner.RunRaw(ctx, inv) return stdout, friendly } @@ -61,13 +75,19 @@ func (runner *Runner) Run(ctx context.Context, inv Invocation) (*bytes.Buffer, e // RunPiped runs the invocation serially, always waiting for any concurrent // invocations to complete first. func (runner *Runner) RunPiped(ctx context.Context, inv Invocation, stdout, stderr io.Writer) error { + ctx, done := event.Start(ctx, "gocommand.Runner.RunPiped", invLabels(inv)...) + defer done() + _, err := runner.runPiped(ctx, inv, stdout, stderr) return err } // RunRaw runs the invocation, serializing requests only if they fight over // go.mod changes. +// Postcondition: both error results have same nilness. func (runner *Runner) RunRaw(ctx context.Context, inv Invocation) (*bytes.Buffer, *bytes.Buffer, error, error) { + ctx, done := event.Start(ctx, "gocommand.Runner.RunRaw", invLabels(inv)...) + defer done() // Make sure the runner is always initialized. 
runner.initialize() @@ -75,23 +95,24 @@ func (runner *Runner) RunRaw(ctx context.Context, inv Invocation) (*bytes.Buffer stdout, stderr, friendlyErr, err := runner.runConcurrent(ctx, inv) // If we encounter a load concurrency error, we need to retry serially. - if friendlyErr == nil || !modConcurrencyError.MatchString(friendlyErr.Error()) { - return stdout, stderr, friendlyErr, err + if friendlyErr != nil && modConcurrencyError.MatchString(friendlyErr.Error()) { + event.Error(ctx, "Load concurrency error, will retry serially", err) + + // Run serially by calling runPiped. + stdout.Reset() + stderr.Reset() + friendlyErr, err = runner.runPiped(ctx, inv, stdout, stderr) } - event.Error(ctx, "Load concurrency error, will retry serially", err) - // Run serially by calling runPiped. - stdout.Reset() - stderr.Reset() - friendlyErr, err = runner.runPiped(ctx, inv, stdout, stderr) return stdout, stderr, friendlyErr, err } +// Postcondition: both error results have same nilness. func (runner *Runner) runConcurrent(ctx context.Context, inv Invocation) (*bytes.Buffer, *bytes.Buffer, error, error) { // Wait for 1 worker to become available. select { case <-ctx.Done(): - return nil, nil, nil, ctx.Err() + return nil, nil, ctx.Err(), ctx.Err() case runner.inFlight <- struct{}{}: defer func() { <-runner.inFlight }() } @@ -101,6 +122,7 @@ func (runner *Runner) runConcurrent(ctx context.Context, inv Invocation) (*bytes return stdout, stderr, friendlyErr, err } +// Postcondition: both error results have same nilness. func (runner *Runner) runPiped(ctx context.Context, inv Invocation, stdout, stderr io.Writer) (error, error) { // Make sure the runner is always initialized. runner.initialize() @@ -109,7 +131,7 @@ func (runner *Runner) runPiped(ctx context.Context, inv Invocation, stdout, stde // runPiped commands. select { case <-ctx.Done(): - return nil, ctx.Err() + return ctx.Err(), ctx.Err() case runner.serialized <- struct{}{}: defer func() { <-runner.serialized }() } @@ -119,7 +141,7 @@ func (runner *Runner) runPiped(ctx context.Context, inv Invocation, stdout, stde for i := 0; i < maxInFlight; i++ { select { case <-ctx.Done(): - return nil, ctx.Err() + return ctx.Err(), ctx.Err() case runner.inFlight <- struct{}{}: // Make sure we always "return" any workers we took. defer func() { <-runner.inFlight }() @@ -152,6 +174,7 @@ type Invocation struct { Logf func(format string, args ...interface{}) } +// Postcondition: both error results have same nilness. func (i *Invocation) runWithFriendlyError(ctx context.Context, stdout, stderr io.Writer) (friendlyError error, rawError error) { rawError = i.run(ctx, stdout, stderr) if rawError != nil { @@ -215,6 +238,18 @@ func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error { cmd := exec.Command("go", goArgs...) cmd.Stdout = stdout cmd.Stderr = stderr + + // cmd.WaitDelay was added only in go1.20 (see #50436). + if waitDelay := reflect.ValueOf(cmd).Elem().FieldByName("WaitDelay"); waitDelay.IsValid() { + // https://go.dev/issue/59541: don't wait forever copying stderr + // after the command has exited. + // After CL 484741 we copy stdout manually, so we we'll stop reading that as + // soon as ctx is done. However, we also don't want to wait around forever + // for stderr. Give a much-longer-than-reasonable delay and then assume that + // something has wedged in the kernel or runtime. 
+ waitDelay.Set(reflect.ValueOf(30 * time.Second)) + } + // On darwin the cwd gets resolved to the real path, which breaks anything that // expects the working directory to keep the original path, including the // go command when dealing with modules. @@ -229,6 +264,7 @@ func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error { cmd.Env = append(cmd.Env, "PWD="+i.WorkingDir) cmd.Dir = i.WorkingDir } + defer func(start time.Time) { log("%s for %v", time.Since(start), cmdDebugStr(cmd)) }(time.Now()) return runCmdContext(ctx, cmd) @@ -242,10 +278,85 @@ var DebugHangingGoCommands = false // runCmdContext is like exec.CommandContext except it sends os.Interrupt // before os.Kill. -func runCmdContext(ctx context.Context, cmd *exec.Cmd) error { - if err := cmd.Start(); err != nil { +func runCmdContext(ctx context.Context, cmd *exec.Cmd) (err error) { + // If cmd.Stdout is not an *os.File, the exec package will create a pipe and + // copy it to the Writer in a goroutine until the process has finished and + // either the pipe reaches EOF or command's WaitDelay expires. + // + // However, the output from 'go list' can be quite large, and we don't want to + // keep reading (and allocating buffers) if we've already decided we don't + // care about the output. We don't want to wait for the process to finish, and + // we don't wait to wait for the WaitDelay to expire either. + // + // Instead, if cmd.Stdout requires a copying goroutine we explicitly replace + // it with a pipe (which is an *os.File), which we can close in order to stop + // copying output as soon as we realize we don't care about it. + var stdoutW *os.File + if cmd.Stdout != nil { + if _, ok := cmd.Stdout.(*os.File); !ok { + var stdoutR *os.File + stdoutR, stdoutW, err = os.Pipe() + if err != nil { + return err + } + prevStdout := cmd.Stdout + cmd.Stdout = stdoutW + + stdoutErr := make(chan error, 1) + go func() { + _, err := io.Copy(prevStdout, stdoutR) + if err != nil { + err = fmt.Errorf("copying stdout: %w", err) + } + stdoutErr <- err + }() + defer func() { + // We started a goroutine to copy a stdout pipe. + // Wait for it to finish, or terminate it if need be. + var err2 error + select { + case err2 = <-stdoutErr: + stdoutR.Close() + case <-ctx.Done(): + stdoutR.Close() + // Per https://pkg.go.dev/os#File.Close, the call to stdoutR.Close + // should cause the Read call in io.Copy to unblock and return + // immediately, but we still need to receive from stdoutErr to confirm + // that it has happened. + <-stdoutErr + err2 = ctx.Err() + } + if err == nil { + err = err2 + } + }() + + // Per https://pkg.go.dev/os/exec#Cmd, “If Stdout and Stderr are the + // same writer, and have a type that can be compared with ==, at most + // one goroutine at a time will call Write.” + // + // Since we're starting a goroutine that writes to cmd.Stdout, we must + // also update cmd.Stderr so that it still holds. + func() { + defer func() { recover() }() + if cmd.Stderr == prevStdout { + cmd.Stderr = cmd.Stdout + } + }() + } + } + + err = cmd.Start() + if stdoutW != nil { + // The child process has inherited the pipe file, + // so close the copy held in this process. + stdoutW.Close() + stdoutW = nil + } + if err != nil { return err } + resChan := make(chan error, 1) go func() { resChan <- cmd.Wait() @@ -253,11 +364,14 @@ func runCmdContext(ctx context.Context, cmd *exec.Cmd) error { // If we're interested in debugging hanging Go commands, stop waiting after a // minute and panic with interesting information. 
- if DebugHangingGoCommands { + debug := DebugHangingGoCommands + if debug { + timer := time.NewTimer(1 * time.Minute) + defer timer.Stop() select { case err := <-resChan: return err - case <-time.After(1 * time.Minute): + case <-timer.C: HandleHangingGoCommand(cmd.Process) case <-ctx.Done(): } @@ -270,30 +384,25 @@ func runCmdContext(ctx context.Context, cmd *exec.Cmd) error { } // Cancelled. Interrupt and see if it ends voluntarily. - cmd.Process.Signal(os.Interrupt) - select { - case err := <-resChan: - return err - case <-time.After(time.Second): + if err := cmd.Process.Signal(os.Interrupt); err == nil { + // (We used to wait only 1s but this proved + // fragile on loaded builder machines.) + timer := time.NewTimer(5 * time.Second) + defer timer.Stop() + select { + case err := <-resChan: + return err + case <-timer.C: + } } // Didn't shut down in response to interrupt. Kill it hard. // TODO(rfindley): per advice from bcmills@, it may be better to send SIGQUIT // on certain platforms, such as unix. - if err := cmd.Process.Kill(); err != nil && DebugHangingGoCommands { - // Don't panic here as this reliably fails on windows with EINVAL. + if err := cmd.Process.Kill(); err != nil && !errors.Is(err, os.ErrProcessDone) && debug { log.Printf("error killing the Go command: %v", err) } - // See above: don't wait indefinitely if we're debugging hanging Go commands. - if DebugHangingGoCommands { - select { - case err := <-resChan: - return err - case <-time.After(10 * time.Second): // a shorter wait as resChan should return quickly following Kill - HandleHangingGoCommand(cmd.Process) - } - } return <-resChan } diff --git a/vendor/golang.org/x/tools/internal/gocommand/version.go b/vendor/golang.org/x/tools/internal/gocommand/version.go index 307a76d474..446c5846a6 100644 --- a/vendor/golang.org/x/tools/internal/gocommand/version.go +++ b/vendor/golang.org/x/tools/internal/gocommand/version.go @@ -23,21 +23,11 @@ import ( func GoVersion(ctx context.Context, inv Invocation, r *Runner) (int, error) { inv.Verb = "list" inv.Args = []string{"-e", "-f", `{{context.ReleaseTags}}`, `--`, `unsafe`} - inv.Env = append(append([]string{}, inv.Env...), "GO111MODULE=off") - // Unset any unneeded flags, and remove them from BuildFlags, if they're - // present. - inv.ModFile = "" + inv.BuildFlags = nil // This is not a build command. inv.ModFlag = "" - var buildFlags []string - for _, flag := range inv.BuildFlags { - // Flags can be prefixed by one or two dashes. - f := strings.TrimPrefix(strings.TrimPrefix(flag, "-"), "-") - if strings.HasPrefix(f, "mod=") || strings.HasPrefix(f, "modfile=") { - continue - } - buildFlags = append(buildFlags, flag) - } - inv.BuildFlags = buildFlags + inv.ModFile = "" + inv.Env = append(inv.Env[:len(inv.Env):len(inv.Env)], "GO111MODULE=off") + stdoutBytes, err := r.Run(ctx, inv) if err != nil { return 0, err diff --git a/vendor/golang.org/x/tools/internal/gopathwalk/walk.go b/vendor/golang.org/x/tools/internal/gopathwalk/walk.go index 1684053226..52f74e643b 100644 --- a/vendor/golang.org/x/tools/internal/gopathwalk/walk.go +++ b/vendor/golang.org/x/tools/internal/gopathwalk/walk.go @@ -9,15 +9,12 @@ package gopathwalk import ( "bufio" "bytes" - "fmt" - "io/ioutil" + "io/fs" "log" "os" "path/filepath" "strings" "time" - - "golang.org/x/tools/internal/fastwalk" ) // Options controls the behavior of a Walk call. @@ -47,21 +44,18 @@ type Root struct { } // Walk walks Go source directories ($GOROOT, $GOPATH, etc) to find packages. 
-// For each package found, add will be called (concurrently) with the absolute +// For each package found, add will be called with the absolute // paths of the containing source directory and the package directory. -// add will be called concurrently. func Walk(roots []Root, add func(root Root, dir string), opts Options) { WalkSkip(roots, add, func(Root, string) bool { return false }, opts) } // WalkSkip walks Go source directories ($GOROOT, $GOPATH, etc) to find packages. -// For each package found, add will be called (concurrently) with the absolute +// For each package found, add will be called with the absolute // paths of the containing source directory and the package directory. -// For each directory that will be scanned, skip will be called (concurrently) +// For each directory that will be scanned, skip will be called // with the absolute paths of the containing source directory and the directory. // If skip returns false on a directory it will be processed. -// add will be called concurrently. -// skip will be called concurrently. func WalkSkip(roots []Root, add func(root Root, dir string), skip func(root Root, dir string) bool, opts Options) { for _, root := range roots { walkDir(root, add, skip, opts) @@ -78,21 +72,36 @@ func walkDir(root Root, add func(Root, string), skip func(root Root, dir string) } start := time.Now() if opts.Logf != nil { - opts.Logf("gopathwalk: scanning %s", root.Path) + opts.Logf("scanning %s", root.Path) } + w := &walker{ - root: root, - add: add, - skip: skip, - opts: opts, + root: root, + add: add, + skip: skip, + opts: opts, + added: make(map[string]bool), } w.init() - if err := fastwalk.Walk(root.Path, w.walk); err != nil { - log.Printf("gopathwalk: scanning directory %v: %v", root.Path, err) + + // Add a trailing path separator to cause filepath.WalkDir to traverse symlinks. + path := root.Path + if len(path) == 0 { + path = "." + string(filepath.Separator) + } else if !os.IsPathSeparator(path[len(path)-1]) { + path = path + string(filepath.Separator) + } + + if err := filepath.WalkDir(path, w.walk); err != nil { + logf := opts.Logf + if logf == nil { + logf = log.Printf + } + logf("scanning directory %v: %v", root.Path, err) } if opts.Logf != nil { - opts.Logf("gopathwalk: scanned %s in %v", root.Path, time.Since(start)) + opts.Logf("scanned %s in %v", root.Path, time.Since(start)) } } @@ -103,7 +112,10 @@ type walker struct { skip func(Root, string) bool // The callback that will be invoked for every dir. dir is skipped if it returns true. opts Options // Options passed to Walk by the user. - ignoredDirs []os.FileInfo // The ignored directories, loaded from .goimportsignore files. + pathSymlinks []os.FileInfo + ignoredDirs []string + + added map[string]bool } // init initializes the walker based on its Options @@ -119,13 +131,9 @@ func (w *walker) init() { for _, p := range ignoredPaths { full := filepath.Join(w.root.Path, p) - if fi, err := os.Stat(full); err == nil { - w.ignoredDirs = append(w.ignoredDirs, fi) - if w.opts.Logf != nil { - w.opts.Logf("Directory added to ignore list: %s", full) - } - } else if w.opts.Logf != nil { - w.opts.Logf("Error statting ignored directory: %v", err) + w.ignoredDirs = append(w.ignoredDirs, full) + if w.opts.Logf != nil { + w.opts.Logf("Directory added to ignore list: %s", full) } } } @@ -135,7 +143,7 @@ func (w *walker) init() { // The provided path is one of the $GOPATH entries with "src" appended. 
func (w *walker) getIgnoredDirs(path string) []string { file := filepath.Join(path, ".goimportsignore") - slurp, err := ioutil.ReadFile(file) + slurp, err := os.ReadFile(file) if w.opts.Logf != nil { if err != nil { w.opts.Logf("%v", err) @@ -160,9 +168,9 @@ func (w *walker) getIgnoredDirs(path string) []string { } // shouldSkipDir reports whether the file should be skipped or not. -func (w *walker) shouldSkipDir(fi os.FileInfo, dir string) bool { +func (w *walker) shouldSkipDir(dir string) bool { for _, ignoredDir := range w.ignoredDirs { - if os.SameFile(fi, ignoredDir) { + if dir == ignoredDir { return true } } @@ -174,81 +182,150 @@ func (w *walker) shouldSkipDir(fi os.FileInfo, dir string) bool { } // walk walks through the given path. -func (w *walker) walk(path string, typ os.FileMode) error { - if typ.IsRegular() { +// +// Errors are logged if w.opts.Logf is non-nil, but otherwise ignored: +// walk returns only nil or fs.SkipDir. +func (w *walker) walk(path string, d fs.DirEntry, err error) error { + if err != nil { + // We have no way to report errors back through Walk or WalkSkip, + // so just log and ignore them. + if w.opts.Logf != nil { + w.opts.Logf("%v", err) + } + if d == nil { + // Nothing more to do: the error prevents us from knowing + // what path even represents. + return nil + } + } + + if d.Type().IsRegular() { + if !strings.HasSuffix(path, ".go") { + return nil + } + dir := filepath.Dir(path) if dir == w.root.Path && (w.root.Type == RootGOROOT || w.root.Type == RootGOPATH) { // Doesn't make sense to have regular files // directly in your $GOPATH/src or $GOROOT/src. - return fastwalk.ErrSkipFiles - } - if !strings.HasSuffix(path, ".go") { return nil } - w.add(w.root, dir) - return fastwalk.ErrSkipFiles + if !w.added[dir] { + w.add(w.root, dir) + w.added[dir] = true + } + return nil } - if typ == os.ModeDir { + + if d.IsDir() { base := filepath.Base(path) if base == "" || base[0] == '.' || base[0] == '_' || base == "testdata" || (w.root.Type == RootGOROOT && w.opts.ModulesEnabled && base == "vendor") || (!w.opts.ModulesEnabled && base == "node_modules") { - return filepath.SkipDir + return fs.SkipDir } - fi, err := os.Lstat(path) - if err == nil && w.shouldSkipDir(fi, path) { - return filepath.SkipDir + if w.shouldSkipDir(path) { + return fs.SkipDir } return nil } - if typ == os.ModeSymlink { - base := filepath.Base(path) - if strings.HasPrefix(base, ".#") { - // Emacs noise. - return nil + + if d.Type()&os.ModeSymlink != 0 { + // TODO(bcmills): 'go list all' itself ignores symlinks within GOROOT/src + // and GOPATH/src. Do we really need to traverse them here? If so, why? + + fi, err := os.Stat(path) + if err != nil || !fi.IsDir() { + // Not a directory. Just walk the file (or broken link) and be done. + return w.walk(path, fs.FileInfoToDirEntry(fi), err) } - if w.shouldTraverse(path) { - return fastwalk.ErrTraverseLink + + // Avoid walking symlink cycles: if we have already followed a symlink to + // this directory as a parent of itself, don't follow it again. + // + // This doesn't catch the first time through a cycle, but it also minimizes + // the number of extra stat calls we make if we *don't* encounter a cycle. + // Since we don't actually expect to encounter symlink cycles in practice, + // this seems like the right tradeoff. + for _, parent := range w.pathSymlinks { + if os.SameFile(fi, parent) { + return nil + } } - } - return nil -} -// shouldTraverse reports whether the symlink fi, found in dir, -// should be followed. 
It makes sure symlinks were never visited -// before to avoid symlink loops. -func (w *walker) shouldTraverse(path string) bool { - ts, err := os.Stat(path) - if err != nil { - fmt.Fprintln(os.Stderr, err) - return false - } - if !ts.IsDir() { - return false - } - if w.shouldSkipDir(ts, filepath.Dir(path)) { - return false - } - // Check for symlink loops by statting each directory component - // and seeing if any are the same file as ts. - for { - parent := filepath.Dir(path) - if parent == path { - // Made it to the root without seeing a cycle. - // Use this symlink. - return true + w.pathSymlinks = append(w.pathSymlinks, fi) + defer func() { + w.pathSymlinks = w.pathSymlinks[:len(w.pathSymlinks)-1] + }() + + // On some platforms the OS (or the Go os package) sometimes fails to + // resolve directory symlinks before a trailing slash + // (even though POSIX requires it to do so). + // + // On macOS that failure may be caused by a known libc/kernel bug; + // see https://go.dev/issue/59586. + // + // On Windows before Go 1.21, it may be caused by a bug in + // os.Lstat (fixed in https://go.dev/cl/463177). + // + // Since we need to handle this explicitly on broken platforms anyway, + // it is simplest to just always do that and not rely on POSIX pathname + // resolution to walk the directory (such as by calling WalkDir with + // a trailing slash appended to the path). + // + // Instead, we make a sequence of walk calls — directly and through + // recursive calls to filepath.WalkDir — simulating what WalkDir would do + // if the symlink were a regular directory. + + // First we call walk on the path as a directory + // (instead of a symlink). + err = w.walk(path, fs.FileInfoToDirEntry(fi), nil) + if err == fs.SkipDir { + return nil + } else if err != nil { + // This should be impossible, but handle it anyway in case + // walk is changed to return other errors. + return err } - parentInfo, err := os.Stat(parent) + + // Now read the directory and walk its entries. + ents, err := os.ReadDir(path) if err != nil { - return false + // Report the ReadDir error, as filepath.WalkDir would do. + err = w.walk(path, fs.FileInfoToDirEntry(fi), err) + if err == fs.SkipDir { + return nil + } else if err != nil { + return err // Again, should be impossible. + } + // Fall through and iterate over whatever entries we did manage to get. } - if os.SameFile(ts, parentInfo) { - // Cycle. Don't traverse. - return false + + for _, d := range ents { + nextPath := filepath.Join(path, d.Name()) + if d.IsDir() { + // We want to walk the whole directory tree rooted at nextPath, + // not just the single entry for the directory. + err := filepath.WalkDir(nextPath, w.walk) + if err != nil && w.opts.Logf != nil { + w.opts.Logf("%v", err) + } + } else { + err := w.walk(nextPath, d, nil) + if err == fs.SkipDir { + // Skip the rest of the entries in the parent directory of nextPath + // (that is, path itself). + break + } else if err != nil { + return err // Again, should be impossible. + } + } } - path = parent + return nil } + // Not a file, regular directory, or symlink; skip. 
+ return nil } diff --git a/vendor/golang.org/x/tools/internal/imports/fix.go b/vendor/golang.org/x/tools/internal/imports/fix.go index 642a5ac2d7..dd369c072e 100644 --- a/vendor/golang.org/x/tools/internal/imports/fix.go +++ b/vendor/golang.org/x/tools/internal/imports/fix.go @@ -13,6 +13,7 @@ import ( "go/build" "go/parser" "go/token" + "io/fs" "io/ioutil" "os" "path" @@ -26,6 +27,7 @@ import ( "unicode/utf8" "golang.org/x/tools/go/ast/astutil" + "golang.org/x/tools/internal/event" "golang.org/x/tools/internal/gocommand" "golang.org/x/tools/internal/gopathwalk" ) @@ -106,7 +108,7 @@ func parseOtherFiles(fset *token.FileSet, srcDir, filename string) []*ast.File { considerTests := strings.HasSuffix(filename, "_test.go") fileBase := filepath.Base(filename) - packageFileInfos, err := ioutil.ReadDir(srcDir) + packageFileInfos, err := os.ReadDir(srcDir) if err != nil { return nil } @@ -252,7 +254,7 @@ type pass struct { otherFiles []*ast.File // sibling files. // Intermediate state, generated by load. - existingImports map[string]*ImportInfo + existingImports map[string][]*ImportInfo allRefs references missingRefs references @@ -317,7 +319,7 @@ func (p *pass) importIdentifier(imp *ImportInfo) string { func (p *pass) load() ([]*ImportFix, bool) { p.knownPackages = map[string]*packageInfo{} p.missingRefs = references{} - p.existingImports = map[string]*ImportInfo{} + p.existingImports = map[string][]*ImportInfo{} // Load basic information about the file in question. p.allRefs = collectReferences(p.f) @@ -348,7 +350,7 @@ func (p *pass) load() ([]*ImportFix, bool) { } } for _, imp := range imports { - p.existingImports[p.importIdentifier(imp)] = imp + p.existingImports[p.importIdentifier(imp)] = append(p.existingImports[p.importIdentifier(imp)], imp) } // Find missing references. @@ -387,36 +389,45 @@ func (p *pass) fix() ([]*ImportFix, bool) { // Found everything, or giving up. Add the new imports and remove any unused. var fixes []*ImportFix - for _, imp := range p.existingImports { - // We deliberately ignore globals here, because we can't be sure - // they're in the same package. People do things like put multiple - // main packages in the same directory, and we don't want to - // remove imports if they happen to have the same name as a var in - // a different package. - if _, ok := p.allRefs[p.importIdentifier(imp)]; !ok { - fixes = append(fixes, &ImportFix{ - StmtInfo: *imp, - IdentName: p.importIdentifier(imp), - FixType: DeleteImport, - }) - continue - } + for _, identifierImports := range p.existingImports { + for _, imp := range identifierImports { + // We deliberately ignore globals here, because we can't be sure + // they're in the same package. People do things like put multiple + // main packages in the same directory, and we don't want to + // remove imports if they happen to have the same name as a var in + // a different package. + if _, ok := p.allRefs[p.importIdentifier(imp)]; !ok { + fixes = append(fixes, &ImportFix{ + StmtInfo: *imp, + IdentName: p.importIdentifier(imp), + FixType: DeleteImport, + }) + continue + } - // An existing import may need to update its import name to be correct. - if name := p.importSpecName(imp); name != imp.Name { - fixes = append(fixes, &ImportFix{ - StmtInfo: ImportInfo{ - Name: name, - ImportPath: imp.ImportPath, - }, - IdentName: p.importIdentifier(imp), - FixType: SetImportName, - }) + // An existing import may need to update its import name to be correct. 
+ if name := p.importSpecName(imp); name != imp.Name { + fixes = append(fixes, &ImportFix{ + StmtInfo: ImportInfo{ + Name: name, + ImportPath: imp.ImportPath, + }, + IdentName: p.importIdentifier(imp), + FixType: SetImportName, + }) + } } } + // Collecting fixes involved map iteration, so sort for stability. See + // golang/go#59976. + sortFixes(fixes) + // collect selected fixes in a separate slice, so that it can be sorted + // separately. Note that these fixes must occur after fixes to existing + // imports. TODO(rfindley): figure out why. + var selectedFixes []*ImportFix for _, imp := range selected { - fixes = append(fixes, &ImportFix{ + selectedFixes = append(selectedFixes, &ImportFix{ StmtInfo: ImportInfo{ Name: p.importSpecName(imp), ImportPath: imp.ImportPath, @@ -425,8 +436,25 @@ func (p *pass) fix() ([]*ImportFix, bool) { FixType: AddImport, }) } + sortFixes(selectedFixes) - return fixes, true + return append(fixes, selectedFixes...), true +} + +func sortFixes(fixes []*ImportFix) { + sort.Slice(fixes, func(i, j int) bool { + fi, fj := fixes[i], fixes[j] + if fi.StmtInfo.ImportPath != fj.StmtInfo.ImportPath { + return fi.StmtInfo.ImportPath < fj.StmtInfo.ImportPath + } + if fi.StmtInfo.Name != fj.StmtInfo.Name { + return fi.StmtInfo.Name < fj.StmtInfo.Name + } + if fi.IdentName != fj.IdentName { + return fi.IdentName < fj.IdentName + } + return fi.FixType < fj.FixType + }) } // importSpecName gets the import name of imp in the import spec. @@ -519,7 +547,7 @@ func (p *pass) addCandidate(imp *ImportInfo, pkg *packageInfo) { var fixImports = fixImportsDefault func fixImportsDefault(fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv) error { - fixes, err := getFixes(fset, f, filename, env) + fixes, err := getFixes(context.Background(), fset, f, filename, env) if err != nil { return err } @@ -529,7 +557,7 @@ func fixImportsDefault(fset *token.FileSet, f *ast.File, filename string, env *P // getFixes gets the import fixes that need to be made to f in order to fix the imports. // It does not modify the ast. -func getFixes(fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv) ([]*ImportFix, error) { +func getFixes(ctx context.Context, fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv) ([]*ImportFix, error) { abs, err := filepath.Abs(filename) if err != nil { return nil, err @@ -583,7 +611,7 @@ func getFixes(fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv // Go look for candidates in $GOPATH, etc. We don't necessarily load // the real exports of sibling imports, so keep assuming their contents. - if err := addExternalCandidates(p, p.missingRefs, filename); err != nil { + if err := addExternalCandidates(ctx, p, p.missingRefs, filename); err != nil { return nil, err } @@ -1031,7 +1059,10 @@ type scanCallback struct { exportsLoaded func(pkg *pkg, exports []string) } -func addExternalCandidates(pass *pass, refs references, filename string) error { +func addExternalCandidates(ctx context.Context, pass *pass, refs references, filename string) error { + ctx, done := event.Start(ctx, "imports.addExternalCandidates") + defer done() + var mu sync.Mutex found := make(map[string][]pkgDistance) callback := &scanCallback{ @@ -1441,11 +1472,11 @@ func VendorlessPath(ipath string) string { func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string, includeTest bool) (string, []string, error) { // Look for non-test, buildable .go files which could provide exports. 
- all, err := ioutil.ReadDir(dir) + all, err := os.ReadDir(dir) if err != nil { return "", nil, err } - var files []os.FileInfo + var files []fs.DirEntry for _, fi := range all { name := fi.Name() if !strings.HasSuffix(name, ".go") || (!includeTest && strings.HasSuffix(name, "_test.go")) { diff --git a/vendor/golang.org/x/tools/internal/imports/imports.go b/vendor/golang.org/x/tools/internal/imports/imports.go index 95a88383a7..58e637b90f 100644 --- a/vendor/golang.org/x/tools/internal/imports/imports.go +++ b/vendor/golang.org/x/tools/internal/imports/imports.go @@ -11,6 +11,7 @@ package imports import ( "bufio" "bytes" + "context" "fmt" "go/ast" "go/format" @@ -23,6 +24,7 @@ import ( "strings" "golang.org/x/tools/go/ast/astutil" + "golang.org/x/tools/internal/event" ) // Options is golang.org/x/tools/imports.Options with extra internal-only options. @@ -66,14 +68,17 @@ func Process(filename string, src []byte, opt *Options) (formatted []byte, err e // // Note that filename's directory influences which imports can be chosen, // so it is important that filename be accurate. -func FixImports(filename string, src []byte, opt *Options) (fixes []*ImportFix, err error) { +func FixImports(ctx context.Context, filename string, src []byte, opt *Options) (fixes []*ImportFix, err error) { + ctx, done := event.Start(ctx, "imports.FixImports") + defer done() + fileSet := token.NewFileSet() file, _, err := parse(fileSet, filename, src, opt) if err != nil { return nil, err } - return getFixes(fileSet, file, filename, opt.Env) + return getFixes(ctx, fileSet, file, filename, opt.Env) } // ApplyFixes applies all of the fixes to the file and formats it. extraMode diff --git a/vendor/golang.org/x/tools/internal/imports/mod.go b/vendor/golang.org/x/tools/internal/imports/mod.go index 7d99d04ca8..5f4d435d3c 100644 --- a/vendor/golang.org/x/tools/internal/imports/mod.go +++ b/vendor/golang.org/x/tools/internal/imports/mod.go @@ -9,7 +9,6 @@ import ( "context" "encoding/json" "fmt" - "io/ioutil" "os" "path" "path/filepath" @@ -19,6 +18,7 @@ import ( "strings" "golang.org/x/mod/module" + "golang.org/x/tools/internal/event" "golang.org/x/tools/internal/gocommand" "golang.org/x/tools/internal/gopathwalk" ) @@ -37,7 +37,7 @@ type ModuleResolver struct { mains []*gocommand.ModuleJSON mainByDir map[string]*gocommand.ModuleJSON modsByModPath []*gocommand.ModuleJSON // All modules, ordered by # of path components in module Path... - modsByDir []*gocommand.ModuleJSON // ...or Dir. + modsByDir []*gocommand.ModuleJSON // ...or number of path components in their Dir. // moduleCacheCache stores information about the module cache. moduleCacheCache *dirInfoCache @@ -123,7 +123,7 @@ func (r *ModuleResolver) init() error { }) sort.Slice(r.modsByDir, func(i, j int) bool { count := func(x int) int { - return strings.Count(r.modsByDir[x].Dir, "/") + return strings.Count(r.modsByDir[x].Dir, string(filepath.Separator)) } return count(j) < count(i) // descending order }) @@ -264,7 +264,7 @@ func (r *ModuleResolver) findPackage(importPath string) (*gocommand.ModuleJSON, } // Not cached. Read the filesystem. - pkgFiles, err := ioutil.ReadDir(pkgDir) + pkgFiles, err := os.ReadDir(pkgDir) if err != nil { continue } @@ -327,6 +327,10 @@ func (r *ModuleResolver) findModuleByDir(dir string) *gocommand.ModuleJSON { // - in /vendor/ in -mod=vendor mode. // - nested module? Dunno. // Rumor has it that replace targets cannot contain other replace targets. 
+ // + // Note that it is critical here that modsByDir is sorted to have deeper dirs + // first. This ensures that findModuleByDir finds the innermost module. + // See also golang/go#56291. for _, m := range r.modsByDir { if !strings.HasPrefix(dir, m.Dir) { continue @@ -365,7 +369,7 @@ func (r *ModuleResolver) dirIsNestedModule(dir string, mod *gocommand.ModuleJSON func (r *ModuleResolver) modInfo(dir string) (modDir string, modName string) { readModName := func(modFile string) string { - modBytes, err := ioutil.ReadFile(modFile) + modBytes, err := os.ReadFile(modFile) if err != nil { return "" } @@ -424,6 +428,9 @@ func (r *ModuleResolver) loadPackageNames(importPaths []string, srcDir string) ( } func (r *ModuleResolver) scan(ctx context.Context, callback *scanCallback) error { + ctx, done := event.Start(ctx, "imports.ModuleResolver.scan") + defer done() + if err := r.init(); err != nil { return err } diff --git a/vendor/golang.org/x/tools/internal/imports/mod_cache.go b/vendor/golang.org/x/tools/internal/imports/mod_cache.go index 18dada495c..45690abbb4 100644 --- a/vendor/golang.org/x/tools/internal/imports/mod_cache.go +++ b/vendor/golang.org/x/tools/internal/imports/mod_cache.go @@ -12,7 +12,7 @@ import ( "golang.org/x/tools/internal/gopathwalk" ) -// To find packages to import, the resolver needs to know about all of the +// To find packages to import, the resolver needs to know about all of // the packages that could be imported. This includes packages that are // already in modules that are in (1) the current module, (2) replace targets, // and (3) packages in the module cache. Packages in (1) and (2) may change over diff --git a/vendor/golang.org/x/tools/internal/imports/zstdlib.go b/vendor/golang.org/x/tools/internal/imports/zstdlib.go index 31a75949cd..9f992c2bec 100644 --- a/vendor/golang.org/x/tools/internal/imports/zstdlib.go +++ b/vendor/golang.org/x/tools/internal/imports/zstdlib.go @@ -93,6 +93,7 @@ var stdlib = map[string][]string{ "Compare", "Contains", "ContainsAny", + "ContainsFunc", "ContainsRune", "Count", "Cut", @@ -147,6 +148,11 @@ var stdlib = map[string][]string{ "TrimSpace", "TrimSuffix", }, + "cmp": { + "Compare", + "Less", + "Ordered", + }, "compress/bzip2": { "NewReader", "StructuralError", @@ -228,6 +234,7 @@ var stdlib = map[string][]string{ "Ring", }, "context": { + "AfterFunc", "Background", "CancelCauseFunc", "CancelFunc", @@ -239,8 +246,11 @@ var stdlib = map[string][]string{ "WithCancel", "WithCancelCause", "WithDeadline", + "WithDeadlineCause", "WithTimeout", + "WithTimeoutCause", "WithValue", + "WithoutCancel", }, "crypto": { "BLAKE2b_256", @@ -445,6 +455,7 @@ var stdlib = map[string][]string{ "XORBytes", }, "crypto/tls": { + "AlertError", "Certificate", "CertificateRequestInfo", "CertificateVerificationError", @@ -476,6 +487,7 @@ var stdlib = map[string][]string{ "LoadX509KeyPair", "NewLRUClientSessionCache", "NewListener", + "NewResumptionState", "NoClientCert", "PKCS1WithSHA1", "PKCS1WithSHA256", @@ -484,6 +496,27 @@ var stdlib = map[string][]string{ "PSSWithSHA256", "PSSWithSHA384", "PSSWithSHA512", + "ParseSessionState", + "QUICClient", + "QUICConfig", + "QUICConn", + "QUICEncryptionLevel", + "QUICEncryptionLevelApplication", + "QUICEncryptionLevelEarly", + "QUICEncryptionLevelHandshake", + "QUICEncryptionLevelInitial", + "QUICEvent", + "QUICEventKind", + "QUICHandshakeDone", + "QUICNoEvent", + "QUICRejectedEarlyData", + "QUICServer", + "QUICSessionTicketOptions", + "QUICSetReadSecret", + "QUICSetWriteSecret", + "QUICTransportParameters", + 
"QUICTransportParametersRequired", + "QUICWriteData", "RecordHeaderError", "RenegotiateFreelyAsClient", "RenegotiateNever", @@ -493,6 +526,7 @@ var stdlib = map[string][]string{ "RequireAndVerifyClientCert", "RequireAnyClientCert", "Server", + "SessionState", "SignatureScheme", "TLS_AES_128_GCM_SHA256", "TLS_AES_256_GCM_SHA384", @@ -523,6 +557,7 @@ var stdlib = map[string][]string{ "TLS_RSA_WITH_AES_256_GCM_SHA384", "TLS_RSA_WITH_RC4_128_SHA", "VerifyClientCertIfGiven", + "VersionName", "VersionSSL30", "VersionTLS10", "VersionTLS11", @@ -618,6 +653,7 @@ var stdlib = map[string][]string{ "PureEd25519", "RSA", "RevocationList", + "RevocationListEntry", "SHA1WithRSA", "SHA256WithRSA", "SHA256WithRSAPSS", @@ -1002,10 +1038,42 @@ var stdlib = map[string][]string{ "COMPRESS_LOOS", "COMPRESS_LOPROC", "COMPRESS_ZLIB", + "COMPRESS_ZSTD", "Chdr32", "Chdr64", "Class", "CompressionType", + "DF_1_CONFALT", + "DF_1_DIRECT", + "DF_1_DISPRELDNE", + "DF_1_DISPRELPND", + "DF_1_EDITED", + "DF_1_ENDFILTEE", + "DF_1_GLOBAL", + "DF_1_GLOBAUDIT", + "DF_1_GROUP", + "DF_1_IGNMULDEF", + "DF_1_INITFIRST", + "DF_1_INTERPOSE", + "DF_1_KMOD", + "DF_1_LOADFLTR", + "DF_1_NOCOMMON", + "DF_1_NODEFLIB", + "DF_1_NODELETE", + "DF_1_NODIRECT", + "DF_1_NODUMP", + "DF_1_NOHDR", + "DF_1_NOKSYMS", + "DF_1_NOOPEN", + "DF_1_NORELOC", + "DF_1_NOW", + "DF_1_ORIGIN", + "DF_1_PIE", + "DF_1_SINGLETON", + "DF_1_STUB", + "DF_1_SYMINTPOSE", + "DF_1_TRANS", + "DF_1_WEAKFILTER", "DF_BIND_NOW", "DF_ORIGIN", "DF_STATIC_TLS", @@ -1144,6 +1212,7 @@ var stdlib = map[string][]string{ "Dyn32", "Dyn64", "DynFlag", + "DynFlag1", "DynTag", "EI_ABIVERSION", "EI_CLASS", @@ -2111,6 +2180,7 @@ var stdlib = map[string][]string{ "R_PPC64_REL16_LO", "R_PPC64_REL24", "R_PPC64_REL24_NOTOC", + "R_PPC64_REL24_P9NOTOC", "R_PPC64_REL30", "R_PPC64_REL32", "R_PPC64_REL64", @@ -2848,6 +2918,7 @@ var stdlib = map[string][]string{ "MaxVarintLen16", "MaxVarintLen32", "MaxVarintLen64", + "NativeEndian", "PutUvarint", "PutVarint", "Read", @@ -2963,6 +3034,7 @@ var stdlib = map[string][]string{ }, "errors": { "As", + "ErrUnsupported", "Is", "Join", "New", @@ -2989,6 +3061,7 @@ var stdlib = map[string][]string{ "Arg", "Args", "Bool", + "BoolFunc", "BoolVar", "CommandLine", "ContinueOnError", @@ -3119,6 +3192,7 @@ var stdlib = map[string][]string{ "Inspect", "InterfaceType", "IsExported", + "IsGenerated", "KeyValueExpr", "LabeledStmt", "Lbl", @@ -3169,6 +3243,7 @@ var stdlib = map[string][]string{ "ArchChar", "Context", "Default", + "Directive", "FindOnly", "IgnoreVendor", "Import", @@ -3184,6 +3259,7 @@ var stdlib = map[string][]string{ "go/build/constraint": { "AndExpr", "Expr", + "GoVersion", "IsGoBuild", "IsPlusBuild", "NotExpr", @@ -3626,6 +3702,7 @@ var stdlib = map[string][]string{ "ErrBadHTML", "ErrBranchEnd", "ErrEndContext", + "ErrJSTemplate", "ErrNoSuchTemplate", "ErrOutputContext", "ErrPartialCharset", @@ -3870,6 +3947,8 @@ var stdlib = map[string][]string{ "FileInfo", "FileInfoToDirEntry", "FileMode", + "FormatDirEntry", + "FormatFileInfo", "Glob", "GlobFS", "ModeAppend", @@ -3942,6 +4021,78 @@ var stdlib = map[string][]string{ "SetPrefix", "Writer", }, + "log/slog": { + "Any", + "AnyValue", + "Attr", + "Bool", + "BoolValue", + "Debug", + "DebugContext", + "Default", + "Duration", + "DurationValue", + "Error", + "ErrorContext", + "Float64", + "Float64Value", + "Group", + "GroupValue", + "Handler", + "HandlerOptions", + "Info", + "InfoContext", + "Int", + "Int64", + "Int64Value", + "IntValue", + "JSONHandler", + "Kind", + "KindAny", + "KindBool", + "KindDuration", 
+ "KindFloat64", + "KindGroup", + "KindInt64", + "KindLogValuer", + "KindString", + "KindTime", + "KindUint64", + "Level", + "LevelDebug", + "LevelError", + "LevelInfo", + "LevelKey", + "LevelVar", + "LevelWarn", + "Leveler", + "Log", + "LogAttrs", + "LogValuer", + "Logger", + "MessageKey", + "New", + "NewJSONHandler", + "NewLogLogger", + "NewRecord", + "NewTextHandler", + "Record", + "SetDefault", + "Source", + "SourceKey", + "String", + "StringValue", + "TextHandler", + "Time", + "TimeKey", + "TimeValue", + "Uint64", + "Uint64Value", + "Value", + "Warn", + "WarnContext", + "With", + }, "log/syslog": { "Dial", "LOG_ALERT", @@ -3977,6 +4128,13 @@ var stdlib = map[string][]string{ "Priority", "Writer", }, + "maps": { + "Clone", + "Copy", + "DeleteFunc", + "Equal", + "EqualFunc", + }, "math": { "Abs", "Acos", @@ -4371,6 +4529,7 @@ var stdlib = map[string][]string{ "ErrNoLocation", "ErrNotMultipart", "ErrNotSupported", + "ErrSchemeMismatch", "ErrServerClosed", "ErrShortBody", "ErrSkipAltProtocol", @@ -5084,6 +5243,8 @@ var stdlib = map[string][]string{ "NumCPU", "NumCgoCall", "NumGoroutine", + "PanicNilError", + "Pinner", "ReadMemStats", "ReadTrace", "SetBlockProfileRate", @@ -5172,6 +5333,37 @@ var stdlib = map[string][]string{ "Task", "WithRegion", }, + "slices": { + "BinarySearch", + "BinarySearchFunc", + "Clip", + "Clone", + "Compact", + "CompactFunc", + "Compare", + "CompareFunc", + "Contains", + "ContainsFunc", + "Delete", + "DeleteFunc", + "Equal", + "EqualFunc", + "Grow", + "Index", + "IndexFunc", + "Insert", + "IsSorted", + "IsSortedFunc", + "Max", + "MaxFunc", + "Min", + "MinFunc", + "Replace", + "Reverse", + "Sort", + "SortFunc", + "SortStableFunc", + }, "sort": { "Find", "Float64Slice", @@ -5242,6 +5434,7 @@ var stdlib = map[string][]string{ "Compare", "Contains", "ContainsAny", + "ContainsFunc", "ContainsRune", "Count", "Cut", @@ -5299,6 +5492,9 @@ var stdlib = map[string][]string{ "Mutex", "NewCond", "Once", + "OnceFunc", + "OnceValue", + "OnceValues", "Pool", "RWMutex", "WaitGroup", @@ -9135,10 +9331,12 @@ var stdlib = map[string][]string{ "SYS_AIO_CANCEL", "SYS_AIO_ERROR", "SYS_AIO_FSYNC", + "SYS_AIO_MLOCK", "SYS_AIO_READ", "SYS_AIO_RETURN", "SYS_AIO_SUSPEND", "SYS_AIO_SUSPEND_NOCANCEL", + "SYS_AIO_WAITCOMPLETE", "SYS_AIO_WRITE", "SYS_ALARM", "SYS_ARCH_PRCTL", @@ -9368,6 +9566,7 @@ var stdlib = map[string][]string{ "SYS_GET_MEMPOLICY", "SYS_GET_ROBUST_LIST", "SYS_GET_THREAD_AREA", + "SYS_GSSD_SYSCALL", "SYS_GTTY", "SYS_IDENTITYSVC", "SYS_IDLE", @@ -9411,8 +9610,24 @@ var stdlib = map[string][]string{ "SYS_KLDSYM", "SYS_KLDUNLOAD", "SYS_KLDUNLOADF", + "SYS_KMQ_NOTIFY", + "SYS_KMQ_OPEN", + "SYS_KMQ_SETATTR", + "SYS_KMQ_TIMEDRECEIVE", + "SYS_KMQ_TIMEDSEND", + "SYS_KMQ_UNLINK", "SYS_KQUEUE", "SYS_KQUEUE1", + "SYS_KSEM_CLOSE", + "SYS_KSEM_DESTROY", + "SYS_KSEM_GETVALUE", + "SYS_KSEM_INIT", + "SYS_KSEM_OPEN", + "SYS_KSEM_POST", + "SYS_KSEM_TIMEDWAIT", + "SYS_KSEM_TRYWAIT", + "SYS_KSEM_UNLINK", + "SYS_KSEM_WAIT", "SYS_KTIMER_CREATE", "SYS_KTIMER_DELETE", "SYS_KTIMER_GETOVERRUN", @@ -9504,11 +9719,14 @@ var stdlib = map[string][]string{ "SYS_NFSSVC", "SYS_NFSTAT", "SYS_NICE", + "SYS_NLM_SYSCALL", "SYS_NLSTAT", "SYS_NMOUNT", "SYS_NSTAT", "SYS_NTP_ADJTIME", "SYS_NTP_GETTIME", + "SYS_NUMA_GETAFFINITY", + "SYS_NUMA_SETAFFINITY", "SYS_OABI_SYSCALL_BASE", "SYS_OBREAK", "SYS_OLDFSTAT", @@ -9891,6 +10109,7 @@ var stdlib = map[string][]string{ "SYS___ACL_SET_FD", "SYS___ACL_SET_FILE", "SYS___ACL_SET_LINK", + "SYS___CAP_RIGHTS_GET", "SYS___CLONE", "SYS___DISABLE_THREADSIGNAL", "SYS___GETCWD", 
@@ -10574,6 +10793,7 @@ var stdlib = map[string][]string{ "Short", "T", "TB", + "Testing", "Verbose", }, "testing/fstest": { @@ -10603,6 +10823,9 @@ var stdlib = map[string][]string{ "SetupError", "Value", }, + "testing/slogtest": { + "TestHandler", + }, "text/scanner": { "Char", "Comment", @@ -10826,6 +11049,7 @@ var stdlib = map[string][]string{ "Cs", "Cuneiform", "Cypriot", + "Cypro_Minoan", "Cyrillic", "Dash", "Deprecated", @@ -10889,6 +11113,7 @@ var stdlib = map[string][]string{ "Kaithi", "Kannada", "Katakana", + "Kawi", "Kayah_Li", "Kharoshthi", "Khitan_Small_Script", @@ -10943,6 +11168,7 @@ var stdlib = map[string][]string{ "Myanmar", "N", "Nabataean", + "Nag_Mundari", "Nandinagari", "Nd", "New_Tai_Lue", @@ -10964,6 +11190,7 @@ var stdlib = map[string][]string{ "Old_Sogdian", "Old_South_Arabian", "Old_Turkic", + "Old_Uyghur", "Oriya", "Osage", "Osmanya", @@ -11038,6 +11265,7 @@ var stdlib = map[string][]string{ "Tai_Viet", "Takri", "Tamil", + "Tangsa", "Tangut", "Telugu", "Terminal_Punctuation", @@ -11052,6 +11280,7 @@ var stdlib = map[string][]string{ "ToLower", "ToTitle", "ToUpper", + "Toto", "TurkishCase", "Ugaritic", "Unified_Ideograph", @@ -11061,6 +11290,7 @@ var stdlib = map[string][]string{ "Vai", "Variation_Selector", "Version", + "Vithkuqi", "Wancho", "Warang_Citi", "White_Space", diff --git a/vendor/golang.org/x/tools/internal/packagesinternal/packages.go b/vendor/golang.org/x/tools/internal/packagesinternal/packages.go index d9950b1f0b..44719de173 100644 --- a/vendor/golang.org/x/tools/internal/packagesinternal/packages.go +++ b/vendor/golang.org/x/tools/internal/packagesinternal/packages.go @@ -5,10 +5,6 @@ // Package packagesinternal exposes internal-only fields from go/packages. package packagesinternal -import ( - "golang.org/x/tools/internal/gocommand" -) - var GetForTest = func(p interface{}) string { return "" } var GetDepsErrors = func(p interface{}) []*PackageError { return nil } @@ -18,10 +14,6 @@ type PackageError struct { Err string // the error itself } -var GetGoCmdRunner = func(config interface{}) *gocommand.Runner { return nil } - -var SetGoCmdRunner = func(config interface{}, runner *gocommand.Runner) {} - var TypecheckCgo int var DepsErrors int // must be set as a LoadMode to call GetDepsErrors var ForTest int // must be set as a LoadMode to call GetForTest diff --git a/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go b/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go index a3fb2d4f29..7e638ec24f 100644 --- a/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go +++ b/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go @@ -7,7 +7,9 @@ package tokeninternal import ( + "fmt" "go/token" + "sort" "sync" "unsafe" ) @@ -57,3 +59,93 @@ func GetLines(file *token.File) []int { panic("unexpected token.File size") } } + +// AddExistingFiles adds the specified files to the FileSet if they +// are not already present. It panics if any pair of files in the +// resulting FileSet would overlap. +func AddExistingFiles(fset *token.FileSet, files []*token.File) { + // Punch through the FileSet encapsulation. + type tokenFileSet struct { + // This type remained essentially consistent from go1.16 to go1.21. + mutex sync.RWMutex + base int + files []*token.File + _ *token.File // changed to atomic.Pointer[token.File] in go1.19 + } + + // If the size of token.FileSet changes, this will fail to compile. 
+ const delta = int64(unsafe.Sizeof(tokenFileSet{})) - int64(unsafe.Sizeof(token.FileSet{})) + var _ [-delta * delta]int + + type uP = unsafe.Pointer + var ptr *tokenFileSet + *(*uP)(uP(&ptr)) = uP(fset) + ptr.mutex.Lock() + defer ptr.mutex.Unlock() + + // Merge and sort. + newFiles := append(ptr.files, files...) + sort.Slice(newFiles, func(i, j int) bool { + return newFiles[i].Base() < newFiles[j].Base() + }) + + // Reject overlapping files. + // Discard adjacent identical files. + out := newFiles[:0] + for i, file := range newFiles { + if i > 0 { + prev := newFiles[i-1] + if file == prev { + continue + } + if prev.Base()+prev.Size()+1 > file.Base() { + panic(fmt.Sprintf("file %s (%d-%d) overlaps with file %s (%d-%d)", + prev.Name(), prev.Base(), prev.Base()+prev.Size(), + file.Name(), file.Base(), file.Base()+file.Size())) + } + } + out = append(out, file) + } + newFiles = out + + ptr.files = newFiles + + // Advance FileSet.Base(). + if len(newFiles) > 0 { + last := newFiles[len(newFiles)-1] + newBase := last.Base() + last.Size() + 1 + if ptr.base < newBase { + ptr.base = newBase + } + } +} + +// FileSetFor returns a new FileSet containing a sequence of new Files with +// the same base, size, and line as the input files, for use in APIs that +// require a FileSet. +// +// Precondition: the input files must be non-overlapping, and sorted in order +// of their Base. +func FileSetFor(files ...*token.File) *token.FileSet { + fset := token.NewFileSet() + for _, f := range files { + f2 := fset.AddFile(f.Name(), f.Base(), f.Size()) + lines := GetLines(f) + f2.SetLines(lines) + } + return fset +} + +// CloneFileSet creates a new FileSet holding all files in fset. It does not +// create copies of the token.Files in fset: they are added to the resulting +// FileSet unmodified. +func CloneFileSet(fset *token.FileSet) *token.FileSet { + var files []*token.File + fset.Iterate(func(f *token.File) bool { + files = append(files, f) + return true + }) + newFileSet := token.NewFileSet() + AddExistingFiles(newFileSet, files) + return newFileSet +} diff --git a/vendor/golang.org/x/tools/internal/typeparams/common.go b/vendor/golang.org/x/tools/internal/typeparams/common.go index 25a1426d30..cdab988531 100644 --- a/vendor/golang.org/x/tools/internal/typeparams/common.go +++ b/vendor/golang.org/x/tools/internal/typeparams/common.go @@ -23,6 +23,7 @@ package typeparams import ( + "fmt" "go/ast" "go/token" "go/types" @@ -41,7 +42,7 @@ func UnpackIndexExpr(n ast.Node) (x ast.Expr, lbrack token.Pos, indices []ast.Ex switch e := n.(type) { case *ast.IndexExpr: return e.X, e.Lbrack, []ast.Expr{e.Index}, e.Rbrack - case *IndexListExpr: + case *ast.IndexListExpr: return e.X, e.Lbrack, e.Indices, e.Rbrack } return nil, token.NoPos, nil, token.NoPos @@ -62,7 +63,7 @@ func PackIndexExpr(x ast.Expr, lbrack token.Pos, indices []ast.Expr, rbrack toke Rbrack: rbrack, } default: - return &IndexListExpr{ + return &ast.IndexListExpr{ X: x, Lbrack: lbrack, Indices: indices, @@ -73,7 +74,7 @@ func PackIndexExpr(x ast.Expr, lbrack token.Pos, indices []ast.Expr, rbrack toke // IsTypeParam reports whether t is a type parameter. func IsTypeParam(t types.Type) bool { - _, ok := t.(*TypeParam) + _, ok := t.(*types.TypeParam) return ok } @@ -87,7 +88,6 @@ func IsTypeParam(t types.Type) bool { func OriginMethod(fn *types.Func) *types.Func { recv := fn.Type().(*types.Signature).Recv() if recv == nil { - return fn } base := recv.Type() @@ -100,12 +100,37 @@ func OriginMethod(fn *types.Func) *types.Func { // Receiver is a *types.Interface. 
return fn } - if ForNamed(named).Len() == 0 { + if named.TypeParams().Len() == 0 { // Receiver base has no type parameters, so we can avoid the lookup below. return fn } - orig := NamedTypeOrigin(named) + orig := named.Origin() gfn, _, _ := types.LookupFieldOrMethod(orig, true, fn.Pkg(), fn.Name()) + + // This is a fix for a gopls crash (#60628) due to a go/types bug (#60634). In: + // package p + // type T *int + // func (*T) f() {} + // LookupFieldOrMethod(T, true, p, f)=nil, but NewMethodSet(*T)={(*T).f}. + // Here we make them consistent by force. + // (The go/types bug is general, but this workaround is reached only + // for generic T thanks to the early return above.) + if gfn == nil { + mset := types.NewMethodSet(types.NewPointer(orig)) + for i := 0; i < mset.Len(); i++ { + m := mset.At(i) + if m.Obj().Id() == fn.Id() { + gfn = m.Obj() + break + } + } + } + + // In golang/go#61196, we observe another crash, this time inexplicable. + if gfn == nil { + panic(fmt.Sprintf("missing origin method for %s.%s; named == origin: %t, named.NumMethods(): %d, origin.NumMethods(): %d", named, fn, named == orig, named.NumMethods(), orig.NumMethods())) + } + return gfn.(*types.Func) } @@ -132,7 +157,7 @@ func OriginMethod(fn *types.Func) *types.Func { // // In this case, GenericAssignableTo reports that instantiations of Container // are assignable to the corresponding instantiation of Interface. -func GenericAssignableTo(ctxt *Context, V, T types.Type) bool { +func GenericAssignableTo(ctxt *types.Context, V, T types.Type) bool { // If V and T are not both named, or do not have matching non-empty type // parameter lists, fall back on types.AssignableTo. @@ -142,9 +167,9 @@ func GenericAssignableTo(ctxt *Context, V, T types.Type) bool { return types.AssignableTo(V, T) } - vtparams := ForNamed(VN) - ttparams := ForNamed(TN) - if vtparams.Len() == 0 || vtparams.Len() != ttparams.Len() || NamedTypeArgs(VN).Len() != 0 || NamedTypeArgs(TN).Len() != 0 { + vtparams := VN.TypeParams() + ttparams := TN.TypeParams() + if vtparams.Len() == 0 || vtparams.Len() != ttparams.Len() || VN.TypeArgs().Len() != 0 || TN.TypeArgs().Len() != 0 { return types.AssignableTo(V, T) } @@ -157,7 +182,7 @@ func GenericAssignableTo(ctxt *Context, V, T types.Type) bool { // Minor optimization: ensure we share a context across the two // instantiations below. if ctxt == nil { - ctxt = NewContext() + ctxt = types.NewContext() } var targs []types.Type @@ -165,12 +190,12 @@ func GenericAssignableTo(ctxt *Context, V, T types.Type) bool { targs = append(targs, vtparams.At(i)) } - vinst, err := Instantiate(ctxt, V, targs, true) + vinst, err := types.Instantiate(ctxt, V, targs, true) if err != nil { panic("type parameters should satisfy their own constraints") } - tinst, err := Instantiate(ctxt, T, targs, true) + tinst, err := types.Instantiate(ctxt, T, targs, true) if err != nil { return false } diff --git a/vendor/golang.org/x/tools/internal/typeparams/coretype.go b/vendor/golang.org/x/tools/internal/typeparams/coretype.go index 993135ec90..7ea8840eab 100644 --- a/vendor/golang.org/x/tools/internal/typeparams/coretype.go +++ b/vendor/golang.org/x/tools/internal/typeparams/coretype.go @@ -81,13 +81,13 @@ func CoreType(T types.Type) types.Type { // restrictions may be arbitrarily complex. 
For example, consider the // following: // -// type A interface{ ~string|~[]byte } +// type A interface{ ~string|~[]byte } // -// type B interface{ int|string } +// type B interface{ int|string } // -// type C interface { ~string|~int } +// type C interface { ~string|~int } // -// type T[P interface{ A|B; C }] int +// type T[P interface{ A|B; C }] int // // In this example, the structural type restriction of P is ~string|int: A|B // expands to ~string|~[]byte|int|string, which reduces to ~string|~[]byte|int, @@ -108,15 +108,15 @@ func CoreType(T types.Type) types.Type { // // _NormalTerms makes no guarantees about the order of terms, except that it // is deterministic. -func _NormalTerms(typ types.Type) ([]*Term, error) { +func _NormalTerms(typ types.Type) ([]*types.Term, error) { switch typ := typ.(type) { - case *TypeParam: + case *types.TypeParam: return StructuralTerms(typ) - case *Union: + case *types.Union: return UnionTermSet(typ) case *types.Interface: return InterfaceTermSet(typ) default: - return []*Term{NewTerm(false, typ)}, nil + return []*types.Term{types.NewTerm(false, typ)}, nil } } diff --git a/vendor/golang.org/x/tools/internal/typeparams/enabled_go117.go b/vendor/golang.org/x/tools/internal/typeparams/enabled_go117.go deleted file mode 100644 index 18212390e1..0000000000 --- a/vendor/golang.org/x/tools/internal/typeparams/enabled_go117.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.18 -// +build !go1.18 - -package typeparams - -// Enabled reports whether type parameters are enabled in the current build -// environment. -const Enabled = false diff --git a/vendor/golang.org/x/tools/internal/typeparams/enabled_go118.go b/vendor/golang.org/x/tools/internal/typeparams/enabled_go118.go deleted file mode 100644 index d67148823c..0000000000 --- a/vendor/golang.org/x/tools/internal/typeparams/enabled_go118.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.18 -// +build go1.18 - -package typeparams - -// Note: this constant is in a separate file as this is the only acceptable -// diff between the <1.18 API of this package and the 1.18 API. - -// Enabled reports whether type parameters are enabled in the current build -// environment. -const Enabled = true diff --git a/vendor/golang.org/x/tools/internal/typeparams/normalize.go b/vendor/golang.org/x/tools/internal/typeparams/normalize.go index 9c631b6512..93c80fdc96 100644 --- a/vendor/golang.org/x/tools/internal/typeparams/normalize.go +++ b/vendor/golang.org/x/tools/internal/typeparams/normalize.go @@ -60,7 +60,7 @@ var ErrEmptyTypeSet = errors.New("empty type set") // // StructuralTerms makes no guarantees about the order of terms, except that it // is deterministic. -func StructuralTerms(tparam *TypeParam) ([]*Term, error) { +func StructuralTerms(tparam *types.TypeParam) ([]*types.Term, error) { constraint := tparam.Constraint() if constraint == nil { return nil, fmt.Errorf("%s has nil constraint", tparam) @@ -78,7 +78,7 @@ func StructuralTerms(tparam *TypeParam) ([]*Term, error) { // // See the documentation of StructuralTerms for more information on // normalization. 
-func InterfaceTermSet(iface *types.Interface) ([]*Term, error) { +func InterfaceTermSet(iface *types.Interface) ([]*types.Term, error) { return computeTermSet(iface) } @@ -88,11 +88,11 @@ func InterfaceTermSet(iface *types.Interface) ([]*Term, error) { // // See the documentation of StructuralTerms for more information on // normalization. -func UnionTermSet(union *Union) ([]*Term, error) { +func UnionTermSet(union *types.Union) ([]*types.Term, error) { return computeTermSet(union) } -func computeTermSet(typ types.Type) ([]*Term, error) { +func computeTermSet(typ types.Type) ([]*types.Term, error) { tset, err := computeTermSetInternal(typ, make(map[types.Type]*termSet), 0) if err != nil { return nil, err @@ -103,9 +103,9 @@ func computeTermSet(typ types.Type) ([]*Term, error) { if tset.terms.isAll() { return nil, nil } - var terms []*Term + var terms []*types.Term for _, term := range tset.terms { - terms = append(terms, NewTerm(term.tilde, term.typ)) + terms = append(terms, types.NewTerm(term.tilde, term.typ)) } return terms, nil } @@ -162,7 +162,7 @@ func computeTermSetInternal(t types.Type, seen map[types.Type]*termSet, depth in tset.terms = allTermlist for i := 0; i < u.NumEmbeddeds(); i++ { embedded := u.EmbeddedType(i) - if _, ok := embedded.Underlying().(*TypeParam); ok { + if _, ok := embedded.Underlying().(*types.TypeParam); ok { return nil, fmt.Errorf("invalid embedded type %T", embedded) } tset2, err := computeTermSetInternal(embedded, seen, depth+1) @@ -171,7 +171,7 @@ func computeTermSetInternal(t types.Type, seen map[types.Type]*termSet, depth in } tset.terms = tset.terms.intersect(tset2.terms) } - case *Union: + case *types.Union: // The term set of a union is the union of term sets of its terms. tset.terms = nil for i := 0; i < u.Len(); i++ { @@ -184,7 +184,7 @@ func computeTermSetInternal(t types.Type, seen map[types.Type]*termSet, depth in return nil, err } terms = tset2.terms - case *TypeParam, *Union: + case *types.TypeParam, *types.Union: // A stand-alone type parameter or union is not permitted as union // term. return nil, fmt.Errorf("invalid union term %T", t) @@ -199,7 +199,7 @@ func computeTermSetInternal(t types.Type, seen map[types.Type]*termSet, depth in return nil, fmt.Errorf("exceeded max term count %d", maxTermCount) } } - case *TypeParam: + case *types.TypeParam: panic("unreachable") default: // For all other types, the term set is just a single non-tilde term diff --git a/vendor/golang.org/x/tools/internal/typeparams/termlist.go b/vendor/golang.org/x/tools/internal/typeparams/termlist.go index 933106a23d..cbd12f8013 100644 --- a/vendor/golang.org/x/tools/internal/typeparams/termlist.go +++ b/vendor/golang.org/x/tools/internal/typeparams/termlist.go @@ -30,7 +30,7 @@ func (xl termlist) String() string { var buf bytes.Buffer for i, x := range xl { if i > 0 { - buf.WriteString(" ∪ ") + buf.WriteString(" | ") } buf.WriteString(x.String()) } diff --git a/vendor/golang.org/x/tools/internal/typeparams/typeparams_go117.go b/vendor/golang.org/x/tools/internal/typeparams/typeparams_go117.go deleted file mode 100644 index b4788978ff..0000000000 --- a/vendor/golang.org/x/tools/internal/typeparams/typeparams_go117.go +++ /dev/null @@ -1,197 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build !go1.18 -// +build !go1.18 - -package typeparams - -import ( - "go/ast" - "go/token" - "go/types" -) - -func unsupported() { - panic("type parameters are unsupported at this go version") -} - -// IndexListExpr is a placeholder type, as type parameters are not supported at -// this Go version. Its methods panic on use. -type IndexListExpr struct { - ast.Expr - X ast.Expr // expression - Lbrack token.Pos // position of "[" - Indices []ast.Expr // index expressions - Rbrack token.Pos // position of "]" -} - -// ForTypeSpec returns an empty field list, as type parameters on not supported -// at this Go version. -func ForTypeSpec(*ast.TypeSpec) *ast.FieldList { - return nil -} - -// ForFuncType returns an empty field list, as type parameters are not -// supported at this Go version. -func ForFuncType(*ast.FuncType) *ast.FieldList { - return nil -} - -// TypeParam is a placeholder type, as type parameters are not supported at -// this Go version. Its methods panic on use. -type TypeParam struct{ types.Type } - -func (*TypeParam) Index() int { unsupported(); return 0 } -func (*TypeParam) Constraint() types.Type { unsupported(); return nil } -func (*TypeParam) Obj() *types.TypeName { unsupported(); return nil } - -// TypeParamList is a placeholder for an empty type parameter list. -type TypeParamList struct{} - -func (*TypeParamList) Len() int { return 0 } -func (*TypeParamList) At(int) *TypeParam { unsupported(); return nil } - -// TypeList is a placeholder for an empty type list. -type TypeList struct{} - -func (*TypeList) Len() int { return 0 } -func (*TypeList) At(int) types.Type { unsupported(); return nil } - -// NewTypeParam is unsupported at this Go version, and panics. -func NewTypeParam(name *types.TypeName, constraint types.Type) *TypeParam { - unsupported() - return nil -} - -// SetTypeParamConstraint is unsupported at this Go version, and panics. -func SetTypeParamConstraint(tparam *TypeParam, constraint types.Type) { - unsupported() -} - -// NewSignatureType calls types.NewSignature, panicking if recvTypeParams or -// typeParams is non-empty. -func NewSignatureType(recv *types.Var, recvTypeParams, typeParams []*TypeParam, params, results *types.Tuple, variadic bool) *types.Signature { - if len(recvTypeParams) != 0 || len(typeParams) != 0 { - panic("signatures cannot have type parameters at this Go version") - } - return types.NewSignature(recv, params, results, variadic) -} - -// ForSignature returns an empty slice. -func ForSignature(*types.Signature) *TypeParamList { - return nil -} - -// RecvTypeParams returns a nil slice. -func RecvTypeParams(sig *types.Signature) *TypeParamList { - return nil -} - -// IsComparable returns false, as no interfaces are type-restricted at this Go -// version. -func IsComparable(*types.Interface) bool { - return false -} - -// IsMethodSet returns true, as no interfaces are type-restricted at this Go -// version. -func IsMethodSet(*types.Interface) bool { - return true -} - -// IsImplicit returns false, as no interfaces are implicit at this Go version. -func IsImplicit(*types.Interface) bool { - return false -} - -// MarkImplicit does nothing, because this Go version does not have implicit -// interfaces. -func MarkImplicit(*types.Interface) {} - -// ForNamed returns an empty type parameter list, as type parameters are not -// supported at this Go version. -func ForNamed(*types.Named) *TypeParamList { - return nil -} - -// SetForNamed panics if tparams is non-empty. 
-func SetForNamed(_ *types.Named, tparams []*TypeParam) { - if len(tparams) > 0 { - unsupported() - } -} - -// NamedTypeArgs returns nil. -func NamedTypeArgs(*types.Named) *TypeList { - return nil -} - -// NamedTypeOrigin is the identity method at this Go version. -func NamedTypeOrigin(named *types.Named) types.Type { - return named -} - -// Term holds information about a structural type restriction. -type Term struct { - tilde bool - typ types.Type -} - -func (m *Term) Tilde() bool { return m.tilde } -func (m *Term) Type() types.Type { return m.typ } -func (m *Term) String() string { - pre := "" - if m.tilde { - pre = "~" - } - return pre + m.typ.String() -} - -// NewTerm is unsupported at this Go version, and panics. -func NewTerm(tilde bool, typ types.Type) *Term { - return &Term{tilde, typ} -} - -// Union is a placeholder type, as type parameters are not supported at this Go -// version. Its methods panic on use. -type Union struct{ types.Type } - -func (*Union) Len() int { return 0 } -func (*Union) Term(i int) *Term { unsupported(); return nil } - -// NewUnion is unsupported at this Go version, and panics. -func NewUnion(terms []*Term) *Union { - unsupported() - return nil -} - -// InitInstanceInfo is a noop at this Go version. -func InitInstanceInfo(*types.Info) {} - -// Instance is a placeholder type, as type parameters are not supported at this -// Go version. -type Instance struct { - TypeArgs *TypeList - Type types.Type -} - -// GetInstances returns a nil map, as type parameters are not supported at this -// Go version. -func GetInstances(info *types.Info) map[*ast.Ident]Instance { return nil } - -// Context is a placeholder type, as type parameters are not supported at -// this Go version. -type Context struct{} - -// NewContext returns a placeholder Context instance. -func NewContext() *Context { - return &Context{} -} - -// Instantiate is unsupported on this Go version, and panics. -func Instantiate(ctxt *Context, typ types.Type, targs []types.Type, validate bool) (types.Type, error) { - unsupported() - return nil, nil -} diff --git a/vendor/golang.org/x/tools/internal/typeparams/typeparams_go118.go b/vendor/golang.org/x/tools/internal/typeparams/typeparams_go118.go deleted file mode 100644 index 114a36b866..0000000000 --- a/vendor/golang.org/x/tools/internal/typeparams/typeparams_go118.go +++ /dev/null @@ -1,151 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.18 -// +build go1.18 - -package typeparams - -import ( - "go/ast" - "go/types" -) - -// IndexListExpr is an alias for ast.IndexListExpr. -type IndexListExpr = ast.IndexListExpr - -// ForTypeSpec returns n.TypeParams. -func ForTypeSpec(n *ast.TypeSpec) *ast.FieldList { - if n == nil { - return nil - } - return n.TypeParams -} - -// ForFuncType returns n.TypeParams. -func ForFuncType(n *ast.FuncType) *ast.FieldList { - if n == nil { - return nil - } - return n.TypeParams -} - -// TypeParam is an alias for types.TypeParam -type TypeParam = types.TypeParam - -// TypeParamList is an alias for types.TypeParamList -type TypeParamList = types.TypeParamList - -// TypeList is an alias for types.TypeList -type TypeList = types.TypeList - -// NewTypeParam calls types.NewTypeParam. -func NewTypeParam(name *types.TypeName, constraint types.Type) *TypeParam { - return types.NewTypeParam(name, constraint) -} - -// SetTypeParamConstraint calls tparam.SetConstraint(constraint). 
-func SetTypeParamConstraint(tparam *TypeParam, constraint types.Type) { - tparam.SetConstraint(constraint) -} - -// NewSignatureType calls types.NewSignatureType. -func NewSignatureType(recv *types.Var, recvTypeParams, typeParams []*TypeParam, params, results *types.Tuple, variadic bool) *types.Signature { - return types.NewSignatureType(recv, recvTypeParams, typeParams, params, results, variadic) -} - -// ForSignature returns sig.TypeParams() -func ForSignature(sig *types.Signature) *TypeParamList { - return sig.TypeParams() -} - -// RecvTypeParams returns sig.RecvTypeParams(). -func RecvTypeParams(sig *types.Signature) *TypeParamList { - return sig.RecvTypeParams() -} - -// IsComparable calls iface.IsComparable(). -func IsComparable(iface *types.Interface) bool { - return iface.IsComparable() -} - -// IsMethodSet calls iface.IsMethodSet(). -func IsMethodSet(iface *types.Interface) bool { - return iface.IsMethodSet() -} - -// IsImplicit calls iface.IsImplicit(). -func IsImplicit(iface *types.Interface) bool { - return iface.IsImplicit() -} - -// MarkImplicit calls iface.MarkImplicit(). -func MarkImplicit(iface *types.Interface) { - iface.MarkImplicit() -} - -// ForNamed extracts the (possibly empty) type parameter object list from -// named. -func ForNamed(named *types.Named) *TypeParamList { - return named.TypeParams() -} - -// SetForNamed sets the type params tparams on n. Each tparam must be of -// dynamic type *types.TypeParam. -func SetForNamed(n *types.Named, tparams []*TypeParam) { - n.SetTypeParams(tparams) -} - -// NamedTypeArgs returns named.TypeArgs(). -func NamedTypeArgs(named *types.Named) *TypeList { - return named.TypeArgs() -} - -// NamedTypeOrigin returns named.Orig(). -func NamedTypeOrigin(named *types.Named) types.Type { - return named.Origin() -} - -// Term is an alias for types.Term. -type Term = types.Term - -// NewTerm calls types.NewTerm. -func NewTerm(tilde bool, typ types.Type) *Term { - return types.NewTerm(tilde, typ) -} - -// Union is an alias for types.Union -type Union = types.Union - -// NewUnion calls types.NewUnion. -func NewUnion(terms []*Term) *Union { - return types.NewUnion(terms) -} - -// InitInstanceInfo initializes info to record information about type and -// function instances. -func InitInstanceInfo(info *types.Info) { - info.Instances = make(map[*ast.Ident]types.Instance) -} - -// Instance is an alias for types.Instance. -type Instance = types.Instance - -// GetInstances returns info.Instances. -func GetInstances(info *types.Info) map[*ast.Ident]Instance { - return info.Instances -} - -// Context is an alias for types.Context. -type Context = types.Context - -// NewContext calls types.NewContext. -func NewContext() *Context { - return types.NewContext() -} - -// Instantiate calls types.Instantiate. 
-func Instantiate(ctxt *Context, typ types.Type, targs []types.Type, validate bool) (types.Type, error) { - return types.Instantiate(ctxt, typ, targs, validate) -} diff --git a/vendor/golang.org/x/tools/internal/typeparams/typeterm.go b/vendor/golang.org/x/tools/internal/typeparams/typeterm.go index 7ddee28d98..7350bb702a 100644 --- a/vendor/golang.org/x/tools/internal/typeparams/typeterm.go +++ b/vendor/golang.org/x/tools/internal/typeparams/typeterm.go @@ -10,11 +10,10 @@ import "go/types" // A term describes elementary type sets: // -// ∅: (*term)(nil) == ∅ // set of no types (empty set) -// 𝓤: &term{} == 𝓤 // set of all types (𝓤niverse) -// T: &term{false, T} == {T} // set of type T -// ~t: &term{true, t} == {t' | under(t') == t} // set of types with underlying type t -// +// ∅: (*term)(nil) == ∅ // set of no types (empty set) +// 𝓤: &term{} == 𝓤 // set of all types (𝓤niverse) +// T: &term{false, T} == {T} // set of type T +// ~t: &term{true, t} == {t' | under(t') == t} // set of types with underlying type t type term struct { tilde bool // valid if typ != nil typ types.Type diff --git a/vendor/golang.org/x/tools/internal/versions/gover.go b/vendor/golang.org/x/tools/internal/versions/gover.go new file mode 100644 index 0000000000..bbabcd22e9 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/versions/gover.go @@ -0,0 +1,172 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This is a fork of internal/gover for use by x/tools until +// go1.21 and earlier are no longer supported by x/tools. + +package versions + +import "strings" + +// A gover is a parsed Go gover: major[.Minor[.Patch]][kind[pre]] +// The numbers are the original decimal strings to avoid integer overflows +// and since there is very little actual math. (Probably overflow doesn't matter in practice, +// but at the time this code was written, there was an existing test that used +// go1.99999999999, which does not fit in an int on 32-bit platforms. +// The "big decimal" representation avoids the problem entirely.) +type gover struct { + major string // decimal + minor string // decimal or "" + patch string // decimal or "" + kind string // "", "alpha", "beta", "rc" + pre string // decimal or "" +} + +// compare returns -1, 0, or +1 depending on whether +// x < y, x == y, or x > y, interpreted as toolchain versions. +// The versions x and y must not begin with a "go" prefix: just "1.21" not "go1.21". +// Malformed versions compare less than well-formed versions and equal to each other. +// The language version "1.21" compares less than the release candidate and eventual releases "1.21rc1" and "1.21.0". +func compare(x, y string) int { + vx := parse(x) + vy := parse(y) + + if c := cmpInt(vx.major, vy.major); c != 0 { + return c + } + if c := cmpInt(vx.minor, vy.minor); c != 0 { + return c + } + if c := cmpInt(vx.patch, vy.patch); c != 0 { + return c + } + if c := strings.Compare(vx.kind, vy.kind); c != 0 { // "" < alpha < beta < rc + return c + } + if c := cmpInt(vx.pre, vy.pre); c != 0 { + return c + } + return 0 +} + +// lang returns the Go language version. For example, lang("1.2.3") == "1.2". +func lang(x string) string { + v := parse(x) + if v.minor == "" || v.major == "1" && v.minor == "0" { + return v.major + } + return v.major + "." + v.minor +} + +// isValid reports whether the version x is valid. 
+func isValid(x string) bool { + return parse(x) != gover{} +} + +// parse parses the Go version string x into a version. +// It returns the zero version if x is malformed. +func parse(x string) gover { + var v gover + + // Parse major version. + var ok bool + v.major, x, ok = cutInt(x) + if !ok { + return gover{} + } + if x == "" { + // Interpret "1" as "1.0.0". + v.minor = "0" + v.patch = "0" + return v + } + + // Parse . before minor version. + if x[0] != '.' { + return gover{} + } + + // Parse minor version. + v.minor, x, ok = cutInt(x[1:]) + if !ok { + return gover{} + } + if x == "" { + // Patch missing is same as "0" for older versions. + // Starting in Go 1.21, patch missing is different from explicit .0. + if cmpInt(v.minor, "21") < 0 { + v.patch = "0" + } + return v + } + + // Parse patch if present. + if x[0] == '.' { + v.patch, x, ok = cutInt(x[1:]) + if !ok || x != "" { + // Note that we are disallowing prereleases (alpha, beta, rc) for patch releases here (x != ""). + // Allowing them would be a bit confusing because we already have: + // 1.21 < 1.21rc1 + // But a prerelease of a patch would have the opposite effect: + // 1.21.3rc1 < 1.21.3 + // We've never needed them before, so let's not start now. + return gover{} + } + return v + } + + // Parse prerelease. + i := 0 + for i < len(x) && (x[i] < '0' || '9' < x[i]) { + if x[i] < 'a' || 'z' < x[i] { + return gover{} + } + i++ + } + if i == 0 { + return gover{} + } + v.kind, x = x[:i], x[i:] + if x == "" { + return v + } + v.pre, x, ok = cutInt(x) + if !ok || x != "" { + return gover{} + } + + return v +} + +// cutInt scans the leading decimal number at the start of x to an integer +// and returns that value and the rest of the string. +func cutInt(x string) (n, rest string, ok bool) { + i := 0 + for i < len(x) && '0' <= x[i] && x[i] <= '9' { + i++ + } + if i == 0 || x[0] == '0' && i != 1 { // no digits or unnecessary leading zero + return "", "", false + } + return x[:i], x[i:], true +} + +// cmpInt returns cmp.Compare(x, y) interpreting x and y as decimal numbers. +// (Copied from golang.org/x/mod/semver's compareInt.) +func cmpInt(x, y string) int { + if x == y { + return 0 + } + if len(x) < len(y) { + return -1 + } + if len(x) > len(y) { + return +1 + } + if x < y { + return -1 + } else { + return +1 + } +} diff --git a/vendor/golang.org/x/tools/internal/versions/types.go b/vendor/golang.org/x/tools/internal/versions/types.go new file mode 100644 index 0000000000..562eef21fa --- /dev/null +++ b/vendor/golang.org/x/tools/internal/versions/types.go @@ -0,0 +1,19 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package versions + +import ( + "go/types" +) + +// GoVersion returns the Go version of the type package. +// It returns zero if no version can be determined. +func GoVersion(pkg *types.Package) string { + // TODO(taking): x/tools can call GoVersion() [from 1.21] after 1.25. + if pkg, ok := any(pkg).(interface{ GoVersion() string }); ok { + return pkg.GoVersion() + } + return "" +} diff --git a/vendor/golang.org/x/tools/internal/versions/types_go121.go b/vendor/golang.org/x/tools/internal/versions/types_go121.go new file mode 100644 index 0000000000..a7b79207ae --- /dev/null +++ b/vendor/golang.org/x/tools/internal/versions/types_go121.go @@ -0,0 +1,20 @@ +// Copyright 2023 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.22 +// +build !go1.22 + +package versions + +import ( + "go/ast" + "go/types" +) + +// FileVersions always reports the a file's Go version as the +// zero version at this Go version. +func FileVersions(info *types.Info, file *ast.File) string { return "" } + +// InitFileVersions is a noop at this Go version. +func InitFileVersions(*types.Info) {} diff --git a/vendor/golang.org/x/tools/internal/versions/types_go122.go b/vendor/golang.org/x/tools/internal/versions/types_go122.go new file mode 100644 index 0000000000..7b9ba89a82 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/versions/types_go122.go @@ -0,0 +1,24 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.22 +// +build go1.22 + +package versions + +import ( + "go/ast" + "go/types" +) + +// FileVersions maps a file to the file's semantic Go version. +// The reported version is the zero version if a version cannot be determined. +func FileVersions(info *types.Info, file *ast.File) string { + return info.FileVersions[file] +} + +// InitFileVersions initializes info to record Go versions for Go files. +func InitFileVersions(info *types.Info) { + info.FileVersions = make(map[*ast.File]string) +} diff --git a/vendor/golang.org/x/tools/internal/versions/versions.go b/vendor/golang.org/x/tools/internal/versions/versions.go new file mode 100644 index 0000000000..e16f6c33a5 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/versions/versions.go @@ -0,0 +1,52 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package versions + +// Note: If we use build tags to use go/versions when go >=1.22, +// we run into go.dev/issue/53737. Under some operations users would see an +// import of "go/versions" even if they would not compile the file. +// For example, during `go get -u ./...` (go.dev/issue/64490) we do not try to include +// For this reason, this library just a clone of go/versions for the moment. + +// Lang returns the Go language version for version x. +// If x is not a valid version, Lang returns the empty string. +// For example: +// +// Lang("go1.21rc2") = "go1.21" +// Lang("go1.21.2") = "go1.21" +// Lang("go1.21") = "go1.21" +// Lang("go1") = "go1" +// Lang("bad") = "" +// Lang("1.21") = "" +func Lang(x string) string { + v := lang(stripGo(x)) + if v == "" { + return "" + } + return x[:2+len(v)] // "go"+v without allocation +} + +// Compare returns -1, 0, or +1 depending on whether +// x < y, x == y, or x > y, interpreted as Go versions. +// The versions x and y must begin with a "go" prefix: "go1.21" not "1.21". +// Invalid versions, including the empty string, compare less than +// valid versions and equal to each other. +// The language version "go1.21" compares less than the +// release candidate and eventual releases "go1.21rc1" and "go1.21.0". +// Custom toolchain suffixes are ignored during comparison: +// "go1.21.0" and "go1.21.0-bigcorp" are equal. +func Compare(x, y string) int { return compare(stripGo(x), stripGo(y)) } + +// IsValid reports whether the version x is valid. +func IsValid(x string) bool { return isValid(stripGo(x)) } + +// stripGo converts from a "go1.21" version to a "1.21" version. 
+// If v does not start with "go", stripGo returns the empty string (a known invalid version). +func stripGo(v string) string { + if len(v) < 2 || v[:2] != "go" { + return "" + } + return v[2:] +} diff --git a/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-gen.go b/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-gen.go index 0a6304d51d..5dfff4cb2c 100644 --- a/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-gen.go +++ b/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-gen.go @@ -8,6 +8,17 @@ // // For product documentation, see: https://cloud.google.com/iam/docs/creating-short-lived-service-account-credentials // +// # Library status +// +// These client libraries are officially supported by Google. However, this +// library is considered complete and is in maintenance mode. This means +// that we will address critical bugs and security issues but will not add +// any new features. +// +// When possible, we recommend using our newer +// [Cloud Client Libraries for Go](https://pkg.go.dev/cloud.google.com/go) +// that are still actively being worked and iterated on. +// // # Creating a client // // Usage example: @@ -17,24 +28,26 @@ // ctx := context.Background() // iamcredentialsService, err := iamcredentials.NewService(ctx) // -// In this example, Google Application Default Credentials are used for authentication. -// -// For information on how to create and obtain Application Default Credentials, see https://developers.google.com/identity/protocols/application-default-credentials. +// In this example, Google Application Default Credentials are used for +// authentication. For information on how to create and obtain Application +// Default Credentials, see https://developers.google.com/identity/protocols/application-default-credentials. // // # Other authentication options // -// To use an API key for authentication (note: some APIs do not support API keys), use option.WithAPIKey: +// To use an API key for authentication (note: some APIs do not support API +// keys), use [google.golang.org/api/option.WithAPIKey]: // // iamcredentialsService, err := iamcredentials.NewService(ctx, option.WithAPIKey("AIza...")) // -// To use an OAuth token (e.g., a user token obtained via a three-legged OAuth flow), use option.WithTokenSource: +// To use an OAuth token (e.g., a user token obtained via a three-legged OAuth +// flow, use [google.golang.org/api/option.WithTokenSource]: // // config := &oauth2.Config{...} // // ... // token, err := config.Exchange(ctx, ...) // iamcredentialsService, err := iamcredentials.NewService(ctx, option.WithTokenSource(config.TokenSource(ctx, token))) // -// See https://godoc.org/google.golang.org/api/option/ for details on options. +// See [google.golang.org/api/option.ClientOption] for details on options. package iamcredentials // import "google.golang.org/api/iamcredentials/v1" import ( diff --git a/vendor/google.golang.org/api/internal/cba.go b/vendor/google.golang.org/api/internal/cba.go index cecbb9ba11..829383f55b 100644 --- a/vendor/google.golang.org/api/internal/cba.go +++ b/vendor/google.golang.org/api/internal/cba.go @@ -91,16 +91,10 @@ func getTransportConfig(settings *DialSettings) (*transportConfig, error) { s2aMTLSEndpoint: "", } - // Check the env to determine whether to use S2A. - if !isGoogleS2AEnabled() { + if !shouldUseS2A(clientCertSource, settings) { return &defaultTransportConfig, nil } - // If client cert is found, use that over S2A. - // If MTLS is not enabled for the endpoint, skip S2A. 
- if clientCertSource != nil || !mtlsEndpointEnabledForS2A() { - return &defaultTransportConfig, nil - } s2aMTLSEndpoint := settings.DefaultMTLSEndpoint // If there is endpoint override, honor it. if settings.Endpoint != "" { @@ -118,10 +112,6 @@ func getTransportConfig(settings *DialSettings) (*transportConfig, error) { }, nil } -func isGoogleS2AEnabled() bool { - return strings.ToLower(os.Getenv(googleAPIUseS2AEnv)) == "true" -} - // getClientCertificateSource returns a default client certificate source, if // not provided by the user. // @@ -275,8 +265,36 @@ func GetHTTPTransportConfigAndEndpoint(settings *DialSettings) (cert.Source, fun return nil, dialTLSContextFunc, config.s2aMTLSEndpoint, nil } +func shouldUseS2A(clientCertSource cert.Source, settings *DialSettings) bool { + // If client cert is found, use that over S2A. + if clientCertSource != nil { + return false + } + // If EXPERIMENTAL_GOOGLE_API_USE_S2A is not set to true, skip S2A. + if !isGoogleS2AEnabled() { + return false + } + // If DefaultMTLSEndpoint is not set and no endpoint override, skip S2A. + if settings.DefaultMTLSEndpoint == "" && settings.Endpoint == "" { + return false + } + // If MTLS is not enabled for this endpoint, skip S2A. + if !mtlsEndpointEnabledForS2A() { + return false + } + // If custom HTTP client is provided, skip S2A. + if settings.HTTPClient != nil { + return false + } + return true +} + // mtlsEndpointEnabledForS2A checks if the endpoint is indeed MTLS-enabled, so that we can use S2A for MTLS connection. var mtlsEndpointEnabledForS2A = func() bool { // TODO(xmenxk): determine this via discovery config. return true } + +func isGoogleS2AEnabled() bool { + return strings.ToLower(os.Getenv(googleAPIUseS2AEnv)) == "true" +} diff --git a/vendor/google.golang.org/api/internal/creds.go b/vendor/google.golang.org/api/internal/creds.go index 92b3acf6ed..05165f333b 100644 --- a/vendor/google.golang.org/api/internal/creds.go +++ b/vendor/google.golang.org/api/internal/creds.go @@ -78,9 +78,8 @@ const ( // met: // // (1) At least one of the following is true: -// (a) No scope is provided -// (b) Scope for self-signed JWT flow is enabled -// (c) Audiences are explicitly provided by users +// (a) Scope for self-signed JWT flow is enabled +// (b) Audiences are explicitly provided by users // (2) No service account impersontation // // - Otherwise, executes standard OAuth 2.0 flow diff --git a/vendor/google.golang.org/api/internal/gensupport/send.go b/vendor/google.golang.org/api/internal/gensupport/send.go index 693a1b1aba..f39dd00d99 100644 --- a/vendor/google.golang.org/api/internal/gensupport/send.go +++ b/vendor/google.golang.org/api/internal/gensupport/send.go @@ -15,6 +15,7 @@ import ( "github.com/google/uuid" "github.com/googleapis/gax-go/v2" + "github.com/googleapis/gax-go/v2/callctx" ) // Use this error type to return an error which allows introspection of both @@ -43,6 +44,16 @@ func (e wrappedCallErr) Is(target error) bool { // req.WithContext, then calls any functions returned by the hooks in // reverse order. func SendRequest(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) { + // Add headers set in context metadata. + if ctx != nil { + headers := callctx.HeadersFromContext(ctx) + for k, vals := range headers { + for _, v := range vals { + req.Header.Add(k, v) + } + } + } + // Disallow Accept-Encoding because it interferes with the automatic gzip handling // done by the default http.Transport. See https://github.com/google/google-api-go-client/issues/219. 
if _, ok := req.Header["Accept-Encoding"]; ok { @@ -77,6 +88,16 @@ func send(ctx context.Context, client *http.Client, req *http.Request) (*http.Re // req.WithContext, then calls any functions returned by the hooks in // reverse order. func SendRequestWithRetry(ctx context.Context, client *http.Client, req *http.Request, retry *RetryConfig) (*http.Response, error) { + // Add headers set in context metadata. + if ctx != nil { + headers := callctx.HeadersFromContext(ctx) + for k, vals := range headers { + for _, v := range vals { + req.Header.Add(k, v) + } + } + } + // Disallow Accept-Encoding because it interferes with the automatic gzip handling // done by the default http.Transport. See https://github.com/google/google-api-go-client/issues/219. if _, ok := req.Header["Accept-Encoding"]; ok { diff --git a/vendor/google.golang.org/api/internal/s2a.go b/vendor/google.golang.org/api/internal/s2a.go index c5b421f554..c70f2419b4 100644 --- a/vendor/google.golang.org/api/internal/s2a.go +++ b/vendor/google.golang.org/api/internal/s2a.go @@ -13,7 +13,7 @@ import ( "cloud.google.com/go/compute/metadata" ) -const configEndpointSuffix = "googleAutoMtlsConfiguration" +const configEndpointSuffix = "instance/platform-security/auto-mtls-configuration" // The period an MTLS config can be reused before needing refresh. var configExpiry = time.Hour diff --git a/vendor/google.golang.org/api/internal/settings.go b/vendor/google.golang.org/api/internal/settings.go index 3a3874df11..84f9302dcf 100644 --- a/vendor/google.golang.org/api/internal/settings.go +++ b/vendor/google.golang.org/api/internal/settings.go @@ -9,6 +9,8 @@ import ( "crypto/tls" "errors" "net/http" + "os" + "strconv" "golang.org/x/oauth2" "golang.org/x/oauth2/google" @@ -16,6 +18,10 @@ import ( "google.golang.org/grpc" ) +const ( + newAuthLibEnVar = "GOOGLE_API_GO_EXPERIMENTAL_USE_NEW_AUTH_LIB" +) + // DialSettings holds information needed to establish a connection with a // Google API service. type DialSettings struct { @@ -47,6 +53,7 @@ type DialSettings struct { ImpersonationConfig *impersonate.Config EnableDirectPath bool EnableDirectPathXds bool + EnableNewAuthLibrary bool AllowNonDefaultServiceAccount bool // Google API system parameters. For more information please read: @@ -77,6 +84,16 @@ func (ds *DialSettings) HasCustomAudience() bool { return len(ds.Audiences) > 0 } +func (ds *DialSettings) IsNewAuthLibraryEnabled() bool { + if ds.EnableNewAuthLibrary { + return true + } + if b, err := strconv.ParseBool(os.Getenv(newAuthLibEnVar)); err == nil { + return b + } + return false +} + // Validate reports an error if ds is invalid. func (ds *DialSettings) Validate() error { if ds.SkipValidation { diff --git a/vendor/google.golang.org/api/internal/version.go b/vendor/google.golang.org/api/internal/version.go index 46ad187ec1..c95bd7144f 100644 --- a/vendor/google.golang.org/api/internal/version.go +++ b/vendor/google.golang.org/api/internal/version.go @@ -5,4 +5,4 @@ package internal // Version is the current tagged release of the library. 
-const Version = "0.126.0" +const Version = "0.152.0" diff --git a/vendor/google.golang.org/api/option/internaloption/internaloption.go b/vendor/google.golang.org/api/option/internaloption/internaloption.go index 3b8461d1da..b2b249eec6 100644 --- a/vendor/google.golang.org/api/option/internaloption/internaloption.go +++ b/vendor/google.golang.org/api/option/internaloption/internaloption.go @@ -150,6 +150,19 @@ func (w *withCreds) Apply(o *internal.DialSettings) { o.InternalCredentials = (*google.Credentials)(w) } +// EnableNewAuthLibrary returns a ClientOption that specifies if libraries in this +// module to delegate auth to our new library. This option will be removed in +// the future once all clients have been moved to the new auth layer. +func EnableNewAuthLibrary() option.ClientOption { + return enableNewAuthLibrary(true) +} + +type enableNewAuthLibrary bool + +func (w enableNewAuthLibrary) Apply(o *internal.DialSettings) { + o.EnableNewAuthLibrary = bool(w) +} + // EmbeddableAdapter is a no-op option.ClientOption that allow libraries to // create their own client options by embedding this type into their own // client-specific option wrapper. See example for usage. diff --git a/vendor/google.golang.org/api/storage/v1/storage-api.json b/vendor/google.golang.org/api/storage/v1/storage-api.json index edebc73ad4..5481a74cbb 100644 --- a/vendor/google.golang.org/api/storage/v1/storage-api.json +++ b/vendor/google.golang.org/api/storage/v1/storage-api.json @@ -26,7 +26,7 @@ "description": "Stores and retrieves potentially large, immutable data objects.", "discoveryVersion": "v1", "documentationLink": "https://developers.google.com/storage/docs/json_api/", - "etag": "\"34333739363230323936363635393736363430\"", + "etag": "\"39383633393336373936373236333033393737\"", "icons": { "x16": "https://www.google.com/images/icons/product/cloud_storage-16.png", "x32": "https://www.google.com/images/icons/product/cloud_storage-32.png" @@ -92,6 +92,242 @@ }, "protocol": "rest", "resources": { + "anywhereCache": { + "methods": { + "disable": { + "description": "Disables an Anywhere Cache instance.", + "httpMethod": "POST", + "id": "storage.anywhereCaches.disable", + "parameterOrder": [ + "bucket", + "anywhereCacheId" + ], + "parameters": { + "anywhereCacheId": { + "description": "The ID of requested Anywhere Cache instance.", + "location": "path", + "required": true, + "type": "string" + }, + "bucket": { + "description": "Name of the partent bucket", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "b/{bucket}/anywhereCaches/{anywhereCacheId}/disable", + "response": { + "$ref": "AnywhereCache" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "get": { + "description": "Returns the metadata of an Anywhere Cache instance.", + "httpMethod": "GET", + "id": "storage.anywhereCaches.get", + "parameterOrder": [ + "bucket", + "anywhereCacheId" + ], + "parameters": { + "anywhereCacheId": { + "description": "The ID of requested Anywhere Cache instance.", + "location": "path", + "required": true, + "type": "string" + }, + "bucket": { + "description": "Name of the partent bucket", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "b/{bucket}/anywhereCaches/{anywhereCacheId}", + "response": { + "$ref": "AnywhereCache" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + 
"https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "insert": { + "description": "Creates an Anywhere Cache instance.", + "httpMethod": "POST", + "id": "storage.anywhereCaches.insert", + "parameterOrder": [ + "bucket" + ], + "parameters": { + "bucket": { + "description": "Name of the partent bucket", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "b/{bucket}/anywhereCaches", + "request": { + "$ref": "AnywhereCache" + }, + "response": { + "$ref": "GoogleLongrunningOperation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "list": { + "description": "Returns a list of Anywhere Cache instances of the bucket matching the criteria.", + "httpMethod": "GET", + "id": "storage.anywhereCaches.list", + "parameterOrder": [ + "bucket" + ], + "parameters": { + "bucket": { + "description": "Name of the partent bucket", + "location": "path", + "required": true, + "type": "string" + }, + "pageSize": { + "description": "Maximum number of items return in a single page of responses. Maximum 1000.", + "format": "int32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "pageToken": { + "description": "A previously-returned page token representing part of the larger set of results to view.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/anywhereCache", + "response": { + "$ref": "AnywhereCaches" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "pause": { + "description": "Pauses an Anywhere Cache instance.", + "httpMethod": "POST", + "id": "storage.anywhereCaches.pause", + "parameterOrder": [ + "bucket", + "anywhereCacheId" + ], + "parameters": { + "anywhereCacheId": { + "description": "The ID of requested Anywhere Cache instance.", + "location": "path", + "required": true, + "type": "string" + }, + "bucket": { + "description": "Name of the partent bucket", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "b/{bucket}/anywhereCaches/{anywhereCacheId}/pause", + "response": { + "$ref": "AnywhereCache" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "resume": { + "description": "Resumes a paused or disabled Anywhere Cache instance.", + "httpMethod": "POST", + "id": "storage.anywhereCaches.resume", + "parameterOrder": [ + "bucket", + "anywhereCacheId" + ], + "parameters": { + "anywhereCacheId": { + "description": "The ID of requested Anywhere Cache instance.", + "location": "path", + "required": true, + "type": "string" + }, + "bucket": { + "description": "Name of the partent bucket", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "b/{bucket}/anywhereCaches/{anywhereCacheId}/resume", + "response": { + "$ref": "AnywhereCache" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + 
"https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "update": { + "description": "Updates the config(ttl and admissionPolicy) of an Anywhere Cache instance.", + "httpMethod": "PATCH", + "id": "storage.anywhereCaches.update", + "parameterOrder": [ + "bucket", + "anywhereCacheId" + ], + "parameters": { + "anywhereCacheId": { + "description": "The ID of requested Anywhere Cache instance.", + "location": "path", + "required": true, + "type": "string" + }, + "bucket": { + "description": "Name of the partent bucket", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "b/{bucket}/anywhereCaches/{anywhereCacheId}", + "request": { + "$ref": "AnywhereCache" + }, + "response": { + "$ref": "GoogleLongrunningOperation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + } + } + }, "bucketAccessControls": { "methods": { "delete": { @@ -446,6 +682,12 @@ "project" ], "parameters": { + "enableObjectRetention": { + "default": "false", + "description": "When set to true, object retention is enabled for this bucket.", + "location": "query", + "type": "boolean" + }, "predefinedAcl": { "description": "Apply a predefined set of access controls to this bucket.", "enum": [ @@ -1138,6 +1380,301 @@ } } }, + "managedFolders": { + "methods": { + "delete": { + "description": "Permanently deletes a managed folder.", + "httpMethod": "DELETE", + "id": "storage.managedFolders.delete", + "parameterOrder": [ + "bucket", + "managedFolder" + ], + "parameters": { + "bucket": { + "description": "Name of the bucket containing the managed folder.", + "location": "path", + "required": true, + "type": "string" + }, + "ifMetagenerationMatch": { + "description": "If set, only deletes the managed folder if its metageneration matches this value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifMetagenerationNotMatch": { + "description": "If set, only deletes the managed folder if its metageneration does not match this value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "managedFolder": { + "description": "The managed folder name/path.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "b/{bucket}/managedFolders/{managedFolder}", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "get": { + "description": "Returns metadata of the specified managed folder.", + "httpMethod": "GET", + "id": "storage.managedFolders.get", + "parameterOrder": [ + "bucket", + "managedFolder" + ], + "parameters": { + "bucket": { + "description": "Name of the bucket containing the managed folder.", + "location": "path", + "required": true, + "type": "string" + }, + "ifMetagenerationMatch": { + "description": "Makes the return of the managed folder metadata conditional on whether the managed folder's current metageneration matches the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifMetagenerationNotMatch": { + "description": "Makes the return of the managed folder metadata conditional on whether the managed folder's current metageneration does not match the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + 
"managedFolder": { + "description": "The managed folder name/path.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "b/{bucket}/managedFolders/{managedFolder}", + "response": { + "$ref": "ManagedFolder" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "getIamPolicy": { + "description": "Returns an IAM policy for the specified managed folder.", + "httpMethod": "GET", + "id": "storage.managedFolders.getIamPolicy", + "parameterOrder": [ + "bucket", + "managedFolder" + ], + "parameters": { + "bucket": { + "description": "Name of the bucket containing the managed folder.", + "location": "path", + "required": true, + "type": "string" + }, + "managedFolder": { + "description": "The managed folder name/path.", + "location": "path", + "required": true, + "type": "string" + }, + "optionsRequestedPolicyVersion": { + "description": "The IAM policy format version to be returned. If the optionsRequestedPolicyVersion is for an older version that doesn't support part of the requested IAM policy, the request fails.", + "format": "int32", + "location": "query", + "minimum": "1", + "type": "integer" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/managedFolders/{managedFolder}/iam", + "response": { + "$ref": "Policy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "insert": { + "description": "Creates a new managed folder.", + "httpMethod": "POST", + "id": "storage.managedFolders.insert", + "parameterOrder": [ + "bucket" + ], + "parameters": { + "bucket": { + "description": "Name of the bucket containing the managed folder.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "b/{bucket}/managedFolders", + "request": { + "$ref": "ManagedFolder" + }, + "response": { + "$ref": "ManagedFolder" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "list": { + "description": "Lists managed folders in the given bucket.", + "httpMethod": "GET", + "id": "storage.managedFolders.list", + "parameterOrder": [ + "bucket" + ], + "parameters": { + "bucket": { + "description": "Name of the bucket containing the managed folder.", + "location": "path", + "required": true, + "type": "string" + }, + "pageSize": { + "description": "Maximum number of items return in a single page of responses.", + "format": "int32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "pageToken": { + "description": "A previously-returned page token representing part of the larger set of results to view.", + "location": "query", + "type": "string" + }, + "prefix": { + "description": "The managed folder name/path prefix to filter the output list of results.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/managedFolders", 
+ "response": { + "$ref": "ManagedFolders" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "setIamPolicy": { + "description": "Updates an IAM policy for the specified managed folder.", + "httpMethod": "PUT", + "id": "storage.managedFolders.setIamPolicy", + "parameterOrder": [ + "bucket", + "managedFolder" + ], + "parameters": { + "bucket": { + "description": "Name of the bucket containing the managed folder.", + "location": "path", + "required": true, + "type": "string" + }, + "managedFolder": { + "description": "The managed folder name/path.", + "location": "path", + "required": true, + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/managedFolders/{managedFolder}/iam", + "request": { + "$ref": "Policy" + }, + "response": { + "$ref": "Policy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "testIamPermissions": { + "description": "Tests a set of permissions on the given managed folder to see which, if any, are held by the caller.", + "httpMethod": "GET", + "id": "storage.managedFolders.testIamPermissions", + "parameterOrder": [ + "bucket", + "managedFolder", + "permissions" + ], + "parameters": { + "bucket": { + "description": "Name of the bucket containing the managed folder.", + "location": "path", + "required": true, + "type": "string" + }, + "managedFolder": { + "description": "The managed folder name/path.", + "location": "path", + "required": true, + "type": "string" + }, + "permissions": { + "description": "Permissions to test.", + "location": "query", + "repeated": true, + "required": true, + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/managedFolders/{managedFolder}/iam/testPermissions", + "response": { + "$ref": "TestIamPermissionsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + } + } + }, "notifications": { "methods": { "delete": { @@ -1311,7 +1848,7 @@ "type": "string" }, "object": { - "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", "location": "path", "required": true, "type": "string" @@ -1357,7 +1894,7 @@ "type": "string" }, "object": { - "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "description": "Name of the object. 
For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", "location": "path", "required": true, "type": "string" @@ -1399,7 +1936,7 @@ "type": "string" }, "object": { - "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", "location": "path", "required": true, "type": "string" @@ -1444,7 +1981,7 @@ "type": "string" }, "object": { - "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", "location": "path", "required": true, "type": "string" @@ -1493,7 +2030,7 @@ "type": "string" }, "object": { - "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", "location": "path", "required": true, "type": "string" @@ -1545,7 +2082,7 @@ "type": "string" }, "object": { - "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", "location": "path", "required": true, "type": "string" @@ -1572,6 +2109,34 @@ }, "objects": { "methods": { + "bulkRestore": { + "description": "Initiates a long-running bulk restore operation on the specified bucket.", + "httpMethod": "POST", + "id": "storage.objects.bulkRestore", + "parameterOrder": [ + "bucket" + ], + "parameters": { + "bucket": { + "description": "Name of the bucket in which the object resides.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "b/{bucket}/o/bulkRestore", + "request": { + "$ref": "BulkRestoreObjectsRequest" + }, + "response": { + "$ref": "GoogleLongrunningOperation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, "compose": { "description": "Concatenates a list of existing objects into a new object in the same bucket.", "httpMethod": "POST", @@ -1588,7 +2153,7 @@ "type": "string" }, "destinationObject": { - "description": "Name of the new object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "description": "Name of the new object. 
For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", "location": "path", "required": true, "type": "string" @@ -1662,7 +2227,7 @@ ], "parameters": { "destinationBucket": { - "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", "location": "path", "required": true, "type": "string" @@ -1773,7 +2338,7 @@ "type": "string" }, "sourceObject": { - "description": "Name of the source object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "description": "Name of the source object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", "location": "path", "required": true, "type": "string" @@ -1843,7 +2408,7 @@ "type": "string" }, "object": { - "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", "location": "path", "required": true, "type": "string" @@ -1907,7 +2472,7 @@ "type": "string" }, "object": { - "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", "location": "path", "required": true, "type": "string" @@ -1925,6 +2490,11 @@ "location": "query", "type": "string" }, + "softDeleted": { + "description": "If true, only soft-deleted object versions will be listed. The default is false. For more information, see Soft Delete.", + "location": "query", + "type": "boolean" + }, "userProject": { "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query", @@ -1967,7 +2537,7 @@ "type": "string" }, "object": { - "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", "location": "path", "required": true, "type": "string" @@ -2054,7 +2624,7 @@ "type": "string" }, "name": { - "description": "Name of the object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "description": "Name of the object. Required when the object metadata is not otherwise provided. 
Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", "location": "query", "type": "string" }, @@ -2136,6 +2706,11 @@ "location": "query", "type": "string" }, + "includeFoldersAsPrefixes": { + "description": "Only applicable if delimiter is set to '/'. If true, will also include folders and managed folders (besides objects) in the returned prefixes.", + "location": "query", + "type": "boolean" + }, "includeTrailingDelimiter": { "description": "If true, objects that end in exactly one instance of delimiter will have their metadata included in items in addition to prefixes.", "location": "query", @@ -2177,6 +2752,11 @@ "location": "query", "type": "string" }, + "softDeleted": { + "description": "If true, only soft-deleted object versions will be listed. The default is false. For more information, see Soft Delete.", + "location": "query", + "type": "boolean" + }, "startOffset": { "description": "Filter results to objects whose names are lexicographically equal to or after startOffset. If endOffset is also set, the objects listed will have names between startOffset (inclusive) and endOffset (exclusive).", "location": "query", @@ -2252,11 +2832,16 @@ "type": "string" }, "object": { - "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", "location": "path", "required": true, "type": "string" }, + "overrideUnlockedRetention": { + "description": "Must be true to remove the retention configuration, reduce its unlocked retention period, or change its mode from unlocked to locked.", + "location": "query", + "type": "boolean" + }, "predefinedAcl": { "description": "Apply a predefined set of access controls to this object.", "enum": [ @@ -2309,6 +2894,94 @@ "https://www.googleapis.com/auth/devstorage.full_control" ] }, + "restore": { + "description": "Restores a soft-deleted object.", + "httpMethod": "POST", + "id": "storage.objects.restore", + "parameterOrder": [ + "bucket", + "object" + ], + "parameters": { + "bucket": { + "description": "Name of the bucket in which the object resides.", + "location": "path", + "required": true, + "type": "string" + }, + "copySourceAcl": { + "description": "If true, copies the source object's ACL; otherwise, uses the bucket's default object ACL. The default is false.", + "location": "query", + "type": "boolean" + }, + "generation": { + "description": "Selects a specific revision of this object.", + "format": "int64", + "location": "query", + "required": true, + "type": "string" + }, + "ifGenerationMatch": { + "description": "Makes the operation conditional on whether the object's one live generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifGenerationNotMatch": { + "description": "Makes the operation conditional on whether none of the object's live generations match the given value. If no live object exists, the precondition fails. 
Setting to 0 makes the operation succeed only if there is a live version of the object.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifMetagenerationMatch": { + "description": "Makes the operation conditional on whether the object's one live metageneration matches the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifMetagenerationNotMatch": { + "description": "Makes the operation conditional on whether none of the object's live metagenerations match the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "object": { + "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "location": "path", + "required": true, + "type": "string" + }, + "projection": { + "description": "Set of properties to return. Defaults to full.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit the owner, acl property." + ], + "location": "query", + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/o/{object}/restore", + "request": { + "$ref": "Object" + }, + "response": { + "$ref": "Object" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, "rewrite": { "description": "Rewrites a source object to a destination object. Optionally overrides metadata.", "httpMethod": "POST", @@ -2332,7 +3005,7 @@ "type": "string" }, "destinationObject": { - "description": "Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "description": "Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", "location": "path", "required": true, "type": "string" @@ -2443,7 +3116,7 @@ "type": "string" }, "sourceObject": { - "description": "Name of the source object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "description": "Name of the source object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", "location": "path", "required": true, "type": "string" @@ -2489,7 +3162,7 @@ "type": "string" }, "object": { - "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", "location": "path", "required": true, "type": "string" @@ -2536,7 +3209,7 @@ "type": "string" }, "object": { - "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "description": "Name of the object. 
For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", "location": "path", "required": true, "type": "string" @@ -2612,11 +3285,16 @@ "type": "string" }, "object": { - "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", "location": "path", "required": true, "type": "string" }, + "overrideUnlockedRetention": { + "description": "Must be true to remove the retention configuration, reduce its unlocked retention period, or change its mode from unlocked to locked.", + "location": "query", + "type": "boolean" + }, "predefinedAcl": { "description": "Apply a predefined set of access controls to this object.", "enum": [ @@ -2688,20 +3366,169 @@ "location": "query", "type": "string" }, - "endOffset": { - "description": "Filter results to objects whose names are lexicographically before endOffset. If startOffset is also set, the objects listed will have names between startOffset (inclusive) and endOffset (exclusive).", + "endOffset": { + "description": "Filter results to objects whose names are lexicographically before endOffset. If startOffset is also set, the objects listed will have names between startOffset (inclusive) and endOffset (exclusive).", + "location": "query", + "type": "string" + }, + "includeTrailingDelimiter": { + "description": "If true, objects that end in exactly one instance of delimiter will have their metadata included in items in addition to prefixes.", + "location": "query", + "type": "boolean" + }, + "maxResults": { + "default": "1000", + "description": "Maximum number of items plus prefixes to return in a single page of responses. As duplicate prefixes are omitted, fewer total results may be returned than requested. The service will use this parameter or 1,000 items, whichever is smaller.", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "pageToken": { + "description": "A previously-returned page token representing part of the larger set of results to view.", + "location": "query", + "type": "string" + }, + "prefix": { + "description": "Filter results to objects whose names begin with this prefix.", + "location": "query", + "type": "string" + }, + "projection": { + "description": "Set of properties to return. Defaults to noAcl.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit the owner, acl property." + ], + "location": "query", + "type": "string" + }, + "startOffset": { + "description": "Filter results to objects whose names are lexicographically equal to or after startOffset. If endOffset is also set, the objects listed will have names between startOffset (inclusive) and endOffset (exclusive).", + "location": "query", + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + }, + "versions": { + "description": "If true, lists all versions of an object as distinct results. The default is false. 
For more information, see Object Versioning.", + "location": "query", + "type": "boolean" + } + }, + "path": "b/{bucket}/o/watch", + "request": { + "$ref": "Channel", + "parameterName": "resource" + }, + "response": { + "$ref": "Channel" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ], + "supportsSubscription": true + } + } + }, + "operations": { + "methods": { + "cancel": { + "description": "Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed.", + "httpMethod": "POST", + "id": "storage.buckets.operations.cancel", + "parameterOrder": [ + "bucket", + "operationId" + ], + "parameters": { + "bucket": { + "description": "The parent bucket of the operation resource.", + "location": "path", + "required": true, + "type": "string" + }, + "operationId": { + "description": "The ID of the operation resource.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "b/{bucket}/operations/{operationId}/cancel", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "get": { + "description": "Gets the latest state of a long-running operation.", + "httpMethod": "GET", + "id": "storage.buckets.operations.get", + "parameterOrder": [ + "bucket", + "operationId" + ], + "parameters": { + "bucket": { + "description": "The parent bucket of the operation resource.", + "location": "path", + "required": true, + "type": "string" + }, + "operationId": { + "description": "The ID of the operation resource.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "b/{bucket}/operations/{operationId}", + "response": { + "$ref": "GoogleLongrunningOperation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "list": { + "description": "Lists operations that match the specified filter in the request.", + "httpMethod": "GET", + "id": "storage.buckets.operations.list", + "parameterOrder": [ + "bucket" + ], + "parameters": { + "bucket": { + "description": "Name of the bucket in which to look for operations.", + "location": "path", + "required": true, + "type": "string" + }, + "filter": { + "description": "A filter to narrow down results to a preferred subset. The filtering language is documented in more detail in [AIP-160](https://google.aip.dev/160).", "location": "query", "type": "string" }, - "includeTrailingDelimiter": { - "description": "If true, objects that end in exactly one instance of delimiter will have their metadata included in items in addition to prefixes.", - "location": "query", - "type": "boolean" - }, - "maxResults": { - "default": "1000", - "description": "Maximum number of items plus prefixes to return in a single page of responses. As duplicate prefixes are omitted, fewer total results may be returned than requested. 
The service will use this parameter or 1,000 items, whichever is smaller.", - "format": "uint32", + "pageSize": { + "description": "Maximum number of items to return in a single page of responses. Fewer total results may be returned than requested. The service uses this parameter or 100 items, whichever is smaller.", + "format": "int32", "location": "query", "minimum": "0", "type": "integer" @@ -2710,48 +3537,11 @@ "description": "A previously-returned page token representing part of the larger set of results to view.", "location": "query", "type": "string" - }, - "prefix": { - "description": "Filter results to objects whose names begin with this prefix.", - "location": "query", - "type": "string" - }, - "projection": { - "description": "Set of properties to return. Defaults to noAcl.", - "enum": [ - "full", - "noAcl" - ], - "enumDescriptions": [ - "Include all properties.", - "Omit the owner, acl property." - ], - "location": "query", - "type": "string" - }, - "startOffset": { - "description": "Filter results to objects whose names are lexicographically equal to or after startOffset. If endOffset is also set, the objects listed will have names between startOffset (inclusive) and endOffset (exclusive).", - "location": "query", - "type": "string" - }, - "userProject": { - "description": "The project to be billed for this request. Required for Requester Pays buckets.", - "location": "query", - "type": "string" - }, - "versions": { - "description": "If true, lists all versions of an object as distinct results. The default is false. For more information, see Object Versioning.", - "location": "query", - "type": "boolean" } }, - "path": "b/{bucket}/o/watch", - "request": { - "$ref": "Channel", - "parameterName": "resource" - }, + "path": "b/{bucket}/operations", "response": { - "$ref": "Channel" + "$ref": "GoogleLongrunningListOperationsResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -2759,8 +3549,7 @@ "https://www.googleapis.com/auth/devstorage.full_control", "https://www.googleapis.com/auth/devstorage.read_only", "https://www.googleapis.com/auth/devstorage.read_write" - ], - "supportsSubscription": true + ] } } }, @@ -3010,9 +3799,87 @@ } } }, - "revision": "20230301", + "revision": "20231028", "rootUrl": "https://storage.googleapis.com/", "schemas": { + "AnywhereCache": { + "description": "An Anywhere Cache instance.", + "id": "AnywhereCache", + "properties": { + "admissionPolicy": { + "description": "The cache-level entry admission policy.", + "type": "string" + }, + "anywhereCacheId": { + "description": "The ID of the Anywhere cache instance.", + "type": "string" + }, + "bucket": { + "description": "The name of the bucket containing this cache instance.", + "type": "string" + }, + "createTime": { + "description": "The creation time of the cache instance in RFC 3339 format.", + "format": "date-time", + "type": "string" + }, + "id": { + "description": "The ID of the resource, including the project number, bucket name and anywhere cache ID.", + "type": "string" + }, + "kind": { + "default": "storage#anywhereCache", + "description": "The kind of item this is. 
For Anywhere Cache, this is always storage#anywhereCache.", + "type": "string" + }, + "pendingUpdate": { + "description": "True if the cache instance has an active Update long-running operation.", + "type": "boolean" + }, + "selfLink": { + "description": "The link to this cache instance.", + "type": "string" + }, + "state": { + "description": "The current state of the cache instance.", + "type": "string" + }, + "ttl": { + "description": "The TTL of all cache entries in whole seconds. e.g., \"7200s\". ", + "format": "google-duration", + "type": "string" + }, + "updateTime": { + "description": "The modification time of the cache instance metadata in RFC 3339 format.", + "format": "date-time", + "type": "string" + } + }, + "type": "object" + }, + "AnywhereCaches": { + "description": "A list of Anywhere Caches.", + "id": "AnywhereCaches", + "properties": { + "items": { + "description": "The list of items.", + "items": { + "$ref": "AnywhereCache" + }, + "type": "array" + }, + "kind": { + "default": "storage#anywhereCaches", + "description": "The kind of item this is. For lists of Anywhere Caches, this is always storage#anywhereCaches.", + "type": "string" + }, + "nextPageToken": { + "description": "The continuation token, used to page through large result sets. Provide this value in a subsequent request to return the next page of results.", + "type": "string" + } + }, + "type": "object" + }, "Bucket": { "description": "A bucket.", "id": "Bucket", @@ -3036,6 +3903,15 @@ "description": "Whether or not Autoclass is enabled on this bucket", "type": "boolean" }, + "terminalStorageClass": { + "description": "The storage class that objects in the bucket eventually transition to if they are not read for a certain length of time. Valid values are NEARLINE and ARCHIVE.", + "type": "string" + }, + "terminalStorageClassUpdateTime": { + "description": "A date and time in RFC 3339 format representing the time of the most recent update to \"terminalStorageClass\".", + "format": "date-time", + "type": "string" + }, "toggleTime": { "description": "A date and time in RFC 3339 format representing the instant at which \"enabled\" was last toggled.", "format": "date-time", @@ -3319,6 +4195,16 @@ "description": "The name of the bucket.", "type": "string" }, + "objectRetention": { + "description": "The bucket's object retention config.", + "properties": { + "mode": { + "description": "The bucket's object retention mode. Can be Enabled.", + "type": "string" + } + }, + "type": "object" + }, "owner": { "description": "The owner of the bucket. This is always the project team's owner group.", "properties": { @@ -3370,6 +4256,22 @@ "description": "The URI of this bucket.", "type": "string" }, + "softDeletePolicy": { + "description": "The bucket's soft delete policy, which defines the period of time that soft-deleted objects will be retained, and cannot be permanently deleted.", + "properties": { + "effectiveTime": { + "description": "Server-determined value that indicates the time from which the policy, or one with a greater retention, was effective. This value is in RFC 3339 format.", + "format": "date-time", + "type": "string" + }, + "retentionDurationSeconds": { + "description": "The duration in seconds that soft-deleted objects in the bucket will be retained and cannot be permanently deleted.", + "format": "int64", + "type": "string" + } + }, + "type": "object" + }, "storageClass": { "description": "The bucket's default storage class, used whenever no storageClass is specified for a newly-created object. 
This defines how objects in the bucket are stored and determines the SLA and the cost of storage. Values include MULTI_REGIONAL, REGIONAL, STANDARD, NEARLINE, COLDLINE, ARCHIVE, and DURABLE_REDUCED_AVAILABILITY. If this value is not specified when the bucket is created, it will default to STANDARD. For more information, see storage classes.", "type": "string" @@ -3525,6 +4427,38 @@ }, "type": "object" }, + "BulkRestoreObjectsRequest": { + "description": "A bulk restore objects request.", + "id": "BulkRestoreObjectsRequest", + "properties": { + "allowOverwrite": { + "description": "If false (default), the restore will not overwrite live objects with the same name at the destination. This means some deleted objects may be skipped. If true, live objects will be overwritten resulting in a noncurrent object (if versioning is enabled). If versioning is not enabled, overwriting the object will result in a soft-deleted object. In either case, if a noncurrent object already exists with the same name, a live version can be written without issue.", + "type": "boolean" + }, + "copySourceAcl": { + "description": "If true, copies the source object's ACL; otherwise, uses the bucket's default object ACL. The default is false.", + "type": "boolean" + }, + "matchGlobs": { + "description": "Restores only the objects matching any of the specified glob(s). If this parameter is not specified, all objects will be restored within the specified time range.", + "items": { + "type": "string" + }, + "type": "array" + }, + "softDeletedAfterTime": { + "description": "Restores only the objects that were soft-deleted after this time.", + "format": "date-time", + "type": "string" + }, + "softDeletedBeforeTime": { + "description": "Restores only the objects that were soft-deleted before this time.", + "format": "date-time", + "type": "string" + } + }, + "type": "object" + }, "Channel": { "description": "An notification channel used to watch for resource changes.", "id": "Channel", @@ -3656,6 +4590,86 @@ }, "type": "object" }, + "GoogleLongrunningListOperationsResponse": { + "description": "The response message for storage.buckets.operations.list.", + "id": "GoogleLongrunningListOperationsResponse", + "properties": { + "nextPageToken": { + "description": "The continuation token, used to page through large result sets. Provide this value in a subsequent request to return the next page of results.", + "type": "string" + }, + "operations": { + "description": "A list of operations that matches the specified filter in the request.", + "items": { + "$ref": "GoogleLongrunningOperation" + }, + "type": "array" + } + }, + "type": "object" + }, + "GoogleLongrunningOperation": { + "description": "This resource represents a long-running operation that is the result of a network API call.", + "id": "GoogleLongrunningOperation", + "properties": { + "done": { + "description": "If the value is \"false\", it means the operation is still in progress. If \"true\", the operation is completed, and either \"error\" or \"response\" is available.", + "type": "boolean" + }, + "error": { + "$ref": "GoogleRpcStatus", + "description": "The error result of the operation in case of failure or cancellation." + }, + "metadata": { + "additionalProperties": { + "description": "Properties of the object. Contains field @type with type URL.", + "type": "any" + }, + "description": "Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. 
Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.", + "type": "object" + }, + "name": { + "description": "The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the \"name\" should be a resource name ending with \"operations/{operationId}\".", + "type": "string" + }, + "response": { + "additionalProperties": { + "description": "Properties of the object. Contains field @type with type URL.", + "type": "any" + }, + "description": "The normal response of the operation in case of success. If the original method returns no data on success, such as \"Delete\", the response is google.protobuf.Empty. If the original method is standard Get/Create/Update, the response should be the resource. For other methods, the response should have the type \"XxxResponse\", where \"Xxx\" is the original method name. For example, if the original method name is \"TakeSnapshot()\", the inferred response type is \"TakeSnapshotResponse\".", + "type": "object" + } + }, + "type": "object" + }, + "GoogleRpcStatus": { + "description": "The \"Status\" type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each \"Status\" message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors).", + "id": "GoogleRpcStatus", + "properties": { + "code": { + "description": "The status code, which should be an enum value of google.rpc.Code.", + "format": "int32", + "type": "integer" + }, + "details": { + "description": "A list of messages that carry the error details. There is a common set of message types for APIs to use.", + "items": { + "additionalProperties": { + "description": "Properties of the object. Contains field @type with type URL.", + "type": "any" + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "A developer-facing error message, which should be in English.", + "type": "string" + } + }, + "type": "object" + }, "HmacKey": { "description": "JSON template to produce a JSON-style HMAC Key resource for Create responses.", "id": "HmacKey", @@ -3749,6 +4763,72 @@ }, "type": "object" }, + "ManagedFolder": { + "description": "A managed folder.", + "id": "ManagedFolder", + "properties": { + "bucket": { + "description": "The name of the bucket containing this managed folder.", + "type": "string" + }, + "createTime": { + "description": "The creation time of the managed folder in RFC 3339 format.", + "format": "date-time", + "type": "string" + }, + "id": { + "description": "The ID of the managed folder, including the bucket name and managed folder name.", + "type": "string" + }, + "kind": { + "default": "storage#managedFolder", + "description": "The kind of item this is. For managed folders, this is always storage#managedFolder.", + "type": "string" + }, + "metageneration": { + "description": "The version of the metadata for this managed folder. Used for preconditions and for detecting changes in metadata.", + "format": "int64", + "type": "string" + }, + "name": { + "description": "The name of the managed folder. 
Required if not specified by URL parameter.", + "type": "string" + }, + "selfLink": { + "description": "The link to this managed folder.", + "type": "string" + }, + "updateTime": { + "description": "The last update time of the managed folder metadata in RFC 3339 format.", + "format": "date-time", + "type": "string" + } + }, + "type": "object" + }, + "ManagedFolders": { + "description": "A list of managed folders.", + "id": "ManagedFolders", + "properties": { + "items": { + "description": "The list of items.", + "items": { + "$ref": "ManagedFolder" + }, + "type": "array" + }, + "kind": { + "default": "storage#managedFolders", + "description": "The kind of item this is. For lists of managed folders, this is always storage#managedFolders.", + "type": "string" + }, + "nextPageToken": { + "description": "The continuation token, used to page through large result sets. Provide this value in a subsequent request to return the next page of results.", + "type": "string" + } + }, + "type": "object" + }, "Notification": { "description": "A subscription to receive Google PubSub notifications.", "id": "Notification", @@ -3910,6 +4990,11 @@ "format": "int64", "type": "string" }, + "hardDeleteTime": { + "description": "This is the time (in the future) when the soft-deleted object will no longer be restorable. It is equal to the soft delete time plus the current soft delete retention duration of the bucket.", + "format": "date-time", + "type": "string" + }, "id": { "description": "The ID of the object, including the bucket name, object name, and generation number.", "type": "string" @@ -3962,6 +5047,21 @@ }, "type": "object" }, + "retention": { + "description": "A collection of object level retention parameters.", + "properties": { + "mode": { + "description": "The bucket's object retention mode, can only be Unlocked or Locked.", + "type": "string" + }, + "retainUntilTime": { + "description": "A time in RFC 3339 format until which object retention protects this object.", + "format": "date-time", + "type": "string" + } + }, + "type": "object" + }, "retentionExpirationTime": { "description": "A server-determined value that specifies the earliest time that the object's retention period expires. This value is in RFC 3339 format. Note 1: This field is not provided for objects with an active event-based hold, since retention expiration is unknown until the hold is removed. Note 2: This value can be provided even when temporary hold is set (so that the user can reason about policy without having to first unset the temporary hold).", "format": "date-time", @@ -3976,6 +5076,11 @@ "format": "uint64", "type": "string" }, + "softDeleteTime": { + "description": "The time at which the object became soft-deleted in RFC 3339 format.", + "format": "date-time", + "type": "string" + }, "storageClass": { "description": "Storage class of the object.", "type": "string" @@ -3990,7 +5095,7 @@ "type": "string" }, "timeDeleted": { - "description": "The deletion time of the object in RFC 3339 format. Will be returned if and only if this version of the object has been deleted.", + "description": "The time at which the object became noncurrent in RFC 3339 format. 
Will be returned if and only if this version of the object has been deleted.", "format": "date-time", "type": "string" }, @@ -4140,14 +5245,15 @@ "type": "object" }, "Policy": { - "description": "A bucket/object IAM policy.", + "description": "A bucket/object/managedFolder IAM policy.", "id": "Policy", "properties": { "bindings": { "annotations": { "required": [ "storage.buckets.setIamPolicy", - "storage.objects.setIamPolicy" + "storage.objects.setIamPolicy", + "storage.managedFolders.setIamPolicy" ] }, "description": "An association between a role, which comes with a set of permissions, and members who may assume that role.", @@ -4161,7 +5267,8 @@ "annotations": { "required": [ "storage.buckets.setIamPolicy", - "storage.objects.setIamPolicy" + "storage.objects.setIamPolicy", + "storage.managedFolders.setIamPolicy" ] }, "description": "A collection of identifiers for members who may assume the provided role. Recognized identifiers are as follows: \n- allUsers — A special identifier that represents anyone on the internet; with or without a Google account. \n- allAuthenticatedUsers — A special identifier that represents anyone who is authenticated with a Google account or a service account. \n- user:emailid — An email address that represents a specific account. For example, user:alice@gmail.com or user:joe@example.com. \n- serviceAccount:emailid — An email address that represents a service account. For example, serviceAccount:my-other-app@appspot.gserviceaccount.com . \n- group:emailid — An email address that represents a Google group. For example, group:admins@example.com. \n- domain:domain — A Google Apps domain name that represents all the users of that domain. For example, domain:google.com or domain:example.com. \n- projectOwner:projectid — Owners of the given project. For example, projectOwner:my-example-project \n- projectEditor:projectid — Editors of the given project. For example, projectEditor:my-example-project \n- projectViewer:projectid — Viewers of the given project. For example, projectViewer:my-example-project", @@ -4174,7 +5281,8 @@ "annotations": { "required": [ "storage.buckets.setIamPolicy", - "storage.objects.setIamPolicy" + "storage.objects.setIamPolicy", + "storage.managedFolders.setIamPolicy" ] }, "description": "The role to which members belong. Two types of roles are supported: new IAM roles, which grant permissions that do not map directly to those provided by ACLs, and legacy IAM roles, which do map directly to ACL permissions. All roles are of the format roles/storage.specificRole.\nThe new IAM roles are: \n- roles/storage.admin — Full control of Google Cloud Storage resources. \n- roles/storage.objectViewer — Read-Only access to Google Cloud Storage objects. \n- roles/storage.objectCreator — Access to create objects in Google Cloud Storage. \n- roles/storage.objectAdmin — Full control of Google Cloud Storage objects. The legacy IAM roles are: \n- roles/storage.legacyObjectReader — Read-only access to objects without listing. Equivalent to an ACL entry on an object with the READER role. \n- roles/storage.legacyObjectOwner — Read/write access to existing objects without listing. Equivalent to an ACL entry on an object with the OWNER role. \n- roles/storage.legacyBucketReader — Read access to buckets with object listing. Equivalent to an ACL entry on a bucket with the READER role. \n- roles/storage.legacyBucketWriter — Read access to buckets with object listing/creation/deletion. Equivalent to an ACL entry on a bucket with the WRITER role. 
\n- roles/storage.legacyBucketOwner — Read and write access to existing buckets with object listing/creation/deletion. Equivalent to an ACL entry on a bucket with the OWNER role.", @@ -4196,7 +5304,7 @@ "type": "string" }, "resourceId": { - "description": "The ID of the resource to which this policy belongs. Will be of the form projects/_/buckets/bucket for buckets, and projects/_/buckets/bucket/objects/object for objects. A specific generation may be specified by appending #generationNumber to the end of the object name, e.g. projects/_/buckets/my-bucket/objects/data.txt#17. The current generation can be denoted with #0. This field is ignored on input.", + "description": "The ID of the resource to which this policy belongs. Will be of the form projects/_/buckets/bucket for buckets, projects/_/buckets/bucket/objects/object for objects, and projects/_/buckets/bucket/managedFolders/managedFolder. A specific generation may be specified by appending #generationNumber to the end of the object name, e.g. projects/_/buckets/my-bucket/objects/data.txt#17. The current generation can be denoted with #0. This field is ignored on input.", "type": "string" }, "version": { @@ -4258,7 +5366,7 @@ "type": "object" }, "TestIamPermissionsResponse": { - "description": "A storage.(buckets|objects).testIamPermissions response.", + "description": "A storage.(buckets|objects|managedFolders).testIamPermissions response.", "id": "TestIamPermissionsResponse", "properties": { "kind": { @@ -4267,7 +5375,7 @@ "type": "string" }, "permissions": { - "description": "The permissions held by the caller. Permissions are always of the format storage.resource.capability, where resource is one of buckets or objects. The supported permissions are as follows: \n- storage.buckets.delete — Delete bucket. \n- storage.buckets.get — Read bucket metadata. \n- storage.buckets.getIamPolicy — Read bucket IAM policy. \n- storage.buckets.create — Create bucket. \n- storage.buckets.list — List buckets. \n- storage.buckets.setIamPolicy — Update bucket IAM policy. \n- storage.buckets.update — Update bucket metadata. \n- storage.objects.delete — Delete object. \n- storage.objects.get — Read object data and metadata. \n- storage.objects.getIamPolicy — Read object IAM policy. \n- storage.objects.create — Create object. \n- storage.objects.list — List objects. \n- storage.objects.setIamPolicy — Update object IAM policy. \n- storage.objects.update — Update object metadata.", + "description": "The permissions held by the caller. Permissions are always of the format storage.resource.capability, where resource is one of buckets, objects, or managedFolders. The supported permissions are as follows: \n- storage.buckets.delete — Delete bucket. \n- storage.buckets.get — Read bucket metadata. \n- storage.buckets.getIamPolicy — Read bucket IAM policy. \n- storage.buckets.create — Create bucket. \n- storage.buckets.list — List buckets. \n- storage.buckets.setIamPolicy — Update bucket IAM policy. \n- storage.buckets.update — Update bucket metadata. \n- storage.objects.delete — Delete object. \n- storage.objects.get — Read object data and metadata. \n- storage.objects.getIamPolicy — Read object IAM policy. \n- storage.objects.create — Create object. \n- storage.objects.list — List objects. \n- storage.objects.setIamPolicy — Update object IAM policy. \n- storage.objects.update — Update object metadata. \n- storage.managedFolders.delete — Delete managed folder. \n- storage.managedFolders.get — Read managed folder metadata. 
\n- storage.managedFolders.getIamPolicy — Read managed folder IAM policy. \n- storage.managedFolders.create — Create managed folder. \n- storage.managedFolders.list — List managed folders. \n- storage.managedFolders.setIamPolicy — Update managed folder IAM policy.", "items": { "type": "string" }, diff --git a/vendor/google.golang.org/api/storage/v1/storage-gen.go b/vendor/google.golang.org/api/storage/v1/storage-gen.go index e11bf2e6d3..90320fcc0b 100644 --- a/vendor/google.golang.org/api/storage/v1/storage-gen.go +++ b/vendor/google.golang.org/api/storage/v1/storage-gen.go @@ -10,6 +10,17 @@ // // For product documentation, see: https://developers.google.com/storage/docs/json_api/ // +// # Library status +// +// These client libraries are officially supported by Google. However, this +// library is considered complete and is in maintenance mode. This means +// that we will address critical bugs and security issues but will not add +// any new features. +// +// When possible, we recommend using our newer +// [Cloud Client Libraries for Go](https://pkg.go.dev/cloud.google.com/go) +// that are still actively being worked and iterated on. +// // # Creating a client // // Usage example: @@ -19,28 +30,31 @@ // ctx := context.Background() // storageService, err := storage.NewService(ctx) // -// In this example, Google Application Default Credentials are used for authentication. -// -// For information on how to create and obtain Application Default Credentials, see https://developers.google.com/identity/protocols/application-default-credentials. +// In this example, Google Application Default Credentials are used for +// authentication. For information on how to create and obtain Application +// Default Credentials, see https://developers.google.com/identity/protocols/application-default-credentials. // // # Other authentication options // -// By default, all available scopes (see "Constants") are used to authenticate. To restrict scopes, use option.WithScopes: +// By default, all available scopes (see "Constants") are used to authenticate. +// To restrict scopes, use [google.golang.org/api/option.WithScopes]: // // storageService, err := storage.NewService(ctx, option.WithScopes(storage.DevstorageReadWriteScope)) // -// To use an API key for authentication (note: some APIs do not support API keys), use option.WithAPIKey: +// To use an API key for authentication (note: some APIs do not support API +// keys), use [google.golang.org/api/option.WithAPIKey]: // // storageService, err := storage.NewService(ctx, option.WithAPIKey("AIza...")) // -// To use an OAuth token (e.g., a user token obtained via a three-legged OAuth flow), use option.WithTokenSource: +// To use an OAuth token (e.g., a user token obtained via a three-legged OAuth +// flow, use [google.golang.org/api/option.WithTokenSource]: // // config := &oauth2.Config{...} // // ... // token, err := config.Exchange(ctx, ...) // storageService, err := storage.NewService(ctx, option.WithTokenSource(config.TokenSource(ctx, token))) // -// See https://godoc.org/google.golang.org/api/option/ for details on options. +// See [google.golang.org/api/option.ClientOption] for details on options. 
package storage // import "google.golang.org/api/storage/v1" import ( @@ -141,13 +155,16 @@ func New(client *http.Client) (*Service, error) { return nil, errors.New("client is nil") } s := &Service{client: client, BasePath: basePath} + s.AnywhereCache = NewAnywhereCacheService(s) s.BucketAccessControls = NewBucketAccessControlsService(s) s.Buckets = NewBucketsService(s) s.Channels = NewChannelsService(s) s.DefaultObjectAccessControls = NewDefaultObjectAccessControlsService(s) + s.ManagedFolders = NewManagedFoldersService(s) s.Notifications = NewNotificationsService(s) s.ObjectAccessControls = NewObjectAccessControlsService(s) s.Objects = NewObjectsService(s) + s.Operations = NewOperationsService(s) s.Projects = NewProjectsService(s) return s, nil } @@ -157,6 +174,8 @@ type Service struct { BasePath string // API endpoint base URL UserAgent string // optional additional User-Agent fragment + AnywhereCache *AnywhereCacheService + BucketAccessControls *BucketAccessControlsService Buckets *BucketsService @@ -165,12 +184,16 @@ type Service struct { DefaultObjectAccessControls *DefaultObjectAccessControlsService + ManagedFolders *ManagedFoldersService + Notifications *NotificationsService ObjectAccessControls *ObjectAccessControlsService Objects *ObjectsService + Operations *OperationsService + Projects *ProjectsService } @@ -181,6 +204,15 @@ func (s *Service) userAgent() string { return googleapi.UserAgent + " " + s.UserAgent } +func NewAnywhereCacheService(s *Service) *AnywhereCacheService { + rs := &AnywhereCacheService{s: s} + return rs +} + +type AnywhereCacheService struct { + s *Service +} + func NewBucketAccessControlsService(s *Service) *BucketAccessControlsService { rs := &BucketAccessControlsService{s: s} return rs @@ -217,6 +249,15 @@ type DefaultObjectAccessControlsService struct { s *Service } +func NewManagedFoldersService(s *Service) *ManagedFoldersService { + rs := &ManagedFoldersService{s: s} + return rs +} + +type ManagedFoldersService struct { + s *Service +} + func NewNotificationsService(s *Service) *NotificationsService { rs := &NotificationsService{s: s} return rs @@ -244,6 +285,15 @@ type ObjectsService struct { s *Service } +func NewOperationsService(s *Service) *OperationsService { + rs := &OperationsService{s: s} + return rs +} + +type OperationsService struct { + s *Service +} + func NewProjectsService(s *Service) *ProjectsService { rs := &ProjectsService{s: s} rs.HmacKeys = NewProjectsHmacKeysService(s) @@ -277,6 +327,115 @@ type ProjectsServiceAccountService struct { s *Service } +// AnywhereCache: An Anywhere Cache instance. +type AnywhereCache struct { + // AdmissionPolicy: The cache-level entry admission policy. + AdmissionPolicy string `json:"admissionPolicy,omitempty"` + + // AnywhereCacheId: The ID of the Anywhere cache instance. + AnywhereCacheId string `json:"anywhereCacheId,omitempty"` + + // Bucket: The name of the bucket containing this cache instance. + Bucket string `json:"bucket,omitempty"` + + // CreateTime: The creation time of the cache instance in RFC 3339 + // format. + CreateTime string `json:"createTime,omitempty"` + + // Id: The ID of the resource, including the project number, bucket name + // and anywhere cache ID. + Id string `json:"id,omitempty"` + + // Kind: The kind of item this is. For Anywhere Cache, this is always + // storage#anywhereCache. + Kind string `json:"kind,omitempty"` + + // PendingUpdate: True if the cache instance has an active Update + // long-running operation. 
+ PendingUpdate bool `json:"pendingUpdate,omitempty"` + + // SelfLink: The link to this cache instance. + SelfLink string `json:"selfLink,omitempty"` + + // State: The current state of the cache instance. + State string `json:"state,omitempty"` + + // Ttl: The TTL of all cache entries in whole seconds. e.g., "7200s". + Ttl string `json:"ttl,omitempty"` + + // UpdateTime: The modification time of the cache instance metadata in + // RFC 3339 format. + UpdateTime string `json:"updateTime,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "AdmissionPolicy") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AdmissionPolicy") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *AnywhereCache) MarshalJSON() ([]byte, error) { + type NoMethod AnywhereCache + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// AnywhereCaches: A list of Anywhere Caches. +type AnywhereCaches struct { + // Items: The list of items. + Items []*AnywhereCache `json:"items,omitempty"` + + // Kind: The kind of item this is. For lists of Anywhere Caches, this is + // always storage#anywhereCaches. + Kind string `json:"kind,omitempty"` + + // NextPageToken: The continuation token, used to page through large + // result sets. Provide this value in a subsequent request to return the + // next page of results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Items") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Items") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *AnywhereCaches) MarshalJSON() ([]byte, error) { + type NoMethod AnywhereCaches + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // Bucket: A bucket. 
type Bucket struct { // Acl: Access controls on the bucket. @@ -359,6 +518,9 @@ type Bucket struct { // Name: The name of the bucket. Name string `json:"name,omitempty"` + // ObjectRetention: The bucket's object retention config. + ObjectRetention *BucketObjectRetention `json:"objectRetention,omitempty"` + // Owner: The owner of the bucket. This is always the project team's // owner group. Owner *BucketOwner `json:"owner,omitempty"` @@ -389,6 +551,11 @@ type Bucket struct { // SelfLink: The URI of this bucket. SelfLink string `json:"selfLink,omitempty"` + // SoftDeletePolicy: The bucket's soft delete policy, which defines the + // period of time that soft-deleted objects will be retained, and cannot + // be permanently deleted. + SoftDeletePolicy *BucketSoftDeletePolicy `json:"softDeletePolicy,omitempty"` + // StorageClass: The bucket's default storage class, used whenever no // storageClass is specified for a newly-created object. This defines // how objects in the bucket are stored and determines the SLA and the @@ -444,6 +611,16 @@ type BucketAutoclass struct { // Enabled: Whether or not Autoclass is enabled on this bucket Enabled bool `json:"enabled,omitempty"` + // TerminalStorageClass: The storage class that objects in the bucket + // eventually transition to if they are not read for a certain length of + // time. Valid values are NEARLINE and ARCHIVE. + TerminalStorageClass string `json:"terminalStorageClass,omitempty"` + + // TerminalStorageClassUpdateTime: A date and time in RFC 3339 format + // representing the time of the most recent update to + // "terminalStorageClass". + TerminalStorageClassUpdateTime string `json:"terminalStorageClassUpdateTime,omitempty"` + // ToggleTime: A date and time in RFC 3339 format representing the // instant at which "enabled" was last toggled. ToggleTime string `json:"toggleTime,omitempty"` @@ -946,6 +1123,34 @@ func (s *BucketLogging) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// BucketObjectRetention: The bucket's object retention config. +type BucketObjectRetention struct { + // Mode: The bucket's object retention mode. Can be Enabled. + Mode string `json:"mode,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Mode") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Mode") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BucketObjectRetention) MarshalJSON() ([]byte, error) { + type NoMethod BucketObjectRetention + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // BucketOwner: The owner of the bucket. This is always the project // team's owner group. 
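// ---------------------------------------------------------------------------
// Editorial note, not part of the generated diff: the Bucket hunks above add
// ObjectRetention, SoftDeletePolicy and Autoclass.TerminalStorageClass. A
// minimal caller-side sketch of filling these fields is shown below. It
// assumes this file is the google.golang.org/api/storage/v1 package
// ("storage"); the bucket name and retention duration are hypothetical, and
// the value would be passed to a bucket insert/patch call defined elsewhere
// in this file.
//
//	b := &storage.Bucket{
//		Name: "example-bucket",
//		// Keep soft-deleted objects restorable for 7 days (604800 seconds).
//		SoftDeletePolicy: &storage.BucketSoftDeletePolicy{
//			RetentionDurationSeconds: 604800,
//		},
//		// Per the field docs above, the only retention mode is "Enabled".
//		ObjectRetention: &storage.BucketObjectRetention{Mode: "Enabled"},
//		Autoclass: &storage.BucketAutoclass{
//			Enabled:              true,
//			TerminalStorageClass: "ARCHIVE", // or "NEARLINE"
//		},
//	}
// ---------------------------------------------------------------------------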
type BucketOwner struct { @@ -1027,6 +1232,43 @@ func (s *BucketRetentionPolicy) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// BucketSoftDeletePolicy: The bucket's soft delete policy, which +// defines the period of time that soft-deleted objects will be +// retained, and cannot be permanently deleted. +type BucketSoftDeletePolicy struct { + // EffectiveTime: Server-determined value that indicates the time from + // which the policy, or one with a greater retention, was effective. + // This value is in RFC 3339 format. + EffectiveTime string `json:"effectiveTime,omitempty"` + + // RetentionDurationSeconds: The duration in seconds that soft-deleted + // objects in the bucket will be retained and cannot be permanently + // deleted. + RetentionDurationSeconds int64 `json:"retentionDurationSeconds,omitempty,string"` + + // ForceSendFields is a list of field names (e.g. "EffectiveTime") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "EffectiveTime") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BucketSoftDeletePolicy) MarshalJSON() ([]byte, error) { + type NoMethod BucketSoftDeletePolicy + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // BucketVersioning: The bucket's versioning configuration. type BucketVersioning struct { // Enabled: While set to true, versioning is fully enabled for this @@ -1282,6 +1524,59 @@ func (s *Buckets) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// BulkRestoreObjectsRequest: A bulk restore objects request. +type BulkRestoreObjectsRequest struct { + // AllowOverwrite: If false (default), the restore will not overwrite + // live objects with the same name at the destination. This means some + // deleted objects may be skipped. If true, live objects will be + // overwritten resulting in a noncurrent object (if versioning is + // enabled). If versioning is not enabled, overwriting the object will + // result in a soft-deleted object. In either case, if a noncurrent + // object already exists with the same name, a live version can be + // written without issue. + AllowOverwrite bool `json:"allowOverwrite,omitempty"` + + // CopySourceAcl: If true, copies the source object's ACL; otherwise, + // uses the bucket's default object ACL. The default is false. + CopySourceAcl bool `json:"copySourceAcl,omitempty"` + + // MatchGlobs: Restores only the objects matching any of the specified + // glob(s). If this parameter is not specified, all objects will be + // restored within the specified time range. + MatchGlobs []string `json:"matchGlobs,omitempty"` + + // SoftDeletedAfterTime: Restores only the objects that were + // soft-deleted after this time. 
+ SoftDeletedAfterTime string `json:"softDeletedAfterTime,omitempty"` + + // SoftDeletedBeforeTime: Restores only the objects that were + // soft-deleted before this time. + SoftDeletedBeforeTime string `json:"softDeletedBeforeTime,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AllowOverwrite") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AllowOverwrite") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *BulkRestoreObjectsRequest) MarshalJSON() ([]byte, error) { + type NoMethod BulkRestoreObjectsRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // Channel: An notification channel used to watch for resource changes. type Channel struct { // Address: The address where notifications are delivered for this @@ -1498,6 +1793,150 @@ func (s *Expr) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// GoogleLongrunningListOperationsResponse: The response message for +// storage.buckets.operations.list. +type GoogleLongrunningListOperationsResponse struct { + // NextPageToken: The continuation token, used to page through large + // result sets. Provide this value in a subsequent request to return the + // next page of results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // Operations: A list of operations that matches the specified filter in + // the request. + Operations []*GoogleLongrunningOperation `json:"operations,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "NextPageToken") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "NextPageToken") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *GoogleLongrunningListOperationsResponse) MarshalJSON() ([]byte, error) { + type NoMethod GoogleLongrunningListOperationsResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleLongrunningOperation: This resource represents a long-running +// operation that is the result of a network API call. +type GoogleLongrunningOperation struct { + // Done: If the value is "false", it means the operation is still in + // progress. If "true", the operation is completed, and either "error" + // or "response" is available. + Done bool `json:"done,omitempty"` + + // Error: The error result of the operation in case of failure or + // cancellation. + Error *GoogleRpcStatus `json:"error,omitempty"` + + // Metadata: Service-specific metadata associated with the operation. It + // typically contains progress information and common metadata such as + // create time. Some services might not provide such metadata. Any + // method that returns a long-running operation should document the + // metadata type, if any. + Metadata googleapi.RawMessage `json:"metadata,omitempty"` + + // Name: The server-assigned name, which is only unique within the same + // service that originally returns it. If you use the default HTTP + // mapping, the "name" should be a resource name ending with + // "operations/{operationId}". + Name string `json:"name,omitempty"` + + // Response: The normal response of the operation in case of success. If + // the original method returns no data on success, such as "Delete", the + // response is google.protobuf.Empty. If the original method is standard + // Get/Create/Update, the response should be the resource. For other + // methods, the response should have the type "XxxResponse", where "Xxx" + // is the original method name. For example, if the original method name + // is "TakeSnapshot()", the inferred response type is + // "TakeSnapshotResponse". + Response googleapi.RawMessage `json:"response,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Done") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Done") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleLongrunningOperation) MarshalJSON() ([]byte, error) { + type NoMethod GoogleLongrunningOperation + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleRpcStatus: The "Status" type defines a logical error model that +// is suitable for different programming environments, including REST +// APIs and RPC APIs. It is used by gRPC (https://github.com/grpc). 
Each +// "Status" message contains three pieces of data: error code, error +// message, and error details. You can find out more about this error +// model and how to work with it in the API Design Guide +// (https://cloud.google.com/apis/design/errors). +type GoogleRpcStatus struct { + // Code: The status code, which should be an enum value of + // google.rpc.Code. + Code int64 `json:"code,omitempty"` + + // Details: A list of messages that carry the error details. There is a + // common set of message types for APIs to use. + Details []googleapi.RawMessage `json:"details,omitempty"` + + // Message: A developer-facing error message, which should be in + // English. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleRpcStatus) MarshalJSON() ([]byte, error) { + type NoMethod GoogleRpcStatus + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // HmacKey: JSON template to produce a JSON-style HMAC Key resource for // Create responses. type HmacKey struct { @@ -1645,11 +2084,111 @@ func (s *HmacKeysMetadata) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Notification: A subscription to receive Google PubSub notifications. -type Notification struct { - // CustomAttributes: An optional list of additional attributes to attach - // to each Cloud PubSub message published for this notification - // subscription. +// ManagedFolder: A managed folder. +type ManagedFolder struct { + // Bucket: The name of the bucket containing this managed folder. + Bucket string `json:"bucket,omitempty"` + + // CreateTime: The creation time of the managed folder in RFC 3339 + // format. + CreateTime string `json:"createTime,omitempty"` + + // Id: The ID of the managed folder, including the bucket name and + // managed folder name. + Id string `json:"id,omitempty"` + + // Kind: The kind of item this is. For managed folders, this is always + // storage#managedFolder. + Kind string `json:"kind,omitempty"` + + // Metageneration: The version of the metadata for this managed folder. + // Used for preconditions and for detecting changes in metadata. + Metageneration int64 `json:"metageneration,omitempty,string"` + + // Name: The name of the managed folder. Required if not specified by + // URL parameter. + Name string `json:"name,omitempty"` + + // SelfLink: The link to this managed folder. + SelfLink string `json:"selfLink,omitempty"` + + // UpdateTime: The last update time of the managed folder metadata in + // RFC 3339 format. 
+ UpdateTime string `json:"updateTime,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Bucket") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Bucket") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ManagedFolder) MarshalJSON() ([]byte, error) { + type NoMethod ManagedFolder + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ManagedFolders: A list of managed folders. +type ManagedFolders struct { + // Items: The list of items. + Items []*ManagedFolder `json:"items,omitempty"` + + // Kind: The kind of item this is. For lists of managed folders, this is + // always storage#managedFolders. + Kind string `json:"kind,omitempty"` + + // NextPageToken: The continuation token, used to page through large + // result sets. Provide this value in a subsequent request to return the + // next page of results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Items") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Items") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ManagedFolders) MarshalJSON() ([]byte, error) { + type NoMethod ManagedFolders + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Notification: A subscription to receive Google PubSub notifications. +type Notification struct { + // CustomAttributes: An optional list of additional attributes to attach + // to each Cloud PubSub message published for this notification + // subscription. CustomAttributes map[string]string `json:"custom_attributes,omitempty"` // Etag: HTTP 1.1 Entity tag for this subscription notification. @@ -1812,6 +2351,12 @@ type Object struct { // versioning. 
Generation int64 `json:"generation,omitempty,string"` + // HardDeleteTime: This is the time (in the future) when the + // soft-deleted object will no longer be restorable. It is equal to the + // soft delete time plus the current soft delete retention duration of + // the bucket. + HardDeleteTime string `json:"hardDeleteTime,omitempty"` + // Id: The ID of the object, including the bucket name, object name, and // generation number. Id string `json:"id,omitempty"` @@ -1849,6 +2394,9 @@ type Object struct { // the object. Owner *ObjectOwner `json:"owner,omitempty"` + // Retention: A collection of object level retention parameters. + Retention *ObjectRetention `json:"retention,omitempty"` + // RetentionExpirationTime: A server-determined value that specifies the // earliest time that the object's retention period expires. This value // is in RFC 3339 format. Note 1: This field is not provided for objects @@ -1864,6 +2412,10 @@ type Object struct { // Size: Content-Length of the data in bytes. Size uint64 `json:"size,omitempty,string"` + // SoftDeleteTime: The time at which the object became soft-deleted in + // RFC 3339 format. + SoftDeleteTime string `json:"softDeleteTime,omitempty"` + // StorageClass: Storage class of the object. StorageClass string `json:"storageClass,omitempty"` @@ -1879,9 +2431,9 @@ type Object struct { // TimeCreated: The creation time of the object in RFC 3339 format. TimeCreated string `json:"timeCreated,omitempty"` - // TimeDeleted: The deletion time of the object in RFC 3339 format. Will - // be returned if and only if this version of the object has been - // deleted. + // TimeDeleted: The time at which the object became noncurrent in RFC + // 3339 format. Will be returned if and only if this version of the + // object has been deleted. TimeDeleted string `json:"timeDeleted,omitempty"` // TimeStorageClassUpdated: The time at which the object's storage class @@ -1990,6 +2542,39 @@ func (s *ObjectOwner) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// ObjectRetention: A collection of object level retention parameters. +type ObjectRetention struct { + // Mode: The bucket's object retention mode, can only be Unlocked or + // Locked. + Mode string `json:"mode,omitempty"` + + // RetainUntilTime: A time in RFC 3339 format until which object + // retention protects this object. + RetainUntilTime string `json:"retainUntilTime,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Mode") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Mode") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *ObjectRetention) MarshalJSON() ([]byte, error) { + type NoMethod ObjectRetention + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // ObjectAccessControl: An access-control entry. type ObjectAccessControl struct { // Bucket: The name of the bucket. @@ -2187,7 +2772,7 @@ func (s *Objects) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Policy: A bucket/object IAM policy. +// Policy: A bucket/object/managedFolder IAM policy. type Policy struct { // Bindings: An association between a role, which comes with a set of // permissions, and members who may assume that role. @@ -2201,8 +2786,9 @@ type Policy struct { Kind string `json:"kind,omitempty"` // ResourceId: The ID of the resource to which this policy belongs. Will - // be of the form projects/_/buckets/bucket for buckets, and - // projects/_/buckets/bucket/objects/object for objects. A specific + // be of the form projects/_/buckets/bucket for buckets, + // projects/_/buckets/bucket/objects/object for objects, and + // projects/_/buckets/bucket/managedFolders/managedFolder. A specific // generation may be specified by appending #generationNumber to the end // of the object name, e.g. // projects/_/buckets/my-bucket/objects/data.txt#17. The current @@ -2419,15 +3005,15 @@ func (s *ServiceAccount) MarshalJSON() ([]byte, error) { } // TestIamPermissionsResponse: A -// storage.(buckets|objects).testIamPermissions response. +// storage.(buckets|objects|managedFolders).testIamPermissions response. type TestIamPermissionsResponse struct { // Kind: The kind of item this is. Kind string `json:"kind,omitempty"` // Permissions: The permissions held by the caller. Permissions are // always of the format storage.resource.capability, where resource is - // one of buckets or objects. The supported permissions are as follows: - // + // one of buckets, objects, or managedFolders. The supported permissions + // are as follows: // - storage.buckets.delete — Delete bucket. // - storage.buckets.get — Read bucket metadata. // - storage.buckets.getIamPolicy — Read bucket IAM policy. @@ -2442,6 +3028,14 @@ type TestIamPermissionsResponse struct { // - storage.objects.list — List objects. // - storage.objects.setIamPolicy — Update object IAM policy. // - storage.objects.update — Update object metadata. + // - storage.managedFolders.delete — Delete managed folder. + // - storage.managedFolders.get — Read managed folder metadata. + // - storage.managedFolders.getIamPolicy — Read managed folder IAM + // policy. + // - storage.managedFolders.create — Create managed folder. + // - storage.managedFolders.list — List managed folders. + // - storage.managedFolders.setIamPolicy — Update managed folder IAM + // policy. 
Permissions []string `json:"permissions,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -2471,42 +3065,32 @@ func (s *TestIamPermissionsResponse) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// method id "storage.bucketAccessControls.delete": +// method id "storage.anywhereCaches.disable": -type BucketAccessControlsDeleteCall struct { - s *Service - bucket string - entity string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type AnywhereCacheDisableCall struct { + s *Service + bucket string + anywhereCacheId string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Permanently deletes the ACL entry for the specified entity on -// the specified bucket. +// Disable: Disables an Anywhere Cache instance. // -// - bucket: Name of a bucket. -// - entity: The entity holding the permission. Can be user-userId, -// user-emailAddress, group-groupId, group-emailAddress, allUsers, or -// allAuthenticatedUsers. -func (r *BucketAccessControlsService) Delete(bucket string, entity string) *BucketAccessControlsDeleteCall { - c := &BucketAccessControlsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - anywhereCacheId: The ID of requested Anywhere Cache instance. +// - bucket: Name of the partent bucket. +func (r *AnywhereCacheService) Disable(bucket string, anywhereCacheId string) *AnywhereCacheDisableCall { + c := &AnywhereCacheDisableCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket - c.entity = entity - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *BucketAccessControlsDeleteCall) UserProject(userProject string) *BucketAccessControlsDeleteCall { - c.urlParams_.Set("userProject", userProject) + c.anywhereCacheId = anywhereCacheId return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BucketAccessControlsDeleteCall) Fields(s ...googleapi.Field) *BucketAccessControlsDeleteCall { +func (c *AnywhereCacheDisableCall) Fields(s ...googleapi.Field) *AnywhereCacheDisableCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -2514,21 +3098,21 @@ func (c *BucketAccessControlsDeleteCall) Fields(s ...googleapi.Field) *BucketAcc // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BucketAccessControlsDeleteCall) Context(ctx context.Context) *BucketAccessControlsDeleteCall { +func (c *AnywhereCacheDisableCall) Context(ctx context.Context) *AnywhereCacheDisableCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *BucketAccessControlsDeleteCall) Header() http.Header { +func (c *AnywhereCacheDisableCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BucketAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *AnywhereCacheDisableCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -2538,105 +3122,119 @@ func (c *BucketAccessControlsDeleteCall) doRequest(alt string) (*http.Response, var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/anywhereCaches/{anywhereCacheId}/disable") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - "entity": c.entity, + "bucket": c.bucket, + "anywhereCacheId": c.anywhereCacheId, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "storage.bucketAccessControls.delete" call. -func (c *BucketAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) error { +// Do executes the "storage.anywhereCaches.disable" call. +// Exactly one of *AnywhereCache or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *AnywhereCache.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *AnywhereCacheDisableCall) Do(opts ...googleapi.CallOption) (*AnywhereCache, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } if err != nil { - return err + return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return gensupport.WrapError(err) + return nil, gensupport.WrapError(err) } - return nil + ret := &AnywhereCache{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil // { - // "description": "Permanently deletes the ACL entry for the specified entity on the specified bucket.", - // "httpMethod": "DELETE", - // "id": "storage.bucketAccessControls.delete", + // "description": "Disables an Anywhere Cache instance.", + // "httpMethod": "POST", + // "id": "storage.anywhereCaches.disable", // "parameterOrder": [ // "bucket", - // "entity" + // "anywhereCacheId" // ], // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", + // "anywhereCacheId": { + // "description": "The ID of requested Anywhere Cache instance.", // "location": "path", // "required": true, // "type": "string" // }, - // "entity": { - // "description": "The entity holding the permission. 
Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + // "bucket": { + // "description": "Name of the partent bucket", // "location": "path", // "required": true, // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" // } // }, - // "path": "b/{bucket}/acl/{entity}", + // "path": "b/{bucket}/anywhereCaches/{anywhereCacheId}/disable", + // "response": { + // "$ref": "AnywhereCache" + // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" // ] // } } -// method id "storage.bucketAccessControls.get": +// method id "storage.anywhereCaches.get": -type BucketAccessControlsGetCall struct { - s *Service - bucket string - entity string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type AnywhereCacheGetCall struct { + s *Service + bucket string + anywhereCacheId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Get: Returns the ACL entry for the specified entity on the specified -// bucket. +// Get: Returns the metadata of an Anywhere Cache instance. // -// - bucket: Name of a bucket. -// - entity: The entity holding the permission. Can be user-userId, -// user-emailAddress, group-groupId, group-emailAddress, allUsers, or -// allAuthenticatedUsers. -func (r *BucketAccessControlsService) Get(bucket string, entity string) *BucketAccessControlsGetCall { - c := &BucketAccessControlsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - anywhereCacheId: The ID of requested Anywhere Cache instance. +// - bucket: Name of the partent bucket. +func (r *AnywhereCacheService) Get(bucket string, anywhereCacheId string) *AnywhereCacheGetCall { + c := &AnywhereCacheGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket - c.entity = entity - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *BucketAccessControlsGetCall) UserProject(userProject string) *BucketAccessControlsGetCall { - c.urlParams_.Set("userProject", userProject) + c.anywhereCacheId = anywhereCacheId return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BucketAccessControlsGetCall) Fields(s ...googleapi.Field) *BucketAccessControlsGetCall { +func (c *AnywhereCacheGetCall) Fields(s ...googleapi.Field) *AnywhereCacheGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -2646,7 +3244,7 @@ func (c *BucketAccessControlsGetCall) Fields(s ...googleapi.Field) *BucketAccess // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. 
-func (c *BucketAccessControlsGetCall) IfNoneMatch(entityTag string) *BucketAccessControlsGetCall { +func (c *AnywhereCacheGetCall) IfNoneMatch(entityTag string) *AnywhereCacheGetCall { c.ifNoneMatch_ = entityTag return c } @@ -2654,21 +3252,21 @@ func (c *BucketAccessControlsGetCall) IfNoneMatch(entityTag string) *BucketAcces // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BucketAccessControlsGetCall) Context(ctx context.Context) *BucketAccessControlsGetCall { +func (c *AnywhereCacheGetCall) Context(ctx context.Context) *AnywhereCacheGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BucketAccessControlsGetCall) Header() http.Header { +func (c *AnywhereCacheGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BucketAccessControlsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *AnywhereCacheGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -2681,7 +3279,7 @@ func (c *BucketAccessControlsGetCall) doRequest(alt string) (*http.Response, err var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/anywhereCaches/{anywhereCacheId}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -2689,20 +3287,20 @@ func (c *BucketAccessControlsGetCall) doRequest(alt string) (*http.Response, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - "entity": c.entity, + "bucket": c.bucket, + "anywhereCacheId": c.anywhereCacheId, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "storage.bucketAccessControls.get" call. -// Exactly one of *BucketAccessControl or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *BucketAccessControl.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "storage.anywhereCaches.get" call. +// Exactly one of *AnywhereCache or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *AnywhereCache.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *BucketAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*BucketAccessControl, error) { +func (c *AnywhereCacheGetCall) Do(opts ...googleapi.CallOption) (*AnywhereCache, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -2721,7 +3319,7 @@ func (c *BucketAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*BucketA if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &BucketAccessControl{ + ret := &AnywhereCache{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -2733,76 +3331,67 @@ func (c *BucketAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*BucketA } return ret, nil // { - // "description": "Returns the ACL entry for the specified entity on the specified bucket.", + // "description": "Returns the metadata of an Anywhere Cache instance.", // "httpMethod": "GET", - // "id": "storage.bucketAccessControls.get", + // "id": "storage.anywhereCaches.get", // "parameterOrder": [ // "bucket", - // "entity" + // "anywhereCacheId" // ], // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", + // "anywhereCacheId": { + // "description": "The ID of requested Anywhere Cache instance.", // "location": "path", // "required": true, // "type": "string" // }, - // "entity": { - // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + // "bucket": { + // "description": "Name of the partent bucket", // "location": "path", // "required": true, // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" // } // }, - // "path": "b/{bucket}/acl/{entity}", + // "path": "b/{bucket}/anywhereCaches/{anywhereCacheId}", // "response": { - // "$ref": "BucketAccessControl" + // "$ref": "AnywhereCache" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ] + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] // } } -// method id "storage.bucketAccessControls.insert": +// method id "storage.anywhereCaches.insert": -type BucketAccessControlsInsertCall struct { - s *Service - bucket string - bucketaccesscontrol *BucketAccessControl - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type AnywhereCacheInsertCall struct { + s *Service + bucket string + anywherecache *AnywhereCache + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a new ACL entry on the specified bucket. +// Insert: Creates an Anywhere Cache instance. // -// - bucket: Name of a bucket. -func (r *BucketAccessControlsService) Insert(bucket string, bucketaccesscontrol *BucketAccessControl) *BucketAccessControlsInsertCall { - c := &BucketAccessControlsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - bucket: Name of the partent bucket. +func (r *AnywhereCacheService) Insert(bucket string, anywherecache *AnywhereCache) *AnywhereCacheInsertCall { + c := &AnywhereCacheInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket - c.bucketaccesscontrol = bucketaccesscontrol - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. 
Required for Requester Pays buckets. -func (c *BucketAccessControlsInsertCall) UserProject(userProject string) *BucketAccessControlsInsertCall { - c.urlParams_.Set("userProject", userProject) + c.anywherecache = anywherecache return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BucketAccessControlsInsertCall) Fields(s ...googleapi.Field) *BucketAccessControlsInsertCall { +func (c *AnywhereCacheInsertCall) Fields(s ...googleapi.Field) *AnywhereCacheInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -2810,21 +3399,21 @@ func (c *BucketAccessControlsInsertCall) Fields(s ...googleapi.Field) *BucketAcc // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BucketAccessControlsInsertCall) Context(ctx context.Context) *BucketAccessControlsInsertCall { +func (c *AnywhereCacheInsertCall) Context(ctx context.Context) *AnywhereCacheInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BucketAccessControlsInsertCall) Header() http.Header { +func (c *AnywhereCacheInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BucketAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *AnywhereCacheInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -2832,14 +3421,14 @@ func (c *BucketAccessControlsInsertCall) doRequest(alt string) (*http.Response, } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.anywherecache) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/anywhereCaches") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -2852,14 +3441,14 @@ func (c *BucketAccessControlsInsertCall) doRequest(alt string) (*http.Response, return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "storage.bucketAccessControls.insert" call. -// Exactly one of *BucketAccessControl or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *BucketAccessControl.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "storage.anywhereCaches.insert" call. +// Exactly one of *GoogleLongrunningOperation or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *GoogleLongrunningOperation.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. 
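// ---------------------------------------------------------------------------
// Editorial note, not part of the generated diff: unlike most calls in this
// file, AnywhereCacheInsertCall.Do returns a *GoogleLongrunningOperation
// rather than the cache resource. A hedged sketch of creating a cache and
// inspecting that operation follows; it assumes the caller already holds an
// *AnywhereCacheService (the field exposing it on *Service is outside this
// hunk) and that "fmt" and "log" are imported. The bucket name and TTL are
// hypothetical.
//
//	func createCache(ac *storage.AnywhereCacheService, bucket string) error {
//		op, err := ac.Insert(bucket, &storage.AnywhereCache{Ttl: "86400s"}).Do()
//		if err != nil {
//			return err
//		}
//		if op.Error != nil {
//			return fmt.Errorf("cache creation failed: %s", op.Error.Message)
//		}
//		if !op.Done {
//			log.Printf("cache creation still running: operation %q", op.Name)
//		}
//		return nil
//	}
// ---------------------------------------------------------------------------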
-func (c *BucketAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*BucketAccessControl, error) { +func (c *AnywhereCacheInsertCall) Do(opts ...googleapi.CallOption) (*GoogleLongrunningOperation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -2878,7 +3467,7 @@ func (c *BucketAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*Buck if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &BucketAccessControl{ + ret := &GoogleLongrunningOperation{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -2890,43 +3479,39 @@ func (c *BucketAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*Buck } return ret, nil // { - // "description": "Creates a new ACL entry on the specified bucket.", + // "description": "Creates an Anywhere Cache instance.", // "httpMethod": "POST", - // "id": "storage.bucketAccessControls.insert", + // "id": "storage.anywhereCaches.insert", // "parameterOrder": [ // "bucket" // ], // "parameters": { // "bucket": { - // "description": "Name of a bucket.", + // "description": "Name of the partent bucket", // "location": "path", // "required": true, // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" // } // }, - // "path": "b/{bucket}/acl", + // "path": "b/{bucket}/anywhereCaches", // "request": { - // "$ref": "BucketAccessControl" + // "$ref": "AnywhereCache" // }, // "response": { - // "$ref": "BucketAccessControl" + // "$ref": "GoogleLongrunningOperation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" // ] // } } -// method id "storage.bucketAccessControls.list": +// method id "storage.anywhereCaches.list": -type BucketAccessControlsListCall struct { +type AnywhereCacheListCall struct { s *Service bucket string urlParams_ gensupport.URLParams @@ -2935,26 +3520,35 @@ type BucketAccessControlsListCall struct { header_ http.Header } -// List: Retrieves ACL entries on the specified bucket. +// List: Returns a list of Anywhere Cache instances of the bucket +// matching the criteria. // -// - bucket: Name of a bucket. -func (r *BucketAccessControlsService) List(bucket string) *BucketAccessControlsListCall { - c := &BucketAccessControlsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - bucket: Name of the partent bucket. +func (r *AnywhereCacheService) List(bucket string) *AnywhereCacheListCall { + c := &AnywhereCacheListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket return c } -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *BucketAccessControlsListCall) UserProject(userProject string) *BucketAccessControlsListCall { - c.urlParams_.Set("userProject", userProject) +// PageSize sets the optional parameter "pageSize": Maximum number of +// items return in a single page of responses. Maximum 1000. 
+func (c *AnywhereCacheListCall) PageSize(pageSize int64) *AnywhereCacheListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": A +// previously-returned page token representing part of the larger set of +// results to view. +func (c *AnywhereCacheListCall) PageToken(pageToken string) *AnywhereCacheListCall { + c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BucketAccessControlsListCall) Fields(s ...googleapi.Field) *BucketAccessControlsListCall { +func (c *AnywhereCacheListCall) Fields(s ...googleapi.Field) *AnywhereCacheListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -2964,7 +3558,7 @@ func (c *BucketAccessControlsListCall) Fields(s ...googleapi.Field) *BucketAcces // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *BucketAccessControlsListCall) IfNoneMatch(entityTag string) *BucketAccessControlsListCall { +func (c *AnywhereCacheListCall) IfNoneMatch(entityTag string) *AnywhereCacheListCall { c.ifNoneMatch_ = entityTag return c } @@ -2972,21 +3566,21 @@ func (c *BucketAccessControlsListCall) IfNoneMatch(entityTag string) *BucketAcce // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BucketAccessControlsListCall) Context(ctx context.Context) *BucketAccessControlsListCall { +func (c *AnywhereCacheListCall) Context(ctx context.Context) *AnywhereCacheListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BucketAccessControlsListCall) Header() http.Header { +func (c *AnywhereCacheListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BucketAccessControlsListCall) doRequest(alt string) (*http.Response, error) { +func (c *AnywhereCacheListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -2999,7 +3593,7 @@ func (c *BucketAccessControlsListCall) doRequest(alt string) (*http.Response, er var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/anywhereCache") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -3012,14 +3606,14 @@ func (c *BucketAccessControlsListCall) doRequest(alt string) (*http.Response, er return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "storage.bucketAccessControls.list" call. -// Exactly one of *BucketAccessControls or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *BucketAccessControls.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "storage.anywhereCaches.list" call. +// Exactly one of *AnywhereCaches or error will be non-nil. Any non-2xx +// status code is an error. 
Response headers are in either +// *AnywhereCaches.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *BucketAccessControlsListCall) Do(opts ...googleapi.CallOption) (*BucketAccessControls, error) { +func (c *AnywhereCacheListCall) Do(opts ...googleapi.CallOption) (*AnywhereCaches, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -3038,7 +3632,7 @@ func (c *BucketAccessControlsListCall) Do(opts ...googleapi.CallOption) (*Bucket if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &BucketAccessControls{ + ret := &AnywhereCaches{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -3050,74 +3644,94 @@ func (c *BucketAccessControlsListCall) Do(opts ...googleapi.CallOption) (*Bucket } return ret, nil // { - // "description": "Retrieves ACL entries on the specified bucket.", + // "description": "Returns a list of Anywhere Cache instances of the bucket matching the criteria.", // "httpMethod": "GET", - // "id": "storage.bucketAccessControls.list", + // "id": "storage.anywhereCaches.list", // "parameterOrder": [ // "bucket" // ], // "parameters": { // "bucket": { - // "description": "Name of a bucket.", + // "description": "Name of the partent bucket", // "location": "path", // "required": true, // "type": "string" // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "pageSize": { + // "description": "Maximum number of items return in a single page of responses. Maximum 1000.", + // "format": "int32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "A previously-returned page token representing part of the larger set of results to view.", // "location": "query", // "type": "string" // } // }, - // "path": "b/{bucket}/acl", + // "path": "b/{bucket}/anywhereCache", // "response": { - // "$ref": "BucketAccessControls" + // "$ref": "AnywhereCaches" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" // ] // } } -// method id "storage.bucketAccessControls.patch": +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
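// ---------------------------------------------------------------------------
// Editorial note, not part of the generated diff: the Pages helper defined
// just below drives the pageToken/nextPageToken loop of the List call for the
// caller. A minimal, illustrative use (bucket name hypothetical, ctx and an
// *AnywhereCacheService value assumed to be in hand, "log" imported):
//
//	err := ac.List("example-bucket").PageSize(100).
//		Pages(ctx, func(page *storage.AnywhereCaches) error {
//			for _, cache := range page.Items {
//				log.Printf("%s: state=%s ttl=%s", cache.SelfLink, cache.State, cache.Ttl)
//			}
//			return nil
//		})
// ---------------------------------------------------------------------------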
+func (c *AnywhereCacheListCall) Pages(ctx context.Context, f func(*AnywhereCaches) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} -type BucketAccessControlsPatchCall struct { - s *Service - bucket string - entity string - bucketaccesscontrol *BucketAccessControl - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// method id "storage.anywhereCaches.pause": + +type AnywhereCachePauseCall struct { + s *Service + bucket string + anywhereCacheId string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Patch: Patches an ACL entry on the specified bucket. +// Pause: Pauses an Anywhere Cache instance. // -// - bucket: Name of a bucket. -// - entity: The entity holding the permission. Can be user-userId, -// user-emailAddress, group-groupId, group-emailAddress, allUsers, or -// allAuthenticatedUsers. -func (r *BucketAccessControlsService) Patch(bucket string, entity string, bucketaccesscontrol *BucketAccessControl) *BucketAccessControlsPatchCall { - c := &BucketAccessControlsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - anywhereCacheId: The ID of requested Anywhere Cache instance. +// - bucket: Name of the partent bucket. +func (r *AnywhereCacheService) Pause(bucket string, anywhereCacheId string) *AnywhereCachePauseCall { + c := &AnywhereCachePauseCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket - c.entity = entity - c.bucketaccesscontrol = bucketaccesscontrol - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *BucketAccessControlsPatchCall) UserProject(userProject string) *BucketAccessControlsPatchCall { - c.urlParams_.Set("userProject", userProject) + c.anywhereCacheId = anywhereCacheId return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BucketAccessControlsPatchCall) Fields(s ...googleapi.Field) *BucketAccessControlsPatchCall { +func (c *AnywhereCachePauseCall) Fields(s ...googleapi.Field) *AnywhereCachePauseCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -3125,21 +3739,21 @@ func (c *BucketAccessControlsPatchCall) Fields(s ...googleapi.Field) *BucketAcce // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BucketAccessControlsPatchCall) Context(ctx context.Context) *BucketAccessControlsPatchCall { +func (c *AnywhereCachePauseCall) Context(ctx context.Context) *AnywhereCachePauseCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
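// ---------------------------------------------------------------------------
// Editorial note, not part of the generated diff: in contrast to Insert, the
// Pause and Disable calls return the updated *AnywhereCache directly. A brief
// sketch under the same assumptions as above (identifiers hypothetical):
//
//	cache, err := ac.Pause("example-bucket", "example-cache-id").Do()
//	if err != nil {
//		return err
//	}
//	log.Printf("cache %s is now %s", cache.SelfLink, cache.State)
//	// A disabled cache can later be re-enabled with the Resume call that the
//	// following hunk documents.
//	if _, err := ac.Disable("example-bucket", "example-cache-id").Do(); err != nil {
//		return err
//	}
// ---------------------------------------------------------------------------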
-func (c *BucketAccessControlsPatchCall) Header() http.Header { +func (c *AnywhereCachePauseCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BucketAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *AnywhereCachePauseCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -3147,35 +3761,30 @@ func (c *BucketAccessControlsPatchCall) doRequest(alt string) (*http.Response, e } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/anywhereCaches/{anywhereCacheId}/pause") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - "entity": c.entity, + "bucket": c.bucket, + "anywhereCacheId": c.anywhereCacheId, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "storage.bucketAccessControls.patch" call. -// Exactly one of *BucketAccessControl or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *BucketAccessControl.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "storage.anywhereCaches.pause" call. +// Exactly one of *AnywhereCache or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *AnywhereCache.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *BucketAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*BucketAccessControl, error) { +func (c *AnywhereCachePauseCall) Do(opts ...googleapi.CallOption) (*AnywhereCache, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -3194,7 +3803,7 @@ func (c *BucketAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*Bucke if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &BucketAccessControl{ + ret := &AnywhereCache{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -3206,84 +3815,66 @@ func (c *BucketAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*Bucke } return ret, nil // { - // "description": "Patches an ACL entry on the specified bucket.", - // "httpMethod": "PATCH", - // "id": "storage.bucketAccessControls.patch", + // "description": "Pauses an Anywhere Cache instance.", + // "httpMethod": "POST", + // "id": "storage.anywhereCaches.pause", // "parameterOrder": [ // "bucket", - // "entity" + // "anywhereCacheId" // ], // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", + // "anywhereCacheId": { + // "description": "The ID of requested Anywhere Cache instance.", // "location": "path", // "required": true, // "type": "string" // }, - // "entity": { - // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + // "bucket": { + // "description": "Name of the partent bucket", // "location": "path", // "required": true, // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" // } // }, - // "path": "b/{bucket}/acl/{entity}", - // "request": { - // "$ref": "BucketAccessControl" - // }, + // "path": "b/{bucket}/anywhereCaches/{anywhereCacheId}/pause", // "response": { - // "$ref": "BucketAccessControl" + // "$ref": "AnywhereCache" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" // ] // } } -// method id "storage.bucketAccessControls.update": +// method id "storage.anywhereCaches.resume": -type BucketAccessControlsUpdateCall struct { - s *Service - bucket string - entity string - bucketaccesscontrol *BucketAccessControl - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type AnywhereCacheResumeCall struct { + s *Service + bucket string + anywhereCacheId string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Update: Updates an ACL entry on the specified bucket. +// Resume: Resumes a paused or disabled Anywhere Cache instance. // -// - bucket: Name of a bucket. -// - entity: The entity holding the permission. Can be user-userId, -// user-emailAddress, group-groupId, group-emailAddress, allUsers, or -// allAuthenticatedUsers. -func (r *BucketAccessControlsService) Update(bucket string, entity string, bucketaccesscontrol *BucketAccessControl) *BucketAccessControlsUpdateCall { - c := &BucketAccessControlsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - anywhereCacheId: The ID of requested Anywhere Cache instance. +// - bucket: Name of the partent bucket. 
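Pause above and Resume below are symmetric one-shot POST calls that hand back the updated AnywhereCache resource itself. A hedged fragment, continuing the earlier sketch (the State field is an assumption):

	// Illustrative fragment, reusing svc from the earlier sketch.
	cache, err := svc.AnywhereCaches.Pause("my-bucket", "my-cache-id").Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("state after pause:", cache.State) // State field assumed

	if _, err := svc.AnywhereCaches.Resume("my-bucket", "my-cache-id").Do(); err != nil {
		log.Fatal(err)
	}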
+func (r *AnywhereCacheService) Resume(bucket string, anywhereCacheId string) *AnywhereCacheResumeCall { + c := &AnywhereCacheResumeCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket - c.entity = entity - c.bucketaccesscontrol = bucketaccesscontrol - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *BucketAccessControlsUpdateCall) UserProject(userProject string) *BucketAccessControlsUpdateCall { - c.urlParams_.Set("userProject", userProject) + c.anywhereCacheId = anywhereCacheId return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BucketAccessControlsUpdateCall) Fields(s ...googleapi.Field) *BucketAccessControlsUpdateCall { +func (c *AnywhereCacheResumeCall) Fields(s ...googleapi.Field) *AnywhereCacheResumeCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -3291,21 +3882,21 @@ func (c *BucketAccessControlsUpdateCall) Fields(s ...googleapi.Field) *BucketAcc // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BucketAccessControlsUpdateCall) Context(ctx context.Context) *BucketAccessControlsUpdateCall { +func (c *AnywhereCacheResumeCall) Context(ctx context.Context) *AnywhereCacheResumeCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BucketAccessControlsUpdateCall) Header() http.Header { +func (c *AnywhereCacheResumeCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BucketAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) { +func (c *AnywhereCacheResumeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -3313,35 +3904,30 @@ func (c *BucketAccessControlsUpdateCall) doRequest(alt string) (*http.Response, } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/anywhereCaches/{anywhereCacheId}/resume") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PUT", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - "entity": c.entity, + "bucket": c.bucket, + "anywhereCacheId": c.anywhereCacheId, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "storage.bucketAccessControls.update" call. -// Exactly one of *BucketAccessControl or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *BucketAccessControl.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "storage.anywhereCaches.resume" call. 
+// Exactly one of *AnywhereCache or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *AnywhereCache.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *BucketAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*BucketAccessControl, error) { +func (c *AnywhereCacheResumeCall) Do(opts ...googleapi.CallOption) (*AnywhereCache, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -3360,7 +3946,7 @@ func (c *BucketAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*Buck if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &BucketAccessControl{ + ret := &AnywhereCache{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -3372,93 +3958,69 @@ func (c *BucketAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*Buck } return ret, nil // { - // "description": "Updates an ACL entry on the specified bucket.", - // "httpMethod": "PUT", - // "id": "storage.bucketAccessControls.update", + // "description": "Resumes a paused or disabled Anywhere Cache instance.", + // "httpMethod": "POST", + // "id": "storage.anywhereCaches.resume", // "parameterOrder": [ // "bucket", - // "entity" + // "anywhereCacheId" // ], // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", + // "anywhereCacheId": { + // "description": "The ID of requested Anywhere Cache instance.", // "location": "path", // "required": true, // "type": "string" // }, - // "entity": { - // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + // "bucket": { + // "description": "Name of the partent bucket", // "location": "path", // "required": true, // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" // } // }, - // "path": "b/{bucket}/acl/{entity}", - // "request": { - // "$ref": "BucketAccessControl" - // }, + // "path": "b/{bucket}/anywhereCaches/{anywhereCacheId}/resume", // "response": { - // "$ref": "BucketAccessControl" + // "$ref": "AnywhereCache" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" // ] // } } -// method id "storage.buckets.delete": +// method id "storage.anywhereCaches.update": -type BucketsDeleteCall struct { - s *Service - bucket string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type AnywhereCacheUpdateCall struct { + s *Service + bucket string + anywhereCacheId string + anywherecache *AnywhereCache + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Permanently deletes an empty bucket. +// Update: Updates the config(ttl and admissionPolicy) of an Anywhere +// Cache instance. // -// - bucket: Name of a bucket. 
-func (r *BucketsService) Delete(bucket string) *BucketsDeleteCall { - c := &BucketsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - anywhereCacheId: The ID of requested Anywhere Cache instance. +// - bucket: Name of the partent bucket. +func (r *AnywhereCacheService) Update(bucket string, anywhereCacheId string, anywherecache *AnywhereCache) *AnywhereCacheUpdateCall { + c := &AnywhereCacheUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket - return c -} - -// IfMetagenerationMatch sets the optional parameter -// "ifMetagenerationMatch": If set, only deletes the bucket if its -// metageneration matches this value. -func (c *BucketsDeleteCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *BucketsDeleteCall { - c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) - return c -} - -// IfMetagenerationNotMatch sets the optional parameter -// "ifMetagenerationNotMatch": If set, only deletes the bucket if its -// metageneration does not match this value. -func (c *BucketsDeleteCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *BucketsDeleteCall { - c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *BucketsDeleteCall) UserProject(userProject string) *BucketsDeleteCall { - c.urlParams_.Set("userProject", userProject) + c.anywhereCacheId = anywhereCacheId + c.anywherecache = anywherecache return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BucketsDeleteCall) Fields(s ...googleapi.Field) *BucketsDeleteCall { +func (c *AnywhereCacheUpdateCall) Fields(s ...googleapi.Field) *AnywhereCacheUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -3466,21 +4028,21 @@ func (c *BucketsDeleteCall) Fields(s ...googleapi.Field) *BucketsDeleteCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BucketsDeleteCall) Context(ctx context.Context) *BucketsDeleteCall { +func (c *AnywhereCacheUpdateCall) Context(ctx context.Context) *AnywhereCacheUpdateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
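Unlike Pause and Resume, the Update call defined in this hunk returns a GoogleLongrunningOperation for the caller to poll. A hedged fragment, continuing the earlier sketch (the Ttl and AdmissionPolicy field names, the admit-on-first-miss value, and the Name/Done operation fields are assumptions):

	// Illustrative fragment, reusing svc from the earlier sketch. Update is the
	// only Anywhere Cache mutation here that returns a long-running operation.
	op, err := svc.AnywhereCaches.Update("my-bucket", "my-cache-id", &storage.AnywhereCache{
		Ttl:             "7200s",               // assumed field name
		AdmissionPolicy: "admit-on-first-miss", // assumed field name and value
	}).Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("operation:", op.Name, "done:", op.Done)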
-func (c *BucketsDeleteCall) Header() http.Header { +func (c *AnywhereCacheUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BucketsDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *AnywhereCacheUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -3488,66 +4050,93 @@ func (c *BucketsDeleteCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.anywherecache) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/anywhereCaches/{anywhereCacheId}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, + "bucket": c.bucket, + "anywhereCacheId": c.anywhereCacheId, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "storage.buckets.delete" call. -func (c *BucketsDeleteCall) Do(opts ...googleapi.CallOption) error { +// Do executes the "storage.anywhereCaches.update" call. +// Exactly one of *GoogleLongrunningOperation or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *GoogleLongrunningOperation.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *AnywhereCacheUpdateCall) Do(opts ...googleapi.CallOption) (*GoogleLongrunningOperation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } if err != nil { - return err + return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return gensupport.WrapError(err) + return nil, gensupport.WrapError(err) } - return nil + ret := &GoogleLongrunningOperation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil // { - // "description": "Permanently deletes an empty bucket.", - // "httpMethod": "DELETE", - // "id": "storage.buckets.delete", + // "description": "Updates the config(ttl and admissionPolicy) of an Anywhere Cache instance.", + // "httpMethod": "PATCH", + // "id": "storage.anywhereCaches.update", // "parameterOrder": [ - // "bucket" + // "bucket", + // "anywhereCacheId" // ], // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", + // "anywhereCacheId": { + // "description": "The ID of requested Anywhere Cache instance.", // "location": "path", // "required": true, // "type": "string" // }, - // "ifMetagenerationMatch": { - // "description": "If set, only deletes the bucket if its metageneration matches this value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationNotMatch": { - // "description": "If set, only deletes the bucket if its metageneration does not match this value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", + // "bucket": { + // "description": "Name of the partent bucket", + // "location": "path", + // "required": true, // "type": "string" // } // }, - // "path": "b/{bucket}", + // "path": "b/{bucket}/anywhereCaches/{anywhereCacheId}", + // "request": { + // "$ref": "AnywhereCache" + // }, + // "response": { + // "$ref": "GoogleLongrunningOperation" + // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/devstorage.full_control", @@ -3557,59 +4146,34 @@ func (c *BucketsDeleteCall) Do(opts ...googleapi.CallOption) error { } -// method id "storage.buckets.get": +// method id "storage.bucketAccessControls.delete": -type BucketsGetCall struct { - s *Service - bucket string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type BucketAccessControlsDeleteCall struct { + s *Service + bucket string + entity string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Get: Returns metadata for the specified bucket. +// Delete: Permanently deletes the ACL entry for the specified entity on +// the specified bucket. // -// - bucket: Name of a bucket. -func (r *BucketsService) Get(bucket string) *BucketsGetCall { - c := &BucketsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - bucket: Name of a bucket. +// - entity: The entity holding the permission. Can be user-userId, +// user-emailAddress, group-groupId, group-emailAddress, allUsers, or +// allAuthenticatedUsers. 
+func (r *BucketAccessControlsService) Delete(bucket string, entity string) *BucketAccessControlsDeleteCall { + c := &BucketAccessControlsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket - return c -} - -// IfMetagenerationMatch sets the optional parameter -// "ifMetagenerationMatch": Makes the return of the bucket metadata -// conditional on whether the bucket's current metageneration matches -// the given value. -func (c *BucketsGetCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *BucketsGetCall { - c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) - return c -} - -// IfMetagenerationNotMatch sets the optional parameter -// "ifMetagenerationNotMatch": Makes the return of the bucket metadata -// conditional on whether the bucket's current metageneration does not -// match the given value. -func (c *BucketsGetCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *BucketsGetCall { - c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) - return c -} - -// Projection sets the optional parameter "projection": Set of -// properties to return. Defaults to noAcl. -// -// Possible values: -// -// "full" - Include all properties. -// "noAcl" - Omit owner, acl and defaultObjectAcl properties. -func (c *BucketsGetCall) Projection(projection string) *BucketsGetCall { - c.urlParams_.Set("projection", projection) + c.entity = entity return c } // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. -func (c *BucketsGetCall) UserProject(userProject string) *BucketsGetCall { +func (c *BucketAccessControlsDeleteCall) UserProject(userProject string) *BucketAccessControlsDeleteCall { c.urlParams_.Set("userProject", userProject) return c } @@ -3617,107 +4181,71 @@ func (c *BucketsGetCall) UserProject(userProject string) *BucketsGetCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BucketsGetCall) Fields(s ...googleapi.Field) *BucketsGetCall { +func (c *BucketAccessControlsDeleteCall) Fields(s ...googleapi.Field) *BucketAccessControlsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *BucketsGetCall) IfNoneMatch(entityTag string) *BucketsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BucketsGetCall) Context(ctx context.Context) *BucketsGetCall { +func (c *BucketAccessControlsDeleteCall) Context(ctx context.Context) *BucketAccessControlsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *BucketsGetCall) Header() http.Header { +func (c *BucketAccessControlsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BucketsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *BucketAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, + "entity": c.entity, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "storage.buckets.get" call. -// Exactly one of *Bucket or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Bucket.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *BucketsGetCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { +// Do executes the "storage.bucketAccessControls.delete" call. +func (c *BucketAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) error { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, gensupport.WrapError(&googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - }) - } if err != nil { - return nil, err + return err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, gensupport.WrapError(err) - } - ret := &Bucket{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err + return gensupport.WrapError(err) } - return ret, nil + return nil // { - // "description": "Returns metadata for the specified bucket.", - // "httpMethod": "GET", - // "id": "storage.buckets.get", - // "parameterOrder": [ - // "bucket" + // "description": "Permanently deletes the ACL entry for the specified entity on the specified bucket.", + // "httpMethod": "DELETE", + // "id": "storage.bucketAccessControls.delete", + // "parameterOrder": [ + // "bucket", + // "entity" // ], // "parameters": { // "bucket": { @@ -3726,29 +4254,10 @@ func (c *BucketsGetCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { // "required": true, // "type": "string" // }, - // "ifMetagenerationMatch": { - // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationNotMatch": { - // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "projection": { - // "description": "Set of properties to return. Defaults to noAcl.", - // "enum": [ - // "full", - // "noAcl" - // ], - // "enumDescriptions": [ - // "Include all properties.", - // "Omit owner, acl and defaultObjectAcl properties." - // ], - // "location": "query", + // "entity": { + // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + // "location": "path", + // "required": true, // "type": "string" // }, // "userProject": { @@ -3757,54 +4266,44 @@ func (c *BucketsGetCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { // "type": "string" // } // }, - // "path": "b/{bucket}", - // "response": { - // "$ref": "Bucket" - // }, + // "path": "b/{bucket}/acl/{entity}", // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_only", - // "https://www.googleapis.com/auth/devstorage.read_write" + // "https://www.googleapis.com/auth/devstorage.full_control" // ] // } } -// method id "storage.buckets.getIamPolicy": +// method id "storage.bucketAccessControls.get": -type BucketsGetIamPolicyCall struct { +type BucketAccessControlsGetCall struct { s *Service bucket string + entity string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// GetIamPolicy: Returns an IAM policy for the specified bucket. +// Get: Returns the ACL entry for the specified entity on the specified +// bucket. 
// -// - bucket: Name of a bucket. -func (r *BucketsService) GetIamPolicy(bucket string) *BucketsGetIamPolicyCall { - c := &BucketsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - bucket: Name of a bucket. +// - entity: The entity holding the permission. Can be user-userId, +// user-emailAddress, group-groupId, group-emailAddress, allUsers, or +// allAuthenticatedUsers. +func (r *BucketAccessControlsService) Get(bucket string, entity string) *BucketAccessControlsGetCall { + c := &BucketAccessControlsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket - return c -} - -// OptionsRequestedPolicyVersion sets the optional parameter -// "optionsRequestedPolicyVersion": The IAM policy format version to be -// returned. If the optionsRequestedPolicyVersion is for an older -// version that doesn't support part of the requested IAM policy, the -// request fails. -func (c *BucketsGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *BucketsGetIamPolicyCall { - c.urlParams_.Set("optionsRequestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) + c.entity = entity return c } // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. -func (c *BucketsGetIamPolicyCall) UserProject(userProject string) *BucketsGetIamPolicyCall { +func (c *BucketAccessControlsGetCall) UserProject(userProject string) *BucketAccessControlsGetCall { c.urlParams_.Set("userProject", userProject) return c } @@ -3812,7 +4311,7 @@ func (c *BucketsGetIamPolicyCall) UserProject(userProject string) *BucketsGetIam // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BucketsGetIamPolicyCall) Fields(s ...googleapi.Field) *BucketsGetIamPolicyCall { +func (c *BucketAccessControlsGetCall) Fields(s ...googleapi.Field) *BucketAccessControlsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -3822,7 +4321,7 @@ func (c *BucketsGetIamPolicyCall) Fields(s ...googleapi.Field) *BucketsGetIamPol // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *BucketsGetIamPolicyCall) IfNoneMatch(entityTag string) *BucketsGetIamPolicyCall { +func (c *BucketAccessControlsGetCall) IfNoneMatch(entityTag string) *BucketAccessControlsGetCall { c.ifNoneMatch_ = entityTag return c } @@ -3830,21 +4329,21 @@ func (c *BucketsGetIamPolicyCall) IfNoneMatch(entityTag string) *BucketsGetIamPo // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BucketsGetIamPolicyCall) Context(ctx context.Context) *BucketsGetIamPolicyCall { +func (c *BucketAccessControlsGetCall) Context(ctx context.Context) *BucketAccessControlsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
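The regenerated Get call keeps the conditional-request hooks: IfNoneMatch sends a stored ETag and googleapi.IsNotModified tells a 304 apart from a real failure. A hedged fragment (previousETag is an assumed variable; googleapi is google.golang.org/api/googleapi):

	// Illustrative fragment, reusing svc from the earlier sketch.
	acl, err := svc.BucketAccessControls.Get("my-bucket", "allUsers").
		IfNoneMatch(previousETag). // previousETag saved from an earlier response (assumed)
		Do()
	if googleapi.IsNotModified(err) {
		fmt.Println("ACL entry unchanged since last read")
	} else if err != nil {
		log.Fatal(err)
	} else {
		fmt.Println(acl.Entity, acl.Role)
	}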
-func (c *BucketsGetIamPolicyCall) Header() http.Header { +func (c *BucketAccessControlsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BucketsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *BucketAccessControlsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -3857,7 +4356,7 @@ func (c *BucketsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/iam") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -3866,18 +4365,19 @@ func (c *BucketsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, + "entity": c.entity, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "storage.buckets.getIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *BucketsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +// Do executes the "storage.bucketAccessControls.get" call. +// Exactly one of *BucketAccessControl or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *BucketAccessControl.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *BucketAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*BucketAccessControl, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -3896,7 +4396,7 @@ func (c *BucketsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &Policy{ + ret := &BucketAccessControl{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -3908,11 +4408,12 @@ func (c *BucketsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err } return ret, nil // { - // "description": "Returns an IAM policy for the specified bucket.", + // "description": "Returns the ACL entry for the specified entity on the specified bucket.", // "httpMethod": "GET", - // "id": "storage.buckets.getIamPolicy", + // "id": "storage.bucketAccessControls.get", // "parameterOrder": [ - // "bucket" + // "bucket", + // "entity" // ], // "parameters": { // "bucket": { @@ -3921,12 +4422,11 @@ func (c *BucketsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err // "required": true, // "type": "string" // }, - // "optionsRequestedPolicyVersion": { - // "description": "The IAM policy format version to be returned. 
If the optionsRequestedPolicyVersion is for an older version that doesn't support part of the requested IAM policy, the request fails.", - // "format": "int32", - // "location": "query", - // "minimum": "1", - // "type": "integer" + // "entity": { + // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + // "location": "path", + // "required": true, + // "type": "string" // }, // "userProject": { // "description": "The project to be billed for this request. Required for Requester Pays buckets.", @@ -3934,9 +4434,9 @@ func (c *BucketsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err // "type": "string" // } // }, - // "path": "b/{bucket}/iam", + // "path": "b/{bucket}/acl/{entity}", // "response": { - // "$ref": "Policy" + // "$ref": "BucketAccessControl" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -3946,100 +4446,30 @@ func (c *BucketsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err } -// method id "storage.buckets.insert": +// method id "storage.bucketAccessControls.insert": -type BucketsInsertCall struct { - s *Service - bucket *Bucket - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type BucketAccessControlsInsertCall struct { + s *Service + bucket string + bucketaccesscontrol *BucketAccessControl + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a new bucket. +// Insert: Creates a new ACL entry on the specified bucket. // -// - project: A valid API project identifier. -func (r *BucketsService) Insert(projectid string, bucket *Bucket) *BucketsInsertCall { - c := &BucketsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.urlParams_.Set("project", projectid) +// - bucket: Name of a bucket. +func (r *BucketAccessControlsService) Insert(bucket string, bucketaccesscontrol *BucketAccessControl) *BucketAccessControlsInsertCall { + c := &BucketAccessControlsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket - return c -} - -// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a -// predefined set of access controls to this bucket. -// -// Possible values: -// -// "authenticatedRead" - Project team owners get OWNER access, and -// -// allAuthenticatedUsers get READER access. -// -// "private" - Project team owners get OWNER access. -// "projectPrivate" - Project team members get access according to -// -// their roles. -// -// "publicRead" - Project team owners get OWNER access, and allUsers -// -// get READER access. -// -// "publicReadWrite" - Project team owners get OWNER access, and -// -// allUsers get WRITER access. -func (c *BucketsInsertCall) PredefinedAcl(predefinedAcl string) *BucketsInsertCall { - c.urlParams_.Set("predefinedAcl", predefinedAcl) - return c -} - -// PredefinedDefaultObjectAcl sets the optional parameter -// "predefinedDefaultObjectAcl": Apply a predefined set of default -// object access controls to this bucket. -// -// Possible values: -// -// "authenticatedRead" - Object owner gets OWNER access, and -// -// allAuthenticatedUsers get READER access. -// -// "bucketOwnerFullControl" - Object owner gets OWNER access, and -// -// project team owners get OWNER access. -// -// "bucketOwnerRead" - Object owner gets OWNER access, and project -// -// team owners get READER access. -// -// "private" - Object owner gets OWNER access. 
-// "projectPrivate" - Object owner gets OWNER access, and project team -// -// members get access according to their roles. -// -// "publicRead" - Object owner gets OWNER access, and allUsers get -// -// READER access. -func (c *BucketsInsertCall) PredefinedDefaultObjectAcl(predefinedDefaultObjectAcl string) *BucketsInsertCall { - c.urlParams_.Set("predefinedDefaultObjectAcl", predefinedDefaultObjectAcl) - return c -} - -// Projection sets the optional parameter "projection": Set of -// properties to return. Defaults to noAcl, unless the bucket resource -// specifies acl or defaultObjectAcl properties, when it defaults to -// full. -// -// Possible values: -// -// "full" - Include all properties. -// "noAcl" - Omit owner, acl and defaultObjectAcl properties. -func (c *BucketsInsertCall) Projection(projection string) *BucketsInsertCall { - c.urlParams_.Set("projection", projection) + c.bucketaccesscontrol = bucketaccesscontrol return c } // UserProject sets the optional parameter "userProject": The project to -// be billed for this request. -func (c *BucketsInsertCall) UserProject(userProject string) *BucketsInsertCall { +// be billed for this request. Required for Requester Pays buckets. +func (c *BucketAccessControlsInsertCall) UserProject(userProject string) *BucketAccessControlsInsertCall { c.urlParams_.Set("userProject", userProject) return c } @@ -4047,7 +4477,7 @@ func (c *BucketsInsertCall) UserProject(userProject string) *BucketsInsertCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BucketsInsertCall) Fields(s ...googleapi.Field) *BucketsInsertCall { +func (c *BucketAccessControlsInsertCall) Fields(s ...googleapi.Field) *BucketAccessControlsInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -4055,21 +4485,21 @@ func (c *BucketsInsertCall) Fields(s ...googleapi.Field) *BucketsInsertCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BucketsInsertCall) Context(ctx context.Context) *BucketsInsertCall { +func (c *BucketAccessControlsInsertCall) Context(ctx context.Context) *BucketAccessControlsInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *BucketsInsertCall) Header() http.Header { +func (c *BucketAccessControlsInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BucketsInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *BucketAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -4077,37 +4507,40 @@ func (c *BucketsInsertCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "storage.buckets.insert" call. -// Exactly one of *Bucket or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Bucket.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *BucketsInsertCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } +// Do executes the "storage.bucketAccessControls.insert" call. +// Exactly one of *BucketAccessControl or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *BucketAccessControl.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *BucketAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*BucketAccessControl, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } return nil, gensupport.WrapError(&googleapi.Error{ Code: res.StatusCode, Header: res.Header, @@ -4120,7 +4553,7 @@ func (c *BucketsInsertCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &Bucket{ + ret := &BucketAccessControl{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -4132,151 +4565,63 @@ func (c *BucketsInsertCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { } return ret, nil // { - // "description": "Creates a new bucket.", + // "description": "Creates a new ACL entry on the specified bucket.", // "httpMethod": "POST", - // "id": "storage.buckets.insert", + // "id": "storage.bucketAccessControls.insert", // "parameterOrder": [ - // "project" + // "bucket" // ], // "parameters": { - // "predefinedAcl": { - // "description": "Apply a predefined set of access controls to this bucket.", - // "enum": [ - // "authenticatedRead", - // "private", - // "projectPrivate", - // "publicRead", - // "publicReadWrite" - // ], - // "enumDescriptions": [ - // "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.", - // "Project team owners get OWNER access.", - // "Project team members get access according to their roles.", - // "Project team owners get OWNER access, and allUsers get READER access.", - // "Project team owners get OWNER access, and allUsers get WRITER access." - // ], - // "location": "query", - // "type": "string" - // }, - // "predefinedDefaultObjectAcl": { - // "description": "Apply a predefined set of default object access controls to this bucket.", - // "enum": [ - // "authenticatedRead", - // "bucketOwnerFullControl", - // "bucketOwnerRead", - // "private", - // "projectPrivate", - // "publicRead" - // ], - // "enumDescriptions": [ - // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", - // "Object owner gets OWNER access, and project team owners get OWNER access.", - // "Object owner gets OWNER access, and project team owners get READER access.", - // "Object owner gets OWNER access.", - // "Object owner gets OWNER access, and project team members get access according to their roles.", - // "Object owner gets OWNER access, and allUsers get READER access." - // ], - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "A valid API project identifier.", - // "location": "query", + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", // "required": true, // "type": "string" // }, - // "projection": { - // "description": "Set of properties to return. Defaults to noAcl, unless the bucket resource specifies acl or defaultObjectAcl properties, when it defaults to full.", - // "enum": [ - // "full", - // "noAcl" - // ], - // "enumDescriptions": [ - // "Include all properties.", - // "Omit owner, acl and defaultObjectAcl properties." - // ], - // "location": "query", - // "type": "string" - // }, // "userProject": { - // "description": "The project to be billed for this request.", + // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", // "location": "query", // "type": "string" // } // }, - // "path": "b", + // "path": "b/{bucket}/acl", // "request": { - // "$ref": "Bucket" + // "$ref": "BucketAccessControl" // }, // "response": { - // "$ref": "Bucket" + // "$ref": "BucketAccessControl" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_write" + // "https://www.googleapis.com/auth/devstorage.full_control" // ] // } } -// method id "storage.buckets.list": +// method id "storage.bucketAccessControls.list": -type BucketsListCall struct { +type BucketAccessControlsListCall struct { s *Service + bucket string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Retrieves a list of buckets for a given project. -// -// - project: A valid API project identifier. -func (r *BucketsService) List(projectid string) *BucketsListCall { - c := &BucketsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.urlParams_.Set("project", projectid) - return c -} - -// MaxResults sets the optional parameter "maxResults": Maximum number -// of buckets to return in a single response. The service will use this -// parameter or 1,000 items, whichever is smaller. -func (c *BucketsListCall) MaxResults(maxResults int64) *BucketsListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) - return c -} - -// PageToken sets the optional parameter "pageToken": A -// previously-returned page token representing part of the larger set of -// results to view. -func (c *BucketsListCall) PageToken(pageToken string) *BucketsListCall { - c.urlParams_.Set("pageToken", pageToken) - return c -} - -// Prefix sets the optional parameter "prefix": Filter results to -// buckets whose names begin with this prefix. -func (c *BucketsListCall) Prefix(prefix string) *BucketsListCall { - c.urlParams_.Set("prefix", prefix) - return c -} - -// Projection sets the optional parameter "projection": Set of -// properties to return. Defaults to noAcl. -// -// Possible values: +// List: Retrieves ACL entries on the specified bucket. // -// "full" - Include all properties. -// "noAcl" - Omit owner, acl and defaultObjectAcl properties. -func (c *BucketsListCall) Projection(projection string) *BucketsListCall { - c.urlParams_.Set("projection", projection) +// - bucket: Name of a bucket. +func (r *BucketAccessControlsService) List(bucket string) *BucketAccessControlsListCall { + c := &BucketAccessControlsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket return c } // UserProject sets the optional parameter "userProject": The project to -// be billed for this request. -func (c *BucketsListCall) UserProject(userProject string) *BucketsListCall { +// be billed for this request. Required for Requester Pays buckets. +func (c *BucketAccessControlsListCall) UserProject(userProject string) *BucketAccessControlsListCall { c.urlParams_.Set("userProject", userProject) return c } @@ -4284,7 +4629,7 @@ func (c *BucketsListCall) UserProject(userProject string) *BucketsListCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
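Insert and List round out the regenerated BucketAccessControls surface. A hedged fragment, continuing the earlier sketch (the Entity, Role and Items fields are assumed from the published BucketAccessControl shape):

	// Illustrative fragment, reusing svc from the earlier sketch. Grant a
	// reader on the bucket ACL, then list every entry.
	if _, err := svc.BucketAccessControls.Insert("my-bucket", &storage.BucketAccessControl{
		Entity: "user-someone@example.com",
		Role:   "READER",
	}).Do(); err != nil {
		log.Fatal(err)
	}
	acls, err := svc.BucketAccessControls.List("my-bucket").Do()
	if err != nil {
		log.Fatal(err)
	}
	for _, item := range acls.Items { // Items field assumed from the usual list-response shape
		fmt.Println(item.Entity, item.Role)
	}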
-func (c *BucketsListCall) Fields(s ...googleapi.Field) *BucketsListCall { +func (c *BucketAccessControlsListCall) Fields(s ...googleapi.Field) *BucketAccessControlsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -4294,7 +4639,7 @@ func (c *BucketsListCall) Fields(s ...googleapi.Field) *BucketsListCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *BucketsListCall) IfNoneMatch(entityTag string) *BucketsListCall { +func (c *BucketAccessControlsListCall) IfNoneMatch(entityTag string) *BucketAccessControlsListCall { c.ifNoneMatch_ = entityTag return c } @@ -4302,21 +4647,21 @@ func (c *BucketsListCall) IfNoneMatch(entityTag string) *BucketsListCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BucketsListCall) Context(ctx context.Context) *BucketsListCall { +func (c *BucketAccessControlsListCall) Context(ctx context.Context) *BucketAccessControlsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BucketsListCall) Header() http.Header { +func (c *BucketAccessControlsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BucketsListCall) doRequest(alt string) (*http.Response, error) { +func (c *BucketAccessControlsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -4329,24 +4674,27 @@ func (c *BucketsListCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "storage.buckets.list" call. -// Exactly one of *Buckets or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Buckets.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *BucketsListCall) Do(opts ...googleapi.CallOption) (*Buckets, error) { +// Do executes the "storage.bucketAccessControls.list" call. +// Exactly one of *BucketAccessControls or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *BucketAccessControls.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *BucketAccessControlsListCall) Do(opts ...googleapi.CallOption) (*BucketAccessControls, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -4365,7 +4713,7 @@ func (c *BucketsListCall) Do(opts ...googleapi.CallOption) (*Buckets, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &Buckets{ + ret := &BucketAccessControls{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -4377,117 +4725,66 @@ func (c *BucketsListCall) Do(opts ...googleapi.CallOption) (*Buckets, error) { } return ret, nil // { - // "description": "Retrieves a list of buckets for a given project.", + // "description": "Retrieves ACL entries on the specified bucket.", // "httpMethod": "GET", - // "id": "storage.buckets.list", + // "id": "storage.bucketAccessControls.list", // "parameterOrder": [ - // "project" + // "bucket" // ], // "parameters": { - // "maxResults": { - // "default": "1000", - // "description": "Maximum number of buckets to return in a single response. The service will use this parameter or 1,000 items, whichever is smaller.", - // "format": "uint32", - // "location": "query", - // "minimum": "0", - // "type": "integer" - // }, - // "pageToken": { - // "description": "A previously-returned page token representing part of the larger set of results to view.", - // "location": "query", - // "type": "string" - // }, - // "prefix": { - // "description": "Filter results to buckets whose names begin with this prefix.", - // "location": "query", - // "type": "string" - // }, - // "project": { - // "description": "A valid API project identifier.", - // "location": "query", + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", // "required": true, // "type": "string" // }, - // "projection": { - // "description": "Set of properties to return. Defaults to noAcl.", - // "enum": [ - // "full", - // "noAcl" - // ], - // "enumDescriptions": [ - // "Include all properties.", - // "Omit owner, acl and defaultObjectAcl properties." - // ], - // "location": "query", - // "type": "string" - // }, // "userProject": { - // "description": "The project to be billed for this request.", + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", // "type": "string" // } // }, - // "path": "b", + // "path": "b/{bucket}/acl", // "response": { - // "$ref": "Buckets" + // "$ref": "BucketAccessControls" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_only", - // "https://www.googleapis.com/auth/devstorage.read_write" + // "https://www.googleapis.com/auth/devstorage.full_control" // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. 
-func (c *BucketsListCall) Pages(ctx context.Context, f func(*Buckets) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "storage.buckets.lockRetentionPolicy": +// method id "storage.bucketAccessControls.patch": -type BucketsLockRetentionPolicyCall struct { - s *Service - bucket string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type BucketAccessControlsPatchCall struct { + s *Service + bucket string + entity string + bucketaccesscontrol *BucketAccessControl + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// LockRetentionPolicy: Locks retention policy on a bucket. +// Patch: Patches an ACL entry on the specified bucket. // // - bucket: Name of a bucket. -// - ifMetagenerationMatch: Makes the operation conditional on whether -// bucket's current metageneration matches the given value. -func (r *BucketsService) LockRetentionPolicy(bucket string, ifMetagenerationMatch int64) *BucketsLockRetentionPolicyCall { - c := &BucketsLockRetentionPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - entity: The entity holding the permission. Can be user-userId, +// user-emailAddress, group-groupId, group-emailAddress, allUsers, or +// allAuthenticatedUsers. +func (r *BucketAccessControlsService) Patch(bucket string, entity string, bucketaccesscontrol *BucketAccessControl) *BucketAccessControlsPatchCall { + c := &BucketAccessControlsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket - c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + c.entity = entity + c.bucketaccesscontrol = bucketaccesscontrol return c } // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. -func (c *BucketsLockRetentionPolicyCall) UserProject(userProject string) *BucketsLockRetentionPolicyCall { +func (c *BucketAccessControlsPatchCall) UserProject(userProject string) *BucketAccessControlsPatchCall { c.urlParams_.Set("userProject", userProject) return c } @@ -4495,7 +4792,7 @@ func (c *BucketsLockRetentionPolicyCall) UserProject(userProject string) *Bucket // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BucketsLockRetentionPolicyCall) Fields(s ...googleapi.Field) *BucketsLockRetentionPolicyCall { +func (c *BucketAccessControlsPatchCall) Fields(s ...googleapi.Field) *BucketAccessControlsPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -4503,21 +4800,21 @@ func (c *BucketsLockRetentionPolicyCall) Fields(s ...googleapi.Field) *BucketsLo // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BucketsLockRetentionPolicyCall) Context(ctx context.Context) *BucketsLockRetentionPolicyCall { +func (c *BucketAccessControlsPatchCall) Context(ctx context.Context) *BucketAccessControlsPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *BucketsLockRetentionPolicyCall) Header() http.Header { +func (c *BucketAccessControlsPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BucketsLockRetentionPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *BucketAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -4525,29 +4822,35 @@ func (c *BucketsLockRetentionPolicyCall) doRequest(alt string) (*http.Response, } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/lockRetentionPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, + "entity": c.entity, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "storage.buckets.lockRetentionPolicy" call. -// Exactly one of *Bucket or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Bucket.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *BucketsLockRetentionPolicyCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { +// Do executes the "storage.bucketAccessControls.patch" call. +// Exactly one of *BucketAccessControl or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *BucketAccessControl.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *BucketAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*BucketAccessControl, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -4566,7 +4869,7 @@ func (c *BucketsLockRetentionPolicyCall) Do(opts ...googleapi.CallOption) (*Buck if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &Bucket{ + ret := &BucketAccessControl{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -4578,12 +4881,12 @@ func (c *BucketsLockRetentionPolicyCall) Do(opts ...googleapi.CallOption) (*Buck } return ret, nil // { - // "description": "Locks retention policy on a bucket.", - // "httpMethod": "POST", - // "id": "storage.buckets.lockRetentionPolicy", + // "description": "Patches an ACL entry on the specified bucket.", + // "httpMethod": "PATCH", + // "id": "storage.bucketAccessControls.patch", // "parameterOrder": [ // "bucket", - // "ifMetagenerationMatch" + // "entity" // ], // "parameters": { // "bucket": { @@ -4592,10 +4895,9 @@ func (c *BucketsLockRetentionPolicyCall) Do(opts ...googleapi.CallOption) (*Buck // "required": true, // "type": "string" // }, - // "ifMetagenerationMatch": { - // "description": "Makes the operation conditional on whether bucket's current metageneration matches the given value.", - // "format": "int64", - // "location": "query", + // "entity": { + // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + // "location": "path", // "required": true, // "type": "string" // }, @@ -4605,132 +4907,50 @@ func (c *BucketsLockRetentionPolicyCall) Do(opts ...googleapi.CallOption) (*Buck // "type": "string" // } // }, - // "path": "b/{bucket}/lockRetentionPolicy", + // "path": "b/{bucket}/acl/{entity}", + // "request": { + // "$ref": "BucketAccessControl" + // }, // "response": { - // "$ref": "Bucket" + // "$ref": "BucketAccessControl" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_write" + // "https://www.googleapis.com/auth/devstorage.full_control" // ] // } } -// method id "storage.buckets.patch": +// method id "storage.bucketAccessControls.update": -type BucketsPatchCall struct { - s *Service - bucket string - bucket2 *Bucket - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type BucketAccessControlsUpdateCall struct { + s *Service + bucket string + entity string + bucketaccesscontrol *BucketAccessControl + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Patch: Patches a bucket. Changes to the bucket will be readable -// immediately after writing, but configuration changes may take time to -// propagate. +// Update: Updates an ACL entry on the specified bucket. // -// - bucket: Name of a bucket. -func (r *BucketsService) Patch(bucket string, bucket2 *Bucket) *BucketsPatchCall { - c := &BucketsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - bucket: Name of a bucket. +// - entity: The entity holding the permission. Can be user-userId, +// user-emailAddress, group-groupId, group-emailAddress, allUsers, or +// allAuthenticatedUsers. 
+func (r *BucketAccessControlsService) Update(bucket string, entity string, bucketaccesscontrol *BucketAccessControl) *BucketAccessControlsUpdateCall { + c := &BucketAccessControlsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket - c.bucket2 = bucket2 - return c -} - -// IfMetagenerationMatch sets the optional parameter -// "ifMetagenerationMatch": Makes the return of the bucket metadata -// conditional on whether the bucket's current metageneration matches -// the given value. -func (c *BucketsPatchCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *BucketsPatchCall { - c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) - return c -} - -// IfMetagenerationNotMatch sets the optional parameter -// "ifMetagenerationNotMatch": Makes the return of the bucket metadata -// conditional on whether the bucket's current metageneration does not -// match the given value. -func (c *BucketsPatchCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *BucketsPatchCall { - c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) - return c -} - -// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a -// predefined set of access controls to this bucket. -// -// Possible values: -// -// "authenticatedRead" - Project team owners get OWNER access, and -// -// allAuthenticatedUsers get READER access. -// -// "private" - Project team owners get OWNER access. -// "projectPrivate" - Project team members get access according to -// -// their roles. -// -// "publicRead" - Project team owners get OWNER access, and allUsers -// -// get READER access. -// -// "publicReadWrite" - Project team owners get OWNER access, and -// -// allUsers get WRITER access. -func (c *BucketsPatchCall) PredefinedAcl(predefinedAcl string) *BucketsPatchCall { - c.urlParams_.Set("predefinedAcl", predefinedAcl) - return c -} - -// PredefinedDefaultObjectAcl sets the optional parameter -// "predefinedDefaultObjectAcl": Apply a predefined set of default -// object access controls to this bucket. -// -// Possible values: -// -// "authenticatedRead" - Object owner gets OWNER access, and -// -// allAuthenticatedUsers get READER access. -// -// "bucketOwnerFullControl" - Object owner gets OWNER access, and -// -// project team owners get OWNER access. -// -// "bucketOwnerRead" - Object owner gets OWNER access, and project -// -// team owners get READER access. -// -// "private" - Object owner gets OWNER access. -// "projectPrivate" - Object owner gets OWNER access, and project team -// -// members get access according to their roles. -// -// "publicRead" - Object owner gets OWNER access, and allUsers get -// -// READER access. -func (c *BucketsPatchCall) PredefinedDefaultObjectAcl(predefinedDefaultObjectAcl string) *BucketsPatchCall { - c.urlParams_.Set("predefinedDefaultObjectAcl", predefinedDefaultObjectAcl) - return c -} - -// Projection sets the optional parameter "projection": Set of -// properties to return. Defaults to full. -// -// Possible values: -// -// "full" - Include all properties. -// "noAcl" - Omit owner, acl and defaultObjectAcl properties. -func (c *BucketsPatchCall) Projection(projection string) *BucketsPatchCall { - c.urlParams_.Set("projection", projection) + c.entity = entity + c.bucketaccesscontrol = bucketaccesscontrol return c } // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. 
-func (c *BucketsPatchCall) UserProject(userProject string) *BucketsPatchCall { +func (c *BucketAccessControlsUpdateCall) UserProject(userProject string) *BucketAccessControlsUpdateCall { c.urlParams_.Set("userProject", userProject) return c } @@ -4738,7 +4958,7 @@ func (c *BucketsPatchCall) UserProject(userProject string) *BucketsPatchCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BucketsPatchCall) Fields(s ...googleapi.Field) *BucketsPatchCall { +func (c *BucketAccessControlsUpdateCall) Fields(s ...googleapi.Field) *BucketAccessControlsUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -4746,21 +4966,21 @@ func (c *BucketsPatchCall) Fields(s ...googleapi.Field) *BucketsPatchCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BucketsPatchCall) Context(ctx context.Context) *BucketsPatchCall { +func (c *BucketAccessControlsUpdateCall) Context(ctx context.Context) *BucketAccessControlsUpdateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BucketsPatchCall) Header() http.Header { +func (c *BucketAccessControlsUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BucketsPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *BucketAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -4768,34 +4988,35 @@ func (c *BucketsPatchCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket2) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("PUT", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, + "entity": c.entity, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "storage.buckets.patch" call. -// Exactly one of *Bucket or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Bucket.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *BucketsPatchCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { +// Do executes the "storage.bucketAccessControls.update" call. +// Exactly one of *BucketAccessControl or error will be non-nil. Any +// non-2xx status code is an error. 
Response headers are in either +// *BucketAccessControl.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *BucketAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*BucketAccessControl, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -4814,7 +5035,7 @@ func (c *BucketsPatchCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &Bucket{ + ret := &BucketAccessControl{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -4826,11 +5047,12 @@ func (c *BucketsPatchCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { } return ret, nil // { - // "description": "Patches a bucket. Changes to the bucket will be readable immediately after writing, but configuration changes may take time to propagate.", - // "httpMethod": "PATCH", - // "id": "storage.buckets.patch", + // "description": "Updates an ACL entry on the specified bucket.", + // "httpMethod": "PUT", + // "id": "storage.bucketAccessControls.update", // "parameterOrder": [ - // "bucket" + // "bucket", + // "entity" // ], // "parameters": { // "bucket": { @@ -4839,69 +5061,10 @@ func (c *BucketsPatchCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { // "required": true, // "type": "string" // }, - // "ifMetagenerationMatch": { - // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationNotMatch": { - // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "predefinedAcl": { - // "description": "Apply a predefined set of access controls to this bucket.", - // "enum": [ - // "authenticatedRead", - // "private", - // "projectPrivate", - // "publicRead", - // "publicReadWrite" - // ], - // "enumDescriptions": [ - // "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.", - // "Project team owners get OWNER access.", - // "Project team members get access according to their roles.", - // "Project team owners get OWNER access, and allUsers get READER access.", - // "Project team owners get OWNER access, and allUsers get WRITER access." 
- // ], - // "location": "query", - // "type": "string" - // }, - // "predefinedDefaultObjectAcl": { - // "description": "Apply a predefined set of default object access controls to this bucket.", - // "enum": [ - // "authenticatedRead", - // "bucketOwnerFullControl", - // "bucketOwnerRead", - // "private", - // "projectPrivate", - // "publicRead" - // ], - // "enumDescriptions": [ - // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", - // "Object owner gets OWNER access, and project team owners get OWNER access.", - // "Object owner gets OWNER access, and project team owners get READER access.", - // "Object owner gets OWNER access.", - // "Object owner gets OWNER access, and project team members get access according to their roles.", - // "Object owner gets OWNER access, and allUsers get READER access." - // ], - // "location": "query", - // "type": "string" - // }, - // "projection": { - // "description": "Set of properties to return. Defaults to full.", - // "enum": [ - // "full", - // "noAcl" - // ], - // "enumDescriptions": [ - // "Include all properties.", - // "Omit owner, acl and defaultObjectAcl properties." - // ], - // "location": "query", + // "entity": { + // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + // "location": "path", + // "required": true, // "type": "string" // }, // "userProject": { @@ -4910,12 +5073,12 @@ func (c *BucketsPatchCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { // "type": "string" // } // }, - // "path": "b/{bucket}", + // "path": "b/{bucket}/acl/{entity}", // "request": { - // "$ref": "Bucket" + // "$ref": "BucketAccessControl" // }, // "response": { - // "$ref": "Bucket" + // "$ref": "BucketAccessControl" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -4925,30 +5088,44 @@ func (c *BucketsPatchCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { } -// method id "storage.buckets.setIamPolicy": +// method id "storage.buckets.delete": -type BucketsSetIamPolicyCall struct { +type BucketsDeleteCall struct { s *Service bucket string - policy *Policy urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// SetIamPolicy: Updates an IAM policy for the specified bucket. +// Delete: Permanently deletes an empty bucket. // // - bucket: Name of a bucket. -func (r *BucketsService) SetIamPolicy(bucket string, policy *Policy) *BucketsSetIamPolicyCall { - c := &BucketsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *BucketsService) Delete(bucket string) *BucketsDeleteCall { + c := &BucketsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket - c.policy = policy + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": If set, only deletes the bucket if its +// metageneration matches this value. +func (c *BucketsDeleteCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *BucketsDeleteCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": If set, only deletes the bucket if its +// metageneration does not match this value. 
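The renamed bucketAccessControls builders above (List, Patch, Update) follow the same fluent pattern as every other call in this regenerated file: obtain a call from the service, chain optional setters such as Context, Fields, or UserProject, then finish with Do. The sketch below is illustrative only; storage.NewService is the package's usual constructor rather than something introduced by this change, and the bucket and entity values are placeholders.

package main

import (
	"context"
	"fmt"
	"log"

	storage "google.golang.org/api/storage/v1"
)

func main() {
	ctx := context.Background()

	// Assumes application default credentials are available.
	svc, err := storage.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// storage.bucketAccessControls.list: GET b/{bucket}/acl
	acls, err := svc.BucketAccessControls.List("example-bucket").Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	for _, acl := range acls.Items {
		fmt.Println(acl.Entity, acl.Role)
	}

	// storage.bucketAccessControls.update: PUT b/{bucket}/acl/{entity}
	_, err = svc.BucketAccessControls.Update("example-bucket", "allUsers",
		&storage.BucketAccessControl{Role: "READER"}).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
}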
+func (c *BucketsDeleteCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *BucketsDeleteCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) return c } // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. -func (c *BucketsSetIamPolicyCall) UserProject(userProject string) *BucketsSetIamPolicyCall { +func (c *BucketsDeleteCall) UserProject(userProject string) *BucketsDeleteCall { c.urlParams_.Set("userProject", userProject) return c } @@ -4956,7 +5133,7 @@ func (c *BucketsSetIamPolicyCall) UserProject(userProject string) *BucketsSetIam // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BucketsSetIamPolicyCall) Fields(s ...googleapi.Field) *BucketsSetIamPolicyCall { +func (c *BucketsDeleteCall) Fields(s ...googleapi.Field) *BucketsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -4964,21 +5141,21 @@ func (c *BucketsSetIamPolicyCall) Fields(s ...googleapi.Field) *BucketsSetIamPol // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BucketsSetIamPolicyCall) Context(ctx context.Context) *BucketsSetIamPolicyCall { +func (c *BucketsDeleteCall) Context(ctx context.Context) *BucketsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BucketsSetIamPolicyCall) Header() http.Header { +func (c *BucketsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BucketsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *BucketsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -4986,16 +5163,11 @@ func (c *BucketsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.policy) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/iam") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PUT", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } @@ -5006,47 +5178,22 @@ func (c *BucketsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "storage.buckets.setIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *BucketsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +// Do executes the "storage.buckets.delete" call. 
+func (c *BucketsDeleteCall) Do(opts ...googleapi.CallOption) error { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, gensupport.WrapError(&googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - }) - } if err != nil { - return nil, err + return err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, gensupport.WrapError(err) - } - ret := &Policy{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err + return gensupport.WrapError(err) } - return ret, nil + return nil // { - // "description": "Updates an IAM policy for the specified bucket.", - // "httpMethod": "PUT", - // "id": "storage.buckets.setIamPolicy", + // "description": "Permanently deletes an empty bucket.", + // "httpMethod": "DELETE", + // "id": "storage.buckets.delete", // "parameterOrder": [ // "bucket" // ], @@ -5057,30 +5204,37 @@ func (c *BucketsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err // "required": true, // "type": "string" // }, + // "ifMetagenerationMatch": { + // "description": "If set, only deletes the bucket if its metageneration matches this value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationNotMatch": { + // "description": "If set, only deletes the bucket if its metageneration does not match this value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, // "userProject": { // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", // "type": "string" // } // }, - // "path": "b/{bucket}/iam", - // "request": { - // "$ref": "Policy" - // }, - // "response": { - // "$ref": "Policy" - // }, + // "path": "b/{bucket}", // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" // ] // } } -// method id "storage.buckets.testIamPermissions": +// method id "storage.buckets.get": -type BucketsTestIamPermissionsCall struct { +type BucketsGetCall struct { s *Service bucket string urlParams_ gensupport.URLParams @@ -5089,21 +5243,48 @@ type BucketsTestIamPermissionsCall struct { header_ http.Header } -// TestIamPermissions: Tests a set of permissions on the given bucket to -// see which, if any, are held by the caller. +// Get: Returns metadata for the specified bucket. // // - bucket: Name of a bucket. -// - permissions: Permissions to test. -func (r *BucketsService) TestIamPermissions(bucket string, permissions []string) *BucketsTestIamPermissionsCall { - c := &BucketsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *BucketsService) Get(bucket string) *BucketsGetCall { + c := &BucketsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket - c.urlParams_.SetMulti("permissions", append([]string{}, permissions...)) + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": Makes the return of the bucket metadata +// conditional on whether the bucket's current metageneration matches +// the given value. 
+func (c *BucketsGetCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *BucketsGetCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": Makes the return of the bucket metadata +// conditional on whether the bucket's current metageneration does not +// match the given value. +func (c *BucketsGetCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *BucketsGetCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) + return c +} + +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to noAcl. +// +// Possible values: +// +// "full" - Include all properties. +// "noAcl" - Omit owner, acl and defaultObjectAcl properties. +func (c *BucketsGetCall) Projection(projection string) *BucketsGetCall { + c.urlParams_.Set("projection", projection) return c } // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. -func (c *BucketsTestIamPermissionsCall) UserProject(userProject string) *BucketsTestIamPermissionsCall { +func (c *BucketsGetCall) UserProject(userProject string) *BucketsGetCall { c.urlParams_.Set("userProject", userProject) return c } @@ -5111,7 +5292,7 @@ func (c *BucketsTestIamPermissionsCall) UserProject(userProject string) *Buckets // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BucketsTestIamPermissionsCall) Fields(s ...googleapi.Field) *BucketsTestIamPermissionsCall { +func (c *BucketsGetCall) Fields(s ...googleapi.Field) *BucketsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -5121,7 +5302,7 @@ func (c *BucketsTestIamPermissionsCall) Fields(s ...googleapi.Field) *BucketsTes // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *BucketsTestIamPermissionsCall) IfNoneMatch(entityTag string) *BucketsTestIamPermissionsCall { +func (c *BucketsGetCall) IfNoneMatch(entityTag string) *BucketsGetCall { c.ifNoneMatch_ = entityTag return c } @@ -5129,21 +5310,21 @@ func (c *BucketsTestIamPermissionsCall) IfNoneMatch(entityTag string) *BucketsTe // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BucketsTestIamPermissionsCall) Context(ctx context.Context) *BucketsTestIamPermissionsCall { +func (c *BucketsGetCall) Context(ctx context.Context) *BucketsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
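The BucketsGetCall and BucketsDeleteCall builders expose the conditional helpers shown above: IfNoneMatch sends an If-None-Match request header (the "In-None-Match" wording in the generated comment is an upstream typo; doRequest sets the real header) and pairs with googleapi.IsNotModified, while IfMetagenerationMatch and IfMetagenerationNotMatch become query parameters. A hedged sketch under the same assumptions as the previous one; the package, function, and bucket names are placeholders.

package storageexamples

import (
	"fmt"
	"log"

	"google.golang.org/api/googleapi"
	storage "google.golang.org/api/storage/v1"
)

// conditionalBucketOps assumes svc was built as in the earlier sketch and
// bucket names an existing bucket the caller is allowed to delete.
func conditionalBucketOps(svc *storage.Service, bucket string) {
	// Plain get, trimmed to a few fields via the Fields partial-response helper.
	b, err := svc.Buckets.Get(bucket).Fields("name", "etag", "metageneration").Do()
	if err != nil {
		log.Fatal(err)
	}

	// IfNoneMatch makes the read conditional; a 304 surfaces as an error
	// that googleapi.IsNotModified recognizes.
	if _, err := svc.Buckets.Get(bucket).IfNoneMatch(b.Etag).Do(); googleapi.IsNotModified(err) {
		fmt.Println("bucket metadata unchanged")
	} else if err != nil {
		log.Fatal(err)
	}

	// Delete only if the metageneration still matches what was just read.
	if err := svc.Buckets.Delete(bucket).IfMetagenerationMatch(b.Metageneration).Do(); err != nil {
		log.Fatal(err)
	}
}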
-func (c *BucketsTestIamPermissionsCall) Header() http.Header { +func (c *BucketsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BucketsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *BucketsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -5156,7 +5337,7 @@ func (c *BucketsTestIamPermissionsCall) doRequest(alt string) (*http.Response, e var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/iam/testPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -5169,14 +5350,14 @@ func (c *BucketsTestIamPermissionsCall) doRequest(alt string) (*http.Response, e return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "storage.buckets.testIamPermissions" call. -// Exactly one of *TestIamPermissionsResponse or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *TestIamPermissionsResponse.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *BucketsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestIamPermissionsResponse, error) { +// Do executes the "storage.buckets.get" call. +// Exactly one of *Bucket or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Bucket.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *BucketsGetCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -5195,7 +5376,7 @@ func (c *BucketsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestI if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &TestIamPermissionsResponse{ + ret := &Bucket{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -5207,12 +5388,11 @@ func (c *BucketsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestI } return ret, nil // { - // "description": "Tests a set of permissions on the given bucket to see which, if any, are held by the caller.", + // "description": "Returns metadata for the specified bucket.", // "httpMethod": "GET", - // "id": "storage.buckets.testIamPermissions", + // "id": "storage.buckets.get", // "parameterOrder": [ - // "bucket", - // "permissions" + // "bucket" // ], // "parameters": { // "bucket": { @@ -5221,11 +5401,29 @@ func (c *BucketsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestI // "required": true, // "type": "string" // }, - // "permissions": { - // "description": "Permissions to test.", + // "ifMetagenerationMatch": { + // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationNotMatch": { + // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. Defaults to noAcl.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit owner, acl and defaultObjectAcl properties." + // ], // "location": "query", - // "repeated": true, - // "required": true, // "type": "string" // }, // "userProject": { @@ -5234,9 +5432,9 @@ func (c *BucketsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestI // "type": "string" // } // }, - // "path": "b/{bucket}/iam/testPermissions", + // "path": "b/{bucket}", // "response": { - // "$ref": "TestIamPermissionsResponse" + // "$ref": "Bucket" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -5249,119 +5447,39 @@ func (c *BucketsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestI } -// method id "storage.buckets.update": +// method id "storage.buckets.getIamPolicy": -type BucketsUpdateCall struct { - s *Service - bucket string - bucket2 *Bucket - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type BucketsGetIamPolicyCall struct { + s *Service + bucket string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Update: Updates a bucket. Changes to the bucket will be readable -// immediately after writing, but configuration changes may take time to -// propagate. +// GetIamPolicy: Returns an IAM policy for the specified bucket. // // - bucket: Name of a bucket. 
-func (r *BucketsService) Update(bucket string, bucket2 *Bucket) *BucketsUpdateCall { - c := &BucketsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *BucketsService) GetIamPolicy(bucket string) *BucketsGetIamPolicyCall { + c := &BucketsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket - c.bucket2 = bucket2 - return c -} - -// IfMetagenerationMatch sets the optional parameter -// "ifMetagenerationMatch": Makes the return of the bucket metadata -// conditional on whether the bucket's current metageneration matches -// the given value. -func (c *BucketsUpdateCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *BucketsUpdateCall { - c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) - return c -} - -// IfMetagenerationNotMatch sets the optional parameter -// "ifMetagenerationNotMatch": Makes the return of the bucket metadata -// conditional on whether the bucket's current metageneration does not -// match the given value. -func (c *BucketsUpdateCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *BucketsUpdateCall { - c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) - return c -} - -// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a -// predefined set of access controls to this bucket. -// -// Possible values: -// -// "authenticatedRead" - Project team owners get OWNER access, and -// -// allAuthenticatedUsers get READER access. -// -// "private" - Project team owners get OWNER access. -// "projectPrivate" - Project team members get access according to -// -// their roles. -// -// "publicRead" - Project team owners get OWNER access, and allUsers -// -// get READER access. -// -// "publicReadWrite" - Project team owners get OWNER access, and -// -// allUsers get WRITER access. -func (c *BucketsUpdateCall) PredefinedAcl(predefinedAcl string) *BucketsUpdateCall { - c.urlParams_.Set("predefinedAcl", predefinedAcl) - return c -} - -// PredefinedDefaultObjectAcl sets the optional parameter -// "predefinedDefaultObjectAcl": Apply a predefined set of default -// object access controls to this bucket. -// -// Possible values: -// -// "authenticatedRead" - Object owner gets OWNER access, and -// -// allAuthenticatedUsers get READER access. -// -// "bucketOwnerFullControl" - Object owner gets OWNER access, and -// -// project team owners get OWNER access. -// -// "bucketOwnerRead" - Object owner gets OWNER access, and project -// -// team owners get READER access. -// -// "private" - Object owner gets OWNER access. -// "projectPrivate" - Object owner gets OWNER access, and project team -// -// members get access according to their roles. -// -// "publicRead" - Object owner gets OWNER access, and allUsers get -// -// READER access. -func (c *BucketsUpdateCall) PredefinedDefaultObjectAcl(predefinedDefaultObjectAcl string) *BucketsUpdateCall { - c.urlParams_.Set("predefinedDefaultObjectAcl", predefinedDefaultObjectAcl) return c } -// Projection sets the optional parameter "projection": Set of -// properties to return. Defaults to full. -// -// Possible values: -// -// "full" - Include all properties. -// "noAcl" - Omit owner, acl and defaultObjectAcl properties. -func (c *BucketsUpdateCall) Projection(projection string) *BucketsUpdateCall { - c.urlParams_.Set("projection", projection) +// OptionsRequestedPolicyVersion sets the optional parameter +// "optionsRequestedPolicyVersion": The IAM policy format version to be +// returned. 
If the optionsRequestedPolicyVersion is for an older +// version that doesn't support part of the requested IAM policy, the +// request fails. +func (c *BucketsGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *BucketsGetIamPolicyCall { + c.urlParams_.Set("optionsRequestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) return c } // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. -func (c *BucketsUpdateCall) UserProject(userProject string) *BucketsUpdateCall { +func (c *BucketsGetIamPolicyCall) UserProject(userProject string) *BucketsGetIamPolicyCall { c.urlParams_.Set("userProject", userProject) return c } @@ -5369,46 +5487,54 @@ func (c *BucketsUpdateCall) UserProject(userProject string) *BucketsUpdateCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *BucketsUpdateCall) Fields(s ...googleapi.Field) *BucketsUpdateCall { +func (c *BucketsGetIamPolicyCall) Fields(s ...googleapi.Field) *BucketsGetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *BucketsGetIamPolicyCall) IfNoneMatch(entityTag string) *BucketsGetIamPolicyCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *BucketsUpdateCall) Context(ctx context.Context) *BucketsUpdateCall { +func (c *BucketsGetIamPolicyCall) Context(ctx context.Context) *BucketsGetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *BucketsUpdateCall) Header() http.Header { +func (c *BucketsGetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *BucketsUpdateCall) doRequest(alt string) (*http.Response, error) { +func (c *BucketsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket2) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/iam") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("PUT", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } @@ -5419,14 +5545,14 @@ func (c *BucketsUpdateCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "storage.buckets.update" call. -// Exactly one of *Bucket or error will be non-nil. Any non-2xx status +// Do executes the "storage.buckets.getIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status // code is an error. Response headers are in either -// *Bucket.ServerResponse.Header or (if a response was returned at all) +// *Policy.ServerResponse.Header or (if a response was returned at all) // in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified // was returned. -func (c *BucketsUpdateCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { +func (c *BucketsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -5445,7 +5571,7 @@ func (c *BucketsUpdateCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &Bucket{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -5457,9 +5583,9 @@ func (c *BucketsUpdateCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { } return ret, nil // { - // "description": "Updates a bucket. Changes to the bucket will be readable immediately after writing, but configuration changes may take time to propagate.", - // "httpMethod": "PUT", - // "id": "storage.buckets.update", + // "description": "Returns an IAM policy for the specified bucket.", + // "httpMethod": "GET", + // "id": "storage.buckets.getIamPolicy", // "parameterOrder": [ // "bucket" // ], @@ -5470,83 +5596,22 @@ func (c *BucketsUpdateCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { // "required": true, // "type": "string" // }, - // "ifMetagenerationMatch": { - // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationNotMatch": { - // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.", - // "format": "int64", + // "optionsRequestedPolicyVersion": { + // "description": "The IAM policy format version to be returned. 
If the optionsRequestedPolicyVersion is for an older version that doesn't support part of the requested IAM policy, the request fails.", + // "format": "int32", // "location": "query", - // "type": "string" + // "minimum": "1", + // "type": "integer" // }, - // "predefinedAcl": { - // "description": "Apply a predefined set of access controls to this bucket.", - // "enum": [ - // "authenticatedRead", - // "private", - // "projectPrivate", - // "publicRead", - // "publicReadWrite" - // ], - // "enumDescriptions": [ - // "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.", - // "Project team owners get OWNER access.", - // "Project team members get access according to their roles.", - // "Project team owners get OWNER access, and allUsers get READER access.", - // "Project team owners get OWNER access, and allUsers get WRITER access." - // ], - // "location": "query", - // "type": "string" - // }, - // "predefinedDefaultObjectAcl": { - // "description": "Apply a predefined set of default object access controls to this bucket.", - // "enum": [ - // "authenticatedRead", - // "bucketOwnerFullControl", - // "bucketOwnerRead", - // "private", - // "projectPrivate", - // "publicRead" - // ], - // "enumDescriptions": [ - // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", - // "Object owner gets OWNER access, and project team owners get OWNER access.", - // "Object owner gets OWNER access, and project team owners get READER access.", - // "Object owner gets OWNER access.", - // "Object owner gets OWNER access, and project team members get access according to their roles.", - // "Object owner gets OWNER access, and allUsers get READER access." - // ], - // "location": "query", - // "type": "string" - // }, - // "projection": { - // "description": "Set of properties to return. Defaults to full.", - // "enum": [ - // "full", - // "noAcl" - // ], - // "enumDescriptions": [ - // "Include all properties.", - // "Omit owner, acl and defaultObjectAcl properties." - // ], - // "location": "query", - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", // "type": "string" // } // }, - // "path": "b/{bucket}", - // "request": { - // "$ref": "Bucket" - // }, + // "path": "b/{bucket}/iam", // "response": { - // "$ref": "Bucket" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -5556,133 +5621,108 @@ func (c *BucketsUpdateCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { } -// method id "storage.channels.stop": +// method id "storage.buckets.insert": -type ChannelsStopCall struct { +type BucketsInsertCall struct { s *Service - channel *Channel + bucket *Bucket urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Stop: Stop watching resources through this channel -func (r *ChannelsService) Stop(channel *Channel) *ChannelsStopCall { - c := &ChannelsStopCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.channel = channel +// Insert: Creates a new bucket. +// +// - project: A valid API project identifier. 
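The getIamPolicy builder above is a plain GET whose only knobs are query parameters; OptionsRequestedPolicyVersion pins the policy format that comes back, and per the parameter description a version too old to express the stored policy makes the request fail. Another hedged sketch under the same assumptions; requesting version 3 here is the caller's choice so that policies carrying IAM conditions can be read, not something this diff mandates.

package storageexamples

import (
	"fmt"
	"log"

	storage "google.golang.org/api/storage/v1"
)

// readBucketPolicy assumes svc was built as in the earlier sketch; bucket is
// a placeholder name.
func readBucketPolicy(svc *storage.Service, bucket string) {
	// storage.buckets.getIamPolicy: GET b/{bucket}/iam
	policy, err := svc.Buckets.GetIamPolicy(bucket).OptionsRequestedPolicyVersion(3).Do()
	if err != nil {
		log.Fatal(err)
	}
	for _, binding := range policy.Bindings {
		fmt.Println(binding.Role, binding.Members)
	}
}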
+func (r *BucketsService) Insert(projectid string, bucket *Bucket) *BucketsInsertCall { + c := &BucketsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.urlParams_.Set("project", projectid) + c.bucket = bucket return c } -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *ChannelsStopCall) Fields(s ...googleapi.Field) *ChannelsStopCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) +// EnableObjectRetention sets the optional parameter +// "enableObjectRetention": When set to true, object retention is +// enabled for this bucket. +func (c *BucketsInsertCall) EnableObjectRetention(enableObjectRetention bool) *BucketsInsertCall { + c.urlParams_.Set("enableObjectRetention", fmt.Sprint(enableObjectRetention)) return c } -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *ChannelsStopCall) Context(ctx context.Context) *ChannelsStopCall { - c.ctx_ = ctx +// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a +// predefined set of access controls to this bucket. +// +// Possible values: +// +// "authenticatedRead" - Project team owners get OWNER access, and +// +// allAuthenticatedUsers get READER access. +// +// "private" - Project team owners get OWNER access. +// "projectPrivate" - Project team members get access according to +// +// their roles. +// +// "publicRead" - Project team owners get OWNER access, and allUsers +// +// get READER access. +// +// "publicReadWrite" - Project team owners get OWNER access, and +// +// allUsers get WRITER access. +func (c *BucketsInsertCall) PredefinedAcl(predefinedAcl string) *BucketsInsertCall { + c.urlParams_.Set("predefinedAcl", predefinedAcl) return c } -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. -func (c *ChannelsStopCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *ChannelsStopCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.channel) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "channels/stop") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "storage.channels.stop" call. -func (c *ChannelsStopCall) Do(opts ...googleapi.CallOption) error { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if err != nil { - return err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return gensupport.WrapError(err) - } - return nil - // { - // "description": "Stop watching resources through this channel", - // "httpMethod": "POST", - // "id": "storage.channels.stop", - // "path": "channels/stop", - // "request": { - // "$ref": "Channel", - // "parameterName": "resource" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_only", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - -} - -// method id "storage.defaultObjectAccessControls.delete": - -type DefaultObjectAccessControlsDeleteCall struct { - s *Service - bucket string - entity string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +// PredefinedDefaultObjectAcl sets the optional parameter +// "predefinedDefaultObjectAcl": Apply a predefined set of default +// object access controls to this bucket. +// +// Possible values: +// +// "authenticatedRead" - Object owner gets OWNER access, and +// +// allAuthenticatedUsers get READER access. +// +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// +// project team owners get OWNER access. +// +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// +// team owners get READER access. +// +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// +// members get access according to their roles. +// +// "publicRead" - Object owner gets OWNER access, and allUsers get +// +// READER access. +func (c *BucketsInsertCall) PredefinedDefaultObjectAcl(predefinedDefaultObjectAcl string) *BucketsInsertCall { + c.urlParams_.Set("predefinedDefaultObjectAcl", predefinedDefaultObjectAcl) + return c } -// Delete: Permanently deletes the default object ACL entry for the -// specified entity on the specified bucket. +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to noAcl, unless the bucket resource +// specifies acl or defaultObjectAcl properties, when it defaults to +// full. // -// - bucket: Name of a bucket. -// - entity: The entity holding the permission. Can be user-userId, -// user-emailAddress, group-groupId, group-emailAddress, allUsers, or -// allAuthenticatedUsers. -func (r *DefaultObjectAccessControlsService) Delete(bucket string, entity string) *DefaultObjectAccessControlsDeleteCall { - c := &DefaultObjectAccessControlsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.entity = entity +// Possible values: +// +// "full" - Include all properties. +// "noAcl" - Omit owner, acl and defaultObjectAcl properties. +func (c *BucketsInsertCall) Projection(projection string) *BucketsInsertCall { + c.urlParams_.Set("projection", projection) return c } // UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *DefaultObjectAccessControlsDeleteCall) UserProject(userProject string) *DefaultObjectAccessControlsDeleteCall { +// be billed for this request. 
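The insert builder's optional setters introduced in this regeneration (EnableObjectRetention, PredefinedAcl, PredefinedDefaultObjectAcl, Projection) chain the same way before Do. A final hedged sketch under the same assumptions; the project, bucket name, and predefined ACL value are placeholders.

package storageexamples

import (
	"log"

	storage "google.golang.org/api/storage/v1"
)

// insertWithOptions assumes svc was built as in the earlier sketch; project
// is a placeholder project identifier.
func insertWithOptions(svc *storage.Service, project string) {
	// storage.buckets.insert: POST b?project=... with optional query
	// parameters set through the builder.
	_, err := svc.Buckets.
		Insert(project, &storage.Bucket{Name: "example-retention-bucket"}).
		EnableObjectRetention(true).
		PredefinedAcl("projectPrivate").
		Do()
	if err != nil {
		log.Fatal(err)
	}
}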
+func (c *BucketsInsertCall) UserProject(userProject string) *BucketsInsertCall { c.urlParams_.Set("userProject", userProject) return c } @@ -5690,7 +5730,7 @@ func (c *DefaultObjectAccessControlsDeleteCall) UserProject(userProject string) // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DefaultObjectAccessControlsDeleteCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsDeleteCall { +func (c *BucketsInsertCall) Fields(s ...googleapi.Field) *BucketsInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -5698,21 +5738,21 @@ func (c *DefaultObjectAccessControlsDeleteCall) Fields(s ...googleapi.Field) *De // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DefaultObjectAccessControlsDeleteCall) Context(ctx context.Context) *DefaultObjectAccessControlsDeleteCall { +func (c *BucketsInsertCall) Context(ctx context.Context) *BucketsInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *DefaultObjectAccessControlsDeleteCall) Header() http.Header { +func (c *BucketsInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DefaultObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *BucketsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -5720,99 +5760,212 @@ func (c *DefaultObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Res } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}") + urls := googleapi.ResolveRelative(c.s.BasePath, "b") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - "entity": c.entity, - }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "storage.defaultObjectAccessControls.delete" call. -func (c *DefaultObjectAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) error { +// Do executes the "storage.buckets.insert" call. +// Exactly one of *Bucket or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Bucket.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *BucketsInsertCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } if err != nil { - return err + return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return gensupport.WrapError(err) + return nil, gensupport.WrapError(err) } - return nil - // { - // "description": "Permanently deletes the default object ACL entry for the specified entity on the specified bucket.", - // "httpMethod": "DELETE", - // "id": "storage.defaultObjectAccessControls.delete", - // "parameterOrder": [ - // "bucket", - // "entity" + ret := &Bucket{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a new bucket.", + // "httpMethod": "POST", + // "id": "storage.buckets.insert", + // "parameterOrder": [ + // "project" // ], // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, + // "enableObjectRetention": { + // "default": "false", + // "description": "When set to true, object retention is enabled for this bucket.", + // "location": "query", + // "type": "boolean" + // }, + // "predefinedAcl": { + // "description": "Apply a predefined set of access controls to this bucket.", + // "enum": [ + // "authenticatedRead", + // "private", + // "projectPrivate", + // "publicRead", + // "publicReadWrite" + // ], + // "enumDescriptions": [ + // "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.", + // "Project team owners get OWNER access.", + // "Project team members get access according to their roles.", + // "Project team owners get OWNER access, and allUsers get READER access.", + // "Project team owners get OWNER access, and allUsers get WRITER access." + // ], + // "location": "query", // "type": "string" // }, - // "entity": { - // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - // "location": "path", + // "predefinedDefaultObjectAcl": { + // "description": "Apply a predefined set of default object access controls to this bucket.", + // "enum": [ + // "authenticatedRead", + // "bucketOwnerFullControl", + // "bucketOwnerRead", + // "private", + // "projectPrivate", + // "publicRead" + // ], + // "enumDescriptions": [ + // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + // "Object owner gets OWNER access, and project team owners get OWNER access.", + // "Object owner gets OWNER access, and project team owners get READER access.", + // "Object owner gets OWNER access.", + // "Object owner gets OWNER access, and project team members get access according to their roles.", + // "Object owner gets OWNER access, and allUsers get READER access." + // ], + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "A valid API project identifier.", + // "location": "query", // "required": true, // "type": "string" // }, + // "projection": { + // "description": "Set of properties to return. 
Defaults to noAcl, unless the bucket resource specifies acl or defaultObjectAcl properties, when it defaults to full.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit owner, acl and defaultObjectAcl properties." + // ], + // "location": "query", + // "type": "string" + // }, // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "description": "The project to be billed for this request.", // "location": "query", // "type": "string" // } // }, - // "path": "b/{bucket}/defaultObjectAcl/{entity}", + // "path": "b", + // "request": { + // "$ref": "Bucket" + // }, + // "response": { + // "$ref": "Bucket" + // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" // ] // } } -// method id "storage.defaultObjectAccessControls.get": +// method id "storage.buckets.list": -type DefaultObjectAccessControlsGetCall struct { +type BucketsListCall struct { s *Service - bucket string - entity string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// Get: Returns the default object ACL entry for the specified entity on -// the specified bucket. +// List: Retrieves a list of buckets for a given project. // -// - bucket: Name of a bucket. -// - entity: The entity holding the permission. Can be user-userId, -// user-emailAddress, group-groupId, group-emailAddress, allUsers, or -// allAuthenticatedUsers. -func (r *DefaultObjectAccessControlsService) Get(bucket string, entity string) *DefaultObjectAccessControlsGetCall { - c := &DefaultObjectAccessControlsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.entity = entity +// - project: A valid API project identifier. +func (r *BucketsService) List(projectid string) *BucketsListCall { + c := &BucketsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.urlParams_.Set("project", projectid) + return c +} + +// MaxResults sets the optional parameter "maxResults": Maximum number +// of buckets to return in a single response. The service will use this +// parameter or 1,000 items, whichever is smaller. +func (c *BucketsListCall) MaxResults(maxResults int64) *BucketsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// PageToken sets the optional parameter "pageToken": A +// previously-returned page token representing part of the larger set of +// results to view. +func (c *BucketsListCall) PageToken(pageToken string) *BucketsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Prefix sets the optional parameter "prefix": Filter results to +// buckets whose names begin with this prefix. +func (c *BucketsListCall) Prefix(prefix string) *BucketsListCall { + c.urlParams_.Set("prefix", prefix) + return c +} + +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to noAcl. +// +// Possible values: +// +// "full" - Include all properties. +// "noAcl" - Omit owner, acl and defaultObjectAcl properties. +func (c *BucketsListCall) Projection(projection string) *BucketsListCall { + c.urlParams_.Set("projection", projection) return c } // UserProject sets the optional parameter "userProject": The project to -// be billed for this request. 
Required for Requester Pays buckets. -func (c *DefaultObjectAccessControlsGetCall) UserProject(userProject string) *DefaultObjectAccessControlsGetCall { +// be billed for this request. +func (c *BucketsListCall) UserProject(userProject string) *BucketsListCall { c.urlParams_.Set("userProject", userProject) return c } @@ -5820,7 +5973,7 @@ func (c *DefaultObjectAccessControlsGetCall) UserProject(userProject string) *De // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DefaultObjectAccessControlsGetCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsGetCall { +func (c *BucketsListCall) Fields(s ...googleapi.Field) *BucketsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -5830,7 +5983,7 @@ func (c *DefaultObjectAccessControlsGetCall) Fields(s ...googleapi.Field) *Defau // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *DefaultObjectAccessControlsGetCall) IfNoneMatch(entityTag string) *DefaultObjectAccessControlsGetCall { +func (c *BucketsListCall) IfNoneMatch(entityTag string) *BucketsListCall { c.ifNoneMatch_ = entityTag return c } @@ -5838,21 +5991,21 @@ func (c *DefaultObjectAccessControlsGetCall) IfNoneMatch(entityTag string) *Defa // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DefaultObjectAccessControlsGetCall) Context(ctx context.Context) *DefaultObjectAccessControlsGetCall { +func (c *BucketsListCall) Context(ctx context.Context) *BucketsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *DefaultObjectAccessControlsGetCall) Header() http.Header { +func (c *BucketsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DefaultObjectAccessControlsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *BucketsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -5865,28 +6018,24 @@ func (c *DefaultObjectAccessControlsGetCall) doRequest(alt string) (*http.Respon var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}") + urls := googleapi.ResolveRelative(c.s.BasePath, "b") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - "entity": c.entity, - }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "storage.defaultObjectAccessControls.get" call. -// Exactly one of *ObjectAccessControl or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ObjectAccessControl.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. 
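For orientation amid the regenerated code above: the new BucketsService.Insert surface, including the EnableObjectRetention option added in this revision, is a fluent builder. Below is a minimal usage sketch, not part of the diff; it assumes the vendored package imports as google.golang.org/api/storage/v1, that storage.NewService can resolve Application Default Credentials, and that the project and bucket names are placeholders.

package main

import (
	"context"
	"fmt"
	"log"

	storage "google.golang.org/api/storage/v1"
)

func main() {
	ctx := context.Background()
	// NewService is assumed to pick up Application Default Credentials for this sketch.
	svc, err := storage.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	// "example-project" and "example-bucket" are placeholders.
	created, err := svc.Buckets.Insert("example-project", &storage.Bucket{Name: "example-bucket"}).
		EnableObjectRetention(true). // optional parameter added in this regeneration
		PredefinedAcl("private").
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("created bucket:", created.Name)
}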
-func (c *DefaultObjectAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { +// Do executes the "storage.buckets.list" call. +// Exactly one of *Buckets or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Buckets.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *BucketsListCall) Do(opts ...googleapi.CallOption) (*Buckets, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -5905,7 +6054,7 @@ func (c *DefaultObjectAccessControlsGetCall) Do(opts ...googleapi.CallOption) (* if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &ObjectAccessControl{ + ret := &Buckets{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -5917,69 +6066,117 @@ func (c *DefaultObjectAccessControlsGetCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Returns the default object ACL entry for the specified entity on the specified bucket.", + // "description": "Retrieves a list of buckets for a given project.", // "httpMethod": "GET", - // "id": "storage.defaultObjectAccessControls.get", + // "id": "storage.buckets.list", // "parameterOrder": [ - // "bucket", - // "entity" + // "project" // ], // "parameters": { - // "bucket": { - // "description": "Name of a bucket.", - // "location": "path", - // "required": true, + // "maxResults": { + // "default": "1000", + // "description": "Maximum number of buckets to return in a single response. The service will use this parameter or 1,000 items, whichever is smaller.", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "A previously-returned page token representing part of the larger set of results to view.", + // "location": "query", // "type": "string" // }, - // "entity": { - // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - // "location": "path", + // "prefix": { + // "description": "Filter results to buckets whose names begin with this prefix.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "A valid API project identifier.", + // "location": "query", // "required": true, // "type": "string" // }, + // "projection": { + // "description": "Set of properties to return. Defaults to noAcl.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit owner, acl and defaultObjectAcl properties." + // ], + // "location": "query", + // "type": "string" + // }, // "userProject": { - // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", + // "description": "The project to be billed for this request.", // "location": "query", // "type": "string" // } // }, - // "path": "b/{bucket}/defaultObjectAcl/{entity}", + // "path": "b", // "response": { - // "$ref": "ObjectAccessControl" + // "$ref": "Buckets" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" // ] // } } -// method id "storage.defaultObjectAccessControls.insert": - -type DefaultObjectAccessControlsInsertCall struct { - s *Service - bucket string - objectaccesscontrol *ObjectAccessControl - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Insert: Creates a new default object ACL entry on the specified -// bucket. +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *BucketsListCall) Pages(ctx context.Context, f func(*Buckets) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "storage.buckets.lockRetentionPolicy": + +type BucketsLockRetentionPolicyCall struct { + s *Service + bucket string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// LockRetentionPolicy: Locks retention policy on a bucket. // -// - bucket: Name of a bucket. -func (r *DefaultObjectAccessControlsService) Insert(bucket string, objectaccesscontrol *ObjectAccessControl) *DefaultObjectAccessControlsInsertCall { - c := &DefaultObjectAccessControlsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - bucket: Name of a bucket. +// - ifMetagenerationMatch: Makes the operation conditional on whether +// bucket's current metageneration matches the given value. +func (r *BucketsService) LockRetentionPolicy(bucket string, ifMetagenerationMatch int64) *BucketsLockRetentionPolicyCall { + c := &BucketsLockRetentionPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket - c.objectaccesscontrol = objectaccesscontrol + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) return c } // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. -func (c *DefaultObjectAccessControlsInsertCall) UserProject(userProject string) *DefaultObjectAccessControlsInsertCall { +func (c *BucketsLockRetentionPolicyCall) UserProject(userProject string) *BucketsLockRetentionPolicyCall { c.urlParams_.Set("userProject", userProject) return c } @@ -5987,7 +6184,7 @@ func (c *DefaultObjectAccessControlsInsertCall) UserProject(userProject string) // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
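The Pages helper defined above hides the pageToken round-trips for storage.buckets.list. A short sketch follows, reusing the imports from the previous example and assuming the Buckets list response exposes its entries as Items.

// listBuckets prints bucket names for a project, one page at a time.
func listBuckets(ctx context.Context, svc *storage.Service, project string) error {
	call := svc.Buckets.List(project).Prefix("logs-").MaxResults(100) // placeholder filters
	return call.Pages(ctx, func(page *storage.Buckets) error {
		for _, b := range page.Items { // Items is assumed to be the repeated bucket field
			fmt.Println(b.Name)
		}
		return nil // a non-nil error here would stop the iteration early
	})
}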
-func (c *DefaultObjectAccessControlsInsertCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsInsertCall { +func (c *BucketsLockRetentionPolicyCall) Fields(s ...googleapi.Field) *BucketsLockRetentionPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -5995,21 +6192,21 @@ func (c *DefaultObjectAccessControlsInsertCall) Fields(s ...googleapi.Field) *De // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DefaultObjectAccessControlsInsertCall) Context(ctx context.Context) *DefaultObjectAccessControlsInsertCall { +func (c *BucketsLockRetentionPolicyCall) Context(ctx context.Context) *BucketsLockRetentionPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *DefaultObjectAccessControlsInsertCall) Header() http.Header { +func (c *BucketsLockRetentionPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DefaultObjectAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *BucketsLockRetentionPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -6017,14 +6214,9 @@ func (c *DefaultObjectAccessControlsInsertCall) doRequest(alt string) (*http.Res } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/lockRetentionPolicy") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -6037,14 +6229,14 @@ func (c *DefaultObjectAccessControlsInsertCall) doRequest(alt string) (*http.Res return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "storage.defaultObjectAccessControls.insert" call. -// Exactly one of *ObjectAccessControl or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ObjectAccessControl.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *DefaultObjectAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { +// Do executes the "storage.buckets.lockRetentionPolicy" call. +// Exactly one of *Bucket or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Bucket.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *BucketsLockRetentionPolicyCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -6063,7 +6255,7 @@ func (c *DefaultObjectAccessControlsInsertCall) Do(opts ...googleapi.CallOption) if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &ObjectAccessControl{ + ret := &Bucket{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -6075,11 +6267,12 @@ func (c *DefaultObjectAccessControlsInsertCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Creates a new default object ACL entry on the specified bucket.", + // "description": "Locks retention policy on a bucket.", // "httpMethod": "POST", - // "id": "storage.defaultObjectAccessControls.insert", + // "id": "storage.buckets.lockRetentionPolicy", // "parameterOrder": [ - // "bucket" + // "bucket", + // "ifMetagenerationMatch" // ], // "parameters": { // "bucket": { @@ -6088,67 +6281,145 @@ func (c *DefaultObjectAccessControlsInsertCall) Do(opts ...googleapi.CallOption) // "required": true, // "type": "string" // }, + // "ifMetagenerationMatch": { + // "description": "Makes the operation conditional on whether bucket's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "required": true, + // "type": "string" + // }, // "userProject": { // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", // "type": "string" // } // }, - // "path": "b/{bucket}/defaultObjectAcl", - // "request": { - // "$ref": "ObjectAccessControl" - // }, + // "path": "b/{bucket}/lockRetentionPolicy", // "response": { - // "$ref": "ObjectAccessControl" + // "$ref": "Bucket" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" // ] // } } -// method id "storage.defaultObjectAccessControls.list": +// method id "storage.buckets.patch": -type DefaultObjectAccessControlsListCall struct { - s *Service - bucket string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type BucketsPatchCall struct { + s *Service + bucket string + bucket2 *Bucket + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// List: Retrieves default object ACL entries on the specified bucket. +// Patch: Patches a bucket. Changes to the bucket will be readable +// immediately after writing, but configuration changes may take time to +// propagate. // // - bucket: Name of a bucket. -func (r *DefaultObjectAccessControlsService) List(bucket string) *DefaultObjectAccessControlsListCall { - c := &DefaultObjectAccessControlsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *BucketsService) Patch(bucket string, bucket2 *Bucket) *BucketsPatchCall { + c := &BucketsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket + c.bucket2 = bucket2 return c } // IfMetagenerationMatch sets the optional parameter -// "ifMetagenerationMatch": If present, only return default ACL listing -// if the bucket's current metageneration matches this value. 
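LockRetentionPolicy, added above, takes the bucket's current metageneration as a required precondition. A hedged sketch, again reusing the earlier imports; it assumes Bucket.Metageneration is the field carrying that value and that BucketsService.Get is available as in earlier revisions of this package.

// lockRetention reads the bucket's current metageneration and locks its retention policy.
func lockRetention(ctx context.Context, svc *storage.Service, bucket string) (*storage.Bucket, error) {
	b, err := svc.Buckets.Get(bucket).Context(ctx).Do()
	if err != nil {
		return nil, err
	}
	// The lock only succeeds if the metageneration still matches, guarding against races.
	return svc.Buckets.LockRetentionPolicy(bucket, b.Metageneration).Context(ctx).Do()
}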
-func (c *DefaultObjectAccessControlsListCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *DefaultObjectAccessControlsListCall { +// "ifMetagenerationMatch": Makes the return of the bucket metadata +// conditional on whether the bucket's current metageneration matches +// the given value. +func (c *BucketsPatchCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *BucketsPatchCall { c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) return c } // IfMetagenerationNotMatch sets the optional parameter -// "ifMetagenerationNotMatch": If present, only return default ACL -// listing if the bucket's current metageneration does not match the -// given value. -func (c *DefaultObjectAccessControlsListCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *DefaultObjectAccessControlsListCall { +// "ifMetagenerationNotMatch": Makes the return of the bucket metadata +// conditional on whether the bucket's current metageneration does not +// match the given value. +func (c *BucketsPatchCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *BucketsPatchCall { c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) return c } +// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a +// predefined set of access controls to this bucket. +// +// Possible values: +// +// "authenticatedRead" - Project team owners get OWNER access, and +// +// allAuthenticatedUsers get READER access. +// +// "private" - Project team owners get OWNER access. +// "projectPrivate" - Project team members get access according to +// +// their roles. +// +// "publicRead" - Project team owners get OWNER access, and allUsers +// +// get READER access. +// +// "publicReadWrite" - Project team owners get OWNER access, and +// +// allUsers get WRITER access. +func (c *BucketsPatchCall) PredefinedAcl(predefinedAcl string) *BucketsPatchCall { + c.urlParams_.Set("predefinedAcl", predefinedAcl) + return c +} + +// PredefinedDefaultObjectAcl sets the optional parameter +// "predefinedDefaultObjectAcl": Apply a predefined set of default +// object access controls to this bucket. +// +// Possible values: +// +// "authenticatedRead" - Object owner gets OWNER access, and +// +// allAuthenticatedUsers get READER access. +// +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// +// project team owners get OWNER access. +// +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// +// team owners get READER access. +// +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// +// members get access according to their roles. +// +// "publicRead" - Object owner gets OWNER access, and allUsers get +// +// READER access. +func (c *BucketsPatchCall) PredefinedDefaultObjectAcl(predefinedDefaultObjectAcl string) *BucketsPatchCall { + c.urlParams_.Set("predefinedDefaultObjectAcl", predefinedDefaultObjectAcl) + return c +} + +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to full. +// +// Possible values: +// +// "full" - Include all properties. +// "noAcl" - Omit owner, acl and defaultObjectAcl properties. +func (c *BucketsPatchCall) Projection(projection string) *BucketsPatchCall { + c.urlParams_.Set("projection", projection) + return c +} + // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. 
-func (c *DefaultObjectAccessControlsListCall) UserProject(userProject string) *DefaultObjectAccessControlsListCall { +func (c *BucketsPatchCall) UserProject(userProject string) *BucketsPatchCall { c.urlParams_.Set("userProject", userProject) return c } @@ -6156,54 +6427,46 @@ func (c *DefaultObjectAccessControlsListCall) UserProject(userProject string) *D // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DefaultObjectAccessControlsListCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsListCall { +func (c *BucketsPatchCall) Fields(s ...googleapi.Field) *BucketsPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *DefaultObjectAccessControlsListCall) IfNoneMatch(entityTag string) *DefaultObjectAccessControlsListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DefaultObjectAccessControlsListCall) Context(ctx context.Context) *DefaultObjectAccessControlsListCall { +func (c *BucketsPatchCall) Context(ctx context.Context) *BucketsPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *DefaultObjectAccessControlsListCall) Header() http.Header { +func (c *BucketsPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DefaultObjectAccessControlsListCall) doRequest(alt string) (*http.Response, error) { +func (c *BucketsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket2) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } @@ -6214,18 +6477,18 @@ func (c *DefaultObjectAccessControlsListCall) doRequest(alt string) (*http.Respo return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "storage.defaultObjectAccessControls.list" call. -// Exactly one of *ObjectAccessControls or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ObjectAccessControls.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. 
Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *DefaultObjectAccessControlsListCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControls, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { +// Do executes the "storage.buckets.patch" call. +// Exactly one of *Bucket or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Bucket.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *BucketsPatchCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { res.Body.Close() } return nil, gensupport.WrapError(&googleapi.Error{ @@ -6240,7 +6503,7 @@ func (c *DefaultObjectAccessControlsListCall) Do(opts ...googleapi.CallOption) ( if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &ObjectAccessControls{ + ret := &Bucket{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -6252,9 +6515,9 @@ func (c *DefaultObjectAccessControlsListCall) Do(opts ...googleapi.CallOption) ( } return ret, nil // { - // "description": "Retrieves default object ACL entries on the specified bucket.", - // "httpMethod": "GET", - // "id": "storage.defaultObjectAccessControls.list", + // "description": "Patches a bucket. Changes to the bucket will be readable immediately after writing, but configuration changes may take time to propagate.", + // "httpMethod": "PATCH", + // "id": "storage.buckets.patch", // "parameterOrder": [ // "bucket" // ], @@ -6266,26 +6529,82 @@ func (c *DefaultObjectAccessControlsListCall) Do(opts ...googleapi.CallOption) ( // "type": "string" // }, // "ifMetagenerationMatch": { - // "description": "If present, only return default ACL listing if the bucket's current metageneration matches this value.", + // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.", // "format": "int64", // "location": "query", // "type": "string" // }, // "ifMetagenerationNotMatch": { - // "description": "If present, only return default ACL listing if the bucket's current metageneration does not match the given value.", + // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.", // "format": "int64", // "location": "query", // "type": "string" // }, + // "predefinedAcl": { + // "description": "Apply a predefined set of access controls to this bucket.", + // "enum": [ + // "authenticatedRead", + // "private", + // "projectPrivate", + // "publicRead", + // "publicReadWrite" + // ], + // "enumDescriptions": [ + // "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.", + // "Project team owners get OWNER access.", + // "Project team members get access according to their roles.", + // "Project team owners get OWNER access, and allUsers get READER access.", + // "Project team owners get OWNER access, and allUsers get WRITER access." 
+ // ], + // "location": "query", + // "type": "string" + // }, + // "predefinedDefaultObjectAcl": { + // "description": "Apply a predefined set of default object access controls to this bucket.", + // "enum": [ + // "authenticatedRead", + // "bucketOwnerFullControl", + // "bucketOwnerRead", + // "private", + // "projectPrivate", + // "publicRead" + // ], + // "enumDescriptions": [ + // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + // "Object owner gets OWNER access, and project team owners get OWNER access.", + // "Object owner gets OWNER access, and project team owners get READER access.", + // "Object owner gets OWNER access.", + // "Object owner gets OWNER access, and project team members get access according to their roles.", + // "Object owner gets OWNER access, and allUsers get READER access." + // ], + // "location": "query", + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. Defaults to full.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit owner, acl and defaultObjectAcl properties." + // ], + // "location": "query", + // "type": "string" + // }, // "userProject": { // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", // "type": "string" // } // }, - // "path": "b/{bucket}/defaultObjectAcl", + // "path": "b/{bucket}", + // "request": { + // "$ref": "Bucket" + // }, // "response": { - // "$ref": "ObjectAccessControls" + // "$ref": "Bucket" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -6295,35 +6614,30 @@ func (c *DefaultObjectAccessControlsListCall) Do(opts ...googleapi.CallOption) ( } -// method id "storage.defaultObjectAccessControls.patch": +// method id "storage.buckets.setIamPolicy": -type DefaultObjectAccessControlsPatchCall struct { - s *Service - bucket string - entity string - objectaccesscontrol *ObjectAccessControl - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type BucketsSetIamPolicyCall struct { + s *Service + bucket string + policy *Policy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Patch: Patches a default object ACL entry on the specified bucket. +// SetIamPolicy: Updates an IAM policy for the specified bucket. // -// - bucket: Name of a bucket. -// - entity: The entity holding the permission. Can be user-userId, -// user-emailAddress, group-groupId, group-emailAddress, allUsers, or -// allAuthenticatedUsers. -func (r *DefaultObjectAccessControlsService) Patch(bucket string, entity string, objectaccesscontrol *ObjectAccessControl) *DefaultObjectAccessControlsPatchCall { - c := &DefaultObjectAccessControlsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - bucket: Name of a bucket. +func (r *BucketsService) SetIamPolicy(bucket string, policy *Policy) *BucketsSetIamPolicyCall { + c := &BucketsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket - c.entity = entity - c.objectaccesscontrol = objectaccesscontrol + c.policy = policy return c } // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. 
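storage.buckets.patch, shown above, sends only the fields set on the request body. A sketch of a conditional label update follows; Bucket.Labels is assumed to be the metadata field being changed, and the metageneration guard mirrors the previous example.

// patchLabels applies a partial update, failing rather than clobbering a concurrent change.
func patchLabels(ctx context.Context, svc *storage.Service, bucket string, metageneration int64) (*storage.Bucket, error) {
	return svc.Buckets.Patch(bucket, &storage.Bucket{
		Labels: map[string]string{"env": "dev"}, // placeholder label
	}).
		IfMetagenerationMatch(metageneration).
		Context(ctx).
		Do()
}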
-func (c *DefaultObjectAccessControlsPatchCall) UserProject(userProject string) *DefaultObjectAccessControlsPatchCall { +func (c *BucketsSetIamPolicyCall) UserProject(userProject string) *BucketsSetIamPolicyCall { c.urlParams_.Set("userProject", userProject) return c } @@ -6331,7 +6645,7 @@ func (c *DefaultObjectAccessControlsPatchCall) UserProject(userProject string) * // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DefaultObjectAccessControlsPatchCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsPatchCall { +func (c *BucketsSetIamPolicyCall) Fields(s ...googleapi.Field) *BucketsSetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -6339,21 +6653,21 @@ func (c *DefaultObjectAccessControlsPatchCall) Fields(s ...googleapi.Field) *Def // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DefaultObjectAccessControlsPatchCall) Context(ctx context.Context) *DefaultObjectAccessControlsPatchCall { +func (c *BucketsSetIamPolicyCall) Context(ctx context.Context) *BucketsSetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *DefaultObjectAccessControlsPatchCall) Header() http.Header { +func (c *BucketsSetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DefaultObjectAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *BucketsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -6361,35 +6675,34 @@ func (c *DefaultObjectAccessControlsPatchCall) doRequest(alt string) (*http.Resp } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.policy) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/iam") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("PUT", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, - "entity": c.entity, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "storage.defaultObjectAccessControls.patch" call. -// Exactly one of *ObjectAccessControl or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ObjectAccessControl.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *DefaultObjectAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { +// Do executes the "storage.buckets.setIamPolicy" call. 
+// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *BucketsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -6408,7 +6721,7 @@ func (c *DefaultObjectAccessControlsPatchCall) Do(opts ...googleapi.CallOption) if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &ObjectAccessControl{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -6420,12 +6733,11 @@ func (c *DefaultObjectAccessControlsPatchCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Patches a default object ACL entry on the specified bucket.", - // "httpMethod": "PATCH", - // "id": "storage.defaultObjectAccessControls.patch", + // "description": "Updates an IAM policy for the specified bucket.", + // "httpMethod": "PUT", + // "id": "storage.buckets.setIamPolicy", // "parameterOrder": [ - // "bucket", - // "entity" + // "bucket" // ], // "parameters": { // "bucket": { @@ -6434,24 +6746,18 @@ func (c *DefaultObjectAccessControlsPatchCall) Do(opts ...googleapi.CallOption) // "required": true, // "type": "string" // }, - // "entity": { - // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - // "location": "path", - // "required": true, - // "type": "string" - // }, // "userProject": { // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", // "type": "string" // } // }, - // "path": "b/{bucket}/defaultObjectAcl/{entity}", + // "path": "b/{bucket}/iam", // "request": { - // "$ref": "ObjectAccessControl" + // "$ref": "Policy" // }, // "response": { - // "$ref": "ObjectAccessControl" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -6461,35 +6767,32 @@ func (c *DefaultObjectAccessControlsPatchCall) Do(opts ...googleapi.CallOption) } -// method id "storage.defaultObjectAccessControls.update": +// method id "storage.buckets.testIamPermissions": -type DefaultObjectAccessControlsUpdateCall struct { - s *Service - bucket string - entity string - objectaccesscontrol *ObjectAccessControl - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type BucketsTestIamPermissionsCall struct { + s *Service + bucket string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Update: Updates a default object ACL entry on the specified bucket. +// TestIamPermissions: Tests a set of permissions on the given bucket to +// see which, if any, are held by the caller. // -// - bucket: Name of a bucket. -// - entity: The entity holding the permission. Can be user-userId, -// user-emailAddress, group-groupId, group-emailAddress, allUsers, or -// allAuthenticatedUsers. 
-func (r *DefaultObjectAccessControlsService) Update(bucket string, entity string, objectaccesscontrol *ObjectAccessControl) *DefaultObjectAccessControlsUpdateCall { - c := &DefaultObjectAccessControlsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - bucket: Name of a bucket. +// - permissions: Permissions to test. +func (r *BucketsService) TestIamPermissions(bucket string, permissions []string) *BucketsTestIamPermissionsCall { + c := &BucketsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket - c.entity = entity - c.objectaccesscontrol = objectaccesscontrol + c.urlParams_.SetMulti("permissions", append([]string{}, permissions...)) return c } // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. -func (c *DefaultObjectAccessControlsUpdateCall) UserProject(userProject string) *DefaultObjectAccessControlsUpdateCall { +func (c *BucketsTestIamPermissionsCall) UserProject(userProject string) *BucketsTestIamPermissionsCall { c.urlParams_.Set("userProject", userProject) return c } @@ -6497,65 +6800,72 @@ func (c *DefaultObjectAccessControlsUpdateCall) UserProject(userProject string) // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *DefaultObjectAccessControlsUpdateCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsUpdateCall { +func (c *BucketsTestIamPermissionsCall) Fields(s ...googleapi.Field) *BucketsTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *BucketsTestIamPermissionsCall) IfNoneMatch(entityTag string) *BucketsTestIamPermissionsCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *DefaultObjectAccessControlsUpdateCall) Context(ctx context.Context) *DefaultObjectAccessControlsUpdateCall { +func (c *BucketsTestIamPermissionsCall) Context(ctx context.Context) *BucketsTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *DefaultObjectAccessControlsUpdateCall) Header() http.Header { +func (c *BucketsTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *DefaultObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) { +func (c *BucketsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/iam/testPermissions") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PUT", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, - "entity": c.entity, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "storage.defaultObjectAccessControls.update" call. -// Exactly one of *ObjectAccessControl or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ObjectAccessControl.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "storage.buckets.testIamPermissions" call. +// Exactly one of *TestIamPermissionsResponse or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *TestIamPermissionsResponse.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *DefaultObjectAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { +func (c *BucketsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestIamPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -6574,7 +6884,7 @@ func (c *DefaultObjectAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &ObjectAccessControl{ + ret := &TestIamPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -6586,12 +6896,12 @@ func (c *DefaultObjectAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Updates a default object ACL entry on the specified bucket.", - // "httpMethod": "PUT", - // "id": "storage.defaultObjectAccessControls.update", - // "parameterOrder": [ - // "bucket", - // "entity" + // "description": "Tests a set of permissions on the given bucket to see which, if any, are held by the caller.", + // "httpMethod": "GET", + // "id": "storage.buckets.testIamPermissions", + // "parameterOrder": [ + // "bucket", + // "permissions" // ], // "parameters": { // "bucket": { @@ -6600,9 +6910,10 @@ func (c *DefaultObjectAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) // "required": true, // "type": "string" // }, - // "entity": { - // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - // "location": "path", + // "permissions": { + // "description": "Permissions to test.", + // "location": "query", + // "repeated": true, // "required": true, // "type": "string" // }, @@ -6612,46 +6923,134 @@ func (c *DefaultObjectAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) // "type": "string" // } // }, - // "path": "b/{bucket}/defaultObjectAcl/{entity}", - // "request": { - // "$ref": "ObjectAccessControl" - // }, + // "path": "b/{bucket}/iam/testPermissions", // "response": { - // "$ref": "ObjectAccessControl" + // "$ref": "TestIamPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" // ] // } } -// method id "storage.notifications.delete": +// method id "storage.buckets.update": -type NotificationsDeleteCall struct { - s *Service - bucket string - notification string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type BucketsUpdateCall struct { + s *Service + bucket string + bucket2 *Bucket + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Permanently deletes a notification subscription. +// Update: Updates a bucket. Changes to the bucket will be readable +// immediately after writing, but configuration changes may take time to +// propagate. // -// - bucket: The parent bucket of the notification. -// - notification: ID of the notification to delete. -func (r *NotificationsService) Delete(bucket string, notification string) *NotificationsDeleteCall { - c := &NotificationsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - bucket: Name of a bucket. 
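TestIamPermissions, defined above, reports which of the requested permissions the caller actually holds on the bucket. A sketch follows; the permission strings are plausible examples rather than values taken from this diff.

// heldPermissions returns the subset of the requested bucket permissions granted to the caller.
func heldPermissions(ctx context.Context, svc *storage.Service, bucket string) ([]string, error) {
	resp, err := svc.Buckets.TestIamPermissions(bucket, []string{
		"storage.buckets.get",
		"storage.buckets.update",
	}).Context(ctx).Do()
	if err != nil {
		return nil, err
	}
	return resp.Permissions, nil // Permissions lists what the caller holds
}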
+func (r *BucketsService) Update(bucket string, bucket2 *Bucket) *BucketsUpdateCall { + c := &BucketsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket - c.notification = notification + c.bucket2 = bucket2 + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": Makes the return of the bucket metadata +// conditional on whether the bucket's current metageneration matches +// the given value. +func (c *BucketsUpdateCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *BucketsUpdateCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": Makes the return of the bucket metadata +// conditional on whether the bucket's current metageneration does not +// match the given value. +func (c *BucketsUpdateCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *BucketsUpdateCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) + return c +} + +// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a +// predefined set of access controls to this bucket. +// +// Possible values: +// +// "authenticatedRead" - Project team owners get OWNER access, and +// +// allAuthenticatedUsers get READER access. +// +// "private" - Project team owners get OWNER access. +// "projectPrivate" - Project team members get access according to +// +// their roles. +// +// "publicRead" - Project team owners get OWNER access, and allUsers +// +// get READER access. +// +// "publicReadWrite" - Project team owners get OWNER access, and +// +// allUsers get WRITER access. +func (c *BucketsUpdateCall) PredefinedAcl(predefinedAcl string) *BucketsUpdateCall { + c.urlParams_.Set("predefinedAcl", predefinedAcl) + return c +} + +// PredefinedDefaultObjectAcl sets the optional parameter +// "predefinedDefaultObjectAcl": Apply a predefined set of default +// object access controls to this bucket. +// +// Possible values: +// +// "authenticatedRead" - Object owner gets OWNER access, and +// +// allAuthenticatedUsers get READER access. +// +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// +// project team owners get OWNER access. +// +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// +// team owners get READER access. +// +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// +// members get access according to their roles. +// +// "publicRead" - Object owner gets OWNER access, and allUsers get +// +// READER access. +func (c *BucketsUpdateCall) PredefinedDefaultObjectAcl(predefinedDefaultObjectAcl string) *BucketsUpdateCall { + c.urlParams_.Set("predefinedDefaultObjectAcl", predefinedDefaultObjectAcl) + return c +} + +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to full. +// +// Possible values: +// +// "full" - Include all properties. +// "noAcl" - Omit owner, acl and defaultObjectAcl properties. +func (c *BucketsUpdateCall) Projection(projection string) *BucketsUpdateCall { + c.urlParams_.Set("projection", projection) return c } // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. 
-func (c *NotificationsDeleteCall) UserProject(userProject string) *NotificationsDeleteCall { +func (c *BucketsUpdateCall) UserProject(userProject string) *BucketsUpdateCall { c.urlParams_.Set("userProject", userProject) return c } @@ -6659,7 +7058,7 @@ func (c *NotificationsDeleteCall) UserProject(userProject string) *Notifications // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NotificationsDeleteCall) Fields(s ...googleapi.Field) *NotificationsDeleteCall { +func (c *BucketsUpdateCall) Fields(s ...googleapi.Field) *BucketsUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -6667,21 +7066,21 @@ func (c *NotificationsDeleteCall) Fields(s ...googleapi.Field) *NotificationsDel // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NotificationsDeleteCall) Context(ctx context.Context) *NotificationsDeleteCall { +func (c *BucketsUpdateCall) Context(ctx context.Context) *BucketsUpdateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NotificationsDeleteCall) Header() http.Header { +func (c *BucketsUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NotificationsDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *BucketsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -6689,230 +7088,250 @@ func (c *NotificationsDeleteCall) doRequest(alt string) (*http.Response, error) } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket2) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/notificationConfigs/{notification}") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("PUT", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - "notification": c.notification, + "bucket": c.bucket, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "storage.notifications.delete" call. -func (c *NotificationsDeleteCall) Do(opts ...googleapi.CallOption) error { +// Do executes the "storage.buckets.update" call. +// Exactly one of *Bucket or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Bucket.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *BucketsUpdateCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } if err != nil { - return err + return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return gensupport.WrapError(err) + return nil, gensupport.WrapError(err) } - return nil + ret := &Bucket{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil // { - // "description": "Permanently deletes a notification subscription.", - // "httpMethod": "DELETE", - // "id": "storage.notifications.delete", + // "description": "Updates a bucket. Changes to the bucket will be readable immediately after writing, but configuration changes may take time to propagate.", + // "httpMethod": "PUT", + // "id": "storage.buckets.update", // "parameterOrder": [ - // "bucket", - // "notification" + // "bucket" // ], // "parameters": { // "bucket": { - // "description": "The parent bucket of the notification.", + // "description": "Name of a bucket.", // "location": "path", // "required": true, // "type": "string" // }, - // "notification": { - // "description": "ID of the notification to delete.", - // "location": "path", - // "required": true, + // "ifMetagenerationMatch": { + // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", // "type": "string" // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "ifMetagenerationNotMatch": { + // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.", + // "format": "int64", // "location": "query", // "type": "string" - // } - // }, - // "path": "b/{bucket}/notificationConfigs/{notification}", - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ] - // } - -} - -// method id "storage.notifications.get": - -type NotificationsGetCall struct { - s *Service - bucket string - notification string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header -} - -// Get: View a notification configuration. -// -// - bucket: The parent bucket of the notification. -// - notification: Notification ID. 
-func (r *NotificationsService) Get(bucket string, notification string) *NotificationsGetCall { - c := &NotificationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.notification = notification - return c + // }, + // "predefinedAcl": { + // "description": "Apply a predefined set of access controls to this bucket.", + // "enum": [ + // "authenticatedRead", + // "private", + // "projectPrivate", + // "publicRead", + // "publicReadWrite" + // ], + // "enumDescriptions": [ + // "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.", + // "Project team owners get OWNER access.", + // "Project team members get access according to their roles.", + // "Project team owners get OWNER access, and allUsers get READER access.", + // "Project team owners get OWNER access, and allUsers get WRITER access." + // ], + // "location": "query", + // "type": "string" + // }, + // "predefinedDefaultObjectAcl": { + // "description": "Apply a predefined set of default object access controls to this bucket.", + // "enum": [ + // "authenticatedRead", + // "bucketOwnerFullControl", + // "bucketOwnerRead", + // "private", + // "projectPrivate", + // "publicRead" + // ], + // "enumDescriptions": [ + // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + // "Object owner gets OWNER access, and project team owners get OWNER access.", + // "Object owner gets OWNER access, and project team owners get READER access.", + // "Object owner gets OWNER access.", + // "Object owner gets OWNER access, and project team members get access according to their roles.", + // "Object owner gets OWNER access, and allUsers get READER access." + // ], + // "location": "query", + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. Defaults to full.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit owner, acl and defaultObjectAcl properties." + // ], + // "location": "query", + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}", + // "request": { + // "$ref": "Bucket" + // }, + // "response": { + // "$ref": "Bucket" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + } -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *NotificationsGetCall) UserProject(userProject string) *NotificationsGetCall { - c.urlParams_.Set("userProject", userProject) +// method id "storage.channels.stop": + +type ChannelsStopCall struct { + s *Service + channel *Channel + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Stop: Stop watching resources through this channel +func (r *ChannelsService) Stop(channel *Channel) *ChannelsStopCall { + c := &ChannelsStopCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.channel = channel return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *NotificationsGetCall) Fields(s ...googleapi.Field) *NotificationsGetCall { +func (c *ChannelsStopCall) Fields(s ...googleapi.Field) *ChannelsStopCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *NotificationsGetCall) IfNoneMatch(entityTag string) *NotificationsGetCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NotificationsGetCall) Context(ctx context.Context) *NotificationsGetCall { +func (c *ChannelsStopCall) Context(ctx context.Context) *ChannelsStopCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NotificationsGetCall) Header() http.Header { +func (c *ChannelsStopCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NotificationsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *ChannelsStopCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.channel) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/notificationConfigs/{notification}") + urls := googleapi.ResolveRelative(c.s.BasePath, "channels/stop") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - "notification": c.notification, - }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "storage.notifications.get" call. -// Exactly one of *Notification or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Notification.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *NotificationsGetCall) Do(opts ...googleapi.CallOption) (*Notification, error) { +// Do executes the "storage.channels.stop" call. +func (c *ChannelsStopCall) Do(opts ...googleapi.CallOption) error { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, gensupport.WrapError(&googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - }) - } if err != nil { - return nil, err + return err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, gensupport.WrapError(err) - } - ret := &Notification{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err + return gensupport.WrapError(err) } - return ret, nil + return nil // { - // "description": "View a notification configuration.", - // "httpMethod": "GET", - // "id": "storage.notifications.get", - // "parameterOrder": [ - // "bucket", - // "notification" - // ], - // "parameters": { - // "bucket": { - // "description": "The parent bucket of the notification.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "notification": { - // "description": "Notification ID", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "b/{bucket}/notificationConfigs/{notification}", - // "response": { - // "$ref": "Notification" + // "description": "Stop watching resources through this channel", + // "httpMethod": "POST", + // "id": "storage.channels.stop", + // "path": "channels/stop", + // "request": { + // "$ref": "Channel", + // "parameterName": "resource" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -6925,30 +7344,34 @@ func (c *NotificationsGetCall) Do(opts ...googleapi.CallOption) (*Notification, } -// method id "storage.notifications.insert": +// method id "storage.defaultObjectAccessControls.delete": -type NotificationsInsertCall struct { - s *Service - bucket string - notification *Notification - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type DefaultObjectAccessControlsDeleteCall struct { + s *Service + bucket string + entity string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Creates a notification subscription for a given bucket. +// Delete: Permanently deletes the default object ACL entry for the +// specified entity on the specified bucket. // -// - bucket: The parent bucket of the notification. -func (r *NotificationsService) Insert(bucket string, notification *Notification) *NotificationsInsertCall { - c := &NotificationsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - bucket: Name of a bucket. +// - entity: The entity holding the permission. Can be user-userId, +// user-emailAddress, group-groupId, group-emailAddress, allUsers, or +// allAuthenticatedUsers. +func (r *DefaultObjectAccessControlsService) Delete(bucket string, entity string) *DefaultObjectAccessControlsDeleteCall { + c := &DefaultObjectAccessControlsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket - c.notification = notification + c.entity = entity return c } // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. 
-func (c *NotificationsInsertCall) UserProject(userProject string) *NotificationsInsertCall { +func (c *DefaultObjectAccessControlsDeleteCall) UserProject(userProject string) *DefaultObjectAccessControlsDeleteCall { c.urlParams_.Set("userProject", userProject) return c } @@ -6956,7 +7379,7 @@ func (c *NotificationsInsertCall) UserProject(userProject string) *Notifications // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NotificationsInsertCall) Fields(s ...googleapi.Field) *NotificationsInsertCall { +func (c *DefaultObjectAccessControlsDeleteCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -6964,21 +7387,21 @@ func (c *NotificationsInsertCall) Fields(s ...googleapi.Field) *NotificationsIns // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NotificationsInsertCall) Context(ctx context.Context) *NotificationsInsertCall { +func (c *DefaultObjectAccessControlsDeleteCall) Context(ctx context.Context) *DefaultObjectAccessControlsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NotificationsInsertCall) Header() http.Header { +func (c *DefaultObjectAccessControlsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NotificationsInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *DefaultObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -6986,73 +7409,51 @@ func (c *NotificationsInsertCall) doRequest(alt string) (*http.Response, error) } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.notification) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/notificationConfigs") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, + "entity": c.entity, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "storage.notifications.insert" call. -// Exactly one of *Notification or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Notification.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *NotificationsInsertCall) Do(opts ...googleapi.CallOption) (*Notification, error) { +// Do executes the "storage.defaultObjectAccessControls.delete" call. 
+func (c *DefaultObjectAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) error { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, gensupport.WrapError(&googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - }) - } if err != nil { - return nil, err + return err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, gensupport.WrapError(err) - } - ret := &Notification{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err + return gensupport.WrapError(err) } - return ret, nil + return nil // { - // "description": "Creates a notification subscription for a given bucket.", - // "httpMethod": "POST", - // "id": "storage.notifications.insert", + // "description": "Permanently deletes the default object ACL entry for the specified entity on the specified bucket.", + // "httpMethod": "DELETE", + // "id": "storage.defaultObjectAccessControls.delete", // "parameterOrder": [ - // "bucket" + // "bucket", + // "entity" // ], // "parameters": { // "bucket": { - // "description": "The parent bucket of the notification.", + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "entity": { + // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", // "location": "path", // "required": true, // "type": "string" @@ -7063,46 +7464,44 @@ func (c *NotificationsInsertCall) Do(opts ...googleapi.CallOption) (*Notificatio // "type": "string" // } // }, - // "path": "b/{bucket}/notificationConfigs", - // "request": { - // "$ref": "Notification" - // }, - // "response": { - // "$ref": "Notification" - // }, + // "path": "b/{bucket}/defaultObjectAcl/{entity}", // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_write" + // "https://www.googleapis.com/auth/devstorage.full_control" // ] // } } -// method id "storage.notifications.list": +// method id "storage.defaultObjectAccessControls.get": -type NotificationsListCall struct { +type DefaultObjectAccessControlsGetCall struct { s *Service bucket string + entity string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// List: Retrieves a list of notification subscriptions for a given -// bucket. +// Get: Returns the default object ACL entry for the specified entity on +// the specified bucket. // -// - bucket: Name of a Google Cloud Storage bucket. -func (r *NotificationsService) List(bucket string) *NotificationsListCall { - c := &NotificationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - bucket: Name of a bucket. +// - entity: The entity holding the permission. Can be user-userId, +// user-emailAddress, group-groupId, group-emailAddress, allUsers, or +// allAuthenticatedUsers. 
+func (r *DefaultObjectAccessControlsService) Get(bucket string, entity string) *DefaultObjectAccessControlsGetCall { + c := &DefaultObjectAccessControlsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket + c.entity = entity return c } // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. -func (c *NotificationsListCall) UserProject(userProject string) *NotificationsListCall { +func (c *DefaultObjectAccessControlsGetCall) UserProject(userProject string) *DefaultObjectAccessControlsGetCall { c.urlParams_.Set("userProject", userProject) return c } @@ -7110,7 +7509,7 @@ func (c *NotificationsListCall) UserProject(userProject string) *NotificationsLi // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *NotificationsListCall) Fields(s ...googleapi.Field) *NotificationsListCall { +func (c *DefaultObjectAccessControlsGetCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -7120,7 +7519,7 @@ func (c *NotificationsListCall) Fields(s ...googleapi.Field) *NotificationsListC // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *NotificationsListCall) IfNoneMatch(entityTag string) *NotificationsListCall { +func (c *DefaultObjectAccessControlsGetCall) IfNoneMatch(entityTag string) *DefaultObjectAccessControlsGetCall { c.ifNoneMatch_ = entityTag return c } @@ -7128,21 +7527,21 @@ func (c *NotificationsListCall) IfNoneMatch(entityTag string) *NotificationsList // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *NotificationsListCall) Context(ctx context.Context) *NotificationsListCall { +func (c *DefaultObjectAccessControlsGetCall) Context(ctx context.Context) *DefaultObjectAccessControlsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *NotificationsListCall) Header() http.Header { +func (c *DefaultObjectAccessControlsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *NotificationsListCall) doRequest(alt string) (*http.Response, error) { +func (c *DefaultObjectAccessControlsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -7155,7 +7554,7 @@ func (c *NotificationsListCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/notificationConfigs") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -7164,18 +7563,19 @@ func (c *NotificationsListCall) doRequest(alt string) (*http.Response, error) { req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, + "entity": c.entity, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "storage.notifications.list" call. -// Exactly one of *Notifications or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Notifications.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use +// Do executes the "storage.defaultObjectAccessControls.get" call. +// Exactly one of *ObjectAccessControl or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ObjectAccessControl.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *NotificationsListCall) Do(opts ...googleapi.CallOption) (*Notifications, error) { +func (c *DefaultObjectAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -7194,7 +7594,7 @@ func (c *NotificationsListCall) Do(opts ...googleapi.CallOption) (*Notifications if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &Notifications{ + ret := &ObjectAccessControl{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -7206,15 +7606,22 @@ func (c *NotificationsListCall) Do(opts ...googleapi.CallOption) (*Notifications } return ret, nil // { - // "description": "Retrieves a list of notification subscriptions for a given bucket.", + // "description": "Returns the default object ACL entry for the specified entity on the specified bucket.", // "httpMethod": "GET", - // "id": "storage.notifications.list", + // "id": "storage.defaultObjectAccessControls.get", // "parameterOrder": [ - // "bucket" + // "bucket", + // "entity" // ], // "parameters": { // "bucket": { - // "description": "Name of a Google Cloud Storage bucket.", + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "entity": { + // "description": "The entity holding the permission. 
Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", // "location": "path", // "required": true, // "type": "string" @@ -7225,61 +7632,43 @@ func (c *NotificationsListCall) Do(opts ...googleapi.CallOption) (*Notifications // "type": "string" // } // }, - // "path": "b/{bucket}/notificationConfigs", + // "path": "b/{bucket}/defaultObjectAcl/{entity}", // "response": { - // "$ref": "Notifications" + // "$ref": "ObjectAccessControl" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_only", - // "https://www.googleapis.com/auth/devstorage.read_write" + // "https://www.googleapis.com/auth/devstorage.full_control" // ] // } } -// method id "storage.objectAccessControls.delete": +// method id "storage.defaultObjectAccessControls.insert": -type ObjectAccessControlsDeleteCall struct { - s *Service - bucket string - object string - entity string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type DefaultObjectAccessControlsInsertCall struct { + s *Service + bucket string + objectaccesscontrol *ObjectAccessControl + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Permanently deletes the ACL entry for the specified entity on -// the specified object. +// Insert: Creates a new default object ACL entry on the specified +// bucket. // -// - bucket: Name of a bucket. -// - entity: The entity holding the permission. Can be user-userId, -// user-emailAddress, group-groupId, group-emailAddress, allUsers, or -// allAuthenticatedUsers. -// - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. -func (r *ObjectAccessControlsService) Delete(bucket string, object string, entity string) *ObjectAccessControlsDeleteCall { - c := &ObjectAccessControlsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - bucket: Name of a bucket. +func (r *DefaultObjectAccessControlsService) Insert(bucket string, objectaccesscontrol *ObjectAccessControl) *DefaultObjectAccessControlsInsertCall { + c := &DefaultObjectAccessControlsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket - c.object = object - c.entity = entity - return c -} - -// Generation sets the optional parameter "generation": If present, -// selects a specific revision of this object (as opposed to the latest -// version, the default). -func (c *ObjectAccessControlsDeleteCall) Generation(generation int64) *ObjectAccessControlsDeleteCall { - c.urlParams_.Set("generation", fmt.Sprint(generation)) + c.objectaccesscontrol = objectaccesscontrol return c } // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. -func (c *ObjectAccessControlsDeleteCall) UserProject(userProject string) *ObjectAccessControlsDeleteCall { +func (c *DefaultObjectAccessControlsInsertCall) UserProject(userProject string) *DefaultObjectAccessControlsInsertCall { c.urlParams_.Set("userProject", userProject) return c } @@ -7287,7 +7676,7 @@ func (c *ObjectAccessControlsDeleteCall) UserProject(userProject string) *Object // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *ObjectAccessControlsDeleteCall) Fields(s ...googleapi.Field) *ObjectAccessControlsDeleteCall { +func (c *DefaultObjectAccessControlsInsertCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsInsertCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -7295,21 +7684,21 @@ func (c *ObjectAccessControlsDeleteCall) Fields(s ...googleapi.Field) *ObjectAcc // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ObjectAccessControlsDeleteCall) Context(ctx context.Context) *ObjectAccessControlsDeleteCall { +func (c *DefaultObjectAccessControlsInsertCall) Context(ctx context.Context) *DefaultObjectAccessControlsInsertCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ObjectAccessControlsDeleteCall) Header() http.Header { +func (c *DefaultObjectAccessControlsInsertCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *DefaultObjectAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -7317,43 +7706,69 @@ func (c *ObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Response, } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, - "object": c.object, - "entity": c.entity, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "storage.objectAccessControls.delete" call. -func (c *ObjectAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) error { +// Do executes the "storage.defaultObjectAccessControls.insert" call. +// Exactly one of *ObjectAccessControl or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ObjectAccessControl.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *DefaultObjectAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } if err != nil { - return err + return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return gensupport.WrapError(err) + return nil, gensupport.WrapError(err) } - return nil + ret := &ObjectAccessControl{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil // { - // "description": "Permanently deletes the ACL entry for the specified entity on the specified object.", - // "httpMethod": "DELETE", - // "id": "storage.objectAccessControls.delete", + // "description": "Creates a new default object ACL entry on the specified bucket.", + // "httpMethod": "POST", + // "id": "storage.defaultObjectAccessControls.insert", // "parameterOrder": [ - // "bucket", - // "object", - // "entity" + // "bucket" // ], // "parameters": { // "bucket": { @@ -7362,80 +7777,67 @@ func (c *ObjectAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) error // "required": true, // "type": "string" // }, - // "entity": { - // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "generation": { - // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - // "location": "path", - // "required": true, - // "type": "string" - // }, // "userProject": { // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", // "type": "string" // } // }, - // "path": "b/{bucket}/o/{object}/acl/{entity}", - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" + // "path": "b/{bucket}/defaultObjectAcl", + // "request": { + // "$ref": "ObjectAccessControl" + // }, + // "response": { + // "$ref": "ObjectAccessControl" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" // ] // } } -// method id "storage.objectAccessControls.get": +// method id "storage.defaultObjectAccessControls.list": -type ObjectAccessControlsGetCall struct { +type DefaultObjectAccessControlsListCall struct { s *Service bucket string - object string - entity string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// Get: Returns the ACL entry for the specified entity on the specified -// object. +// List: Retrieves default object ACL entries on the specified bucket. // -// - bucket: Name of a bucket. -// - entity: The entity holding the permission. Can be user-userId, -// user-emailAddress, group-groupId, group-emailAddress, allUsers, or -// allAuthenticatedUsers. -// - object: Name of the object. 
For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. -func (r *ObjectAccessControlsService) Get(bucket string, object string, entity string) *ObjectAccessControlsGetCall { - c := &ObjectAccessControlsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - bucket: Name of a bucket. +func (r *DefaultObjectAccessControlsService) List(bucket string) *DefaultObjectAccessControlsListCall { + c := &DefaultObjectAccessControlsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket - c.object = object - c.entity = entity return c } -// Generation sets the optional parameter "generation": If present, -// selects a specific revision of this object (as opposed to the latest -// version, the default). -func (c *ObjectAccessControlsGetCall) Generation(generation int64) *ObjectAccessControlsGetCall { - c.urlParams_.Set("generation", fmt.Sprint(generation)) +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": If present, only return default ACL listing +// if the bucket's current metageneration matches this value. +func (c *DefaultObjectAccessControlsListCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *DefaultObjectAccessControlsListCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": If present, only return default ACL +// listing if the bucket's current metageneration does not match the +// given value. +func (c *DefaultObjectAccessControlsListCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *DefaultObjectAccessControlsListCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) return c } // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. -func (c *ObjectAccessControlsGetCall) UserProject(userProject string) *ObjectAccessControlsGetCall { +func (c *DefaultObjectAccessControlsListCall) UserProject(userProject string) *DefaultObjectAccessControlsListCall { c.urlParams_.Set("userProject", userProject) return c } @@ -7443,7 +7845,7 @@ func (c *ObjectAccessControlsGetCall) UserProject(userProject string) *ObjectAcc // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ObjectAccessControlsGetCall) Fields(s ...googleapi.Field) *ObjectAccessControlsGetCall { +func (c *DefaultObjectAccessControlsListCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -7453,7 +7855,7 @@ func (c *ObjectAccessControlsGetCall) Fields(s ...googleapi.Field) *ObjectAccess // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *ObjectAccessControlsGetCall) IfNoneMatch(entityTag string) *ObjectAccessControlsGetCall { +func (c *DefaultObjectAccessControlsListCall) IfNoneMatch(entityTag string) *DefaultObjectAccessControlsListCall { c.ifNoneMatch_ = entityTag return c } @@ -7461,21 +7863,21 @@ func (c *ObjectAccessControlsGetCall) IfNoneMatch(entityTag string) *ObjectAcces // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *ObjectAccessControlsGetCall) Context(ctx context.Context) *ObjectAccessControlsGetCall { +func (c *DefaultObjectAccessControlsListCall) Context(ctx context.Context) *DefaultObjectAccessControlsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ObjectAccessControlsGetCall) Header() http.Header { +func (c *DefaultObjectAccessControlsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ObjectAccessControlsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *DefaultObjectAccessControlsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -7488,7 +7890,7 @@ func (c *ObjectAccessControlsGetCall) doRequest(alt string) (*http.Response, err var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -7497,20 +7899,18 @@ func (c *ObjectAccessControlsGetCall) doRequest(alt string) (*http.Response, err req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, - "object": c.object, - "entity": c.entity, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "storage.objectAccessControls.get" call. -// Exactly one of *ObjectAccessControl or error will be non-nil. Any +// Do executes the "storage.defaultObjectAccessControls.list" call. +// Exactly one of *ObjectAccessControls or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either -// *ObjectAccessControl.ServerResponse.Header or (if a response was +// *ObjectAccessControls.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *ObjectAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { +func (c *DefaultObjectAccessControlsListCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControls, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -7529,7 +7929,7 @@ func (c *ObjectAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*ObjectA if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &ObjectAccessControl{ + ret := &ObjectAccessControls{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -7541,13 +7941,11 @@ func (c *ObjectAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*ObjectA } return ret, nil // { - // "description": "Returns the ACL entry for the specified entity on the specified object.", + // "description": "Retrieves default object ACL entries on the specified bucket.", // "httpMethod": "GET", - // "id": "storage.objectAccessControls.get", + // "id": "storage.defaultObjectAccessControls.list", // "parameterOrder": [ - // "bucket", - // "object", - // "entity" + // "bucket" // ], // "parameters": { // "bucket": { @@ -7556,22 +7954,16 @@ func (c *ObjectAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*ObjectA // "required": true, // "type": "string" // }, - // "entity": { - // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "generation": { - // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + // "ifMetagenerationMatch": { + // "description": "If present, only return default ACL listing if the bucket's current metageneration matches this value.", // "format": "int64", // "location": "query", // "type": "string" // }, - // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - // "location": "path", - // "required": true, + // "ifMetagenerationNotMatch": { + // "description": "If present, only return default ACL listing if the bucket's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", // "type": "string" // }, // "userProject": { @@ -7580,9 +7972,9 @@ func (c *ObjectAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*ObjectA // "type": "string" // } // }, - // "path": "b/{bucket}/o/{object}/acl/{entity}", + // "path": "b/{bucket}/defaultObjectAcl", // "response": { - // "$ref": "ObjectAccessControl" + // "$ref": "ObjectAccessControls" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -7592,42 +7984,35 @@ func (c *ObjectAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*ObjectA } -// method id "storage.objectAccessControls.insert": +// method id "storage.defaultObjectAccessControls.patch": -type ObjectAccessControlsInsertCall struct { +type DefaultObjectAccessControlsPatchCall struct { s *Service bucket string - object string + entity string objectaccesscontrol *ObjectAccessControl urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Insert: Creates a new ACL entry on the specified object. +// Patch: Patches a default object ACL entry on the specified bucket. // // - bucket: Name of a bucket. -// - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. 
-func (r *ObjectAccessControlsService) Insert(bucket string, object string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsInsertCall { - c := &ObjectAccessControlsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - entity: The entity holding the permission. Can be user-userId, +// user-emailAddress, group-groupId, group-emailAddress, allUsers, or +// allAuthenticatedUsers. +func (r *DefaultObjectAccessControlsService) Patch(bucket string, entity string, objectaccesscontrol *ObjectAccessControl) *DefaultObjectAccessControlsPatchCall { + c := &DefaultObjectAccessControlsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket - c.object = object + c.entity = entity c.objectaccesscontrol = objectaccesscontrol return c } -// Generation sets the optional parameter "generation": If present, -// selects a specific revision of this object (as opposed to the latest -// version, the default). -func (c *ObjectAccessControlsInsertCall) Generation(generation int64) *ObjectAccessControlsInsertCall { - c.urlParams_.Set("generation", fmt.Sprint(generation)) - return c -} - // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. -func (c *ObjectAccessControlsInsertCall) UserProject(userProject string) *ObjectAccessControlsInsertCall { +func (c *DefaultObjectAccessControlsPatchCall) UserProject(userProject string) *DefaultObjectAccessControlsPatchCall { c.urlParams_.Set("userProject", userProject) return c } @@ -7635,7 +8020,7 @@ func (c *ObjectAccessControlsInsertCall) UserProject(userProject string) *Object // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ObjectAccessControlsInsertCall) Fields(s ...googleapi.Field) *ObjectAccessControlsInsertCall { +func (c *DefaultObjectAccessControlsPatchCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsPatchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -7643,21 +8028,21 @@ func (c *ObjectAccessControlsInsertCall) Fields(s ...googleapi.Field) *ObjectAcc // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ObjectAccessControlsInsertCall) Context(ctx context.Context) *ObjectAccessControlsInsertCall { +func (c *DefaultObjectAccessControlsPatchCall) Context(ctx context.Context) *DefaultObjectAccessControlsPatchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *ObjectAccessControlsInsertCall) Header() http.Header { +func (c *DefaultObjectAccessControlsPatchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ObjectAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *DefaultObjectAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -7672,28 +8057,28 @@ func (c *ObjectAccessControlsInsertCall) doRequest(alt string) (*http.Response, reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, - "object": c.object, + "entity": c.entity, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "storage.objectAccessControls.insert" call. +// Do executes the "storage.defaultObjectAccessControls.patch" call. // Exactly one of *ObjectAccessControl or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either // *ObjectAccessControl.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *ObjectAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { +func (c *DefaultObjectAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -7724,12 +8109,12 @@ func (c *ObjectAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*Obje } return ret, nil // { - // "description": "Creates a new ACL entry on the specified object.", - // "httpMethod": "POST", - // "id": "storage.objectAccessControls.insert", + // "description": "Patches a default object ACL entry on the specified bucket.", + // "httpMethod": "PATCH", + // "id": "storage.defaultObjectAccessControls.patch", // "parameterOrder": [ // "bucket", - // "object" + // "entity" // ], // "parameters": { // "bucket": { @@ -7738,14 +8123,8 @@ func (c *ObjectAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*Obje // "required": true, // "type": "string" // }, - // "generation": { - // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "entity": { + // "description": "The entity holding the permission. 
Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", // "location": "path", // "required": true, // "type": "string" @@ -7756,7 +8135,7 @@ func (c *ObjectAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*Obje // "type": "string" // } // }, - // "path": "b/{bucket}/o/{object}/acl", + // "path": "b/{bucket}/defaultObjectAcl/{entity}", // "request": { // "$ref": "ObjectAccessControl" // }, @@ -7771,41 +8150,35 @@ func (c *ObjectAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*Obje } -// method id "storage.objectAccessControls.list": +// method id "storage.defaultObjectAccessControls.update": -type ObjectAccessControlsListCall struct { - s *Service - bucket string - object string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type DefaultObjectAccessControlsUpdateCall struct { + s *Service + bucket string + entity string + objectaccesscontrol *ObjectAccessControl + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// List: Retrieves ACL entries on the specified object. +// Update: Updates a default object ACL entry on the specified bucket. // // - bucket: Name of a bucket. -// - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. -func (r *ObjectAccessControlsService) List(bucket string, object string) *ObjectAccessControlsListCall { - c := &ObjectAccessControlsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - entity: The entity holding the permission. Can be user-userId, +// user-emailAddress, group-groupId, group-emailAddress, allUsers, or +// allAuthenticatedUsers. +func (r *DefaultObjectAccessControlsService) Update(bucket string, entity string, objectaccesscontrol *ObjectAccessControl) *DefaultObjectAccessControlsUpdateCall { + c := &DefaultObjectAccessControlsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket - c.object = object - return c -} - -// Generation sets the optional parameter "generation": If present, -// selects a specific revision of this object (as opposed to the latest -// version, the default). -func (c *ObjectAccessControlsListCall) Generation(generation int64) *ObjectAccessControlsListCall { - c.urlParams_.Set("generation", fmt.Sprint(generation)) + c.entity = entity + c.objectaccesscontrol = objectaccesscontrol return c } // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. -func (c *ObjectAccessControlsListCall) UserProject(userProject string) *ObjectAccessControlsListCall { +func (c *DefaultObjectAccessControlsUpdateCall) UserProject(userProject string) *DefaultObjectAccessControlsUpdateCall { c.urlParams_.Set("userProject", userProject) return c } @@ -7813,73 +8186,65 @@ func (c *ObjectAccessControlsListCall) UserProject(userProject string) *ObjectAc // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
-func (c *ObjectAccessControlsListCall) Fields(s ...googleapi.Field) *ObjectAccessControlsListCall { +func (c *DefaultObjectAccessControlsUpdateCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ObjectAccessControlsListCall) IfNoneMatch(entityTag string) *ObjectAccessControlsListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ObjectAccessControlsListCall) Context(ctx context.Context) *ObjectAccessControlsListCall { +func (c *DefaultObjectAccessControlsUpdateCall) Context(ctx context.Context) *DefaultObjectAccessControlsUpdateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ObjectAccessControlsListCall) Header() http.Header { +func (c *DefaultObjectAccessControlsUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ObjectAccessControlsListCall) doRequest(alt string) (*http.Response, error) { +func (c *DefaultObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("PUT", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, - "object": c.object, + "entity": c.entity, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "storage.objectAccessControls.list" call. -// Exactly one of *ObjectAccessControls or error will be non-nil. Any +// Do executes the "storage.defaultObjectAccessControls.update" call. +// Exactly one of *ObjectAccessControl or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either -// *ObjectAccessControls.ServerResponse.Header or (if a response was +// *ObjectAccessControl.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. 
-func (c *ObjectAccessControlsListCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControls, error) { +func (c *DefaultObjectAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -7898,7 +8263,7 @@ func (c *ObjectAccessControlsListCall) Do(opts ...googleapi.CallOption) (*Object if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &ObjectAccessControls{ + ret := &ObjectAccessControl{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -7910,12 +8275,12 @@ func (c *ObjectAccessControlsListCall) Do(opts ...googleapi.CallOption) (*Object } return ret, nil // { - // "description": "Retrieves ACL entries on the specified object.", - // "httpMethod": "GET", - // "id": "storage.objectAccessControls.list", + // "description": "Updates a default object ACL entry on the specified bucket.", + // "httpMethod": "PUT", + // "id": "storage.defaultObjectAccessControls.update", // "parameterOrder": [ // "bucket", - // "object" + // "entity" // ], // "parameters": { // "bucket": { @@ -7924,14 +8289,8 @@ func (c *ObjectAccessControlsListCall) Do(opts ...googleapi.CallOption) (*Object // "required": true, // "type": "string" // }, - // "generation": { - // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "entity": { + // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", // "location": "path", // "required": true, // "type": "string" @@ -7942,9 +8301,12 @@ func (c *ObjectAccessControlsListCall) Do(opts ...googleapi.CallOption) (*Object // "type": "string" // } // }, - // "path": "b/{bucket}/o/{object}/acl", + // "path": "b/{bucket}/defaultObjectAcl/{entity}", + // "request": { + // "$ref": "ObjectAccessControl" + // }, // "response": { - // "$ref": "ObjectAccessControls" + // "$ref": "ObjectAccessControl" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -7954,55 +8316,48 @@ func (c *ObjectAccessControlsListCall) Do(opts ...googleapi.CallOption) (*Object } -// method id "storage.objectAccessControls.patch": +// method id "storage.managedFolders.delete": -type ObjectAccessControlsPatchCall struct { - s *Service - bucket string - object string - entity string - objectaccesscontrol *ObjectAccessControl - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ManagedFoldersDeleteCall struct { + s *Service + bucket string + managedFolder string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Patch: Patches an ACL entry on the specified object. +// Delete: Permanently deletes a managed folder. // -// - bucket: Name of a bucket. -// - entity: The entity holding the permission. Can be user-userId, -// user-emailAddress, group-groupId, group-emailAddress, allUsers, or -// allAuthenticatedUsers. -// - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. 
-func (r *ObjectAccessControlsService) Patch(bucket string, object string, entity string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsPatchCall { - c := &ObjectAccessControlsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - bucket: Name of the bucket containing the managed folder. +// - managedFolder: The managed folder name/path. +func (r *ManagedFoldersService) Delete(bucket string, managedFolder string) *ManagedFoldersDeleteCall { + c := &ManagedFoldersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket - c.object = object - c.entity = entity - c.objectaccesscontrol = objectaccesscontrol + c.managedFolder = managedFolder return c } -// Generation sets the optional parameter "generation": If present, -// selects a specific revision of this object (as opposed to the latest -// version, the default). -func (c *ObjectAccessControlsPatchCall) Generation(generation int64) *ObjectAccessControlsPatchCall { - c.urlParams_.Set("generation", fmt.Sprint(generation)) +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": If set, only deletes the managed folder if +// its metageneration matches this value. +func (c *ManagedFoldersDeleteCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ManagedFoldersDeleteCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) return c } -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *ObjectAccessControlsPatchCall) UserProject(userProject string) *ObjectAccessControlsPatchCall { - c.urlParams_.Set("userProject", userProject) +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": If set, only deletes the managed folder +// if its metageneration does not match this value. +func (c *ManagedFoldersDeleteCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ManagedFoldersDeleteCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ObjectAccessControlsPatchCall) Fields(s ...googleapi.Field) *ObjectAccessControlsPatchCall { +func (c *ManagedFoldersDeleteCall) Fields(s ...googleapi.Field) *ManagedFoldersDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -8010,21 +8365,21 @@ func (c *ObjectAccessControlsPatchCall) Fields(s ...googleapi.Field) *ObjectAcce // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ObjectAccessControlsPatchCall) Context(ctx context.Context) *ObjectAccessControlsPatchCall { +func (c *ManagedFoldersDeleteCall) Context(ctx context.Context) *ManagedFoldersDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *ObjectAccessControlsPatchCall) Header() http.Header { +func (c *ManagedFoldersDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ObjectAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *ManagedFoldersDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -8032,228 +8387,189 @@ func (c *ObjectAccessControlsPatchCall) doRequest(alt string) (*http.Response, e } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/managedFolders/{managedFolder}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - "object": c.object, - "entity": c.entity, + "bucket": c.bucket, + "managedFolder": c.managedFolder, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "storage.objectAccessControls.patch" call. -// Exactly one of *ObjectAccessControl or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ObjectAccessControl.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ObjectAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { +// Do executes the "storage.managedFolders.delete" call. +func (c *ManagedFoldersDeleteCall) Do(opts ...googleapi.CallOption) error { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, gensupport.WrapError(&googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - }) - } if err != nil { - return nil, err + return err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, gensupport.WrapError(err) - } - ret := &ObjectAccessControl{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err + return gensupport.WrapError(err) } - return ret, nil + return nil // { - // "description": "Patches an ACL entry on the specified object.", - // "httpMethod": "PATCH", - // "id": "storage.objectAccessControls.patch", + // "description": "Permanently deletes a managed folder.", + // "httpMethod": "DELETE", + // "id": "storage.managedFolders.delete", // "parameterOrder": [ // "bucket", - // "object", - // "entity" + // "managedFolder" // ], // "parameters": { // "bucket": { - // "description": "Name of a bucket.", + // "description": "Name of the bucket containing the managed folder.", // "location": "path", // "required": true, // "type": "string" // }, - // "entity": { - // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - // "location": "path", - // "required": true, + // "ifMetagenerationMatch": { + // "description": "If set, only deletes the managed folder if its metageneration matches this value.", + // "format": "int64", + // "location": "query", // "type": "string" // }, - // "generation": { - // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + // "ifMetagenerationNotMatch": { + // "description": "If set, only deletes the managed folder if its metageneration does not match this value.", // "format": "int64", // "location": "query", // "type": "string" // }, - // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "managedFolder": { + // "description": "The managed folder name/path.", // "location": "path", // "required": true, // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" // } // }, - // "path": "b/{bucket}/o/{object}/acl/{entity}", - // "request": { - // "$ref": "ObjectAccessControl" - // }, - // "response": { - // "$ref": "ObjectAccessControl" - // }, + // "path": "b/{bucket}/managedFolders/{managedFolder}", // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" // ] // } } -// method id "storage.objectAccessControls.update": +// method id "storage.managedFolders.get": -type ObjectAccessControlsUpdateCall struct { - s *Service - bucket string - object string - entity string - objectaccesscontrol *ObjectAccessControl - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ManagedFoldersGetCall struct { + s *Service + bucket string + managedFolder string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Update: Updates an ACL entry on the specified object. +// Get: Returns metadata of the specified managed folder. // -// - bucket: Name of a bucket. -// - entity: The entity holding the permission. Can be user-userId, -// user-emailAddress, group-groupId, group-emailAddress, allUsers, or -// allAuthenticatedUsers. -// - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. -func (r *ObjectAccessControlsService) Update(bucket string, object string, entity string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsUpdateCall { - c := &ObjectAccessControlsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - bucket: Name of the bucket containing the managed folder. +// - managedFolder: The managed folder name/path. +func (r *ManagedFoldersService) Get(bucket string, managedFolder string) *ManagedFoldersGetCall { + c := &ManagedFoldersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket - c.object = object - c.entity = entity - c.objectaccesscontrol = objectaccesscontrol + c.managedFolder = managedFolder return c } -// Generation sets the optional parameter "generation": If present, -// selects a specific revision of this object (as opposed to the latest -// version, the default). -func (c *ObjectAccessControlsUpdateCall) Generation(generation int64) *ObjectAccessControlsUpdateCall { - c.urlParams_.Set("generation", fmt.Sprint(generation)) +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": Makes the return of the managed folder +// metadata conditional on whether the managed folder's current +// metageneration matches the given value. +func (c *ManagedFoldersGetCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ManagedFoldersGetCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) return c } -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. 
-func (c *ObjectAccessControlsUpdateCall) UserProject(userProject string) *ObjectAccessControlsUpdateCall { - c.urlParams_.Set("userProject", userProject) +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": Makes the return of the managed folder +// metadata conditional on whether the managed folder's current +// metageneration does not match the given value. +func (c *ManagedFoldersGetCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ManagedFoldersGetCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ObjectAccessControlsUpdateCall) Fields(s ...googleapi.Field) *ObjectAccessControlsUpdateCall { +func (c *ManagedFoldersGetCall) Fields(s ...googleapi.Field) *ManagedFoldersGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ManagedFoldersGetCall) IfNoneMatch(entityTag string) *ManagedFoldersGetCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ObjectAccessControlsUpdateCall) Context(ctx context.Context) *ObjectAccessControlsUpdateCall { +func (c *ManagedFoldersGetCall) Context(ctx context.Context) *ManagedFoldersGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ObjectAccessControlsUpdateCall) Header() http.Header { +func (c *ManagedFoldersGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) { +func (c *ManagedFoldersGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/managedFolders/{managedFolder}") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("PUT", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - "object": c.object, - "entity": c.entity, + "bucket": c.bucket, + "managedFolder": c.managedFolder, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "storage.objectAccessControls.update" call. -// Exactly one of *ObjectAccessControl or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *ObjectAccessControl.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use +// Do executes the "storage.managedFolders.get" call. +// Exactly one of *ManagedFolder or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *ManagedFolder.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *ObjectAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { +func (c *ManagedFoldersGetCall) Do(opts ...googleapi.CallOption) (*ManagedFolder, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -8272,7 +8588,7 @@ func (c *ObjectAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*Obje if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &ObjectAccessControl{ + ret := &ManagedFolder{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -8284,151 +8600,90 @@ func (c *ObjectAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*Obje } return ret, nil // { - // "description": "Updates an ACL entry on the specified object.", - // "httpMethod": "PUT", - // "id": "storage.objectAccessControls.update", + // "description": "Returns metadata of the specified managed folder.", + // "httpMethod": "GET", + // "id": "storage.managedFolders.get", // "parameterOrder": [ // "bucket", - // "object", - // "entity" + // "managedFolder" // ], // "parameters": { // "bucket": { - // "description": "Name of a bucket.", + // "description": "Name of the bucket containing the managed folder.", // "location": "path", // "required": true, // "type": "string" // }, - // "entity": { - // "description": "The entity holding the permission. 
Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", - // "location": "path", - // "required": true, + // "ifMetagenerationMatch": { + // "description": "Makes the return of the managed folder metadata conditional on whether the managed folder's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", // "type": "string" // }, - // "generation": { - // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + // "ifMetagenerationNotMatch": { + // "description": "Makes the return of the managed folder metadata conditional on whether the managed folder's current metageneration does not match the given value.", // "format": "int64", // "location": "query", // "type": "string" // }, - // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "managedFolder": { + // "description": "The managed folder name/path.", // "location": "path", // "required": true, // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" // } // }, - // "path": "b/{bucket}/o/{object}/acl/{entity}", - // "request": { - // "$ref": "ObjectAccessControl" - // }, + // "path": "b/{bucket}/managedFolders/{managedFolder}", // "response": { - // "$ref": "ObjectAccessControl" + // "$ref": "ManagedFolder" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" // ] // } } -// method id "storage.objects.compose": +// method id "storage.managedFolders.getIamPolicy": -type ObjectsComposeCall struct { - s *Service - destinationBucket string - destinationObject string - composerequest *ComposeRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Compose: Concatenates a list of existing objects into a new object in -// the same bucket. -// -// - destinationBucket: Name of the bucket containing the source -// objects. The destination object is stored in this bucket. -// - destinationObject: Name of the new object. For information about -// how to URL encode object names to be path safe, see Encoding URI -// Path Parts. -func (r *ObjectsService) Compose(destinationBucket string, destinationObject string, composerequest *ComposeRequest) *ObjectsComposeCall { - c := &ObjectsComposeCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.destinationBucket = destinationBucket - c.destinationObject = destinationObject - c.composerequest = composerequest - return c +type ManagedFoldersGetIamPolicyCall struct { + s *Service + bucket string + managedFolder string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// DestinationPredefinedAcl sets the optional parameter -// "destinationPredefinedAcl": Apply a predefined set of access controls -// to the destination object. -// -// Possible values: -// -// "authenticatedRead" - Object owner gets OWNER access, and -// -// allAuthenticatedUsers get READER access. 
-// -// "bucketOwnerFullControl" - Object owner gets OWNER access, and -// -// project team owners get OWNER access. -// -// "bucketOwnerRead" - Object owner gets OWNER access, and project +// GetIamPolicy: Returns an IAM policy for the specified managed folder. // -// team owners get READER access. -// -// "private" - Object owner gets OWNER access. -// "projectPrivate" - Object owner gets OWNER access, and project team -// -// members get access according to their roles. -// -// "publicRead" - Object owner gets OWNER access, and allUsers get -// -// READER access. -func (c *ObjectsComposeCall) DestinationPredefinedAcl(destinationPredefinedAcl string) *ObjectsComposeCall { - c.urlParams_.Set("destinationPredefinedAcl", destinationPredefinedAcl) - return c -} - -// IfGenerationMatch sets the optional parameter "ifGenerationMatch": -// Makes the operation conditional on whether the object's current -// generation matches the given value. Setting to 0 makes the operation -// succeed only if there are no live versions of the object. -func (c *ObjectsComposeCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsComposeCall { - c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) - return c -} - -// IfMetagenerationMatch sets the optional parameter -// "ifMetagenerationMatch": Makes the operation conditional on whether -// the object's current metageneration matches the given value. -func (c *ObjectsComposeCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsComposeCall { - c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) +// - bucket: Name of the bucket containing the managed folder. +// - managedFolder: The managed folder name/path. +func (r *ManagedFoldersService) GetIamPolicy(bucket string, managedFolder string) *ManagedFoldersGetIamPolicyCall { + c := &ManagedFoldersGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.managedFolder = managedFolder return c } -// KmsKeyName sets the optional parameter "kmsKeyName": Resource name of -// the Cloud KMS key, of the form -// projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, -// -// that will be used to encrypt the object. Overrides the object -// -// metadata's kms_key_name value, if any. -func (c *ObjectsComposeCall) KmsKeyName(kmsKeyName string) *ObjectsComposeCall { - c.urlParams_.Set("kmsKeyName", kmsKeyName) +// OptionsRequestedPolicyVersion sets the optional parameter +// "optionsRequestedPolicyVersion": The IAM policy format version to be +// returned. If the optionsRequestedPolicyVersion is for an older +// version that doesn't support part of the requested IAM policy, the +// request fails. +func (c *ManagedFoldersGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *ManagedFoldersGetIamPolicyCall { + c.urlParams_.Set("optionsRequestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) return c } // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. -func (c *ObjectsComposeCall) UserProject(userProject string) *ObjectsComposeCall { +func (c *ManagedFoldersGetIamPolicyCall) UserProject(userProject string) *ManagedFoldersGetIamPolicyCall { c.urlParams_.Set("userProject", userProject) return c } @@ -8436,65 +8691,73 @@ func (c *ObjectsComposeCall) UserProject(userProject string) *ObjectsComposeCall // Fields allows partial responses to be retrieved. 
See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ObjectsComposeCall) Fields(s ...googleapi.Field) *ObjectsComposeCall { +func (c *ManagedFoldersGetIamPolicyCall) Fields(s ...googleapi.Field) *ManagedFoldersGetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ManagedFoldersGetIamPolicyCall) IfNoneMatch(entityTag string) *ManagedFoldersGetIamPolicyCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ObjectsComposeCall) Context(ctx context.Context) *ObjectsComposeCall { +func (c *ManagedFoldersGetIamPolicyCall) Context(ctx context.Context) *ManagedFoldersGetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ObjectsComposeCall) Header() http.Header { +func (c *ManagedFoldersGetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ObjectsComposeCall) doRequest(alt string) (*http.Response, error) { +func (c *ManagedFoldersGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.composerequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{destinationBucket}/o/{destinationObject}/compose") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/managedFolders/{managedFolder}/iam") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "destinationBucket": c.destinationBucket, - "destinationObject": c.destinationObject, + "bucket": c.bucket, + "managedFolder": c.managedFolder, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "storage.objects.compose" call. -// Exactly one of *Object or error will be non-nil. Any non-2xx status +// Do executes the "storage.managedFolders.getIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status // code is an error. Response headers are in either -// *Object.ServerResponse.Header or (if a response was returned at all) +// *Policy.ServerResponse.Header or (if a response was returned at all) // in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified // was returned. 
-func (c *ObjectsComposeCall) Do(opts ...googleapi.CallOption) (*Object, error) { +func (c *ManagedFoldersGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -8513,7 +8776,7 @@ func (c *ObjectsComposeCall) Do(opts ...googleapi.CallOption) (*Object, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &Object{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -8525,63 +8788,32 @@ func (c *ObjectsComposeCall) Do(opts ...googleapi.CallOption) (*Object, error) { } return ret, nil // { - // "description": "Concatenates a list of existing objects into a new object in the same bucket.", - // "httpMethod": "POST", - // "id": "storage.objects.compose", + // "description": "Returns an IAM policy for the specified managed folder.", + // "httpMethod": "GET", + // "id": "storage.managedFolders.getIamPolicy", // "parameterOrder": [ - // "destinationBucket", - // "destinationObject" + // "bucket", + // "managedFolder" // ], // "parameters": { - // "destinationBucket": { - // "description": "Name of the bucket containing the source objects. The destination object is stored in this bucket.", + // "bucket": { + // "description": "Name of the bucket containing the managed folder.", // "location": "path", // "required": true, // "type": "string" // }, - // "destinationObject": { - // "description": "Name of the new object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "managedFolder": { + // "description": "The managed folder name/path.", // "location": "path", // "required": true, // "type": "string" // }, - // "destinationPredefinedAcl": { - // "description": "Apply a predefined set of access controls to the destination object.", - // "enum": [ - // "authenticatedRead", - // "bucketOwnerFullControl", - // "bucketOwnerRead", - // "private", - // "projectPrivate", - // "publicRead" - // ], - // "enumDescriptions": [ - // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", - // "Object owner gets OWNER access, and project team owners get OWNER access.", - // "Object owner gets OWNER access, and project team owners get READER access.", - // "Object owner gets OWNER access.", - // "Object owner gets OWNER access, and project team members get access according to their roles.", - // "Object owner gets OWNER access, and allUsers get READER access." - // ], - // "location": "query", - // "type": "string" - // }, - // "ifGenerationMatch": { - // "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationMatch": { - // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "kmsKeyName": { - // "description": "Resource name of the Cloud KMS key, of the form projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be used to encrypt the object. 
Overrides the object metadata's kms_key_name value, if any.", + // "optionsRequestedPolicyVersion": { + // "description": "The IAM policy format version to be returned. If the optionsRequestedPolicyVersion is for an older version that doesn't support part of the requested IAM policy, the request fails.", + // "format": "int32", // "location": "query", - // "type": "string" + // "minimum": "1", + // "type": "integer" // }, // "userProject": { // "description": "The project to be billed for this request. Required for Requester Pays buckets.", @@ -8589,270 +8821,274 @@ func (c *ObjectsComposeCall) Do(opts ...googleapi.CallOption) (*Object, error) { // "type": "string" // } // }, - // "path": "b/{destinationBucket}/o/{destinationObject}/compose", - // "request": { - // "$ref": "ComposeRequest" - // }, + // "path": "b/{bucket}/managedFolders/{managedFolder}/iam", // "response": { - // "$ref": "Object" + // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", // "https://www.googleapis.com/auth/devstorage.read_write" // ] // } } -// method id "storage.objects.copy": +// method id "storage.managedFolders.insert": -type ObjectsCopyCall struct { - s *Service - sourceBucket string - sourceObject string - destinationBucket string - destinationObject string - object *Object - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type ManagedFoldersInsertCall struct { + s *Service + bucket string + managedfolder *ManagedFolder + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Copy: Copies a source object to a destination object. Optionally -// overrides metadata. +// Insert: Creates a new managed folder. // -// - destinationBucket: Name of the bucket in which to store the new -// object. Overrides the provided object metadata's bucket value, if -// any.For information about how to URL encode object names to be path -// safe, see Encoding URI Path Parts. -// - destinationObject: Name of the new object. Required when the object -// metadata is not otherwise provided. Overrides the object metadata's -// name value, if any. -// - sourceBucket: Name of the bucket in which to find the source -// object. -// - sourceObject: Name of the source object. For information about how -// to URL encode object names to be path safe, see Encoding URI Path -// Parts. -func (r *ObjectsService) Copy(sourceBucket string, sourceObject string, destinationBucket string, destinationObject string, object *Object) *ObjectsCopyCall { - c := &ObjectsCopyCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.sourceBucket = sourceBucket - c.sourceObject = sourceObject - c.destinationBucket = destinationBucket - c.destinationObject = destinationObject - c.object = object +// - bucket: Name of the bucket containing the managed folder. +func (r *ManagedFoldersService) Insert(bucket string, managedfolder *ManagedFolder) *ManagedFoldersInsertCall { + c := &ManagedFoldersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.managedfolder = managedfolder return c } -// DestinationKmsKeyName sets the optional parameter -// "destinationKmsKeyName": Resource name of the Cloud KMS key, of the -// form -// projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, -// -// that will be used to encrypt the object. 
Overrides the object -// -// metadata's kms_key_name value, if any. -func (c *ObjectsCopyCall) DestinationKmsKeyName(destinationKmsKeyName string) *ObjectsCopyCall { - c.urlParams_.Set("destinationKmsKeyName", destinationKmsKeyName) +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ManagedFoldersInsertCall) Fields(s ...googleapi.Field) *ManagedFoldersInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// DestinationPredefinedAcl sets the optional parameter -// "destinationPredefinedAcl": Apply a predefined set of access controls -// to the destination object. -// -// Possible values: -// -// "authenticatedRead" - Object owner gets OWNER access, and -// -// allAuthenticatedUsers get READER access. -// -// "bucketOwnerFullControl" - Object owner gets OWNER access, and -// -// project team owners get OWNER access. -// -// "bucketOwnerRead" - Object owner gets OWNER access, and project -// -// team owners get READER access. -// -// "private" - Object owner gets OWNER access. -// "projectPrivate" - Object owner gets OWNER access, and project team -// -// members get access according to their roles. -// -// "publicRead" - Object owner gets OWNER access, and allUsers get -// -// READER access. -func (c *ObjectsCopyCall) DestinationPredefinedAcl(destinationPredefinedAcl string) *ObjectsCopyCall { - c.urlParams_.Set("destinationPredefinedAcl", destinationPredefinedAcl) +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ManagedFoldersInsertCall) Context(ctx context.Context) *ManagedFoldersInsertCall { + c.ctx_ = ctx return c } -// IfGenerationMatch sets the optional parameter "ifGenerationMatch": -// Makes the operation conditional on whether the destination object's -// current generation matches the given value. Setting to 0 makes the -// operation succeed only if there are no live versions of the object. -func (c *ObjectsCopyCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsCopyCall { - c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) - return c -} - -// IfGenerationNotMatch sets the optional parameter -// "ifGenerationNotMatch": Makes the operation conditional on whether -// the destination object's current generation does not match the given -// value. If no live object exists, the precondition fails. Setting to 0 -// makes the operation succeed only if there is a live version of the -// object. -func (c *ObjectsCopyCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsCopyCall { - c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) - return c +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ManagedFoldersInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ } -// IfMetagenerationMatch sets the optional parameter -// "ifMetagenerationMatch": Makes the operation conditional on whether -// the destination object's current metageneration matches the given -// value. 
-func (c *ObjectsCopyCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsCopyCall { - c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) - return c +func (c *ManagedFoldersInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.managedfolder) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/managedFolders") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// IfMetagenerationNotMatch sets the optional parameter -// "ifMetagenerationNotMatch": Makes the operation conditional on -// whether the destination object's current metageneration does not -// match the given value. -func (c *ObjectsCopyCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsCopyCall { - c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) - return c -} +// Do executes the "storage.managedFolders.insert" call. +// Exactly one of *ManagedFolder or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *ManagedFolder.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ManagedFoldersInsertCall) Do(opts ...googleapi.CallOption) (*ManagedFolder, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &ManagedFolder{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a new managed folder.", + // "httpMethod": "POST", + // "id": "storage.managedFolders.insert", + // "parameterOrder": [ + // "bucket" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of the bucket containing the managed folder.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "b/{bucket}/managedFolders", + // "request": { + // "$ref": "ManagedFolder" + // }, + // "response": { + // "$ref": "ManagedFolder" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } -// IfSourceGenerationMatch sets the optional parameter -// "ifSourceGenerationMatch": Makes the operation conditional on whether -// the source object's current generation matches the given value. -func (c *ObjectsCopyCall) IfSourceGenerationMatch(ifSourceGenerationMatch int64) *ObjectsCopyCall { - c.urlParams_.Set("ifSourceGenerationMatch", fmt.Sprint(ifSourceGenerationMatch)) - return c } -// IfSourceGenerationNotMatch sets the optional parameter -// "ifSourceGenerationNotMatch": Makes the operation conditional on -// whether the source object's current generation does not match the -// given value. -func (c *ObjectsCopyCall) IfSourceGenerationNotMatch(ifSourceGenerationNotMatch int64) *ObjectsCopyCall { - c.urlParams_.Set("ifSourceGenerationNotMatch", fmt.Sprint(ifSourceGenerationNotMatch)) - return c -} +// method id "storage.managedFolders.list": -// IfSourceMetagenerationMatch sets the optional parameter -// "ifSourceMetagenerationMatch": Makes the operation conditional on -// whether the source object's current metageneration matches the given -// value. -func (c *ObjectsCopyCall) IfSourceMetagenerationMatch(ifSourceMetagenerationMatch int64) *ObjectsCopyCall { - c.urlParams_.Set("ifSourceMetagenerationMatch", fmt.Sprint(ifSourceMetagenerationMatch)) - return c +type ManagedFoldersListCall struct { + s *Service + bucket string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// IfSourceMetagenerationNotMatch sets the optional parameter -// "ifSourceMetagenerationNotMatch": Makes the operation conditional on -// whether the source object's current metageneration does not match the -// given value. -func (c *ObjectsCopyCall) IfSourceMetagenerationNotMatch(ifSourceMetagenerationNotMatch int64) *ObjectsCopyCall { - c.urlParams_.Set("ifSourceMetagenerationNotMatch", fmt.Sprint(ifSourceMetagenerationNotMatch)) +// List: Lists managed folders in the given bucket. +// +// - bucket: Name of the bucket containing the managed folder. 
+func (r *ManagedFoldersService) List(bucket string) *ManagedFoldersListCall { + c := &ManagedFoldersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket return c } -// Projection sets the optional parameter "projection": Set of -// properties to return. Defaults to noAcl, unless the object resource -// specifies the acl property, when it defaults to full. -// -// Possible values: -// -// "full" - Include all properties. -// "noAcl" - Omit the owner, acl property. -func (c *ObjectsCopyCall) Projection(projection string) *ObjectsCopyCall { - c.urlParams_.Set("projection", projection) +// PageSize sets the optional parameter "pageSize": Maximum number of +// items return in a single page of responses. +func (c *ManagedFoldersListCall) PageSize(pageSize int64) *ManagedFoldersListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) return c } -// SourceGeneration sets the optional parameter "sourceGeneration": If -// present, selects a specific revision of the source object (as opposed -// to the latest version, the default). -func (c *ObjectsCopyCall) SourceGeneration(sourceGeneration int64) *ObjectsCopyCall { - c.urlParams_.Set("sourceGeneration", fmt.Sprint(sourceGeneration)) +// PageToken sets the optional parameter "pageToken": A +// previously-returned page token representing part of the larger set of +// results to view. +func (c *ManagedFoldersListCall) PageToken(pageToken string) *ManagedFoldersListCall { + c.urlParams_.Set("pageToken", pageToken) return c } -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *ObjectsCopyCall) UserProject(userProject string) *ObjectsCopyCall { - c.urlParams_.Set("userProject", userProject) +// Prefix sets the optional parameter "prefix": The managed folder +// name/path prefix to filter the output list of results. +func (c *ManagedFoldersListCall) Prefix(prefix string) *ManagedFoldersListCall { + c.urlParams_.Set("prefix", prefix) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ObjectsCopyCall) Fields(s ...googleapi.Field) *ObjectsCopyCall { +func (c *ManagedFoldersListCall) Fields(s ...googleapi.Field) *ManagedFoldersListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ManagedFoldersListCall) IfNoneMatch(entityTag string) *ManagedFoldersListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ObjectsCopyCall) Context(ctx context.Context) *ObjectsCopyCall { +func (c *ManagedFoldersListCall) Context(ctx context.Context) *ManagedFoldersListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *ObjectsCopyCall) Header() http.Header { +func (c *ManagedFoldersListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ObjectsCopyCall) doRequest(alt string) (*http.Response, error) { +func (c *ManagedFoldersListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.object) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{sourceBucket}/o/{sourceObject}/copyTo/b/{destinationBucket}/o/{destinationObject}") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/managedFolders") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "sourceBucket": c.sourceBucket, - "sourceObject": c.sourceObject, - "destinationBucket": c.destinationBucket, - "destinationObject": c.destinationObject, + "bucket": c.bucket, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "storage.objects.copy" call. -// Exactly one of *Object or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Object.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ObjectsCopyCall) Do(opts ...googleapi.CallOption) (*Object, error) { +// Do executes the "storage.managedFolders.list" call. +// Exactly one of *ManagedFolders or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *ManagedFolders.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ManagedFoldersListCall) Do(opts ...googleapi.CallOption) (*ManagedFolders, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -8871,7 +9107,7 @@ func (c *ObjectsCopyCall) Do(opts ...googleapi.CallOption) (*Object, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &Object{ + ret := &ManagedFolders{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -8883,227 +9119,100 @@ func (c *ObjectsCopyCall) Do(opts ...googleapi.CallOption) (*Object, error) { } return ret, nil // { - // "description": "Copies a source object to a destination object. 
Optionally overrides metadata.", - // "httpMethod": "POST", - // "id": "storage.objects.copy", + // "description": "Lists managed folders in the given bucket.", + // "httpMethod": "GET", + // "id": "storage.managedFolders.list", // "parameterOrder": [ - // "sourceBucket", - // "sourceObject", - // "destinationBucket", - // "destinationObject" + // "bucket" // ], // "parameters": { - // "destinationBucket": { - // "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "bucket": { + // "description": "Name of the bucket containing the managed folder.", // "location": "path", // "required": true, // "type": "string" // }, - // "destinationKmsKeyName": { - // "description": "Resource name of the Cloud KMS key, of the form projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be used to encrypt the object. Overrides the object metadata's kms_key_name value, if any.", + // "pageSize": { + // "description": "Maximum number of items return in a single page of responses.", + // "format": "int32", // "location": "query", - // "type": "string" + // "minimum": "0", + // "type": "integer" // }, - // "destinationObject": { - // "description": "Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any.", - // "location": "path", - // "required": true, + // "pageToken": { + // "description": "A previously-returned page token representing part of the larger set of results to view.", + // "location": "query", // "type": "string" // }, - // "destinationPredefinedAcl": { - // "description": "Apply a predefined set of access controls to the destination object.", - // "enum": [ - // "authenticatedRead", - // "bucketOwnerFullControl", - // "bucketOwnerRead", - // "private", - // "projectPrivate", - // "publicRead" - // ], - // "enumDescriptions": [ - // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", - // "Object owner gets OWNER access, and project team owners get OWNER access.", - // "Object owner gets OWNER access, and project team owners get READER access.", - // "Object owner gets OWNER access.", - // "Object owner gets OWNER access, and project team members get access according to their roles.", - // "Object owner gets OWNER access, and allUsers get READER access." - // ], - // "location": "query", - // "type": "string" - // }, - // "ifGenerationMatch": { - // "description": "Makes the operation conditional on whether the destination object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifGenerationNotMatch": { - // "description": "Makes the operation conditional on whether the destination object's current generation does not match the given value. If no live object exists, the precondition fails. 
Setting to 0 makes the operation succeed only if there is a live version of the object.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationMatch": { - // "description": "Makes the operation conditional on whether the destination object's current metageneration matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationNotMatch": { - // "description": "Makes the operation conditional on whether the destination object's current metageneration does not match the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifSourceGenerationMatch": { - // "description": "Makes the operation conditional on whether the source object's current generation matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifSourceGenerationNotMatch": { - // "description": "Makes the operation conditional on whether the source object's current generation does not match the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifSourceMetagenerationMatch": { - // "description": "Makes the operation conditional on whether the source object's current metageneration matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifSourceMetagenerationNotMatch": { - // "description": "Makes the operation conditional on whether the source object's current metageneration does not match the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "projection": { - // "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.", - // "enum": [ - // "full", - // "noAcl" - // ], - // "enumDescriptions": [ - // "Include all properties.", - // "Omit the owner, acl property." - // ], - // "location": "query", - // "type": "string" - // }, - // "sourceBucket": { - // "description": "Name of the bucket in which to find the source object.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "sourceGeneration": { - // "description": "If present, selects a specific revision of the source object (as opposed to the latest version, the default).", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "sourceObject": { - // "description": "Name of the source object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", + // "prefix": { + // "description": "The managed folder name/path prefix to filter the output list of results.", // "location": "query", // "type": "string" // } // }, - // "path": "b/{sourceBucket}/o/{sourceObject}/copyTo/b/{destinationBucket}/o/{destinationObject}", - // "request": { - // "$ref": "Object" - // }, + // "path": "b/{bucket}/managedFolders", // "response": { - // "$ref": "Object" + // "$ref": "ManagedFolders" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", // "https://www.googleapis.com/auth/devstorage.read_write" // ] // } } -// method id "storage.objects.delete": - -type ObjectsDeleteCall struct { - s *Service - bucket string - object string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Delete: Deletes an object and its metadata. Deletions are permanent -// if versioning is not enabled for the bucket, or if the generation -// parameter is used. -// -// - bucket: Name of the bucket in which the object resides. -// - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. -func (r *ObjectsService) Delete(bucket string, object string) *ObjectsDeleteCall { - c := &ObjectsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.object = object - return c -} - -// Generation sets the optional parameter "generation": If present, -// permanently deletes a specific revision of this object (as opposed to -// the latest version, the default). -func (c *ObjectsDeleteCall) Generation(generation int64) *ObjectsDeleteCall { - c.urlParams_.Set("generation", fmt.Sprint(generation)) - return c -} - -// IfGenerationMatch sets the optional parameter "ifGenerationMatch": -// Makes the operation conditional on whether the object's current -// generation matches the given value. Setting to 0 makes the operation -// succeed only if there are no live versions of the object. -func (c *ObjectsDeleteCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsDeleteCall { - c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) - return c +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ManagedFoldersListCall) Pages(ctx context.Context, f func(*ManagedFolders) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } } -// IfGenerationNotMatch sets the optional parameter -// "ifGenerationNotMatch": Makes the operation conditional on whether -// the object's current generation does not match the given value. If no -// live object exists, the precondition fails. Setting to 0 makes the -// operation succeed only if there is a live version of the object. 
-func (c *ObjectsDeleteCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsDeleteCall { - c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) - return c -} +// method id "storage.managedFolders.setIamPolicy": -// IfMetagenerationMatch sets the optional parameter -// "ifMetagenerationMatch": Makes the operation conditional on whether -// the object's current metageneration matches the given value. -func (c *ObjectsDeleteCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsDeleteCall { - c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) - return c +type ManagedFoldersSetIamPolicyCall struct { + s *Service + bucket string + managedFolder string + policy *Policy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// IfMetagenerationNotMatch sets the optional parameter -// "ifMetagenerationNotMatch": Makes the operation conditional on -// whether the object's current metageneration does not match the given -// value. -func (c *ObjectsDeleteCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsDeleteCall { - c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) +// SetIamPolicy: Updates an IAM policy for the specified managed folder. +// +// - bucket: Name of the bucket containing the managed folder. +// - managedFolder: The managed folder name/path. +func (r *ManagedFoldersService) SetIamPolicy(bucket string, managedFolder string, policy *Policy) *ManagedFoldersSetIamPolicyCall { + c := &ManagedFoldersSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.managedFolder = managedFolder + c.policy = policy return c } // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. -func (c *ObjectsDeleteCall) UserProject(userProject string) *ObjectsDeleteCall { +func (c *ManagedFoldersSetIamPolicyCall) UserProject(userProject string) *ManagedFoldersSetIamPolicyCall { c.urlParams_.Set("userProject", userProject) return c } @@ -9111,7 +9220,7 @@ func (c *ObjectsDeleteCall) UserProject(userProject string) *ObjectsDeleteCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ObjectsDeleteCall) Fields(s ...googleapi.Field) *ObjectsDeleteCall { +func (c *ManagedFoldersSetIamPolicyCall) Fields(s ...googleapi.Field) *ManagedFoldersSetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -9119,21 +9228,21 @@ func (c *ObjectsDeleteCall) Fields(s ...googleapi.Field) *ObjectsDeleteCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ObjectsDeleteCall) Context(ctx context.Context) *ObjectsDeleteCall { +func (c *ManagedFoldersSetIamPolicyCall) Context(ctx context.Context) *ManagedFoldersSetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *ObjectsDeleteCall) Header() http.Header { +func (c *ManagedFoldersSetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ObjectsDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *ManagedFoldersSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -9141,81 +9250,81 @@ func (c *ObjectsDeleteCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.policy) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/managedFolders/{managedFolder}/iam") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("PUT", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - "object": c.object, + "bucket": c.bucket, + "managedFolder": c.managedFolder, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "storage.objects.delete" call. -func (c *ObjectsDeleteCall) Do(opts ...googleapi.CallOption) error { +// Do executes the "storage.managedFolders.setIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ManagedFoldersSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } if err != nil { - return err + return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return gensupport.WrapError(err) + return nil, gensupport.WrapError(err) } - return nil + ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil // { - // "description": "Deletes an object and its metadata. 
Deletions are permanent if versioning is not enabled for the bucket, or if the generation parameter is used.", - // "httpMethod": "DELETE", - // "id": "storage.objects.delete", + // "description": "Updates an IAM policy for the specified managed folder.", + // "httpMethod": "PUT", + // "id": "storage.managedFolders.setIamPolicy", // "parameterOrder": [ // "bucket", - // "object" + // "managedFolder" // ], // "parameters": { // "bucket": { - // "description": "Name of the bucket in which the object resides.", + // "description": "Name of the bucket containing the managed folder.", // "location": "path", // "required": true, // "type": "string" // }, - // "generation": { - // "description": "If present, permanently deletes a specific revision of this object (as opposed to the latest version, the default).", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifGenerationMatch": { - // "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifGenerationNotMatch": { - // "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationMatch": { - // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationNotMatch": { - // "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "managedFolder": { + // "description": "The managed folder name/path.", // "location": "path", // "required": true, // "type": "string" @@ -9226,99 +9335,50 @@ func (c *ObjectsDeleteCall) Do(opts ...googleapi.CallOption) error { // "type": "string" // } // }, - // "path": "b/{bucket}/o/{object}", + // "path": "b/{bucket}/managedFolders/{managedFolder}/iam", + // "request": { + // "$ref": "Policy" + // }, + // "response": { + // "$ref": "Policy" + // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_write" + // "https://www.googleapis.com/auth/devstorage.full_control" // ] // } } -// method id "storage.objects.get": +// method id "storage.managedFolders.testIamPermissions": -type ObjectsGetCall struct { - s *Service - bucket string - object string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +type ManagedFoldersTestIamPermissionsCall struct { + s *Service + bucket string + managedFolder string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// Get: Retrieves an object or its metadata. 
+// TestIamPermissions: Tests a set of permissions on the given managed +// folder to see which, if any, are held by the caller. // -// - bucket: Name of the bucket in which the object resides. -// - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. -func (r *ObjectsService) Get(bucket string, object string) *ObjectsGetCall { - c := &ObjectsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - bucket: Name of the bucket containing the managed folder. +// - managedFolder: The managed folder name/path. +// - permissions: Permissions to test. +func (r *ManagedFoldersService) TestIamPermissions(bucket string, managedFolder string, permissions []string) *ManagedFoldersTestIamPermissionsCall { + c := &ManagedFoldersTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket - c.object = object - return c -} - -// Generation sets the optional parameter "generation": If present, -// selects a specific revision of this object (as opposed to the latest -// version, the default). -func (c *ObjectsGetCall) Generation(generation int64) *ObjectsGetCall { - c.urlParams_.Set("generation", fmt.Sprint(generation)) - return c -} - -// IfGenerationMatch sets the optional parameter "ifGenerationMatch": -// Makes the operation conditional on whether the object's current -// generation matches the given value. Setting to 0 makes the operation -// succeed only if there are no live versions of the object. -func (c *ObjectsGetCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsGetCall { - c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) - return c -} - -// IfGenerationNotMatch sets the optional parameter -// "ifGenerationNotMatch": Makes the operation conditional on whether -// the object's current generation does not match the given value. If no -// live object exists, the precondition fails. Setting to 0 makes the -// operation succeed only if there is a live version of the object. -func (c *ObjectsGetCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsGetCall { - c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) - return c -} - -// IfMetagenerationMatch sets the optional parameter -// "ifMetagenerationMatch": Makes the operation conditional on whether -// the object's current metageneration matches the given value. -func (c *ObjectsGetCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsGetCall { - c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) - return c -} - -// IfMetagenerationNotMatch sets the optional parameter -// "ifMetagenerationNotMatch": Makes the operation conditional on -// whether the object's current metageneration does not match the given -// value. -func (c *ObjectsGetCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsGetCall { - c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) - return c -} - -// Projection sets the optional parameter "projection": Set of -// properties to return. Defaults to noAcl. -// -// Possible values: -// -// "full" - Include all properties. -// "noAcl" - Omit the owner, acl property. 
-func (c *ObjectsGetCall) Projection(projection string) *ObjectsGetCall { - c.urlParams_.Set("projection", projection) + c.managedFolder = managedFolder + c.urlParams_.SetMulti("permissions", append([]string{}, permissions...)) return c } // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. -func (c *ObjectsGetCall) UserProject(userProject string) *ObjectsGetCall { +func (c *ManagedFoldersTestIamPermissionsCall) UserProject(userProject string) *ManagedFoldersTestIamPermissionsCall { c.urlParams_.Set("userProject", userProject) return c } @@ -9326,7 +9386,7 @@ func (c *ObjectsGetCall) UserProject(userProject string) *ObjectsGetCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ObjectsGetCall) Fields(s ...googleapi.Field) *ObjectsGetCall { +func (c *ManagedFoldersTestIamPermissionsCall) Fields(s ...googleapi.Field) *ManagedFoldersTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -9336,29 +9396,29 @@ func (c *ObjectsGetCall) Fields(s ...googleapi.Field) *ObjectsGetCall { // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. -func (c *ObjectsGetCall) IfNoneMatch(entityTag string) *ObjectsGetCall { +func (c *ManagedFoldersTestIamPermissionsCall) IfNoneMatch(entityTag string) *ManagedFoldersTestIamPermissionsCall { c.ifNoneMatch_ = entityTag return c } -// Context sets the context to be used in this call's Do and Download -// methods. Any pending HTTP request will be aborted if the provided -// context is canceled. -func (c *ObjectsGetCall) Context(ctx context.Context) *ObjectsGetCall { +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ManagedFoldersTestIamPermissionsCall) Context(ctx context.Context) *ManagedFoldersTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ObjectsGetCall) Header() http.Header { +func (c *ManagedFoldersTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ObjectsGetCall) doRequest(alt string) (*http.Response, error) { +func (c *ManagedFoldersTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -9371,7 +9431,7 @@ func (c *ObjectsGetCall) doRequest(alt string) (*http.Response, error) { var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/managedFolders/{managedFolder}/iam/testPermissions") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { @@ -9379,36 +9439,20 @@ func (c *ObjectsGetCall) doRequest(alt string) (*http.Response, error) { } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - "object": c.object, + "bucket": c.bucket, + "managedFolder": c.managedFolder, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Download fetches the API endpoint's "media" value, instead of the normal -// API response value. If the returned error is nil, the Response is guaranteed to -// have a 2xx status code. Callers must close the Response.Body as usual. -func (c *ObjectsGetCall) Download(opts ...googleapi.CallOption) (*http.Response, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("media") - if err != nil { - return nil, err - } - if err := googleapi.CheckMediaResponse(res); err != nil { - res.Body.Close() - return nil, gensupport.WrapError(err) - } - return res, nil -} - -// Do executes the "storage.objects.get" call. -// Exactly one of *Object or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Object.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ObjectsGetCall) Do(opts ...googleapi.CallOption) (*Object, error) { +// Do executes the "storage.managedFolders.testIamPermissions" call. +// Exactly one of *TestIamPermissionsResponse or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *TestIamPermissionsResponse.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ManagedFoldersTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestIamPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -9427,7 +9471,7 @@ func (c *ObjectsGetCall) Do(opts ...googleapi.CallOption) (*Object, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &Object{ + ret := &TestIamPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -9439,67 +9483,32 @@ func (c *ObjectsGetCall) Do(opts ...googleapi.CallOption) (*Object, error) { } return ret, nil // { - // "description": "Retrieves an object or its metadata.", + // "description": "Tests a set of permissions on the given managed folder to see which, if any, are held by the caller.", // "httpMethod": "GET", - // "id": "storage.objects.get", + // "id": "storage.managedFolders.testIamPermissions", // "parameterOrder": [ // "bucket", - // "object" + // "managedFolder", + // "permissions" // ], // "parameters": { // "bucket": { - // "description": "Name of the bucket in which the object resides.", + // "description": "Name of the bucket containing the managed folder.", // "location": "path", // "required": true, // "type": "string" // }, - // "generation": { - // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifGenerationMatch": { - // "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifGenerationNotMatch": { - // "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationMatch": { - // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationNotMatch": { - // "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "managedFolder": { + // "description": "The managed folder name/path.", // "location": "path", // "required": true, // "type": "string" // }, - // "projection": { - // "description": "Set of properties to return. Defaults to noAcl.", - // "enum": [ - // "full", - // "noAcl" - // ], - // "enumDescriptions": [ - // "Include all properties.", - // "Omit the owner, acl property." 
- // ], + // "permissions": { + // "description": "Permissions to test.", // "location": "query", + // "repeated": true, + // "required": true, // "type": "string" // }, // "userProject": { @@ -9508,9 +9517,9 @@ func (c *ObjectsGetCall) Do(opts ...googleapi.CallOption) (*Object, error) { // "type": "string" // } // }, - // "path": "b/{bucket}/o/{object}", + // "path": "b/{bucket}/managedFolders/{managedFolder}/iam/testPermissions", // "response": { - // "$ref": "Object" + // "$ref": "TestIamPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -9518,48 +9527,36 @@ func (c *ObjectsGetCall) Do(opts ...googleapi.CallOption) (*Object, error) { // "https://www.googleapis.com/auth/devstorage.full_control", // "https://www.googleapis.com/auth/devstorage.read_only", // "https://www.googleapis.com/auth/devstorage.read_write" - // ], - // "supportsMediaDownload": true, - // "useMediaDownloadService": true + // ] // } } -// method id "storage.objects.getIamPolicy": +// method id "storage.notifications.delete": -type ObjectsGetIamPolicyCall struct { +type NotificationsDeleteCall struct { s *Service bucket string - object string + notification string urlParams_ gensupport.URLParams - ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// GetIamPolicy: Returns an IAM policy for the specified object. +// Delete: Permanently deletes a notification subscription. // -// - bucket: Name of the bucket in which the object resides. -// - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. -func (r *ObjectsService) GetIamPolicy(bucket string, object string) *ObjectsGetIamPolicyCall { - c := &ObjectsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - bucket: The parent bucket of the notification. +// - notification: ID of the notification to delete. +func (r *NotificationsService) Delete(bucket string, notification string) *NotificationsDeleteCall { + c := &NotificationsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket - c.object = object - return c -} - -// Generation sets the optional parameter "generation": If present, -// selects a specific revision of this object (as opposed to the latest -// version, the default). -func (c *ObjectsGetIamPolicyCall) Generation(generation int64) *ObjectsGetIamPolicyCall { - c.urlParams_.Set("generation", fmt.Sprint(generation)) + c.notification = notification return c } // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. -func (c *ObjectsGetIamPolicyCall) UserProject(userProject string) *ObjectsGetIamPolicyCall { +func (c *NotificationsDeleteCall) UserProject(userProject string) *NotificationsDeleteCall { c.urlParams_.Set("userProject", userProject) return c } @@ -9567,74 +9564,202 @@ func (c *ObjectsGetIamPolicyCall) UserProject(userProject string) *ObjectsGetIam // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ObjectsGetIamPolicyCall) Fields(s ...googleapi.Field) *ObjectsGetIamPolicyCall { +func (c *NotificationsDeleteCall) Fields(s ...googleapi.Field) *NotificationsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. 
This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ObjectsGetIamPolicyCall) IfNoneMatch(entityTag string) *ObjectsGetIamPolicyCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ObjectsGetIamPolicyCall) Context(ctx context.Context) *ObjectsGetIamPolicyCall { +func (c *NotificationsDeleteCall) Context(ctx context.Context) *NotificationsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ObjectsGetIamPolicyCall) Header() http.Header { +func (c *NotificationsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ObjectsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *NotificationsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/iam") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/notificationConfigs/{notification}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - "object": c.object, + "bucket": c.bucket, + "notification": c.notification, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "storage.objects.getIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ObjectsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { - gensupport.SetOptions(c.urlParams_, opts...) +// Do executes the "storage.notifications.delete" call. +func (c *NotificationsDeleteCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return gensupport.WrapError(err) + } + return nil + // { + // "description": "Permanently deletes a notification subscription.", + // "httpMethod": "DELETE", + // "id": "storage.notifications.delete", + // "parameterOrder": [ + // "bucket", + // "notification" + // ], + // "parameters": { + // "bucket": { + // "description": "The parent bucket of the notification.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "notification": { + // "description": "ID of the notification to delete.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/notificationConfigs/{notification}", + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.notifications.get": + +type NotificationsGetCall struct { + s *Service + bucket string + notification string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: View a notification configuration. +// +// - bucket: The parent bucket of the notification. +// - notification: Notification ID. +func (r *NotificationsService) Get(bucket string, notification string) *NotificationsGetCall { + c := &NotificationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.notification = notification + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *NotificationsGetCall) UserProject(userProject string) *NotificationsGetCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *NotificationsGetCall) Fields(s ...googleapi.Field) *NotificationsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *NotificationsGetCall) IfNoneMatch(entityTag string) *NotificationsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *NotificationsGetCall) Context(ctx context.Context) *NotificationsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *NotificationsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *NotificationsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/notificationConfigs/{notification}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "notification": c.notification, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.notifications.get" call. +// Exactly one of *Notification or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Notification.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *NotificationsGetCall) Do(opts ...googleapi.CallOption) (*Notification, error) { + gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { @@ -9652,7 +9777,7 @@ func (c *ObjectsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &Policy{ + ret := &Notification{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -9664,28 +9789,22 @@ func (c *ObjectsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err } return ret, nil // { - // "description": "Returns an IAM policy for the specified object.", + // "description": "View a notification configuration.", // "httpMethod": "GET", - // "id": "storage.objects.getIamPolicy", + // "id": "storage.notifications.get", // "parameterOrder": [ // "bucket", - // "object" + // "notification" // ], // "parameters": { // "bucket": { - // "description": "Name of the bucket in which the object resides.", + // "description": "The parent bucket of the notification.", // "location": "path", // "required": true, // "type": "string" // }, - // "generation": { - // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "object": { - // "description": "Name of the object. 
For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "notification": { + // "description": "Notification ID", // "location": "path", // "required": true, // "type": "string" @@ -9696,9 +9815,9 @@ func (c *ObjectsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err // "type": "string" // } // }, - // "path": "b/{bucket}/o/{object}/iam", + // "path": "b/{bucket}/notificationConfigs/{notification}", // "response": { - // "$ref": "Policy" + // "$ref": "Notification" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -9711,95 +9830,3854 @@ func (c *ObjectsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err } -// method id "storage.objects.insert": +// method id "storage.notifications.insert": -type ObjectsInsertCall struct { - s *Service - bucket string - object *Object - urlParams_ gensupport.URLParams - mediaInfo_ *gensupport.MediaInfo - retry *gensupport.RetryConfig - ctx_ context.Context - header_ http.Header +type NotificationsInsertCall struct { + s *Service + bucket string + notification *Notification + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Insert: Stores a new object and metadata. +// Insert: Creates a notification subscription for a given bucket. // -// - bucket: Name of the bucket in which to store the new object. -// Overrides the provided object metadata's bucket value, if any. -func (r *ObjectsService) Insert(bucket string, object *Object) *ObjectsInsertCall { - c := &ObjectsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - bucket: The parent bucket of the notification. +func (r *NotificationsService) Insert(bucket string, notification *Notification) *NotificationsInsertCall { + c := &NotificationsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket - c.object = object + c.notification = notification return c } -// ContentEncoding sets the optional parameter "contentEncoding": If -// set, sets the contentEncoding property of the final object to this -// value. Setting this parameter is equivalent to setting the -// contentEncoding metadata property. This can be useful when uploading -// an object with uploadType=media to indicate the encoding of the -// content being uploaded. -func (c *ObjectsInsertCall) ContentEncoding(contentEncoding string) *ObjectsInsertCall { - c.urlParams_.Set("contentEncoding", contentEncoding) +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *NotificationsInsertCall) UserProject(userProject string) *NotificationsInsertCall { + c.urlParams_.Set("userProject", userProject) return c } -// IfGenerationMatch sets the optional parameter "ifGenerationMatch": -// Makes the operation conditional on whether the object's current -// generation matches the given value. Setting to 0 makes the operation -// succeed only if there are no live versions of the object. -func (c *ObjectsInsertCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsInsertCall { - c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *NotificationsInsertCall) Fields(s ...googleapi.Field) *NotificationsInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfGenerationNotMatch sets the optional parameter -// "ifGenerationNotMatch": Makes the operation conditional on whether -// the object's current generation does not match the given value. If no -// live object exists, the precondition fails. Setting to 0 makes the -// operation succeed only if there is a live version of the object. -func (c *ObjectsInsertCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsInsertCall { - c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *NotificationsInsertCall) Context(ctx context.Context) *NotificationsInsertCall { + c.ctx_ = ctx return c } -// IfMetagenerationMatch sets the optional parameter -// "ifMetagenerationMatch": Makes the operation conditional on whether -// the object's current metageneration matches the given value. -func (c *ObjectsInsertCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsInsertCall { - c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) - return c +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *NotificationsInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ } -// IfMetagenerationNotMatch sets the optional parameter -// "ifMetagenerationNotMatch": Makes the operation conditional on -// whether the object's current metageneration does not match the given -// value. -func (c *ObjectsInsertCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsInsertCall { +func (c *NotificationsInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.notification) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/notificationConfigs") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.notifications.insert" call. +// Exactly one of *Notification or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Notification.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *NotificationsInsertCall) Do(opts ...googleapi.CallOption) (*Notification, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Notification{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a notification subscription for a given bucket.", + // "httpMethod": "POST", + // "id": "storage.notifications.insert", + // "parameterOrder": [ + // "bucket" + // ], + // "parameters": { + // "bucket": { + // "description": "The parent bucket of the notification.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/notificationConfigs", + // "request": { + // "$ref": "Notification" + // }, + // "response": { + // "$ref": "Notification" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.notifications.list": + +type NotificationsListCall struct { + s *Service + bucket string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves a list of notification subscriptions for a given +// bucket. +// +// - bucket: Name of a Google Cloud Storage bucket. +func (r *NotificationsService) List(bucket string) *NotificationsListCall { + c := &NotificationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *NotificationsListCall) UserProject(userProject string) *NotificationsListCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *NotificationsListCall) Fields(s ...googleapi.Field) *NotificationsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *NotificationsListCall) IfNoneMatch(entityTag string) *NotificationsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *NotificationsListCall) Context(ctx context.Context) *NotificationsListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *NotificationsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *NotificationsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/notificationConfigs") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.notifications.list" call. +// Exactly one of *Notifications or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Notifications.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *NotificationsListCall) Do(opts ...googleapi.CallOption) (*Notifications, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Notifications{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a list of notification subscriptions for a given bucket.", + // "httpMethod": "GET", + // "id": "storage.notifications.list", + // "parameterOrder": [ + // "bucket" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a Google Cloud Storage bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/notificationConfigs", + // "response": { + // "$ref": "Notifications" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.objectAccessControls.delete": + +type ObjectAccessControlsDeleteCall struct { + s *Service + bucket string + object string + entity string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Permanently deletes the ACL entry for the specified entity on +// the specified object. +// +// - bucket: Name of a bucket. +// - entity: The entity holding the permission. Can be user-userId, +// user-emailAddress, group-groupId, group-emailAddress, allUsers, or +// allAuthenticatedUsers. +// - object: Name of the object. For information about how to URL encode +// object names to be path safe, see Encoding URI Path Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). +func (r *ObjectAccessControlsService) Delete(bucket string, object string, entity string) *ObjectAccessControlsDeleteCall { + c := &ObjectAccessControlsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + c.entity = entity + return c +} + +// Generation sets the optional parameter "generation": If present, +// selects a specific revision of this object (as opposed to the latest +// version, the default). +func (c *ObjectAccessControlsDeleteCall) Generation(generation int64) *ObjectAccessControlsDeleteCall { + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *ObjectAccessControlsDeleteCall) UserProject(userProject string) *ObjectAccessControlsDeleteCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectAccessControlsDeleteCall) Fields(s ...googleapi.Field) *ObjectAccessControlsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ObjectAccessControlsDeleteCall) Context(ctx context.Context) *ObjectAccessControlsDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *ObjectAccessControlsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + "entity": c.entity, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objectAccessControls.delete" call. +func (c *ObjectAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return gensupport.WrapError(err) + } + return nil + // { + // "description": "Permanently deletes the ACL entry for the specified entity on the specified object.", + // "httpMethod": "DELETE", + // "id": "storage.objectAccessControls.delete", + // "parameterOrder": [ + // "bucket", + // "object", + // "entity" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "entity": { + // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "generation": { + // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "object": { + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o/{object}/acl/{entity}", + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.objectAccessControls.get": + +type ObjectAccessControlsGetCall struct { + s *Service + bucket string + object string + entity string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the ACL entry for the specified entity on the specified +// object. +// +// - bucket: Name of a bucket. +// - entity: The entity holding the permission. 
Can be user-userId, +// user-emailAddress, group-groupId, group-emailAddress, allUsers, or +// allAuthenticatedUsers. +// - object: Name of the object. For information about how to URL encode +// object names to be path safe, see Encoding URI Path Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). +func (r *ObjectAccessControlsService) Get(bucket string, object string, entity string) *ObjectAccessControlsGetCall { + c := &ObjectAccessControlsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + c.entity = entity + return c +} + +// Generation sets the optional parameter "generation": If present, +// selects a specific revision of this object (as opposed to the latest +// version, the default). +func (c *ObjectAccessControlsGetCall) Generation(generation int64) *ObjectAccessControlsGetCall { + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *ObjectAccessControlsGetCall) UserProject(userProject string) *ObjectAccessControlsGetCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectAccessControlsGetCall) Fields(s ...googleapi.Field) *ObjectAccessControlsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ObjectAccessControlsGetCall) IfNoneMatch(entityTag string) *ObjectAccessControlsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ObjectAccessControlsGetCall) Context(ctx context.Context) *ObjectAccessControlsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ObjectAccessControlsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectAccessControlsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + "entity": c.entity, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objectAccessControls.get" call. +// Exactly one of *ObjectAccessControl or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ObjectAccessControl.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ObjectAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &ObjectAccessControl{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the ACL entry for the specified entity on the specified object.", + // "httpMethod": "GET", + // "id": "storage.objectAccessControls.get", + // "parameterOrder": [ + // "bucket", + // "object", + // "entity" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "entity": { + // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "generation": { + // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "object": { + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o/{object}/acl/{entity}", + // "response": { + // "$ref": "ObjectAccessControl" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.objectAccessControls.insert": + +type ObjectAccessControlsInsertCall struct { + s *Service + bucket string + object string + objectaccesscontrol *ObjectAccessControl + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates a new ACL entry on the specified object. +// +// - bucket: Name of a bucket. +// - object: Name of the object. For information about how to URL encode +// object names to be path safe, see Encoding URI Path Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). +func (r *ObjectAccessControlsService) Insert(bucket string, object string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsInsertCall { + c := &ObjectAccessControlsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + c.objectaccesscontrol = objectaccesscontrol + return c +} + +// Generation sets the optional parameter "generation": If present, +// selects a specific revision of this object (as opposed to the latest +// version, the default). +func (c *ObjectAccessControlsInsertCall) Generation(generation int64) *ObjectAccessControlsInsertCall { + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *ObjectAccessControlsInsertCall) UserProject(userProject string) *ObjectAccessControlsInsertCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectAccessControlsInsertCall) Fields(s ...googleapi.Field) *ObjectAccessControlsInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ObjectAccessControlsInsertCall) Context(ctx context.Context) *ObjectAccessControlsInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ObjectAccessControlsInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objectAccessControls.insert" call. +// Exactly one of *ObjectAccessControl or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ObjectAccessControl.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ObjectAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &ObjectAccessControl{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a new ACL entry on the specified object.", + // "httpMethod": "POST", + // "id": "storage.objectAccessControls.insert", + // "parameterOrder": [ + // "bucket", + // "object" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "generation": { + // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "object": { + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o/{object}/acl", + // "request": { + // "$ref": "ObjectAccessControl" + // }, + // "response": { + // "$ref": "ObjectAccessControl" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.objectAccessControls.list": + +type ObjectAccessControlsListCall struct { + s *Service + bucket string + object string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves ACL entries on the specified object. +// +// - bucket: Name of a bucket. +// - object: Name of the object. For information about how to URL encode +// object names to be path safe, see Encoding URI Path Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). 
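Another hedged sketch, this time of the Insert and List calls defined in this file; it assumes a *storage.Service named svc built as in the earlier example, and the entity and role strings are placeholders following the ACL conventions quoted in the doc comments.

// grantAndListACLs is a hypothetical helper; it assumes imports of "context",
// "fmt", and storage "google.golang.org/api/storage/v1".
func grantAndListACLs(ctx context.Context, svc *storage.Service, bucket, object string) error {
	// Grant READER on the object to a specific user.
	_, err := svc.ObjectAccessControls.Insert(bucket, object, &storage.ObjectAccessControl{
		Entity: "user-jane@example.com",
		Role:   "READER",
	}).Context(ctx).Do()
	if err != nil {
		return err
	}
	// List every ACL entry currently on the object.
	acls, err := svc.ObjectAccessControls.List(bucket, object).Context(ctx).Do()
	if err != nil {
		return err
	}
	for _, e := range acls.Items {
		fmt.Printf("%s: %s\n", e.Entity, e.Role)
	}
	return nil
}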
+func (r *ObjectAccessControlsService) List(bucket string, object string) *ObjectAccessControlsListCall { + c := &ObjectAccessControlsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + return c +} + +// Generation sets the optional parameter "generation": If present, +// selects a specific revision of this object (as opposed to the latest +// version, the default). +func (c *ObjectAccessControlsListCall) Generation(generation int64) *ObjectAccessControlsListCall { + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *ObjectAccessControlsListCall) UserProject(userProject string) *ObjectAccessControlsListCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectAccessControlsListCall) Fields(s ...googleapi.Field) *ObjectAccessControlsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ObjectAccessControlsListCall) IfNoneMatch(entityTag string) *ObjectAccessControlsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ObjectAccessControlsListCall) Context(ctx context.Context) *ObjectAccessControlsListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ObjectAccessControlsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectAccessControlsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objectAccessControls.list" call. +// Exactly one of *ObjectAccessControls or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ObjectAccessControls.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. 
Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ObjectAccessControlsListCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControls, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &ObjectAccessControls{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves ACL entries on the specified object.", + // "httpMethod": "GET", + // "id": "storage.objectAccessControls.list", + // "parameterOrder": [ + // "bucket", + // "object" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "generation": { + // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "object": { + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o/{object}/acl", + // "response": { + // "$ref": "ObjectAccessControls" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.objectAccessControls.patch": + +type ObjectAccessControlsPatchCall struct { + s *Service + bucket string + object string + entity string + objectaccesscontrol *ObjectAccessControl + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Patches an ACL entry on the specified object. +// +// - bucket: Name of a bucket. +// - entity: The entity holding the permission. Can be user-userId, +// user-emailAddress, group-groupId, group-emailAddress, allUsers, or +// allAuthenticatedUsers. +// - object: Name of the object. For information about how to URL encode +// object names to be path safe, see Encoding URI Path Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). 
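A hypothetical sketch of the Patch call introduced above, under the same svc assumption; only the Role field of the request body is populated.

// promoteToOwner is a hypothetical helper; it assumes imports of "context" and
// storage "google.golang.org/api/storage/v1".
func promoteToOwner(ctx context.Context, svc *storage.Service, bucket, object, entity string) error {
	// Only the Role field is set, so the existing entry is modified in place
	// rather than replaced wholesale.
	_, err := svc.ObjectAccessControls.Patch(bucket, object, entity, &storage.ObjectAccessControl{
		Role: "OWNER",
	}).Context(ctx).Do()
	return err
}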
+func (r *ObjectAccessControlsService) Patch(bucket string, object string, entity string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsPatchCall { + c := &ObjectAccessControlsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + c.entity = entity + c.objectaccesscontrol = objectaccesscontrol + return c +} + +// Generation sets the optional parameter "generation": If present, +// selects a specific revision of this object (as opposed to the latest +// version, the default). +func (c *ObjectAccessControlsPatchCall) Generation(generation int64) *ObjectAccessControlsPatchCall { + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *ObjectAccessControlsPatchCall) UserProject(userProject string) *ObjectAccessControlsPatchCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectAccessControlsPatchCall) Fields(s ...googleapi.Field) *ObjectAccessControlsPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ObjectAccessControlsPatchCall) Context(ctx context.Context) *ObjectAccessControlsPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ObjectAccessControlsPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + "entity": c.entity, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objectAccessControls.patch" call. +// Exactly one of *ObjectAccessControl or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ObjectAccessControl.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ObjectAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &ObjectAccessControl{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Patches an ACL entry on the specified object.", + // "httpMethod": "PATCH", + // "id": "storage.objectAccessControls.patch", + // "parameterOrder": [ + // "bucket", + // "object", + // "entity" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "entity": { + // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "generation": { + // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "object": { + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o/{object}/acl/{entity}", + // "request": { + // "$ref": "ObjectAccessControl" + // }, + // "response": { + // "$ref": "ObjectAccessControl" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.objectAccessControls.update": + +type ObjectAccessControlsUpdateCall struct { + s *Service + bucket string + object string + entity string + objectaccesscontrol *ObjectAccessControl + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Update: Updates an ACL entry on the specified object. +// +// - bucket: Name of a bucket. +// - entity: The entity holding the permission. Can be user-userId, +// user-emailAddress, group-groupId, group-emailAddress, allUsers, or +// allAuthenticatedUsers. +// - object: Name of the object. For information about how to URL encode +// object names to be path safe, see Encoding URI Path Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). 
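The Get and List calls above expose IfNoneMatch and surface a 304 through the error path; the sketch below shows that pattern under the same assumptions, with prevETag standing in for an ETag saved from an earlier response.

// refreshACLIfChanged is a hypothetical helper; it assumes imports of
// "context", googleapi "google.golang.org/api/googleapi", and
// storage "google.golang.org/api/storage/v1".
func refreshACLIfChanged(ctx context.Context, svc *storage.Service, bucket, object, entity, prevETag string) (*storage.ObjectAccessControl, error) {
	acl, err := svc.ObjectAccessControls.Get(bucket, object, entity).
		IfNoneMatch(prevETag).
		Context(ctx).
		Do()
	if googleapi.IsNotModified(err) {
		// Nothing changed since prevETag was recorded; keep the cached copy.
		return nil, nil
	}
	return acl, err
}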
+func (r *ObjectAccessControlsService) Update(bucket string, object string, entity string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsUpdateCall { + c := &ObjectAccessControlsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + c.entity = entity + c.objectaccesscontrol = objectaccesscontrol + return c +} + +// Generation sets the optional parameter "generation": If present, +// selects a specific revision of this object (as opposed to the latest +// version, the default). +func (c *ObjectAccessControlsUpdateCall) Generation(generation int64) *ObjectAccessControlsUpdateCall { + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *ObjectAccessControlsUpdateCall) UserProject(userProject string) *ObjectAccessControlsUpdateCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectAccessControlsUpdateCall) Fields(s ...googleapi.Field) *ObjectAccessControlsUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ObjectAccessControlsUpdateCall) Context(ctx context.Context) *ObjectAccessControlsUpdateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ObjectAccessControlsUpdateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PUT", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + "entity": c.entity, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objectAccessControls.update" call. +// Exactly one of *ObjectAccessControl or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ObjectAccessControl.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ObjectAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &ObjectAccessControl{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates an ACL entry on the specified object.", + // "httpMethod": "PUT", + // "id": "storage.objectAccessControls.update", + // "parameterOrder": [ + // "bucket", + // "object", + // "entity" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "entity": { + // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "generation": { + // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "object": { + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o/{object}/acl/{entity}", + // "request": { + // "$ref": "ObjectAccessControl" + // }, + // "response": { + // "$ref": "ObjectAccessControl" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.objects.bulkRestore": + +type ObjectsBulkRestoreCall struct { + s *Service + bucket string + bulkrestoreobjectsrequest *BulkRestoreObjectsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// BulkRestore: Initiates a long-running bulk restore operation on the +// specified bucket. +// +// - bucket: Name of the bucket in which the object resides. +func (r *ObjectsService) BulkRestore(bucket string, bulkrestoreobjectsrequest *BulkRestoreObjectsRequest) *ObjectsBulkRestoreCall { + c := &ObjectsBulkRestoreCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.bulkrestoreobjectsrequest = bulkrestoreobjectsrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectsBulkRestoreCall) Fields(s ...googleapi.Field) *ObjectsBulkRestoreCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. 
Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ObjectsBulkRestoreCall) Context(ctx context.Context) *ObjectsBulkRestoreCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ObjectsBulkRestoreCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectsBulkRestoreCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.bulkrestoreobjectsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/bulkRestore") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objects.bulkRestore" call. +// Exactly one of *GoogleLongrunningOperation or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *GoogleLongrunningOperation.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ObjectsBulkRestoreCall) Do(opts ...googleapi.CallOption) (*GoogleLongrunningOperation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &GoogleLongrunningOperation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Initiates a long-running bulk restore operation on the specified bucket.", + // "httpMethod": "POST", + // "id": "storage.objects.bulkRestore", + // "parameterOrder": [ + // "bucket" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of the bucket in which the object resides.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o/bulkRestore", + // "request": { + // "$ref": "BulkRestoreObjectsRequest" + // }, + // "response": { + // "$ref": "GoogleLongrunningOperation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.objects.compose": + +type ObjectsComposeCall struct { + s *Service + destinationBucket string + destinationObject string + composerequest *ComposeRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Compose: Concatenates a list of existing objects into a new object in +// the same bucket. +// +// - destinationBucket: Name of the bucket containing the source +// objects. The destination object is stored in this bucket. +// - destinationObject: Name of the new object. For information about +// how to URL encode object names to be path safe, see Encoding URI +// Path Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). +func (r *ObjectsService) Compose(destinationBucket string, destinationObject string, composerequest *ComposeRequest) *ObjectsComposeCall { + c := &ObjectsComposeCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.destinationBucket = destinationBucket + c.destinationObject = destinationObject + c.composerequest = composerequest + return c +} + +// DestinationPredefinedAcl sets the optional parameter +// "destinationPredefinedAcl": Apply a predefined set of access controls +// to the destination object. +// +// Possible values: +// +// "authenticatedRead" - Object owner gets OWNER access, and +// +// allAuthenticatedUsers get READER access. +// +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// +// project team owners get OWNER access. +// +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// +// team owners get READER access. +// +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// +// members get access according to their roles. +// +// "publicRead" - Object owner gets OWNER access, and allUsers get +// +// READER access. 
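A hedged sketch of the bulk-restore call documented above; it assumes MatchGlobs is the glob selector declared on BulkRestoreObjectsRequest elsewhere in this vendored file, and the returned long-running operation would be polled separately.

// restoreDeletedReports is a hypothetical helper; it assumes imports of
// "context", "fmt", and storage "google.golang.org/api/storage/v1", and that
// BulkRestoreObjectsRequest.MatchGlobs (defined elsewhere in this file)
// selects soft-deleted objects by glob.
func restoreDeletedReports(ctx context.Context, svc *storage.Service, bucket string) error {
	op, err := svc.Objects.BulkRestore(bucket, &storage.BulkRestoreObjectsRequest{
		MatchGlobs: []string{"reports/**"},
	}).Context(ctx).Do()
	if err != nil {
		return err
	}
	// The response is a long-running operation; completion would be tracked
	// through the operations calls defined elsewhere in this package.
	fmt.Println("started bulk restore:", op.Name)
	return nil
}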
+func (c *ObjectsComposeCall) DestinationPredefinedAcl(destinationPredefinedAcl string) *ObjectsComposeCall { + c.urlParams_.Set("destinationPredefinedAcl", destinationPredefinedAcl) + return c +} + +// IfGenerationMatch sets the optional parameter "ifGenerationMatch": +// Makes the operation conditional on whether the object's current +// generation matches the given value. Setting to 0 makes the operation +// succeed only if there are no live versions of the object. +func (c *ObjectsComposeCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsComposeCall { + c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": Makes the operation conditional on whether +// the object's current metageneration matches the given value. +func (c *ObjectsComposeCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsComposeCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// KmsKeyName sets the optional parameter "kmsKeyName": Resource name of +// the Cloud KMS key, of the form +// projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, +// +// that will be used to encrypt the object. Overrides the object +// +// metadata's kms_key_name value, if any. +func (c *ObjectsComposeCall) KmsKeyName(kmsKeyName string) *ObjectsComposeCall { + c.urlParams_.Set("kmsKeyName", kmsKeyName) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *ObjectsComposeCall) UserProject(userProject string) *ObjectsComposeCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectsComposeCall) Fields(s ...googleapi.Field) *ObjectsComposeCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ObjectsComposeCall) Context(ctx context.Context) *ObjectsComposeCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ObjectsComposeCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectsComposeCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.composerequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{destinationBucket}/o/{destinationObject}/compose") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "destinationBucket": c.destinationBucket, + "destinationObject": c.destinationObject, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objects.compose" call. +// Exactly one of *Object or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Object.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ObjectsComposeCall) Do(opts ...googleapi.CallOption) (*Object, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Object{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Concatenates a list of existing objects into a new object in the same bucket.", + // "httpMethod": "POST", + // "id": "storage.objects.compose", + // "parameterOrder": [ + // "destinationBucket", + // "destinationObject" + // ], + // "parameters": { + // "destinationBucket": { + // "description": "Name of the bucket containing the source objects. The destination object is stored in this bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "destinationObject": { + // "description": "Name of the new object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "destinationPredefinedAcl": { + // "description": "Apply a predefined set of access controls to the destination object.", + // "enum": [ + // "authenticatedRead", + // "bucketOwnerFullControl", + // "bucketOwnerRead", + // "private", + // "projectPrivate", + // "publicRead" + // ], + // "enumDescriptions": [ + // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + // "Object owner gets OWNER access, and project team owners get OWNER access.", + // "Object owner gets OWNER access, and project team owners get READER access.", + // "Object owner gets OWNER access.", + // "Object owner gets OWNER access, and project team members get access according to their roles.", + // "Object owner gets OWNER access, and allUsers get READER access." + // ], + // "location": "query", + // "type": "string" + // }, + // "ifGenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current generation matches the given value. 
Setting to 0 makes the operation succeed only if there are no live versions of the object.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "kmsKeyName": { + // "description": "Resource name of the Cloud KMS key, of the form projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be used to encrypt the object. Overrides the object metadata's kms_key_name value, if any.", + // "location": "query", + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{destinationBucket}/o/{destinationObject}/compose", + // "request": { + // "$ref": "ComposeRequest" + // }, + // "response": { + // "$ref": "Object" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.objects.copy": + +type ObjectsCopyCall struct { + s *Service + sourceBucket string + sourceObject string + destinationBucket string + destinationObject string + object *Object + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Copy: Copies a source object to a destination object. Optionally +// overrides metadata. +// +// - destinationBucket: Name of the bucket in which to store the new +// object. Overrides the provided object metadata's bucket value, if +// any.For information about how to URL encode object names to be path +// safe, see Encoding URI Path Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). +// - destinationObject: Name of the new object. Required when the object +// metadata is not otherwise provided. Overrides the object metadata's +// name value, if any. +// - sourceBucket: Name of the bucket in which to find the source +// object. +// - sourceObject: Name of the source object. For information about how +// to URL encode object names to be path safe, see Encoding URI Path +// Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). +func (r *ObjectsService) Copy(sourceBucket string, sourceObject string, destinationBucket string, destinationObject string, object *Object) *ObjectsCopyCall { + c := &ObjectsCopyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.sourceBucket = sourceBucket + c.sourceObject = sourceObject + c.destinationBucket = destinationBucket + c.destinationObject = destinationObject + c.object = object + return c +} + +// DestinationKmsKeyName sets the optional parameter +// "destinationKmsKeyName": Resource name of the Cloud KMS key, of the +// form +// projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, +// +// that will be used to encrypt the object. Overrides the object +// +// metadata's kms_key_name value, if any. 
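A hypothetical sketch of the compose call whose schema ends just above, using the ComposeRequest fields (Destination, SourceObjects) defined elsewhere in this file; object names and the content type are placeholders.

// concatenateParts is a hypothetical helper; it assumes imports of "context"
// and storage "google.golang.org/api/storage/v1".
func concatenateParts(ctx context.Context, svc *storage.Service, bucket string) (*storage.Object, error) {
	req := &storage.ComposeRequest{
		Destination: &storage.Object{ContentType: "text/plain"},
		SourceObjects: []*storage.ComposeRequestSourceObjects{
			{Name: "logs/part-1.txt"},
			{Name: "logs/part-2.txt"},
		},
	}
	// IfGenerationMatch(0) makes the compose succeed only if "logs/all.txt"
	// has no live version yet, per the precondition described above.
	return svc.Objects.Compose(bucket, "logs/all.txt", req).
		IfGenerationMatch(0).
		Context(ctx).
		Do()
}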
+func (c *ObjectsCopyCall) DestinationKmsKeyName(destinationKmsKeyName string) *ObjectsCopyCall { + c.urlParams_.Set("destinationKmsKeyName", destinationKmsKeyName) + return c +} + +// DestinationPredefinedAcl sets the optional parameter +// "destinationPredefinedAcl": Apply a predefined set of access controls +// to the destination object. +// +// Possible values: +// +// "authenticatedRead" - Object owner gets OWNER access, and +// +// allAuthenticatedUsers get READER access. +// +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// +// project team owners get OWNER access. +// +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// +// team owners get READER access. +// +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// +// members get access according to their roles. +// +// "publicRead" - Object owner gets OWNER access, and allUsers get +// +// READER access. +func (c *ObjectsCopyCall) DestinationPredefinedAcl(destinationPredefinedAcl string) *ObjectsCopyCall { + c.urlParams_.Set("destinationPredefinedAcl", destinationPredefinedAcl) + return c +} + +// IfGenerationMatch sets the optional parameter "ifGenerationMatch": +// Makes the operation conditional on whether the destination object's +// current generation matches the given value. Setting to 0 makes the +// operation succeed only if there are no live versions of the object. +func (c *ObjectsCopyCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsCopyCall { + c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) + return c +} + +// IfGenerationNotMatch sets the optional parameter +// "ifGenerationNotMatch": Makes the operation conditional on whether +// the destination object's current generation does not match the given +// value. If no live object exists, the precondition fails. Setting to 0 +// makes the operation succeed only if there is a live version of the +// object. +func (c *ObjectsCopyCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsCopyCall { + c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": Makes the operation conditional on whether +// the destination object's current metageneration matches the given +// value. +func (c *ObjectsCopyCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsCopyCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": Makes the operation conditional on +// whether the destination object's current metageneration does not +// match the given value. +func (c *ObjectsCopyCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsCopyCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) + return c +} + +// IfSourceGenerationMatch sets the optional parameter +// "ifSourceGenerationMatch": Makes the operation conditional on whether +// the source object's current generation matches the given value. 
+func (c *ObjectsCopyCall) IfSourceGenerationMatch(ifSourceGenerationMatch int64) *ObjectsCopyCall { + c.urlParams_.Set("ifSourceGenerationMatch", fmt.Sprint(ifSourceGenerationMatch)) + return c +} + +// IfSourceGenerationNotMatch sets the optional parameter +// "ifSourceGenerationNotMatch": Makes the operation conditional on +// whether the source object's current generation does not match the +// given value. +func (c *ObjectsCopyCall) IfSourceGenerationNotMatch(ifSourceGenerationNotMatch int64) *ObjectsCopyCall { + c.urlParams_.Set("ifSourceGenerationNotMatch", fmt.Sprint(ifSourceGenerationNotMatch)) + return c +} + +// IfSourceMetagenerationMatch sets the optional parameter +// "ifSourceMetagenerationMatch": Makes the operation conditional on +// whether the source object's current metageneration matches the given +// value. +func (c *ObjectsCopyCall) IfSourceMetagenerationMatch(ifSourceMetagenerationMatch int64) *ObjectsCopyCall { + c.urlParams_.Set("ifSourceMetagenerationMatch", fmt.Sprint(ifSourceMetagenerationMatch)) + return c +} + +// IfSourceMetagenerationNotMatch sets the optional parameter +// "ifSourceMetagenerationNotMatch": Makes the operation conditional on +// whether the source object's current metageneration does not match the +// given value. +func (c *ObjectsCopyCall) IfSourceMetagenerationNotMatch(ifSourceMetagenerationNotMatch int64) *ObjectsCopyCall { + c.urlParams_.Set("ifSourceMetagenerationNotMatch", fmt.Sprint(ifSourceMetagenerationNotMatch)) + return c +} + +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to noAcl, unless the object resource +// specifies the acl property, when it defaults to full. +// +// Possible values: +// +// "full" - Include all properties. +// "noAcl" - Omit the owner, acl property. +func (c *ObjectsCopyCall) Projection(projection string) *ObjectsCopyCall { + c.urlParams_.Set("projection", projection) + return c +} + +// SourceGeneration sets the optional parameter "sourceGeneration": If +// present, selects a specific revision of the source object (as opposed +// to the latest version, the default). +func (c *ObjectsCopyCall) SourceGeneration(sourceGeneration int64) *ObjectsCopyCall { + c.urlParams_.Set("sourceGeneration", fmt.Sprint(sourceGeneration)) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *ObjectsCopyCall) UserProject(userProject string) *ObjectsCopyCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectsCopyCall) Fields(s ...googleapi.Field) *ObjectsCopyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ObjectsCopyCall) Context(ctx context.Context) *ObjectsCopyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
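A hedged sketch of the copy call together with two of the preconditions listed above, under the same svc assumption; passing an empty *storage.Object requests no metadata overrides.

// copyLatest is a hypothetical helper; it assumes imports of "context" and
// storage "google.golang.org/api/storage/v1".
func copyLatest(ctx context.Context, svc *storage.Service, srcBucket, srcObject, dstBucket, dstObject string) (*storage.Object, error) {
	// An empty *storage.Object means no metadata overrides; the precondition
	// below succeeds only while the destination does not yet exist.
	return svc.Objects.Copy(srcBucket, srcObject, dstBucket, dstObject, &storage.Object{}).
		IfGenerationMatch(0).
		Projection("noAcl").
		Context(ctx).
		Do()
}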
+func (c *ObjectsCopyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectsCopyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.object) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{sourceBucket}/o/{sourceObject}/copyTo/b/{destinationBucket}/o/{destinationObject}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "sourceBucket": c.sourceBucket, + "sourceObject": c.sourceObject, + "destinationBucket": c.destinationBucket, + "destinationObject": c.destinationObject, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objects.copy" call. +// Exactly one of *Object or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Object.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ObjectsCopyCall) Do(opts ...googleapi.CallOption) (*Object, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Object{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Copies a source object to a destination object. Optionally overrides metadata.", + // "httpMethod": "POST", + // "id": "storage.objects.copy", + // "parameterOrder": [ + // "sourceBucket", + // "sourceObject", + // "destinationBucket", + // "destinationObject" + // ], + // "parameters": { + // "destinationBucket": { + // "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "destinationKmsKeyName": { + // "description": "Resource name of the Cloud KMS key, of the form projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be used to encrypt the object. 
Overrides the object metadata's kms_key_name value, if any.", + // "location": "query", + // "type": "string" + // }, + // "destinationObject": { + // "description": "Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "destinationPredefinedAcl": { + // "description": "Apply a predefined set of access controls to the destination object.", + // "enum": [ + // "authenticatedRead", + // "bucketOwnerFullControl", + // "bucketOwnerRead", + // "private", + // "projectPrivate", + // "publicRead" + // ], + // "enumDescriptions": [ + // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + // "Object owner gets OWNER access, and project team owners get OWNER access.", + // "Object owner gets OWNER access, and project team owners get READER access.", + // "Object owner gets OWNER access.", + // "Object owner gets OWNER access, and project team members get access according to their roles.", + // "Object owner gets OWNER access, and allUsers get READER access." + // ], + // "location": "query", + // "type": "string" + // }, + // "ifGenerationMatch": { + // "description": "Makes the operation conditional on whether the destination object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifGenerationNotMatch": { + // "description": "Makes the operation conditional on whether the destination object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "Makes the operation conditional on whether the destination object's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationNotMatch": { + // "description": "Makes the operation conditional on whether the destination object's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifSourceGenerationMatch": { + // "description": "Makes the operation conditional on whether the source object's current generation matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifSourceGenerationNotMatch": { + // "description": "Makes the operation conditional on whether the source object's current generation does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifSourceMetagenerationMatch": { + // "description": "Makes the operation conditional on whether the source object's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifSourceMetagenerationNotMatch": { + // "description": "Makes the operation conditional on whether the source object's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. 
Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit the owner, acl property." + // ], + // "location": "query", + // "type": "string" + // }, + // "sourceBucket": { + // "description": "Name of the bucket in which to find the source object.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "sourceGeneration": { + // "description": "If present, selects a specific revision of the source object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "sourceObject": { + // "description": "Name of the source object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{sourceBucket}/o/{sourceObject}/copyTo/b/{destinationBucket}/o/{destinationObject}", + // "request": { + // "$ref": "Object" + // }, + // "response": { + // "$ref": "Object" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.objects.delete": + +type ObjectsDeleteCall struct { + s *Service + bucket string + object string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes an object and its metadata. Deletions are permanent +// if versioning is not enabled for the bucket, or if the generation +// parameter is used. +// +// - bucket: Name of the bucket in which the object resides. +// - object: Name of the object. For information about how to URL encode +// object names to be path safe, see Encoding URI Path Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). +func (r *ObjectsService) Delete(bucket string, object string) *ObjectsDeleteCall { + c := &ObjectsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + return c +} + +// Generation sets the optional parameter "generation": If present, +// permanently deletes a specific revision of this object (as opposed to +// the latest version, the default). +func (c *ObjectsDeleteCall) Generation(generation int64) *ObjectsDeleteCall { + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + +// IfGenerationMatch sets the optional parameter "ifGenerationMatch": +// Makes the operation conditional on whether the object's current +// generation matches the given value. Setting to 0 makes the operation +// succeed only if there are no live versions of the object. +func (c *ObjectsDeleteCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsDeleteCall { + c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) + return c +} + +// IfGenerationNotMatch sets the optional parameter +// "ifGenerationNotMatch": Makes the operation conditional on whether +// the object's current generation does not match the given value. If no +// live object exists, the precondition fails. 
Setting to 0 makes the +// operation succeed only if there is a live version of the object. +func (c *ObjectsDeleteCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsDeleteCall { + c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": Makes the operation conditional on whether +// the object's current metageneration matches the given value. +func (c *ObjectsDeleteCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsDeleteCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": Makes the operation conditional on +// whether the object's current metageneration does not match the given +// value. +func (c *ObjectsDeleteCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsDeleteCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *ObjectsDeleteCall) UserProject(userProject string) *ObjectsDeleteCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectsDeleteCall) Fields(s ...googleapi.Field) *ObjectsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ObjectsDeleteCall) Context(ctx context.Context) *ObjectsDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ObjectsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objects.delete" call. +func (c *ObjectsDeleteCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return gensupport.WrapError(err) + } + return nil + // { + // "description": "Deletes an object and its metadata. 
Deletions are permanent if versioning is not enabled for the bucket, or if the generation parameter is used.", + // "httpMethod": "DELETE", + // "id": "storage.objects.delete", + // "parameterOrder": [ + // "bucket", + // "object" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of the bucket in which the object resides.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "generation": { + // "description": "If present, permanently deletes a specific revision of this object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifGenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifGenerationNotMatch": { + // "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationNotMatch": { + // "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "object": { + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o/{object}", + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.objects.get": + +type ObjectsGetCall struct { + s *Service + bucket string + object string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Retrieves an object or its metadata. +// +// - bucket: Name of the bucket in which the object resides. +// - object: Name of the object. For information about how to URL encode +// object names to be path safe, see Encoding URI Path Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). +func (r *ObjectsService) Get(bucket string, object string) *ObjectsGetCall { + c := &ObjectsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + return c +} + +// Generation sets the optional parameter "generation": If present, +// selects a specific revision of this object (as opposed to the latest +// version, the default). 
+func (c *ObjectsGetCall) Generation(generation int64) *ObjectsGetCall { + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + +// IfGenerationMatch sets the optional parameter "ifGenerationMatch": +// Makes the operation conditional on whether the object's current +// generation matches the given value. Setting to 0 makes the operation +// succeed only if there are no live versions of the object. +func (c *ObjectsGetCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsGetCall { + c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) + return c +} + +// IfGenerationNotMatch sets the optional parameter +// "ifGenerationNotMatch": Makes the operation conditional on whether +// the object's current generation does not match the given value. If no +// live object exists, the precondition fails. Setting to 0 makes the +// operation succeed only if there is a live version of the object. +func (c *ObjectsGetCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsGetCall { + c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": Makes the operation conditional on whether +// the object's current metageneration matches the given value. +func (c *ObjectsGetCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsGetCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": Makes the operation conditional on +// whether the object's current metageneration does not match the given +// value. +func (c *ObjectsGetCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsGetCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) + return c +} + +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to noAcl. +// +// Possible values: +// +// "full" - Include all properties. +// "noAcl" - Omit the owner, acl property. +func (c *ObjectsGetCall) Projection(projection string) *ObjectsGetCall { + c.urlParams_.Set("projection", projection) + return c +} + +// SoftDeleted sets the optional parameter "softDeleted": If true, only +// soft-deleted object versions will be listed. The default is false. +// For more information, see Soft Delete. +func (c *ObjectsGetCall) SoftDeleted(softDeleted bool) *ObjectsGetCall { + c.urlParams_.Set("softDeleted", fmt.Sprint(softDeleted)) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *ObjectsGetCall) UserProject(userProject string) *ObjectsGetCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectsGetCall) Fields(s ...googleapi.Field) *ObjectsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. 
+func (c *ObjectsGetCall) IfNoneMatch(entityTag string) *ObjectsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do and Download +// methods. Any pending HTTP request will be aborted if the provided +// context is canceled. +func (c *ObjectsGetCall) Context(ctx context.Context) *ObjectsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ObjectsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Download fetches the API endpoint's "media" value, instead of the normal +// API response value. If the returned error is nil, the Response is guaranteed to +// have a 2xx status code. Callers must close the Response.Body as usual. +func (c *ObjectsGetCall) Download(opts ...googleapi.CallOption) (*http.Response, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("media") + if err != nil { + return nil, err + } + if err := googleapi.CheckMediaResponse(res); err != nil { + res.Body.Close() + return nil, gensupport.WrapError(err) + } + return res, nil +} + +// Do executes the "storage.objects.get" call. +// Exactly one of *Object or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Object.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ObjectsGetCall) Do(opts ...googleapi.CallOption) (*Object, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Object{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves an object or its metadata.", + // "httpMethod": "GET", + // "id": "storage.objects.get", + // "parameterOrder": [ + // "bucket", + // "object" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of the bucket in which the object resides.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "generation": { + // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifGenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifGenerationNotMatch": { + // "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationNotMatch": { + // "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "object": { + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. Defaults to noAcl.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit the owner, acl property." + // ], + // "location": "query", + // "type": "string" + // }, + // "softDeleted": { + // "description": "If true, only soft-deleted object versions will be listed. The default is false. For more information, see Soft Delete.", + // "location": "query", + // "type": "boolean" + // }, + // "userProject": { + // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o/{object}", + // "response": { + // "$ref": "Object" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ], + // "supportsMediaDownload": true, + // "useMediaDownloadService": true + // } + +} + +// method id "storage.objects.getIamPolicy": + +type ObjectsGetIamPolicyCall struct { + s *Service + bucket string + object string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// GetIamPolicy: Returns an IAM policy for the specified object. +// +// - bucket: Name of the bucket in which the object resides. +// - object: Name of the object. For information about how to URL encode +// object names to be path safe, see Encoding URI Path Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). +func (r *ObjectsService) GetIamPolicy(bucket string, object string) *ObjectsGetIamPolicyCall { + c := &ObjectsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + return c +} + +// Generation sets the optional parameter "generation": If present, +// selects a specific revision of this object (as opposed to the latest +// version, the default). +func (c *ObjectsGetIamPolicyCall) Generation(generation int64) *ObjectsGetIamPolicyCall { + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *ObjectsGetIamPolicyCall) UserProject(userProject string) *ObjectsGetIamPolicyCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectsGetIamPolicyCall) Fields(s ...googleapi.Field) *ObjectsGetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *ObjectsGetIamPolicyCall) IfNoneMatch(entityTag string) *ObjectsGetIamPolicyCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ObjectsGetIamPolicyCall) Context(ctx context.Context) *ObjectsGetIamPolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *ObjectsGetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/iam") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objects.getIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ObjectsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns an IAM policy for the specified object.", + // "httpMethod": "GET", + // "id": "storage.objects.getIamPolicy", + // "parameterOrder": [ + // "bucket", + // "object" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of the bucket in which the object resides.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "generation": { + // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "object": { + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o/{object}/iam", + // "response": { + // "$ref": "Policy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.objects.insert": + +type ObjectsInsertCall struct { + s *Service + bucket string + object *Object + urlParams_ gensupport.URLParams + mediaInfo_ *gensupport.MediaInfo + retry *gensupport.RetryConfig + ctx_ context.Context + header_ http.Header +} + +// Insert: Stores a new object and metadata. +// +// - bucket: Name of the bucket in which to store the new object. +// Overrides the provided object metadata's bucket value, if any. +func (r *ObjectsService) Insert(bucket string, object *Object) *ObjectsInsertCall { + c := &ObjectsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + return c +} + +// ContentEncoding sets the optional parameter "contentEncoding": If +// set, sets the contentEncoding property of the final object to this +// value. Setting this parameter is equivalent to setting the +// contentEncoding metadata property. This can be useful when uploading +// an object with uploadType=media to indicate the encoding of the +// content being uploaded. +func (c *ObjectsInsertCall) ContentEncoding(contentEncoding string) *ObjectsInsertCall { + c.urlParams_.Set("contentEncoding", contentEncoding) + return c +} + +// IfGenerationMatch sets the optional parameter "ifGenerationMatch": +// Makes the operation conditional on whether the object's current +// generation matches the given value. Setting to 0 makes the operation +// succeed only if there are no live versions of the object. +func (c *ObjectsInsertCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsInsertCall { + c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) + return c +} + +// IfGenerationNotMatch sets the optional parameter +// "ifGenerationNotMatch": Makes the operation conditional on whether +// the object's current generation does not match the given value. If no +// live object exists, the precondition fails. Setting to 0 makes the +// operation succeed only if there is a live version of the object. +func (c *ObjectsInsertCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsInsertCall { + c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": Makes the operation conditional on whether +// the object's current metageneration matches the given value. +func (c *ObjectsInsertCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsInsertCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": Makes the operation conditional on +// whether the object's current metageneration does not match the given +// value. 
+func (c *ObjectsInsertCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsInsertCall { c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) return c } -// KmsKeyName sets the optional parameter "kmsKeyName": Resource name of -// the Cloud KMS key, of the form -// projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, -// -// that will be used to encrypt the object. Overrides the object -// -// metadata's kms_key_name value, if any. -func (c *ObjectsInsertCall) KmsKeyName(kmsKeyName string) *ObjectsInsertCall { - c.urlParams_.Set("kmsKeyName", kmsKeyName) +// KmsKeyName sets the optional parameter "kmsKeyName": Resource name of +// the Cloud KMS key, of the form +// projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, +// +// that will be used to encrypt the object. Overrides the object +// +// metadata's kms_key_name value, if any. +func (c *ObjectsInsertCall) KmsKeyName(kmsKeyName string) *ObjectsInsertCall { + c.urlParams_.Set("kmsKeyName", kmsKeyName) + return c +} + +// Name sets the optional parameter "name": Name of the object. Required +// when the object metadata is not otherwise provided. Overrides the +// object metadata's name value, if any. For information about how to +// URL encode object names to be path safe, see Encoding URI Path Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). +func (c *ObjectsInsertCall) Name(name string) *ObjectsInsertCall { + c.urlParams_.Set("name", name) + return c +} + +// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a +// predefined set of access controls to this object. +// +// Possible values: +// +// "authenticatedRead" - Object owner gets OWNER access, and +// +// allAuthenticatedUsers get READER access. +// +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// +// project team owners get OWNER access. +// +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// +// team owners get READER access. +// +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// +// members get access according to their roles. +// +// "publicRead" - Object owner gets OWNER access, and allUsers get +// +// READER access. +func (c *ObjectsInsertCall) PredefinedAcl(predefinedAcl string) *ObjectsInsertCall { + c.urlParams_.Set("predefinedAcl", predefinedAcl) + return c +} + +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to noAcl, unless the object resource +// specifies the acl property, when it defaults to full. +// +// Possible values: +// +// "full" - Include all properties. +// "noAcl" - Omit the owner, acl property. +func (c *ObjectsInsertCall) Projection(projection string) *ObjectsInsertCall { + c.urlParams_.Set("projection", projection) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *ObjectsInsertCall) UserProject(userProject string) *ObjectsInsertCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Media specifies the media to upload in one or more chunks. The chunk +// size may be controlled by supplying a MediaOption generated by +// googleapi.ChunkSize. 
The chunk size defaults to +// googleapi.DefaultUploadChunkSize. The Content-Type header used in the +// upload request will be determined by sniffing the contents of r, +// unless a MediaOption generated by googleapi.ContentType is +// supplied. +// At most one of Media and ResumableMedia may be set. +func (c *ObjectsInsertCall) Media(r io.Reader, options ...googleapi.MediaOption) *ObjectsInsertCall { + if ct := c.object.ContentType; ct != "" { + options = append([]googleapi.MediaOption{googleapi.ContentType(ct)}, options...) + } + c.mediaInfo_ = gensupport.NewInfoFromMedia(r, options) + return c +} + +// ResumableMedia specifies the media to upload in chunks and can be +// canceled with ctx. +// +// Deprecated: use Media instead. +// +// At most one of Media and ResumableMedia may be set. mediaType +// identifies the MIME media type of the upload, such as "image/png". If +// mediaType is "", it will be auto-detected. The provided ctx will +// supersede any context previously provided to the Context method. +func (c *ObjectsInsertCall) ResumableMedia(ctx context.Context, r io.ReaderAt, size int64, mediaType string) *ObjectsInsertCall { + c.ctx_ = ctx + c.mediaInfo_ = gensupport.NewInfoFromResumableMedia(r, size, mediaType) + return c +} + +// ProgressUpdater provides a callback function that will be called +// after every chunk. It should be a low-latency function in order to +// not slow down the upload operation. This should only be called when +// using ResumableMedia (as opposed to Media). +func (c *ObjectsInsertCall) ProgressUpdater(pu googleapi.ProgressUpdater) *ObjectsInsertCall { + c.mediaInfo_.SetProgressUpdater(pu) + return c +} + +// WithRetry causes the library to retry the initial request of the +// upload (for resumable uploads) or the entire upload (for multipart +// uploads) if a transient error occurs. This is contingent on ChunkSize +// being > 0 (so that the input data may be buffered). The backoff +// argument will be used to determine exponential backoff timing, and the +// errorFunc is used to determine which errors are considered retryable. +// By default, exponential backoff will be applied using gax defaults, and +// the following errors are retried: +// +// - HTTP responses with codes 408, 429, 502, 503, and 504. +// +// - Transient network errors such as connection reset and +// io.ErrUnexpectedEOF. +// +// - Errors which are considered transient using the Temporary() +// interface. +// +// - Wrapped versions of these errors. +func (c *ObjectsInsertCall) WithRetry(bo *gax.Backoff, errorFunc func(err error) bool) *ObjectsInsertCall { + c.retry = &gensupport.RetryConfig{ + Backoff: bo, + ShouldRetry: errorFunc, + } + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectsInsertCall) Fields(s ...googleapi.Field) *ObjectsInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +// This context will supersede any context previously provided to the +// ResumableMedia method. +func (c *ObjectsInsertCall) Context(ctx context.Context) *ObjectsInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *ObjectsInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectsInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.object) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o") + if c.mediaInfo_ != nil { + urls = googleapi.ResolveRelative(c.s.BasePath, "/upload/storage/v1/b/{bucket}/o") + c.urlParams_.Set("uploadType", c.mediaInfo_.UploadType()) + } + if body == nil { + body = new(bytes.Buffer) + reqHeaders.Set("Content-Type", "application/json") + } + body, getBody, cleanup := c.mediaInfo_.UploadRequest(reqHeaders, body) + defer cleanup() + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + req.GetBody = getBody + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + if c.retry != nil { + return gensupport.SendRequestWithRetry(c.ctx_, c.s.client, req, c.retry) + } + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objects.insert" call. +// Exactly one of *Object or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Object.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ObjectsInsertCall) Do(opts ...googleapi.CallOption) (*Object, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + rx := c.mediaInfo_.ResumableUpload(res.Header.Get("Location")) + if rx != nil { + rx.Client = c.s.client + rx.UserAgent = c.s.userAgent() + rx.Retry = c.retry + ctx := c.ctx_ + if ctx == nil { + ctx = context.TODO() + } + res, err = rx.Upload(ctx) + if err != nil { + return nil, err + } + defer res.Body.Close() + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + } + ret := &Object{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Stores a new object and metadata.", + // "httpMethod": "POST", + // "id": "storage.objects.insert", + // "mediaUpload": { + // "accept": [ + // "*/*" + // ], + // "protocols": { + // "resumable": { + // "multipart": true, + // "path": "/resumable/upload/storage/v1/b/{bucket}/o" + // }, + // "simple": { + // "multipart": true, + // "path": "/upload/storage/v1/b/{bucket}/o" + // } + // } + // }, + // "parameterOrder": [ + // "bucket" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "contentEncoding": { + // "description": "If set, sets the contentEncoding property of the final object to this value. Setting this parameter is equivalent to setting the contentEncoding metadata property. This can be useful when uploading an object with uploadType=media to indicate the encoding of the content being uploaded.", + // "location": "query", + // "type": "string" + // }, + // "ifGenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifGenerationNotMatch": { + // "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. 
Setting to 0 makes the operation succeed only if there is a live version of the object.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationNotMatch": { + // "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "kmsKeyName": { + // "description": "Resource name of the Cloud KMS key, of the form projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be used to encrypt the object. Overrides the object metadata's kms_key_name value, if any.", + // "location": "query", + // "type": "string" + // }, + // "name": { + // "description": "Name of the object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", + // "location": "query", + // "type": "string" + // }, + // "predefinedAcl": { + // "description": "Apply a predefined set of access controls to this object.", + // "enum": [ + // "authenticatedRead", + // "bucketOwnerFullControl", + // "bucketOwnerRead", + // "private", + // "projectPrivate", + // "publicRead" + // ], + // "enumDescriptions": [ + // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + // "Object owner gets OWNER access, and project team owners get OWNER access.", + // "Object owner gets OWNER access, and project team owners get READER access.", + // "Object owner gets OWNER access.", + // "Object owner gets OWNER access, and project team members get access according to their roles.", + // "Object owner gets OWNER access, and allUsers get READER access." + // ], + // "location": "query", + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit the owner, acl property." + // ], + // "location": "query", + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o", + // "request": { + // "$ref": "Object" + // }, + // "response": { + // "$ref": "Object" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ], + // "supportsMediaUpload": true + // } + +} + +// method id "storage.objects.list": + +type ObjectsListCall struct { + s *Service + bucket string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves a list of objects matching the criteria. +// +// - bucket: Name of the bucket in which to look for objects. 
+func (r *ObjectsService) List(bucket string) *ObjectsListCall { + c := &ObjectsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + return c +} + +// Delimiter sets the optional parameter "delimiter": Returns results in +// a directory-like mode. items will contain only objects whose names, +// aside from the prefix, do not contain delimiter. Objects whose names, +// aside from the prefix, contain delimiter will have their name, +// truncated after the delimiter, returned in prefixes. Duplicate +// prefixes are omitted. +func (c *ObjectsListCall) Delimiter(delimiter string) *ObjectsListCall { + c.urlParams_.Set("delimiter", delimiter) + return c +} + +// EndOffset sets the optional parameter "endOffset": Filter results to +// objects whose names are lexicographically before endOffset. If +// startOffset is also set, the objects listed will have names between +// startOffset (inclusive) and endOffset (exclusive). +func (c *ObjectsListCall) EndOffset(endOffset string) *ObjectsListCall { + c.urlParams_.Set("endOffset", endOffset) + return c +} + +// IncludeFoldersAsPrefixes sets the optional parameter +// "includeFoldersAsPrefixes": Only applicable if delimiter is set to +// '/'. If true, will also include folders and managed folders (besides +// objects) in the returned prefixes. +func (c *ObjectsListCall) IncludeFoldersAsPrefixes(includeFoldersAsPrefixes bool) *ObjectsListCall { + c.urlParams_.Set("includeFoldersAsPrefixes", fmt.Sprint(includeFoldersAsPrefixes)) + return c +} + +// IncludeTrailingDelimiter sets the optional parameter +// "includeTrailingDelimiter": If true, objects that end in exactly one +// instance of delimiter will have their metadata included in items in +// addition to prefixes. +func (c *ObjectsListCall) IncludeTrailingDelimiter(includeTrailingDelimiter bool) *ObjectsListCall { + c.urlParams_.Set("includeTrailingDelimiter", fmt.Sprint(includeTrailingDelimiter)) + return c +} + +// MatchGlob sets the optional parameter "matchGlob": Filter results to +// objects and prefixes that match this glob pattern. +func (c *ObjectsListCall) MatchGlob(matchGlob string) *ObjectsListCall { + c.urlParams_.Set("matchGlob", matchGlob) + return c +} + +// MaxResults sets the optional parameter "maxResults": Maximum number +// of items plus prefixes to return in a single page of responses. As +// duplicate prefixes are omitted, fewer total results may be returned +// than requested. The service will use this parameter or 1,000 items, +// whichever is smaller. +func (c *ObjectsListCall) MaxResults(maxResults int64) *ObjectsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// PageToken sets the optional parameter "pageToken": A +// previously-returned page token representing part of the larger set of +// results to view. +func (c *ObjectsListCall) PageToken(pageToken string) *ObjectsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Prefix sets the optional parameter "prefix": Filter results to +// objects whose names begin with this prefix. +func (c *ObjectsListCall) Prefix(prefix string) *ObjectsListCall { + c.urlParams_.Set("prefix", prefix) + return c +} + +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to noAcl. +// +// Possible values: +// +// "full" - Include all properties. +// "noAcl" - Omit the owner, acl property. 
+func (c *ObjectsListCall) Projection(projection string) *ObjectsListCall { + c.urlParams_.Set("projection", projection) + return c +} + +// SoftDeleted sets the optional parameter "softDeleted": If true, only +// soft-deleted object versions will be listed. The default is false. +// For more information, see Soft Delete. +func (c *ObjectsListCall) SoftDeleted(softDeleted bool) *ObjectsListCall { + c.urlParams_.Set("softDeleted", fmt.Sprint(softDeleted)) + return c +} + +// StartOffset sets the optional parameter "startOffset": Filter results +// to objects whose names are lexicographically equal to or after +// startOffset. If endOffset is also set, the objects listed will have +// names between startOffset (inclusive) and endOffset (exclusive). +func (c *ObjectsListCall) StartOffset(startOffset string) *ObjectsListCall { + c.urlParams_.Set("startOffset", startOffset) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *ObjectsListCall) UserProject(userProject string) *ObjectsListCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Versions sets the optional parameter "versions": If true, lists all +// versions of an object as distinct results. The default is false. For +// more information, see Object Versioning. +func (c *ObjectsListCall) Versions(versions bool) *ObjectsListCall { + c.urlParams_.Set("versions", fmt.Sprint(versions)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectsListCall) Fields(s ...googleapi.Field) *ObjectsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *ObjectsListCall) IfNoneMatch(entityTag string) *ObjectsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ObjectsListCall) Context(ctx context.Context) *ObjectsListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ObjectsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o") + urls += "?" 
+ c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objects.list" call. +// Exactly one of *Objects or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Objects.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ObjectsListCall) Do(opts ...googleapi.CallOption) (*Objects, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Objects{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a list of objects matching the criteria.", + // "httpMethod": "GET", + // "id": "storage.objects.list", + // "parameterOrder": [ + // "bucket" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of the bucket in which to look for objects.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "delimiter": { + // "description": "Returns results in a directory-like mode. items will contain only objects whose names, aside from the prefix, do not contain delimiter. Objects whose names, aside from the prefix, contain delimiter will have their name, truncated after the delimiter, returned in prefixes. Duplicate prefixes are omitted.", + // "location": "query", + // "type": "string" + // }, + // "endOffset": { + // "description": "Filter results to objects whose names are lexicographically before endOffset. If startOffset is also set, the objects listed will have names between startOffset (inclusive) and endOffset (exclusive).", + // "location": "query", + // "type": "string" + // }, + // "includeFoldersAsPrefixes": { + // "description": "Only applicable if delimiter is set to '/'. If true, will also include folders and managed folders (besides objects) in the returned prefixes.", + // "location": "query", + // "type": "boolean" + // }, + // "includeTrailingDelimiter": { + // "description": "If true, objects that end in exactly one instance of delimiter will have their metadata included in items in addition to prefixes.", + // "location": "query", + // "type": "boolean" + // }, + // "matchGlob": { + // "description": "Filter results to objects and prefixes that match this glob pattern.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "1000", + // "description": "Maximum number of items plus prefixes to return in a single page of responses. As duplicate prefixes are omitted, fewer total results may be returned than requested. 
The service will use this parameter or 1,000 items, whichever is smaller.", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "A previously-returned page token representing part of the larger set of results to view.", + // "location": "query", + // "type": "string" + // }, + // "prefix": { + // "description": "Filter results to objects whose names begin with this prefix.", + // "location": "query", + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. Defaults to noAcl.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit the owner, acl property." + // ], + // "location": "query", + // "type": "string" + // }, + // "softDeleted": { + // "description": "If true, only soft-deleted object versions will be listed. The default is false. For more information, see Soft Delete.", + // "location": "query", + // "type": "boolean" + // }, + // "startOffset": { + // "description": "Filter results to objects whose names are lexicographically equal to or after startOffset. If endOffset is also set, the objects listed will have names between startOffset (inclusive) and endOffset (exclusive).", + // "location": "query", + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // }, + // "versions": { + // "description": "If true, lists all versions of an object as distinct results. The default is false. For more information, see Object Versioning.", + // "location": "query", + // "type": "boolean" + // } + // }, + // "path": "b/{bucket}/o", + // "response": { + // "$ref": "Objects" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ], + // "supportsSubscription": true + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ObjectsListCall) Pages(ctx context.Context, f func(*Objects) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "storage.objects.patch": + +type ObjectsPatchCall struct { + s *Service + bucket string + object string + object2 *Object + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Patches an object's metadata. +// +// - bucket: Name of the bucket in which the object resides. +// - object: Name of the object. For information about how to URL encode +// object names to be path safe, see Encoding URI Path Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). 
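// A minimal usage sketch of the Pages helper above and the Patch call defined
// just below, assuming an authenticated *storage.Service and context; the
// bucket and object names, the metadata values, and the Items/Name/Size fields
// read off the package's Objects/Object types are illustrative assumptions,
// not taken from this diff.

import (
	"context"
	"fmt"

	"google.golang.org/api/storage/v1"
)

func listAndTag(ctx context.Context, svc *storage.Service) error {
	// Page through "storage.objects.list" results; Pages follows NextPageToken
	// until it is empty.
	err := svc.Objects.List("my-bucket").Prefix("logs/").Pages(ctx, func(page *storage.Objects) error {
		for _, o := range page.Items {
			fmt.Println(o.Name, o.Size)
		}
		return nil
	})
	if err != nil {
		return err
	}
	// Patch one object's metadata, guarded by an ifMetagenerationMatch precondition.
	_, err = svc.Objects.Patch("my-bucket", "logs/2024-01-01.txt", &storage.Object{
		Metadata: map[string]string{"reviewed": "true"},
	}).IfMetagenerationMatch(3).Context(ctx).Do()
	return err
}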
+func (r *ObjectsService) Patch(bucket string, object string, object2 *Object) *ObjectsPatchCall { + c := &ObjectsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + c.object2 = object2 + return c +} + +// Generation sets the optional parameter "generation": If present, +// selects a specific revision of this object (as opposed to the latest +// version, the default). +func (c *ObjectsPatchCall) Generation(generation int64) *ObjectsPatchCall { + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + +// IfGenerationMatch sets the optional parameter "ifGenerationMatch": +// Makes the operation conditional on whether the object's current +// generation matches the given value. Setting to 0 makes the operation +// succeed only if there are no live versions of the object. +func (c *ObjectsPatchCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsPatchCall { + c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) + return c +} + +// IfGenerationNotMatch sets the optional parameter +// "ifGenerationNotMatch": Makes the operation conditional on whether +// the object's current generation does not match the given value. If no +// live object exists, the precondition fails. Setting to 0 makes the +// operation succeed only if there is a live version of the object. +func (c *ObjectsPatchCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsPatchCall { + c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": Makes the operation conditional on whether +// the object's current metageneration matches the given value. +func (c *ObjectsPatchCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsPatchCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) return c } -// Name sets the optional parameter "name": Name of the object. Required -// when the object metadata is not otherwise provided. Overrides the -// object metadata's name value, if any. For information about how to -// URL encode object names to be path safe, see Encoding URI Path Parts. -func (c *ObjectsInsertCall) Name(name string) *ObjectsInsertCall { - c.urlParams_.Set("name", name) +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": Makes the operation conditional on +// whether the object's current metageneration does not match the given +// value. +func (c *ObjectsPatchCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsPatchCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) + return c +} + +// OverrideUnlockedRetention sets the optional parameter +// "overrideUnlockedRetention": Must be true to remove the retention +// configuration, reduce its unlocked retention period, or change its +// mode from unlocked to locked. +func (c *ObjectsPatchCall) OverrideUnlockedRetention(overrideUnlockedRetention bool) *ObjectsPatchCall { + c.urlParams_.Set("overrideUnlockedRetention", fmt.Sprint(overrideUnlockedRetention)) return c } @@ -9828,101 +13706,325 @@ func (c *ObjectsInsertCall) Name(name string) *ObjectsInsertCall { // "publicRead" - Object owner gets OWNER access, and allUsers get // // READER access. 
-func (c *ObjectsInsertCall) PredefinedAcl(predefinedAcl string) *ObjectsInsertCall { +func (c *ObjectsPatchCall) PredefinedAcl(predefinedAcl string) *ObjectsPatchCall { c.urlParams_.Set("predefinedAcl", predefinedAcl) return c } // Projection sets the optional parameter "projection": Set of -// properties to return. Defaults to noAcl, unless the object resource -// specifies the acl property, when it defaults to full. +// properties to return. Defaults to full. // // Possible values: // // "full" - Include all properties. // "noAcl" - Omit the owner, acl property. -func (c *ObjectsInsertCall) Projection(projection string) *ObjectsInsertCall { +func (c *ObjectsPatchCall) Projection(projection string) *ObjectsPatchCall { c.urlParams_.Set("projection", projection) return c } -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *ObjectsInsertCall) UserProject(userProject string) *ObjectsInsertCall { - c.urlParams_.Set("userProject", userProject) +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request, for Requester Pays buckets. +func (c *ObjectsPatchCall) UserProject(userProject string) *ObjectsPatchCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectsPatchCall) Fields(s ...googleapi.Field) *ObjectsPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ObjectsPatchCall) Context(ctx context.Context) *ObjectsPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ObjectsPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectsPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.object2) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objects.patch" call. +// Exactly one of *Object or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Object.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. 
+func (c *ObjectsPatchCall) Do(opts ...googleapi.CallOption) (*Object, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Object{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Patches an object's metadata.", + // "httpMethod": "PATCH", + // "id": "storage.objects.patch", + // "parameterOrder": [ + // "bucket", + // "object" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of the bucket in which the object resides.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "generation": { + // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifGenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifGenerationNotMatch": { + // "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationNotMatch": { + // "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "object": { + // "description": "Name of the object. 
For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "overrideUnlockedRetention": { + // "description": "Must be true to remove the retention configuration, reduce its unlocked retention period, or change its mode from unlocked to locked.", + // "location": "query", + // "type": "boolean" + // }, + // "predefinedAcl": { + // "description": "Apply a predefined set of access controls to this object.", + // "enum": [ + // "authenticatedRead", + // "bucketOwnerFullControl", + // "bucketOwnerRead", + // "private", + // "projectPrivate", + // "publicRead" + // ], + // "enumDescriptions": [ + // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + // "Object owner gets OWNER access, and project team owners get OWNER access.", + // "Object owner gets OWNER access, and project team owners get READER access.", + // "Object owner gets OWNER access.", + // "Object owner gets OWNER access, and project team members get access according to their roles.", + // "Object owner gets OWNER access, and allUsers get READER access." + // ], + // "location": "query", + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. Defaults to full.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit the owner, acl property." + // ], + // "location": "query", + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request, for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o/{object}", + // "request": { + // "$ref": "Object" + // }, + // "response": { + // "$ref": "Object" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.objects.restore": + +type ObjectsRestoreCall struct { + s *Service + bucket string + object string + object2 *Object + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Restore: Restores a soft-deleted object. +// +// - bucket: Name of the bucket in which the object resides. +// - generation: Selects a specific revision of this object. +// - object: Name of the object. For information about how to URL encode +// object names to be path safe, see Encoding URI Path Parts. +func (r *ObjectsService) Restore(bucket string, object string, object2 *Object) *ObjectsRestoreCall { + c := &ObjectsRestoreCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + c.object2 = object2 + return c +} + +// CopySourceAcl sets the optional parameter "copySourceAcl": If true, +// copies the source object's ACL; otherwise, uses the bucket's default +// object ACL. The default is false. +func (c *ObjectsRestoreCall) CopySourceAcl(copySourceAcl bool) *ObjectsRestoreCall { + c.urlParams_.Set("copySourceAcl", fmt.Sprint(copySourceAcl)) return c } -// Media specifies the media to upload in one or more chunks. The chunk -// size may be controlled by supplying a MediaOption generated by -// googleapi.ChunkSize. 
The chunk size defaults to -// googleapi.DefaultUploadChunkSize.The Content-Type header used in the -// upload request will be determined by sniffing the contents of r, -// unless a MediaOption generated by googleapi.ContentType is -// supplied. -// At most one of Media and ResumableMedia may be set. -func (c *ObjectsInsertCall) Media(r io.Reader, options ...googleapi.MediaOption) *ObjectsInsertCall { - if ct := c.object.ContentType; ct != "" { - options = append([]googleapi.MediaOption{googleapi.ContentType(ct)}, options...) - } - c.mediaInfo_ = gensupport.NewInfoFromMedia(r, options) +// IfGenerationMatch sets the optional parameter "ifGenerationMatch": +// Makes the operation conditional on whether the object's one live +// generation matches the given value. Setting to 0 makes the operation +// succeed only if there are no live versions of the object. +func (c *ObjectsRestoreCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsRestoreCall { + c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) return c } -// ResumableMedia specifies the media to upload in chunks and can be -// canceled with ctx. -// -// Deprecated: use Media instead. -// -// At most one of Media and ResumableMedia may be set. mediaType -// identifies the MIME media type of the upload, such as "image/png". If -// mediaType is "", it will be auto-detected. The provided ctx will -// supersede any context previously provided to the Context method. -func (c *ObjectsInsertCall) ResumableMedia(ctx context.Context, r io.ReaderAt, size int64, mediaType string) *ObjectsInsertCall { - c.ctx_ = ctx - c.mediaInfo_ = gensupport.NewInfoFromResumableMedia(r, size, mediaType) +// IfGenerationNotMatch sets the optional parameter +// "ifGenerationNotMatch": Makes the operation conditional on whether +// none of the object's live generations match the given value. If no +// live object exists, the precondition fails. Setting to 0 makes the +// operation succeed only if there is a live version of the object. +func (c *ObjectsRestoreCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsRestoreCall { + c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) return c } -// ProgressUpdater provides a callback function that will be called -// after every chunk. It should be a low-latency function in order to -// not slow down the upload operation. This should only be called when -// using ResumableMedia (as opposed to Media). -func (c *ObjectsInsertCall) ProgressUpdater(pu googleapi.ProgressUpdater) *ObjectsInsertCall { - c.mediaInfo_.SetProgressUpdater(pu) +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": Makes the operation conditional on whether +// the object's one live metageneration matches the given value. +func (c *ObjectsRestoreCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsRestoreCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) return c } -// WithRetry causes the library to retry the initial request of the -// upload(for resumable uploads) or the entire upload (for multipart -// uploads) ifa transient error occurs. This is contingent on ChunkSize -// being > 0 (sothat the input data may be buffered). The backoff -// argument will be used todetermine exponential backoff timing, and the -// errorFunc is used to determinewhich errors are considered retryable. 
-// By default, exponetial backoff will beapplied using gax defaults, and -// the following errors are retried: -// -// - HTTP responses with codes 408, 429, 502, 503, and 504. -// -// - Transient network errors such as connection reset and -// io.ErrUnexpectedEOF. +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": Makes the operation conditional on +// whether none of the object's live metagenerations match the given +// value. +func (c *ObjectsRestoreCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsRestoreCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) + return c +} + +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to full. // -// - Errors which are considered transient using the Temporary() -// interface. +// Possible values: // -// - Wrapped versions of these errors. -func (c *ObjectsInsertCall) WithRetry(bo *gax.Backoff, errorFunc func(err error) bool) *ObjectsInsertCall { - c.retry = &gensupport.RetryConfig{ - Backoff: bo, - ShouldRetry: errorFunc, - } +// "full" - Include all properties. +// "noAcl" - Omit the owner, acl property. +func (c *ObjectsRestoreCall) Projection(projection string) *ObjectsRestoreCall { + c.urlParams_.Set("projection", projection) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *ObjectsRestoreCall) UserProject(userProject string) *ObjectsRestoreCall { + c.urlParams_.Set("userProject", userProject) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ObjectsInsertCall) Fields(s ...googleapi.Field) *ObjectsInsertCall { +func (c *ObjectsRestoreCall) Fields(s ...googleapi.Field) *ObjectsRestoreCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -9930,23 +14032,21 @@ func (c *ObjectsInsertCall) Fields(s ...googleapi.Field) *ObjectsInsertCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -// This context will supersede any context previously provided to the -// ResumableMedia method. -func (c *ObjectsInsertCall) Context(ctx context.Context) *ObjectsInsertCall { +func (c *ObjectsRestoreCall) Context(ctx context.Context) *ObjectsRestoreCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *ObjectsInsertCall) Header() http.Header { +func (c *ObjectsRestoreCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ObjectsInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *ObjectsRestoreCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -9954,48 +14054,35 @@ func (c *ObjectsInsertCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.object) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.object2) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o") - if c.mediaInfo_ != nil { - urls = googleapi.ResolveRelative(c.s.BasePath, "/upload/storage/v1/b/{bucket}/o") - c.urlParams_.Set("uploadType", c.mediaInfo_.UploadType()) - } - if body == nil { - body = new(bytes.Buffer) - reqHeaders.Set("Content-Type", "application/json") - } - body, getBody, cleanup := c.mediaInfo_.UploadRequest(reqHeaders, body) - defer cleanup() + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/restore") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders - req.GetBody = getBody googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, + "object": c.object, }) - if c.retry != nil { - return gensupport.SendRequestWithRetry(c.ctx_, c.s.client, req, c.retry) - } return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "storage.objects.insert" call. +// Do executes the "storage.objects.restore" call. // Exactly one of *Object or error will be non-nil. Any non-2xx status // code is an error. Response headers are in either // *Object.ServerResponse.Header or (if a response was returned at all) // in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified // was returned. -func (c *ObjectsInsertCall) Do(opts ...googleapi.CallOption) (*Object, error) { +func (c *ObjectsRestoreCall) Do(opts ...googleapi.CallOption) (*Object, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -10014,24 +14101,6 @@ func (c *ObjectsInsertCall) Do(opts ...googleapi.CallOption) (*Object, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - rx := c.mediaInfo_.ResumableUpload(res.Header.Get("Location")) - if rx != nil { - rx.Client = c.s.client - rx.UserAgent = c.s.userAgent() - rx.Retry = c.retry - ctx := c.ctx_ - if ctx == nil { - ctx = context.TODO() - } - res, err = rx.Upload(ctx) - if err != nil { - return nil, err - } - defer res.Body.Close() - if err := googleapi.CheckResponse(res); err != nil { - return nil, gensupport.WrapError(err) - } - } ret := &Object{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, @@ -10044,96 +14113,64 @@ func (c *ObjectsInsertCall) Do(opts ...googleapi.CallOption) (*Object, error) { } return ret, nil // { - // "description": "Stores a new object and metadata.", + // "description": "Restores a soft-deleted object.", // "httpMethod": "POST", - // "id": "storage.objects.insert", - // "mediaUpload": { - // "accept": [ - // "*/*" - // ], - // "protocols": { - // "resumable": { - // "multipart": true, - // "path": "/resumable/upload/storage/v1/b/{bucket}/o" - // }, - // "simple": { - // "multipart": true, - // "path": "/upload/storage/v1/b/{bucket}/o" - // } - // } - // }, + // "id": "storage.objects.restore", // "parameterOrder": [ - // "bucket" + // "bucket", + // "object" // ], // "parameters": { // "bucket": { - // "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.", + // "description": "Name of the bucket in which the object resides.", // "location": "path", // "required": true, // "type": "string" // }, - // "contentEncoding": { - // "description": "If set, sets the contentEncoding property of the final object to this value. Setting this parameter is equivalent to setting the contentEncoding metadata property. This can be useful when uploading an object with uploadType=media to indicate the encoding of the content being uploaded.", + // "copySourceAcl": { + // "description": "If true, copies the source object's ACL; otherwise, uses the bucket's default object ACL. The default is false.", // "location": "query", + // "type": "boolean" + // }, + // "generation": { + // "description": "Selects a specific revision of this object.", + // "format": "int64", + // "location": "query", + // "required": true, // "type": "string" // }, // "ifGenerationMatch": { - // "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", + // "description": "Makes the operation conditional on whether the object's one live generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", // "format": "int64", // "location": "query", // "type": "string" // }, // "ifGenerationNotMatch": { - // "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.", + // "description": "Makes the operation conditional on whether none of the object's live generations match the given value. If no live object exists, the precondition fails. 
Setting to 0 makes the operation succeed only if there is a live version of the object.", // "format": "int64", // "location": "query", // "type": "string" // }, // "ifMetagenerationMatch": { - // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", + // "description": "Makes the operation conditional on whether the object's one live metageneration matches the given value.", // "format": "int64", // "location": "query", // "type": "string" // }, // "ifMetagenerationNotMatch": { - // "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", + // "description": "Makes the operation conditional on whether none of the object's live metagenerations match the given value.", // "format": "int64", // "location": "query", // "type": "string" // }, - // "kmsKeyName": { - // "description": "Resource name of the Cloud KMS key, of the form projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be used to encrypt the object. Overrides the object metadata's kms_key_name value, if any.", - // "location": "query", - // "type": "string" - // }, - // "name": { - // "description": "Name of the object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - // "location": "query", - // "type": "string" - // }, - // "predefinedAcl": { - // "description": "Apply a predefined set of access controls to this object.", - // "enum": [ - // "authenticatedRead", - // "bucketOwnerFullControl", - // "bucketOwnerRead", - // "private", - // "projectPrivate", - // "publicRead" - // ], - // "enumDescriptions": [ - // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", - // "Object owner gets OWNER access, and project team owners get OWNER access.", - // "Object owner gets OWNER access, and project team owners get READER access.", - // "Object owner gets OWNER access.", - // "Object owner gets OWNER access, and project team members get access according to their roles.", - // "Object owner gets OWNER access, and allUsers get READER access." - // ], - // "location": "query", + // "object": { + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, // "type": "string" // }, // "projection": { - // "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.", + // "description": "Set of properties to return. 
Defaults to full.", // "enum": [ // "full", // "noAcl" @@ -10151,7 +14188,7 @@ func (c *ObjectsInsertCall) Do(opts ...googleapi.CallOption) (*Object, error) { // "type": "string" // } // }, - // "path": "b/{bucket}/o", + // "path": "b/{bucket}/o/{object}/restore", // "request": { // "$ref": "Object" // }, @@ -10160,200 +14197,286 @@ func (c *ObjectsInsertCall) Do(opts ...googleapi.CallOption) (*Object, error) { // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_write" - // ], - // "supportsMediaUpload": true + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] // } } -// method id "storage.objects.list": +// method id "storage.objects.rewrite": + +type ObjectsRewriteCall struct { + s *Service + sourceBucket string + sourceObject string + destinationBucket string + destinationObject string + object *Object + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Rewrite: Rewrites a source object to a destination object. Optionally +// overrides metadata. +// +// - destinationBucket: Name of the bucket in which to store the new +// object. Overrides the provided object metadata's bucket value, if +// any. +// - destinationObject: Name of the new object. Required when the object +// metadata is not otherwise provided. Overrides the object metadata's +// name value, if any. For information about how to URL encode object +// names to be path safe, see Encoding URI Path Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). +// - sourceBucket: Name of the bucket in which to find the source +// object. +// - sourceObject: Name of the source object. For information about how +// to URL encode object names to be path safe, see Encoding URI Path +// Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). +func (r *ObjectsService) Rewrite(sourceBucket string, sourceObject string, destinationBucket string, destinationObject string, object *Object) *ObjectsRewriteCall { + c := &ObjectsRewriteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.sourceBucket = sourceBucket + c.sourceObject = sourceObject + c.destinationBucket = destinationBucket + c.destinationObject = destinationObject + c.object = object + return c +} + +// DestinationKmsKeyName sets the optional parameter +// "destinationKmsKeyName": Resource name of the Cloud KMS key, of the +// form +// projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, +// +// that will be used to encrypt the object. Overrides the object +// +// metadata's kms_key_name value, if any. +func (c *ObjectsRewriteCall) DestinationKmsKeyName(destinationKmsKeyName string) *ObjectsRewriteCall { + c.urlParams_.Set("destinationKmsKeyName", destinationKmsKeyName) + return c +} + +// DestinationPredefinedAcl sets the optional parameter +// "destinationPredefinedAcl": Apply a predefined set of access controls +// to the destination object. +// +// Possible values: +// +// "authenticatedRead" - Object owner gets OWNER access, and +// +// allAuthenticatedUsers get READER access. +// +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// +// project team owners get OWNER access. +// +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// +// team owners get READER access. +// +// "private" - Object owner gets OWNER access. 
+// "projectPrivate" - Object owner gets OWNER access, and project team +// +// members get access according to their roles. +// +// "publicRead" - Object owner gets OWNER access, and allUsers get +// +// READER access. +func (c *ObjectsRewriteCall) DestinationPredefinedAcl(destinationPredefinedAcl string) *ObjectsRewriteCall { + c.urlParams_.Set("destinationPredefinedAcl", destinationPredefinedAcl) + return c +} -type ObjectsListCall struct { - s *Service - bucket string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +// IfGenerationMatch sets the optional parameter "ifGenerationMatch": +// Makes the operation conditional on whether the object's current +// generation matches the given value. Setting to 0 makes the operation +// succeed only if there are no live versions of the object. +func (c *ObjectsRewriteCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsRewriteCall { + c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) + return c } -// List: Retrieves a list of objects matching the criteria. -// -// - bucket: Name of the bucket in which to look for objects. -func (r *ObjectsService) List(bucket string) *ObjectsListCall { - c := &ObjectsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket +// IfGenerationNotMatch sets the optional parameter +// "ifGenerationNotMatch": Makes the operation conditional on whether +// the object's current generation does not match the given value. If no +// live object exists, the precondition fails. Setting to 0 makes the +// operation succeed only if there is a live version of the object. +func (c *ObjectsRewriteCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsRewriteCall { + c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) return c } -// Delimiter sets the optional parameter "delimiter": Returns results in -// a directory-like mode. items will contain only objects whose names, -// aside from the prefix, do not contain delimiter. Objects whose names, -// aside from the prefix, contain delimiter will have their name, -// truncated after the delimiter, returned in prefixes. Duplicate -// prefixes are omitted. -func (c *ObjectsListCall) Delimiter(delimiter string) *ObjectsListCall { - c.urlParams_.Set("delimiter", delimiter) +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": Makes the operation conditional on whether +// the destination object's current metageneration matches the given +// value. +func (c *ObjectsRewriteCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsRewriteCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) return c } -// EndOffset sets the optional parameter "endOffset": Filter results to -// objects whose names are lexicographically before endOffset. If -// startOffset is also set, the objects listed will have names between -// startOffset (inclusive) and endOffset (exclusive). -func (c *ObjectsListCall) EndOffset(endOffset string) *ObjectsListCall { - c.urlParams_.Set("endOffset", endOffset) +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": Makes the operation conditional on +// whether the destination object's current metageneration does not +// match the given value. 
+func (c *ObjectsRewriteCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsRewriteCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) return c } -// IncludeTrailingDelimiter sets the optional parameter -// "includeTrailingDelimiter": If true, objects that end in exactly one -// instance of delimiter will have their metadata included in items in -// addition to prefixes. -func (c *ObjectsListCall) IncludeTrailingDelimiter(includeTrailingDelimiter bool) *ObjectsListCall { - c.urlParams_.Set("includeTrailingDelimiter", fmt.Sprint(includeTrailingDelimiter)) +// IfSourceGenerationMatch sets the optional parameter +// "ifSourceGenerationMatch": Makes the operation conditional on whether +// the source object's current generation matches the given value. +func (c *ObjectsRewriteCall) IfSourceGenerationMatch(ifSourceGenerationMatch int64) *ObjectsRewriteCall { + c.urlParams_.Set("ifSourceGenerationMatch", fmt.Sprint(ifSourceGenerationMatch)) return c } -// MatchGlob sets the optional parameter "matchGlob": Filter results to -// objects and prefixes that match this glob pattern. -func (c *ObjectsListCall) MatchGlob(matchGlob string) *ObjectsListCall { - c.urlParams_.Set("matchGlob", matchGlob) +// IfSourceGenerationNotMatch sets the optional parameter +// "ifSourceGenerationNotMatch": Makes the operation conditional on +// whether the source object's current generation does not match the +// given value. +func (c *ObjectsRewriteCall) IfSourceGenerationNotMatch(ifSourceGenerationNotMatch int64) *ObjectsRewriteCall { + c.urlParams_.Set("ifSourceGenerationNotMatch", fmt.Sprint(ifSourceGenerationNotMatch)) return c } -// MaxResults sets the optional parameter "maxResults": Maximum number -// of items plus prefixes to return in a single page of responses. As -// duplicate prefixes are omitted, fewer total results may be returned -// than requested. The service will use this parameter or 1,000 items, -// whichever is smaller. -func (c *ObjectsListCall) MaxResults(maxResults int64) *ObjectsListCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) +// IfSourceMetagenerationMatch sets the optional parameter +// "ifSourceMetagenerationMatch": Makes the operation conditional on +// whether the source object's current metageneration matches the given +// value. +func (c *ObjectsRewriteCall) IfSourceMetagenerationMatch(ifSourceMetagenerationMatch int64) *ObjectsRewriteCall { + c.urlParams_.Set("ifSourceMetagenerationMatch", fmt.Sprint(ifSourceMetagenerationMatch)) return c } -// PageToken sets the optional parameter "pageToken": A -// previously-returned page token representing part of the larger set of -// results to view. -func (c *ObjectsListCall) PageToken(pageToken string) *ObjectsListCall { - c.urlParams_.Set("pageToken", pageToken) +// IfSourceMetagenerationNotMatch sets the optional parameter +// "ifSourceMetagenerationNotMatch": Makes the operation conditional on +// whether the source object's current metageneration does not match the +// given value. +func (c *ObjectsRewriteCall) IfSourceMetagenerationNotMatch(ifSourceMetagenerationNotMatch int64) *ObjectsRewriteCall { + c.urlParams_.Set("ifSourceMetagenerationNotMatch", fmt.Sprint(ifSourceMetagenerationNotMatch)) return c } -// Prefix sets the optional parameter "prefix": Filter results to -// objects whose names begin with this prefix. 
-func (c *ObjectsListCall) Prefix(prefix string) *ObjectsListCall { - c.urlParams_.Set("prefix", prefix) +// MaxBytesRewrittenPerCall sets the optional parameter +// "maxBytesRewrittenPerCall": The maximum number of bytes that will be +// rewritten per rewrite request. Most callers shouldn't need to specify +// this parameter - it is primarily in place to support testing. If +// specified the value must be an integral multiple of 1 MiB (1048576). +// Also, this only applies to requests where the source and destination +// span locations and/or storage classes. Finally, this value must not +// change across rewrite calls else you'll get an error that the +// rewriteToken is invalid. +func (c *ObjectsRewriteCall) MaxBytesRewrittenPerCall(maxBytesRewrittenPerCall int64) *ObjectsRewriteCall { + c.urlParams_.Set("maxBytesRewrittenPerCall", fmt.Sprint(maxBytesRewrittenPerCall)) return c } // Projection sets the optional parameter "projection": Set of -// properties to return. Defaults to noAcl. +// properties to return. Defaults to noAcl, unless the object resource +// specifies the acl property, when it defaults to full. // // Possible values: // // "full" - Include all properties. // "noAcl" - Omit the owner, acl property. -func (c *ObjectsListCall) Projection(projection string) *ObjectsListCall { +func (c *ObjectsRewriteCall) Projection(projection string) *ObjectsRewriteCall { c.urlParams_.Set("projection", projection) return c } -// StartOffset sets the optional parameter "startOffset": Filter results -// to objects whose names are lexicographically equal to or after -// startOffset. If endOffset is also set, the objects listed will have -// names between startOffset (inclusive) and endOffset (exclusive). -func (c *ObjectsListCall) StartOffset(startOffset string) *ObjectsListCall { - c.urlParams_.Set("startOffset", startOffset) +// RewriteToken sets the optional parameter "rewriteToken": Include this +// field (from the previous rewrite response) on each rewrite request +// after the first one, until the rewrite response 'done' flag is true. +// Calls that provide a rewriteToken can omit all other request fields, +// but if included those fields must match the values provided in the +// first rewrite request. +func (c *ObjectsRewriteCall) RewriteToken(rewriteToken string) *ObjectsRewriteCall { + c.urlParams_.Set("rewriteToken", rewriteToken) return c } -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *ObjectsListCall) UserProject(userProject string) *ObjectsListCall { - c.urlParams_.Set("userProject", userProject) +// SourceGeneration sets the optional parameter "sourceGeneration": If +// present, selects a specific revision of the source object (as opposed +// to the latest version, the default). +func (c *ObjectsRewriteCall) SourceGeneration(sourceGeneration int64) *ObjectsRewriteCall { + c.urlParams_.Set("sourceGeneration", fmt.Sprint(sourceGeneration)) return c } -// Versions sets the optional parameter "versions": If true, lists all -// versions of an object as distinct results. The default is false. For -// more information, see Object Versioning. -func (c *ObjectsListCall) Versions(versions bool) *ObjectsListCall { - c.urlParams_.Set("versions", fmt.Sprint(versions)) +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. 
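// A sketch of driving "storage.objects.rewrite" to completion using the
// RewriteToken setter above: each response is expected to expose Done and
// RewriteToken fields on *RewriteResponse (field names assumed from the
// package's schema, not shown in this hunk). Buckets, object names, svc and
// ctx are illustrative; imports are as in the listing sketch earlier.

func copyAcross(ctx context.Context, svc *storage.Service) error {
	call := svc.Objects.Rewrite("src-bucket", "big.bin", "dst-bucket", "big.bin", &storage.Object{}).Context(ctx)
	for {
		resp, err := call.Do()
		if err != nil {
			return err
		}
		if resp.Done {
			return nil
		}
		// Resume with the returned token; all other request fields must stay
		// unchanged across calls, per the RewriteToken documentation above.
		call.RewriteToken(resp.RewriteToken)
	}
}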
+func (c *ObjectsRewriteCall) UserProject(userProject string) *ObjectsRewriteCall { + c.urlParams_.Set("userProject", userProject) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ObjectsListCall) Fields(s ...googleapi.Field) *ObjectsListCall { +func (c *ObjectsRewriteCall) Fields(s ...googleapi.Field) *ObjectsRewriteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ObjectsListCall) IfNoneMatch(entityTag string) *ObjectsListCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ObjectsListCall) Context(ctx context.Context) *ObjectsListCall { +func (c *ObjectsRewriteCall) Context(ctx context.Context) *ObjectsRewriteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ObjectsListCall) Header() http.Header { +func (c *ObjectsRewriteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ObjectsListCall) doRequest(alt string) (*http.Response, error) { +func (c *ObjectsRewriteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.object) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{sourceBucket}/o/{sourceObject}/rewriteTo/b/{destinationBucket}/o/{destinationObject}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, + "sourceBucket": c.sourceBucket, + "sourceObject": c.sourceObject, + "destinationBucket": c.destinationBucket, + "destinationObject": c.destinationObject, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "storage.objects.list" call. -// Exactly one of *Objects or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Objects.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ObjectsListCall) Do(opts ...googleapi.CallOption) (*Objects, error) { +// Do executes the "storage.objects.rewrite" call. 
+// Exactly one of *RewriteResponse or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *RewriteResponse.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ObjectsRewriteCall) Do(opts ...googleapi.CallOption) (*RewriteResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -10372,7 +14495,7 @@ func (c *ObjectsListCall) Do(opts ...googleapi.CallOption) (*Objects, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &Objects{ + ret := &RewriteResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -10384,59 +14507,110 @@ func (c *ObjectsListCall) Do(opts ...googleapi.CallOption) (*Objects, error) { } return ret, nil // { - // "description": "Retrieves a list of objects matching the criteria.", - // "httpMethod": "GET", - // "id": "storage.objects.list", + // "description": "Rewrites a source object to a destination object. Optionally overrides metadata.", + // "httpMethod": "POST", + // "id": "storage.objects.rewrite", // "parameterOrder": [ - // "bucket" + // "sourceBucket", + // "sourceObject", + // "destinationBucket", + // "destinationObject" // ], // "parameters": { - // "bucket": { - // "description": "Name of the bucket in which to look for objects.", + // "destinationBucket": { + // "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "destinationKmsKeyName": { + // "description": "Resource name of the Cloud KMS key, of the form projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be used to encrypt the object. Overrides the object metadata's kms_key_name value, if any.", + // "location": "query", + // "type": "string" + // }, + // "destinationObject": { + // "description": "Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", // "location": "path", // "required": true, // "type": "string" // }, - // "delimiter": { - // "description": "Returns results in a directory-like mode. items will contain only objects whose names, aside from the prefix, do not contain delimiter. Objects whose names, aside from the prefix, contain delimiter will have their name, truncated after the delimiter, returned in prefixes. 
Duplicate prefixes are omitted.", + // "destinationPredefinedAcl": { + // "description": "Apply a predefined set of access controls to the destination object.", + // "enum": [ + // "authenticatedRead", + // "bucketOwnerFullControl", + // "bucketOwnerRead", + // "private", + // "projectPrivate", + // "publicRead" + // ], + // "enumDescriptions": [ + // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + // "Object owner gets OWNER access, and project team owners get OWNER access.", + // "Object owner gets OWNER access, and project team owners get READER access.", + // "Object owner gets OWNER access.", + // "Object owner gets OWNER access, and project team members get access according to their roles.", + // "Object owner gets OWNER access, and allUsers get READER access." + // ], // "location": "query", // "type": "string" // }, - // "endOffset": { - // "description": "Filter results to objects whose names are lexicographically before endOffset. If startOffset is also set, the objects listed will have names between startOffset (inclusive) and endOffset (exclusive).", + // "ifGenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", + // "format": "int64", // "location": "query", // "type": "string" // }, - // "includeTrailingDelimiter": { - // "description": "If true, objects that end in exactly one instance of delimiter will have their metadata included in items in addition to prefixes.", + // "ifGenerationNotMatch": { + // "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.", + // "format": "int64", // "location": "query", - // "type": "boolean" + // "type": "string" // }, - // "matchGlob": { - // "description": "Filter results to objects and prefixes that match this glob pattern.", + // "ifMetagenerationMatch": { + // "description": "Makes the operation conditional on whether the destination object's current metageneration matches the given value.", + // "format": "int64", // "location": "query", // "type": "string" // }, - // "maxResults": { - // "default": "1000", - // "description": "Maximum number of items plus prefixes to return in a single page of responses. As duplicate prefixes are omitted, fewer total results may be returned than requested. 
The service will use this parameter or 1,000 items, whichever is smaller.", - // "format": "uint32", + // "ifMetagenerationNotMatch": { + // "description": "Makes the operation conditional on whether the destination object's current metageneration does not match the given value.", + // "format": "int64", // "location": "query", - // "minimum": "0", - // "type": "integer" + // "type": "string" // }, - // "pageToken": { - // "description": "A previously-returned page token representing part of the larger set of results to view.", + // "ifSourceGenerationMatch": { + // "description": "Makes the operation conditional on whether the source object's current generation matches the given value.", + // "format": "int64", // "location": "query", // "type": "string" // }, - // "prefix": { - // "description": "Filter results to objects whose names begin with this prefix.", + // "ifSourceGenerationNotMatch": { + // "description": "Makes the operation conditional on whether the source object's current generation does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifSourceMetagenerationMatch": { + // "description": "Makes the operation conditional on whether the source object's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifSourceMetagenerationNotMatch": { + // "description": "Makes the operation conditional on whether the source object's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "maxBytesRewrittenPerCall": { + // "description": "The maximum number of bytes that will be rewritten per rewrite request. Most callers shouldn't need to specify this parameter - it is primarily in place to support testing. If specified the value must be an integral multiple of 1 MiB (1048576). Also, this only applies to requests where the source and destination span locations and/or storage classes. Finally, this value must not change across rewrite calls else you'll get an error that the rewriteToken is invalid.", + // "format": "int64", // "location": "query", // "type": "string" // }, // "projection": { - // "description": "Set of properties to return. Defaults to noAcl.", + // "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.", // "enum": [ // "full", // "noAcl" @@ -10448,173 +14622,88 @@ func (c *ObjectsListCall) Do(opts ...googleapi.CallOption) (*Objects, error) { // "location": "query", // "type": "string" // }, - // "startOffset": { - // "description": "Filter results to objects whose names are lexicographically equal to or after startOffset. If endOffset is also set, the objects listed will have names between startOffset (inclusive) and endOffset (exclusive).", + // "rewriteToken": { + // "description": "Include this field (from the previous rewrite response) on each rewrite request after the first one, until the rewrite response 'done' flag is true. Calls that provide a rewriteToken can omit all other request fields, but if included those fields must match the values provided in the first rewrite request.", // "location": "query", // "type": "string" // }, - // "userProject": { - // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", + // "sourceBucket": { + // "description": "Name of the bucket in which to find the source object.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "sourceGeneration": { + // "description": "If present, selects a specific revision of the source object (as opposed to the latest version, the default).", + // "format": "int64", // "location": "query", // "type": "string" // }, - // "versions": { - // "description": "If true, lists all versions of an object as distinct results. The default is false. For more information, see Object Versioning.", + // "sourceObject": { + // "description": "Name of the source object. For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", - // "type": "boolean" + // "type": "string" // } // }, - // "path": "b/{bucket}/o", + // "path": "b/{sourceBucket}/o/{sourceObject}/rewriteTo/b/{destinationBucket}/o/{destinationObject}", + // "request": { + // "$ref": "Object" + // }, // "response": { - // "$ref": "Objects" + // "$ref": "RewriteResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/cloud-platform.read-only", // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_only", // "https://www.googleapis.com/auth/devstorage.read_write" - // ], - // "supportsSubscription": true + // ] // } } -// Pages invokes f for each page of results. -// A non-nil error returned from f will halt the iteration. -// The provided context supersedes any context provided to the Context method. -func (c *ObjectsListCall) Pages(ctx context.Context, f func(*Objects) error) error { - c.ctx_ = ctx - defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point - for { - x, err := c.Do() - if err != nil { - return err - } - if err := f(x); err != nil { - return err - } - if x.NextPageToken == "" { - return nil - } - c.PageToken(x.NextPageToken) - } -} - -// method id "storage.objects.patch": +// method id "storage.objects.setIamPolicy": -type ObjectsPatchCall struct { +type ObjectsSetIamPolicyCall struct { s *Service bucket string object string - object2 *Object + policy *Policy urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// Patch: Patches an object's metadata. +// SetIamPolicy: Updates an IAM policy for the specified object. // // - bucket: Name of the bucket in which the object resides. // - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. -func (r *ObjectsService) Patch(bucket string, object string, object2 *Object) *ObjectsPatchCall { - c := &ObjectsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// object names to be path safe, see Encoding URI Path Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). 
+func (r *ObjectsService) SetIamPolicy(bucket string, object string, policy *Policy) *ObjectsSetIamPolicyCall { + c := &ObjectsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket c.object = object - c.object2 = object2 + c.policy = policy return c } // Generation sets the optional parameter "generation": If present, // selects a specific revision of this object (as opposed to the latest // version, the default). -func (c *ObjectsPatchCall) Generation(generation int64) *ObjectsPatchCall { +func (c *ObjectsSetIamPolicyCall) Generation(generation int64) *ObjectsSetIamPolicyCall { c.urlParams_.Set("generation", fmt.Sprint(generation)) return c } -// IfGenerationMatch sets the optional parameter "ifGenerationMatch": -// Makes the operation conditional on whether the object's current -// generation matches the given value. Setting to 0 makes the operation -// succeed only if there are no live versions of the object. -func (c *ObjectsPatchCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsPatchCall { - c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) - return c -} - -// IfGenerationNotMatch sets the optional parameter -// "ifGenerationNotMatch": Makes the operation conditional on whether -// the object's current generation does not match the given value. If no -// live object exists, the precondition fails. Setting to 0 makes the -// operation succeed only if there is a live version of the object. -func (c *ObjectsPatchCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsPatchCall { - c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) - return c -} - -// IfMetagenerationMatch sets the optional parameter -// "ifMetagenerationMatch": Makes the operation conditional on whether -// the object's current metageneration matches the given value. -func (c *ObjectsPatchCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsPatchCall { - c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) - return c -} - -// IfMetagenerationNotMatch sets the optional parameter -// "ifMetagenerationNotMatch": Makes the operation conditional on -// whether the object's current metageneration does not match the given -// value. -func (c *ObjectsPatchCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsPatchCall { - c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) - return c -} - -// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a -// predefined set of access controls to this object. -// -// Possible values: -// -// "authenticatedRead" - Object owner gets OWNER access, and -// -// allAuthenticatedUsers get READER access. -// -// "bucketOwnerFullControl" - Object owner gets OWNER access, and -// -// project team owners get OWNER access. -// -// "bucketOwnerRead" - Object owner gets OWNER access, and project -// -// team owners get READER access. -// -// "private" - Object owner gets OWNER access. -// "projectPrivate" - Object owner gets OWNER access, and project team -// -// members get access according to their roles. -// -// "publicRead" - Object owner gets OWNER access, and allUsers get -// -// READER access. -func (c *ObjectsPatchCall) PredefinedAcl(predefinedAcl string) *ObjectsPatchCall { - c.urlParams_.Set("predefinedAcl", predefinedAcl) - return c -} - -// Projection sets the optional parameter "projection": Set of -// properties to return. Defaults to full. -// -// Possible values: -// -// "full" - Include all properties. 
-// "noAcl" - Omit the owner, acl property. -func (c *ObjectsPatchCall) Projection(projection string) *ObjectsPatchCall { - c.urlParams_.Set("projection", projection) - return c -} - // UserProject sets the optional parameter "userProject": The project to -// be billed for this request, for Requester Pays buckets. -func (c *ObjectsPatchCall) UserProject(userProject string) *ObjectsPatchCall { +// be billed for this request. Required for Requester Pays buckets. +func (c *ObjectsSetIamPolicyCall) UserProject(userProject string) *ObjectsSetIamPolicyCall { c.urlParams_.Set("userProject", userProject) return c } @@ -10622,7 +14711,7 @@ func (c *ObjectsPatchCall) UserProject(userProject string) *ObjectsPatchCall { // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ObjectsPatchCall) Fields(s ...googleapi.Field) *ObjectsPatchCall { +func (c *ObjectsSetIamPolicyCall) Fields(s ...googleapi.Field) *ObjectsSetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -10630,21 +14719,21 @@ func (c *ObjectsPatchCall) Fields(s ...googleapi.Field) *ObjectsPatchCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ObjectsPatchCall) Context(ctx context.Context) *ObjectsPatchCall { +func (c *ObjectsSetIamPolicyCall) Context(ctx context.Context) *ObjectsSetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ObjectsPatchCall) Header() http.Header { +func (c *ObjectsSetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ObjectsPatchCall) doRequest(alt string) (*http.Response, error) { +func (c *ObjectsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -10652,16 +14741,16 @@ func (c *ObjectsPatchCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.object2) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.policy) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/iam") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("PUT", urls, body) if err != nil { return nil, err } @@ -10673,14 +14762,14 @@ func (c *ObjectsPatchCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "storage.objects.patch" call. -// Exactly one of *Object or error will be non-nil. Any non-2xx status +// Do executes the "storage.objects.setIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status // code is an error. 
Response headers are in either -// *Object.ServerResponse.Header or (if a response was returned at all) +// *Policy.ServerResponse.Header or (if a response was returned at all) // in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified // was returned. -func (c *ObjectsPatchCall) Do(opts ...googleapi.CallOption) (*Object, error) { +func (c *ObjectsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -10699,7 +14788,7 @@ func (c *ObjectsPatchCall) Do(opts ...googleapi.CallOption) (*Object, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &Object{ + ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -10711,9 +14800,9 @@ func (c *ObjectsPatchCall) Do(opts ...googleapi.CallOption) (*Object, error) { } return ret, nil // { - // "description": "Patches an object's metadata.", - // "httpMethod": "PATCH", - // "id": "storage.objects.patch", + // "description": "Updates an IAM policy for the specified object.", + // "httpMethod": "PUT", + // "id": "storage.objects.setIamPolicy", // "parameterOrder": [ // "bucket", // "object" @@ -10731,295 +14820,73 @@ func (c *ObjectsPatchCall) Do(opts ...googleapi.CallOption) (*Object, error) { // "location": "query", // "type": "string" // }, - // "ifGenerationMatch": { - // "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifGenerationNotMatch": { - // "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationMatch": { - // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationNotMatch": { - // "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "description": "Name of the object. 
For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", // "location": "path", // "required": true, // "type": "string" // }, - // "predefinedAcl": { - // "description": "Apply a predefined set of access controls to this object.", - // "enum": [ - // "authenticatedRead", - // "bucketOwnerFullControl", - // "bucketOwnerRead", - // "private", - // "projectPrivate", - // "publicRead" - // ], - // "enumDescriptions": [ - // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", - // "Object owner gets OWNER access, and project team owners get OWNER access.", - // "Object owner gets OWNER access, and project team owners get READER access.", - // "Object owner gets OWNER access.", - // "Object owner gets OWNER access, and project team members get access according to their roles.", - // "Object owner gets OWNER access, and allUsers get READER access." - // ], - // "location": "query", - // "type": "string" - // }, - // "projection": { - // "description": "Set of properties to return. Defaults to full.", - // "enum": [ - // "full", - // "noAcl" - // ], - // "enumDescriptions": [ - // "Include all properties.", - // "Omit the owner, acl property." - // ], - // "location": "query", - // "type": "string" - // }, // "userProject": { - // "description": "The project to be billed for this request, for Requester Pays buckets.", + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", // "type": "string" // } // }, - // "path": "b/{bucket}/o/{object}", + // "path": "b/{bucket}/o/{object}/iam", // "request": { - // "$ref": "Object" + // "$ref": "Policy" // }, // "response": { - // "$ref": "Object" + // "$ref": "Policy" // }, // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" - // ] - // } - -} - -// method id "storage.objects.rewrite": - -type ObjectsRewriteCall struct { - s *Service - sourceBucket string - sourceObject string - destinationBucket string - destinationObject string - object *Object - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Rewrite: Rewrites a source object to a destination object. Optionally -// overrides metadata. -// -// - destinationBucket: Name of the bucket in which to store the new -// object. Overrides the provided object metadata's bucket value, if -// any. -// - destinationObject: Name of the new object. Required when the object -// metadata is not otherwise provided. Overrides the object metadata's -// name value, if any. For information about how to URL encode object -// names to be path safe, see Encoding URI Path Parts. -// - sourceBucket: Name of the bucket in which to find the source -// object. -// - sourceObject: Name of the source object. For information about how -// to URL encode object names to be path safe, see Encoding URI Path -// Parts. 
-func (r *ObjectsService) Rewrite(sourceBucket string, sourceObject string, destinationBucket string, destinationObject string, object *Object) *ObjectsRewriteCall { - c := &ObjectsRewriteCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.sourceBucket = sourceBucket - c.sourceObject = sourceObject - c.destinationBucket = destinationBucket - c.destinationObject = destinationObject - c.object = object - return c -} - -// DestinationKmsKeyName sets the optional parameter -// "destinationKmsKeyName": Resource name of the Cloud KMS key, of the -// form -// projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, -// -// that will be used to encrypt the object. Overrides the object -// -// metadata's kms_key_name value, if any. -func (c *ObjectsRewriteCall) DestinationKmsKeyName(destinationKmsKeyName string) *ObjectsRewriteCall { - c.urlParams_.Set("destinationKmsKeyName", destinationKmsKeyName) - return c -} - -// DestinationPredefinedAcl sets the optional parameter -// "destinationPredefinedAcl": Apply a predefined set of access controls -// to the destination object. -// -// Possible values: -// -// "authenticatedRead" - Object owner gets OWNER access, and -// -// allAuthenticatedUsers get READER access. -// -// "bucketOwnerFullControl" - Object owner gets OWNER access, and -// -// project team owners get OWNER access. -// -// "bucketOwnerRead" - Object owner gets OWNER access, and project -// -// team owners get READER access. -// -// "private" - Object owner gets OWNER access. -// "projectPrivate" - Object owner gets OWNER access, and project team -// -// members get access according to their roles. -// -// "publicRead" - Object owner gets OWNER access, and allUsers get -// -// READER access. -func (c *ObjectsRewriteCall) DestinationPredefinedAcl(destinationPredefinedAcl string) *ObjectsRewriteCall { - c.urlParams_.Set("destinationPredefinedAcl", destinationPredefinedAcl) - return c -} - -// IfGenerationMatch sets the optional parameter "ifGenerationMatch": -// Makes the operation conditional on whether the object's current -// generation matches the given value. Setting to 0 makes the operation -// succeed only if there are no live versions of the object. -func (c *ObjectsRewriteCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsRewriteCall { - c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) - return c -} - -// IfGenerationNotMatch sets the optional parameter -// "ifGenerationNotMatch": Makes the operation conditional on whether -// the object's current generation does not match the given value. If no -// live object exists, the precondition fails. Setting to 0 makes the -// operation succeed only if there is a live version of the object. -func (c *ObjectsRewriteCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsRewriteCall { - c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) - return c -} - -// IfMetagenerationMatch sets the optional parameter -// "ifMetagenerationMatch": Makes the operation conditional on whether -// the destination object's current metageneration matches the given -// value. -func (c *ObjectsRewriteCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsRewriteCall { - c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) - return c -} - -// IfMetagenerationNotMatch sets the optional parameter -// "ifMetagenerationNotMatch": Makes the operation conditional on -// whether the destination object's current metageneration does not -// match the given value. 
-func (c *ObjectsRewriteCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsRewriteCall { - c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) - return c -} - -// IfSourceGenerationMatch sets the optional parameter -// "ifSourceGenerationMatch": Makes the operation conditional on whether -// the source object's current generation matches the given value. -func (c *ObjectsRewriteCall) IfSourceGenerationMatch(ifSourceGenerationMatch int64) *ObjectsRewriteCall { - c.urlParams_.Set("ifSourceGenerationMatch", fmt.Sprint(ifSourceGenerationMatch)) - return c -} - -// IfSourceGenerationNotMatch sets the optional parameter -// "ifSourceGenerationNotMatch": Makes the operation conditional on -// whether the source object's current generation does not match the -// given value. -func (c *ObjectsRewriteCall) IfSourceGenerationNotMatch(ifSourceGenerationNotMatch int64) *ObjectsRewriteCall { - c.urlParams_.Set("ifSourceGenerationNotMatch", fmt.Sprint(ifSourceGenerationNotMatch)) - return c -} - -// IfSourceMetagenerationMatch sets the optional parameter -// "ifSourceMetagenerationMatch": Makes the operation conditional on -// whether the source object's current metageneration matches the given -// value. -func (c *ObjectsRewriteCall) IfSourceMetagenerationMatch(ifSourceMetagenerationMatch int64) *ObjectsRewriteCall { - c.urlParams_.Set("ifSourceMetagenerationMatch", fmt.Sprint(ifSourceMetagenerationMatch)) - return c -} - -// IfSourceMetagenerationNotMatch sets the optional parameter -// "ifSourceMetagenerationNotMatch": Makes the operation conditional on -// whether the source object's current metageneration does not match the -// given value. -func (c *ObjectsRewriteCall) IfSourceMetagenerationNotMatch(ifSourceMetagenerationNotMatch int64) *ObjectsRewriteCall { - c.urlParams_.Set("ifSourceMetagenerationNotMatch", fmt.Sprint(ifSourceMetagenerationNotMatch)) - return c -} - -// MaxBytesRewrittenPerCall sets the optional parameter -// "maxBytesRewrittenPerCall": The maximum number of bytes that will be -// rewritten per rewrite request. Most callers shouldn't need to specify -// this parameter - it is primarily in place to support testing. If -// specified the value must be an integral multiple of 1 MiB (1048576). -// Also, this only applies to requests where the source and destination -// span locations and/or storage classes. Finally, this value must not -// change across rewrite calls else you'll get an error that the -// rewriteToken is invalid. -func (c *ObjectsRewriteCall) MaxBytesRewrittenPerCall(maxBytesRewrittenPerCall int64) *ObjectsRewriteCall { - c.urlParams_.Set("maxBytesRewrittenPerCall", fmt.Sprint(maxBytesRewrittenPerCall)) - return c + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + } -// Projection sets the optional parameter "projection": Set of -// properties to return. Defaults to noAcl, unless the object resource -// specifies the acl property, when it defaults to full. -// -// Possible values: -// -// "full" - Include all properties. -// "noAcl" - Omit the owner, acl property. 
-func (c *ObjectsRewriteCall) Projection(projection string) *ObjectsRewriteCall { - c.urlParams_.Set("projection", projection) - return c +// method id "storage.objects.testIamPermissions": + +type ObjectsTestIamPermissionsCall struct { + s *Service + bucket string + object string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// RewriteToken sets the optional parameter "rewriteToken": Include this -// field (from the previous rewrite response) on each rewrite request -// after the first one, until the rewrite response 'done' flag is true. -// Calls that provide a rewriteToken can omit all other request fields, -// but if included those fields must match the values provided in the -// first rewrite request. -func (c *ObjectsRewriteCall) RewriteToken(rewriteToken string) *ObjectsRewriteCall { - c.urlParams_.Set("rewriteToken", rewriteToken) +// TestIamPermissions: Tests a set of permissions on the given object to +// see which, if any, are held by the caller. +// +// - bucket: Name of the bucket in which the object resides. +// - object: Name of the object. For information about how to URL encode +// object names to be path safe, see Encoding URI Path Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). +// - permissions: Permissions to test. +func (r *ObjectsService) TestIamPermissions(bucket string, object string, permissions []string) *ObjectsTestIamPermissionsCall { + c := &ObjectsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + c.urlParams_.SetMulti("permissions", append([]string{}, permissions...)) return c } -// SourceGeneration sets the optional parameter "sourceGeneration": If -// present, selects a specific revision of the source object (as opposed -// to the latest version, the default). -func (c *ObjectsRewriteCall) SourceGeneration(sourceGeneration int64) *ObjectsRewriteCall { - c.urlParams_.Set("sourceGeneration", fmt.Sprint(sourceGeneration)) +// Generation sets the optional parameter "generation": If present, +// selects a specific revision of this object (as opposed to the latest +// version, the default). +func (c *ObjectsTestIamPermissionsCall) Generation(generation int64) *ObjectsTestIamPermissionsCall { + c.urlParams_.Set("generation", fmt.Sprint(generation)) return c } // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. -func (c *ObjectsRewriteCall) UserProject(userProject string) *ObjectsRewriteCall { +func (c *ObjectsTestIamPermissionsCall) UserProject(userProject string) *ObjectsTestIamPermissionsCall { c.urlParams_.Set("userProject", userProject) return c } @@ -11027,67 +14894,73 @@ func (c *ObjectsRewriteCall) UserProject(userProject string) *ObjectsRewriteCall // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ObjectsRewriteCall) Fields(s ...googleapi.Field) *ObjectsRewriteCall { +func (c *ObjectsTestIamPermissionsCall) Fields(s ...googleapi.Field) *ObjectsTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. 
Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ObjectsTestIamPermissionsCall) IfNoneMatch(entityTag string) *ObjectsTestIamPermissionsCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ObjectsRewriteCall) Context(ctx context.Context) *ObjectsRewriteCall { +func (c *ObjectsTestIamPermissionsCall) Context(ctx context.Context) *ObjectsTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ObjectsRewriteCall) Header() http.Header { +func (c *ObjectsTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ObjectsRewriteCall) doRequest(alt string) (*http.Response, error) { +func (c *ObjectsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.object) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{sourceBucket}/o/{sourceObject}/rewriteTo/b/{destinationBucket}/o/{destinationObject}") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/iam/testPermissions") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "sourceBucket": c.sourceBucket, - "sourceObject": c.sourceObject, - "destinationBucket": c.destinationBucket, - "destinationObject": c.destinationObject, + "bucket": c.bucket, + "object": c.object, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "storage.objects.rewrite" call. -// Exactly one of *RewriteResponse or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *RewriteResponse.ServerResponse.Header or (if a response was returned -// at all) in error.(*googleapi.Error).Header. Use +// Do executes the "storage.objects.testIamPermissions" call. +// Exactly one of *TestIamPermissionsResponse or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *TestIamPermissionsResponse.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. -func (c *ObjectsRewriteCall) Do(opts ...googleapi.CallOption) (*RewriteResponse, error) { +func (c *ObjectsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestIamPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -11106,7 +14979,7 @@ func (c *ObjectsRewriteCall) Do(opts ...googleapi.CallOption) (*RewriteResponse, if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &RewriteResponse{ + ret := &TestIamPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -11118,141 +14991,37 @@ func (c *ObjectsRewriteCall) Do(opts ...googleapi.CallOption) (*RewriteResponse, } return ret, nil // { - // "description": "Rewrites a source object to a destination object. Optionally overrides metadata.", - // "httpMethod": "POST", - // "id": "storage.objects.rewrite", + // "description": "Tests a set of permissions on the given object to see which, if any, are held by the caller.", + // "httpMethod": "GET", + // "id": "storage.objects.testIamPermissions", // "parameterOrder": [ - // "sourceBucket", - // "sourceObject", - // "destinationBucket", - // "destinationObject" + // "bucket", + // "object", + // "permissions" // ], // "parameters": { - // "destinationBucket": { - // "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "destinationKmsKeyName": { - // "description": "Resource name of the Cloud KMS key, of the form projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be used to encrypt the object. Overrides the object metadata's kms_key_name value, if any.", - // "location": "query", - // "type": "string" - // }, - // "destinationObject": { - // "description": "Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "bucket": { + // "description": "Name of the bucket in which the object resides.", // "location": "path", // "required": true, // "type": "string" // }, - // "destinationPredefinedAcl": { - // "description": "Apply a predefined set of access controls to the destination object.", - // "enum": [ - // "authenticatedRead", - // "bucketOwnerFullControl", - // "bucketOwnerRead", - // "private", - // "projectPrivate", - // "publicRead" - // ], - // "enumDescriptions": [ - // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", - // "Object owner gets OWNER access, and project team owners get OWNER access.", - // "Object owner gets OWNER access, and project team owners get READER access.", - // "Object owner gets OWNER access.", - // "Object owner gets OWNER access, and project team members get access according to their roles.", - // "Object owner gets OWNER access, and allUsers get READER access." - // ], - // "location": "query", - // "type": "string" - // }, - // "ifGenerationMatch": { - // "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifGenerationNotMatch": { - // "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. 
Setting to 0 makes the operation succeed only if there is a live version of the object.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationMatch": { - // "description": "Makes the operation conditional on whether the destination object's current metageneration matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationNotMatch": { - // "description": "Makes the operation conditional on whether the destination object's current metageneration does not match the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifSourceGenerationMatch": { - // "description": "Makes the operation conditional on whether the source object's current generation matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifSourceGenerationNotMatch": { - // "description": "Makes the operation conditional on whether the source object's current generation does not match the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifSourceMetagenerationMatch": { - // "description": "Makes the operation conditional on whether the source object's current metageneration matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifSourceMetagenerationNotMatch": { - // "description": "Makes the operation conditional on whether the source object's current metageneration does not match the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "maxBytesRewrittenPerCall": { - // "description": "The maximum number of bytes that will be rewritten per rewrite request. Most callers shouldn't need to specify this parameter - it is primarily in place to support testing. If specified the value must be an integral multiple of 1 MiB (1048576). Also, this only applies to requests where the source and destination span locations and/or storage classes. Finally, this value must not change across rewrite calls else you'll get an error that the rewriteToken is invalid.", + // "generation": { + // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", // "format": "int64", // "location": "query", // "type": "string" // }, - // "projection": { - // "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.", - // "enum": [ - // "full", - // "noAcl" - // ], - // "enumDescriptions": [ - // "Include all properties.", - // "Omit the owner, acl property." - // ], - // "location": "query", - // "type": "string" - // }, - // "rewriteToken": { - // "description": "Include this field (from the previous rewrite response) on each rewrite request after the first one, until the rewrite response 'done' flag is true. Calls that provide a rewriteToken can omit all other request fields, but if included those fields must match the values provided in the first rewrite request.", - // "location": "query", - // "type": "string" - // }, - // "sourceBucket": { - // "description": "Name of the bucket in which to find the source object.", + // "object": { + // "description": "Name of the object. 
For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", // "location": "path", // "required": true, // "type": "string" // }, - // "sourceGeneration": { - // "description": "If present, selects a specific revision of the source object (as opposed to the latest version, the default).", - // "format": "int64", + // "permissions": { + // "description": "Permissions to test.", // "location": "query", - // "type": "string" - // }, - // "sourceObject": { - // "description": "Name of the source object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - // "location": "path", + // "repeated": true, // "required": true, // "type": "string" // }, @@ -11262,58 +15031,145 @@ func (c *ObjectsRewriteCall) Do(opts ...googleapi.CallOption) (*RewriteResponse, // "type": "string" // } // }, - // "path": "b/{sourceBucket}/o/{sourceObject}/rewriteTo/b/{destinationBucket}/o/{destinationObject}", - // "request": { - // "$ref": "Object" - // }, + // "path": "b/{bucket}/o/{object}/iam/testPermissions", // "response": { - // "$ref": "RewriteResponse" + // "$ref": "TestIamPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", // "https://www.googleapis.com/auth/devstorage.read_write" // ] // } } -// method id "storage.objects.setIamPolicy": +// method id "storage.objects.update": -type ObjectsSetIamPolicyCall struct { +type ObjectsUpdateCall struct { s *Service bucket string object string - policy *Policy + object2 *Object urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// SetIamPolicy: Updates an IAM policy for the specified object. +// Update: Updates an object's metadata. // // - bucket: Name of the bucket in which the object resides. // - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. -func (r *ObjectsService) SetIamPolicy(bucket string, object string, policy *Policy) *ObjectsSetIamPolicyCall { - c := &ObjectsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// object names to be path safe, see Encoding URI Path Parts +// (https://cloud.google.com/storage/docs/request-endpoints#encoding). +func (r *ObjectsService) Update(bucket string, object string, object2 *Object) *ObjectsUpdateCall { + c := &ObjectsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket c.object = object - c.policy = policy + c.object2 = object2 return c } // Generation sets the optional parameter "generation": If present, // selects a specific revision of this object (as opposed to the latest // version, the default). -func (c *ObjectsSetIamPolicyCall) Generation(generation int64) *ObjectsSetIamPolicyCall { +func (c *ObjectsUpdateCall) Generation(generation int64) *ObjectsUpdateCall { c.urlParams_.Set("generation", fmt.Sprint(generation)) return c } +// IfGenerationMatch sets the optional parameter "ifGenerationMatch": +// Makes the operation conditional on whether the object's current +// generation matches the given value. Setting to 0 makes the operation +// succeed only if there are no live versions of the object. 
+func (c *ObjectsUpdateCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsUpdateCall { + c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) + return c +} + +// IfGenerationNotMatch sets the optional parameter +// "ifGenerationNotMatch": Makes the operation conditional on whether +// the object's current generation does not match the given value. If no +// live object exists, the precondition fails. Setting to 0 makes the +// operation succeed only if there is a live version of the object. +func (c *ObjectsUpdateCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsUpdateCall { + c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": Makes the operation conditional on whether +// the object's current metageneration matches the given value. +func (c *ObjectsUpdateCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsUpdateCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": Makes the operation conditional on +// whether the object's current metageneration does not match the given +// value. +func (c *ObjectsUpdateCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsUpdateCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) + return c +} + +// OverrideUnlockedRetention sets the optional parameter +// "overrideUnlockedRetention": Must be true to remove the retention +// configuration, reduce its unlocked retention period, or change its +// mode from unlocked to locked. +func (c *ObjectsUpdateCall) OverrideUnlockedRetention(overrideUnlockedRetention bool) *ObjectsUpdateCall { + c.urlParams_.Set("overrideUnlockedRetention", fmt.Sprint(overrideUnlockedRetention)) + return c +} + +// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a +// predefined set of access controls to this object. +// +// Possible values: +// +// "authenticatedRead" - Object owner gets OWNER access, and +// +// allAuthenticatedUsers get READER access. +// +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// +// project team owners get OWNER access. +// +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// +// team owners get READER access. +// +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// +// members get access according to their roles. +// +// "publicRead" - Object owner gets OWNER access, and allUsers get +// +// READER access. +func (c *ObjectsUpdateCall) PredefinedAcl(predefinedAcl string) *ObjectsUpdateCall { + c.urlParams_.Set("predefinedAcl", predefinedAcl) + return c +} + +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to full. +// +// Possible values: +// +// "full" - Include all properties. +// "noAcl" - Omit the owner, acl property. +func (c *ObjectsUpdateCall) Projection(projection string) *ObjectsUpdateCall { + c.urlParams_.Set("projection", projection) + return c +} + // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. 
-func (c *ObjectsSetIamPolicyCall) UserProject(userProject string) *ObjectsSetIamPolicyCall { +func (c *ObjectsUpdateCall) UserProject(userProject string) *ObjectsUpdateCall { c.urlParams_.Set("userProject", userProject) return c } @@ -11321,7 +15177,7 @@ func (c *ObjectsSetIamPolicyCall) UserProject(userProject string) *ObjectsSetIam // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ObjectsSetIamPolicyCall) Fields(s ...googleapi.Field) *ObjectsSetIamPolicyCall { +func (c *ObjectsUpdateCall) Fields(s ...googleapi.Field) *ObjectsUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -11329,21 +15185,21 @@ func (c *ObjectsSetIamPolicyCall) Fields(s ...googleapi.Field) *ObjectsSetIamPol // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ObjectsSetIamPolicyCall) Context(ctx context.Context) *ObjectsSetIamPolicyCall { +func (c *ObjectsUpdateCall) Context(ctx context.Context) *ObjectsUpdateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ObjectsSetIamPolicyCall) Header() http.Header { +func (c *ObjectsUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ObjectsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *ObjectsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -11351,14 +15207,14 @@ func (c *ObjectsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.policy) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.object2) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/iam") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("PUT", urls, body) if err != nil { @@ -11372,14 +15228,14 @@ func (c *ObjectsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "storage.objects.setIamPolicy" call. -// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// Do executes the "storage.objects.update" call. +// Exactly one of *Object or error will be non-nil. Any non-2xx status // code is an error. Response headers are in either -// *Policy.ServerResponse.Header or (if a response was returned at all) +// *Object.ServerResponse.Header or (if a response was returned at all) // in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified // was returned. -func (c *ObjectsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { +func (c *ObjectsUpdateCall) Do(opts ...googleapi.CallOption) (*Object, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -11398,7 +15254,7 @@ func (c *ObjectsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &Policy{ + ret := &Object{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -11410,9 +15266,9 @@ func (c *ObjectsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err } return ret, nil // { - // "description": "Updates an IAM policy for the specified object.", + // "description": "Updates an object's metadata.", // "httpMethod": "PUT", - // "id": "storage.objects.setIamPolicy", + // "id": "storage.objects.update", // "parameterOrder": [ // "bucket", // "object" @@ -11430,146 +15286,268 @@ func (c *ObjectsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err // "location": "query", // "type": "string" // }, + // "ifGenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifGenerationNotMatch": { + // "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationNotMatch": { + // "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "description": "Name of the object. 
For information about how to URL encode object names to be path safe, see [Encoding URI Path Parts](https://cloud.google.com/storage/docs/request-endpoints#encoding).", // "location": "path", // "required": true, // "type": "string" // }, + // "overrideUnlockedRetention": { + // "description": "Must be true to remove the retention configuration, reduce its unlocked retention period, or change its mode from unlocked to locked.", + // "location": "query", + // "type": "boolean" + // }, + // "predefinedAcl": { + // "description": "Apply a predefined set of access controls to this object.", + // "enum": [ + // "authenticatedRead", + // "bucketOwnerFullControl", + // "bucketOwnerRead", + // "private", + // "projectPrivate", + // "publicRead" + // ], + // "enumDescriptions": [ + // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + // "Object owner gets OWNER access, and project team owners get OWNER access.", + // "Object owner gets OWNER access, and project team owners get READER access.", + // "Object owner gets OWNER access.", + // "Object owner gets OWNER access, and project team members get access according to their roles.", + // "Object owner gets OWNER access, and allUsers get READER access." + // ], + // "location": "query", + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. Defaults to full.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit the owner, acl property." + // ], + // "location": "query", + // "type": "string" + // }, // "userProject": { // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", // "type": "string" // } // }, - // "path": "b/{bucket}/o/{object}/iam", + // "path": "b/{bucket}/o/{object}", // "request": { - // "$ref": "Policy" + // "$ref": "Object" // }, // "response": { - // "$ref": "Policy" + // "$ref": "Object" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_write" + // "https://www.googleapis.com/auth/devstorage.full_control" // ] // } } -// method id "storage.objects.testIamPermissions": +// method id "storage.objects.watchAll": + +type ObjectsWatchAllCall struct { + s *Service + bucket string + channel *Channel + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// WatchAll: Watch for changes on all objects in a bucket. +// +// - bucket: Name of the bucket in which to look for objects. +func (r *ObjectsService) WatchAll(bucket string, channel *Channel) *ObjectsWatchAllCall { + c := &ObjectsWatchAllCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.channel = channel + return c +} + +// Delimiter sets the optional parameter "delimiter": Returns results in +// a directory-like mode. items will contain only objects whose names, +// aside from the prefix, do not contain delimiter. Objects whose names, +// aside from the prefix, contain delimiter will have their name, +// truncated after the delimiter, returned in prefixes. Duplicate +// prefixes are omitted. +func (c *ObjectsWatchAllCall) Delimiter(delimiter string) *ObjectsWatchAllCall { + c.urlParams_.Set("delimiter", delimiter) + return c +} + +// EndOffset sets the optional parameter "endOffset": Filter results to +// objects whose names are lexicographically before endOffset. 
If +// startOffset is also set, the objects listed will have names between +// startOffset (inclusive) and endOffset (exclusive). +func (c *ObjectsWatchAllCall) EndOffset(endOffset string) *ObjectsWatchAllCall { + c.urlParams_.Set("endOffset", endOffset) + return c +} + +// IncludeTrailingDelimiter sets the optional parameter +// "includeTrailingDelimiter": If true, objects that end in exactly one +// instance of delimiter will have their metadata included in items in +// addition to prefixes. +func (c *ObjectsWatchAllCall) IncludeTrailingDelimiter(includeTrailingDelimiter bool) *ObjectsWatchAllCall { + c.urlParams_.Set("includeTrailingDelimiter", fmt.Sprint(includeTrailingDelimiter)) + return c +} + +// MaxResults sets the optional parameter "maxResults": Maximum number +// of items plus prefixes to return in a single page of responses. As +// duplicate prefixes are omitted, fewer total results may be returned +// than requested. The service will use this parameter or 1,000 items, +// whichever is smaller. +func (c *ObjectsWatchAllCall) MaxResults(maxResults int64) *ObjectsWatchAllCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// PageToken sets the optional parameter "pageToken": A +// previously-returned page token representing part of the larger set of +// results to view. +func (c *ObjectsWatchAllCall) PageToken(pageToken string) *ObjectsWatchAllCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} -type ObjectsTestIamPermissionsCall struct { - s *Service - bucket string - object string - urlParams_ gensupport.URLParams - ifNoneMatch_ string - ctx_ context.Context - header_ http.Header +// Prefix sets the optional parameter "prefix": Filter results to +// objects whose names begin with this prefix. +func (c *ObjectsWatchAllCall) Prefix(prefix string) *ObjectsWatchAllCall { + c.urlParams_.Set("prefix", prefix) + return c } -// TestIamPermissions: Tests a set of permissions on the given object to -// see which, if any, are held by the caller. +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to noAcl. // -// - bucket: Name of the bucket in which the object resides. -// - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. -// - permissions: Permissions to test. -func (r *ObjectsService) TestIamPermissions(bucket string, object string, permissions []string) *ObjectsTestIamPermissionsCall { - c := &ObjectsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.object = object - c.urlParams_.SetMulti("permissions", append([]string{}, permissions...)) +// Possible values: +// +// "full" - Include all properties. +// "noAcl" - Omit the owner, acl property. +func (c *ObjectsWatchAllCall) Projection(projection string) *ObjectsWatchAllCall { + c.urlParams_.Set("projection", projection) return c } -// Generation sets the optional parameter "generation": If present, -// selects a specific revision of this object (as opposed to the latest -// version, the default). -func (c *ObjectsTestIamPermissionsCall) Generation(generation int64) *ObjectsTestIamPermissionsCall { - c.urlParams_.Set("generation", fmt.Sprint(generation)) +// StartOffset sets the optional parameter "startOffset": Filter results +// to objects whose names are lexicographically equal to or after +// startOffset. 
If endOffset is also set, the objects listed will have +// names between startOffset (inclusive) and endOffset (exclusive). +func (c *ObjectsWatchAllCall) StartOffset(startOffset string) *ObjectsWatchAllCall { + c.urlParams_.Set("startOffset", startOffset) return c } // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. -func (c *ObjectsTestIamPermissionsCall) UserProject(userProject string) *ObjectsTestIamPermissionsCall { +func (c *ObjectsWatchAllCall) UserProject(userProject string) *ObjectsWatchAllCall { c.urlParams_.Set("userProject", userProject) return c } +// Versions sets the optional parameter "versions": If true, lists all +// versions of an object as distinct results. The default is false. For +// more information, see Object Versioning. +func (c *ObjectsWatchAllCall) Versions(versions bool) *ObjectsWatchAllCall { + c.urlParams_.Set("versions", fmt.Sprint(versions)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ObjectsTestIamPermissionsCall) Fields(s ...googleapi.Field) *ObjectsTestIamPermissionsCall { +func (c *ObjectsWatchAllCall) Fields(s ...googleapi.Field) *ObjectsWatchAllCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *ObjectsTestIamPermissionsCall) IfNoneMatch(entityTag string) *ObjectsTestIamPermissionsCall { - c.ifNoneMatch_ = entityTag - return c -} - // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ObjectsTestIamPermissionsCall) Context(ctx context.Context) *ObjectsTestIamPermissionsCall { +func (c *ObjectsWatchAllCall) Context(ctx context.Context) *ObjectsWatchAllCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ObjectsTestIamPermissionsCall) Header() http.Header { +func (c *ObjectsWatchAllCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ObjectsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { +func (c *ObjectsWatchAllCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.channel) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/iam/testPermissions") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/watch") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, - "object": c.object, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "storage.objects.testIamPermissions" call. -// Exactly one of *TestIamPermissionsResponse or error will be non-nil. -// Any non-2xx status code is an error. Response headers are in either -// *TestIamPermissionsResponse.ServerResponse.Header or (if a response -// was returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *ObjectsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestIamPermissionsResponse, error) { +// Do executes the "storage.objects.watchAll" call. +// Exactly one of *Channel or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Channel.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ObjectsWatchAllCall) Do(opts ...googleapi.CallOption) (*Channel, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -11588,7 +15566,7 @@ func (c *ObjectsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestI if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &TestIamPermissionsResponse{ + ret := &Channel{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -11600,49 +15578,88 @@ func (c *ObjectsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestI } return ret, nil // { - // "description": "Tests a set of permissions on the given object to see which, if any, are held by the caller.", - // "httpMethod": "GET", - // "id": "storage.objects.testIamPermissions", + // "description": "Watch for changes on all objects in a bucket.", + // "httpMethod": "POST", + // "id": "storage.objects.watchAll", // "parameterOrder": [ - // "bucket", - // "object", - // "permissions" + // "bucket" // ], // "parameters": { // "bucket": { - // "description": "Name of the bucket in which the object resides.", + // "description": "Name of the bucket in which to look for objects.", // "location": "path", // "required": true, // "type": "string" // }, - // "generation": { - // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", - // "format": "int64", + // "delimiter": { + // "description": "Returns results in a directory-like mode. items will contain only objects whose names, aside from the prefix, do not contain delimiter. Objects whose names, aside from the prefix, contain delimiter will have their name, truncated after the delimiter, returned in prefixes. Duplicate prefixes are omitted.", // "location": "query", // "type": "string" // }, - // "object": { - // "description": "Name of the object. 
For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", - // "location": "path", - // "required": true, + // "endOffset": { + // "description": "Filter results to objects whose names are lexicographically before endOffset. If startOffset is also set, the objects listed will have names between startOffset (inclusive) and endOffset (exclusive).", + // "location": "query", // "type": "string" // }, - // "permissions": { - // "description": "Permissions to test.", + // "includeTrailingDelimiter": { + // "description": "If true, objects that end in exactly one instance of delimiter will have their metadata included in items in addition to prefixes.", + // "location": "query", + // "type": "boolean" + // }, + // "maxResults": { + // "default": "1000", + // "description": "Maximum number of items plus prefixes to return in a single page of responses. As duplicate prefixes are omitted, fewer total results may be returned than requested. The service will use this parameter or 1,000 items, whichever is smaller.", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "A previously-returned page token representing part of the larger set of results to view.", + // "location": "query", + // "type": "string" + // }, + // "prefix": { + // "description": "Filter results to objects whose names begin with this prefix.", + // "location": "query", + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. Defaults to noAcl.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit the owner, acl property." + // ], + // "location": "query", + // "type": "string" + // }, + // "startOffset": { + // "description": "Filter results to objects whose names are lexicographically equal to or after startOffset. If endOffset is also set, the objects listed will have names between startOffset (inclusive) and endOffset (exclusive).", // "location": "query", - // "repeated": true, - // "required": true, // "type": "string" // }, // "userProject": { // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", // "type": "string" + // }, + // "versions": { + // "description": "If true, lists all versions of an object as distinct results. The default is false. For more information, see Object Versioning.", + // "location": "query", + // "type": "boolean" // } // }, - // "path": "b/{bucket}/o/{object}/iam/testPermissions", + // "path": "b/{bucket}/o/watch", + // "request": { + // "$ref": "Channel", + // "parameterName": "resource" + // }, // "response": { - // "$ref": "TestIamPermissionsResponse" + // "$ref": "Channel" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -11650,133 +15667,40 @@ func (c *ObjectsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestI // "https://www.googleapis.com/auth/devstorage.full_control", // "https://www.googleapis.com/auth/devstorage.read_only", // "https://www.googleapis.com/auth/devstorage.read_write" - // ] + // ], + // "supportsSubscription": true // } } -// method id "storage.objects.update": - -type ObjectsUpdateCall struct { - s *Service - bucket string - object string - object2 *Object - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Update: Updates an object's metadata. 
-// -// - bucket: Name of the bucket in which the object resides. -// - object: Name of the object. For information about how to URL encode -// object names to be path safe, see Encoding URI Path Parts. -func (r *ObjectsService) Update(bucket string, object string, object2 *Object) *ObjectsUpdateCall { - c := &ObjectsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.bucket = bucket - c.object = object - c.object2 = object2 - return c -} - -// Generation sets the optional parameter "generation": If present, -// selects a specific revision of this object (as opposed to the latest -// version, the default). -func (c *ObjectsUpdateCall) Generation(generation int64) *ObjectsUpdateCall { - c.urlParams_.Set("generation", fmt.Sprint(generation)) - return c -} - -// IfGenerationMatch sets the optional parameter "ifGenerationMatch": -// Makes the operation conditional on whether the object's current -// generation matches the given value. Setting to 0 makes the operation -// succeed only if there are no live versions of the object. -func (c *ObjectsUpdateCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsUpdateCall { - c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) - return c -} - -// IfGenerationNotMatch sets the optional parameter -// "ifGenerationNotMatch": Makes the operation conditional on whether -// the object's current generation does not match the given value. If no -// live object exists, the precondition fails. Setting to 0 makes the -// operation succeed only if there is a live version of the object. -func (c *ObjectsUpdateCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsUpdateCall { - c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) - return c -} - -// IfMetagenerationMatch sets the optional parameter -// "ifMetagenerationMatch": Makes the operation conditional on whether -// the object's current metageneration matches the given value. -func (c *ObjectsUpdateCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsUpdateCall { - c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) - return c -} - -// IfMetagenerationNotMatch sets the optional parameter -// "ifMetagenerationNotMatch": Makes the operation conditional on -// whether the object's current metageneration does not match the given -// value. -func (c *ObjectsUpdateCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsUpdateCall { - c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) - return c -} - -// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a -// predefined set of access controls to this object. -// -// Possible values: -// -// "authenticatedRead" - Object owner gets OWNER access, and -// -// allAuthenticatedUsers get READER access. -// -// "bucketOwnerFullControl" - Object owner gets OWNER access, and -// -// project team owners get OWNER access. -// -// "bucketOwnerRead" - Object owner gets OWNER access, and project -// -// team owners get READER access. -// -// "private" - Object owner gets OWNER access. -// "projectPrivate" - Object owner gets OWNER access, and project team -// -// members get access according to their roles. -// -// "publicRead" - Object owner gets OWNER access, and allUsers get -// -// READER access. 
-func (c *ObjectsUpdateCall) PredefinedAcl(predefinedAcl string) *ObjectsUpdateCall { - c.urlParams_.Set("predefinedAcl", predefinedAcl) - return c -} - -// Projection sets the optional parameter "projection": Set of -// properties to return. Defaults to full. -// -// Possible values: -// -// "full" - Include all properties. -// "noAcl" - Omit the owner, acl property. -func (c *ObjectsUpdateCall) Projection(projection string) *ObjectsUpdateCall { - c.urlParams_.Set("projection", projection) - return c +// method id "storage.buckets.operations.cancel": + +type OperationsCancelCall struct { + s *Service + bucket string + operationId string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *ObjectsUpdateCall) UserProject(userProject string) *ObjectsUpdateCall { - c.urlParams_.Set("userProject", userProject) +// Cancel: Starts asynchronous cancellation on a long-running operation. +// The server makes a best effort to cancel the operation, but success +// is not guaranteed. +// +// - bucket: The parent bucket of the operation resource. +// - operationId: The ID of the operation resource. +func (r *OperationsService) Cancel(bucket string, operationId string) *OperationsCancelCall { + c := &OperationsCancelCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.operationId = operationId return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ObjectsUpdateCall) Fields(s ...googleapi.Field) *ObjectsUpdateCall { +func (c *OperationsCancelCall) Fields(s ...googleapi.Field) *OperationsCancelCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -11784,21 +15708,21 @@ func (c *ObjectsUpdateCall) Fields(s ...googleapi.Field) *ObjectsUpdateCall { // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ObjectsUpdateCall) Context(ctx context.Context) *ObjectsUpdateCall { +func (c *OperationsCancelCall) Context(ctx context.Context) *OperationsCancelCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ObjectsUpdateCall) Header() http.Header { +func (c *OperationsCancelCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ObjectsUpdateCall) doRequest(alt string) (*http.Response, error) { +func (c *OperationsCancelCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -11806,324 +15730,322 @@ func (c *ObjectsUpdateCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.object2) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/operations/{operationId}/cancel") urls += "?" 
+ c.urlParams_.Encode() - req, err := http.NewRequest("PUT", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "bucket": c.bucket, - "object": c.object, + "bucket": c.bucket, + "operationId": c.operationId, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "storage.objects.update" call. -// Exactly one of *Object or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Object.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ObjectsUpdateCall) Do(opts ...googleapi.CallOption) (*Object, error) { +// Do executes the "storage.buckets.operations.cancel" call. +func (c *OperationsCancelCall) Do(opts ...googleapi.CallOption) error { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, gensupport.WrapError(&googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - }) - } if err != nil { - return nil, err + return err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { - return nil, gensupport.WrapError(err) - } - ret := &Object{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err + return gensupport.WrapError(err) } - return ret, nil + return nil // { - // "description": "Updates an object's metadata.", - // "httpMethod": "PUT", - // "id": "storage.objects.update", + // "description": "Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed.", + // "httpMethod": "POST", + // "id": "storage.buckets.operations.cancel", // "parameterOrder": [ // "bucket", - // "object" + // "operationId" // ], // "parameters": { // "bucket": { - // "description": "Name of the bucket in which the object resides.", + // "description": "The parent bucket of the operation resource.", // "location": "path", // "required": true, // "type": "string" // }, - // "generation": { - // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifGenerationMatch": { - // "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifGenerationNotMatch": { - // "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. 
Setting to 0 makes the operation succeed only if there is a live version of the object.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationMatch": { - // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "ifMetagenerationNotMatch": { - // "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", - // "format": "int64", - // "location": "query", - // "type": "string" - // }, - // "object": { - // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "operationId": { + // "description": "The ID of the operation resource.", // "location": "path", // "required": true, // "type": "string" - // }, - // "predefinedAcl": { - // "description": "Apply a predefined set of access controls to this object.", - // "enum": [ - // "authenticatedRead", - // "bucketOwnerFullControl", - // "bucketOwnerRead", - // "private", - // "projectPrivate", - // "publicRead" - // ], - // "enumDescriptions": [ - // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", - // "Object owner gets OWNER access, and project team owners get OWNER access.", - // "Object owner gets OWNER access, and project team owners get READER access.", - // "Object owner gets OWNER access.", - // "Object owner gets OWNER access, and project team members get access according to their roles.", - // "Object owner gets OWNER access, and allUsers get READER access." - // ], - // "location": "query", - // "type": "string" - // }, - // "projection": { - // "description": "Set of properties to return. Defaults to full.", - // "enum": [ - // "full", - // "noAcl" - // ], - // "enumDescriptions": [ - // "Include all properties.", - // "Omit the owner, acl property." - // ], - // "location": "query", + // } + // }, + // "path": "b/{bucket}/operations/{operationId}/cancel", + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.buckets.operations.get": + +type OperationsGetCall struct { + s *Service + bucket string + operationId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Gets the latest state of a long-running operation. +// +// - bucket: The parent bucket of the operation resource. +// - operationId: The ID of the operation resource. +func (r *OperationsService) Get(bucket string, operationId string) *OperationsGetCall { + c := &OperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.operationId = operationId + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *OperationsGetCall) Fields(s ...googleapi.Field) *OperationsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. 
Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *OperationsGetCall) IfNoneMatch(entityTag string) *OperationsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *OperationsGetCall) Context(ctx context.Context) *OperationsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *OperationsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *OperationsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/operations/{operationId}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "operationId": c.operationId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.buckets.operations.get" call. +// Exactly one of *GoogleLongrunningOperation or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *GoogleLongrunningOperation.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *OperationsGetCall) Do(opts ...googleapi.CallOption) (*GoogleLongrunningOperation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &GoogleLongrunningOperation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets the latest state of a long-running operation.", + // "httpMethod": "GET", + // "id": "storage.buckets.operations.get", + // "parameterOrder": [ + // "bucket", + // "operationId" + // ], + // "parameters": { + // "bucket": { + // "description": "The parent bucket of the operation resource.", + // "location": "path", + // "required": true, // "type": "string" // }, - // "userProject": { - // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", - // "location": "query", + // "operationId": { + // "description": "The ID of the operation resource.", + // "location": "path", + // "required": true, // "type": "string" // } // }, - // "path": "b/{bucket}/o/{object}", - // "request": { - // "$ref": "Object" - // }, + // "path": "b/{bucket}/operations/{operationId}", // "response": { - // "$ref": "Object" + // "$ref": "GoogleLongrunningOperation" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control" + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" // ] // } } -// method id "storage.objects.watchAll": +// method id "storage.buckets.operations.list": -type ObjectsWatchAllCall struct { - s *Service - bucket string - channel *Channel - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type OperationsListCall struct { + s *Service + bucket string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header } -// WatchAll: Watch for changes on all objects in a bucket. +// List: Lists operations that match the specified filter in the +// request. // -// - bucket: Name of the bucket in which to look for objects. -func (r *ObjectsService) WatchAll(bucket string, channel *Channel) *ObjectsWatchAllCall { - c := &ObjectsWatchAllCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - bucket: Name of the bucket in which to look for operations. +func (r *OperationsService) List(bucket string) *OperationsListCall { + c := &OperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.bucket = bucket - c.channel = channel - return c -} - -// Delimiter sets the optional parameter "delimiter": Returns results in -// a directory-like mode. items will contain only objects whose names, -// aside from the prefix, do not contain delimiter. Objects whose names, -// aside from the prefix, contain delimiter will have their name, -// truncated after the delimiter, returned in prefixes. Duplicate -// prefixes are omitted. -func (c *ObjectsWatchAllCall) Delimiter(delimiter string) *ObjectsWatchAllCall { - c.urlParams_.Set("delimiter", delimiter) - return c -} - -// EndOffset sets the optional parameter "endOffset": Filter results to -// objects whose names are lexicographically before endOffset. If -// startOffset is also set, the objects listed will have names between -// startOffset (inclusive) and endOffset (exclusive). -func (c *ObjectsWatchAllCall) EndOffset(endOffset string) *ObjectsWatchAllCall { - c.urlParams_.Set("endOffset", endOffset) return c } -// IncludeTrailingDelimiter sets the optional parameter -// "includeTrailingDelimiter": If true, objects that end in exactly one -// instance of delimiter will have their metadata included in items in -// addition to prefixes. -func (c *ObjectsWatchAllCall) IncludeTrailingDelimiter(includeTrailingDelimiter bool) *ObjectsWatchAllCall { - c.urlParams_.Set("includeTrailingDelimiter", fmt.Sprint(includeTrailingDelimiter)) +// Filter sets the optional parameter "filter": A filter to narrow down +// results to a preferred subset. The filtering language is documented +// in more detail in AIP-160 (https://google.aip.dev/160). 
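// An illustrative, hedged sketch (editorial addition, not generated or
// upstream code) of how a caller might combine Filter, PageSize and Pages to
// walk every operation on a bucket. It assumes the generated Service exposes
// this resource as svc.Operations, which is the usual convention for these
// clients; the bucket name and the AIP-160 filter string are placeholders.
//
//	call := svc.Operations.List("example-bucket").
//		Filter(`done = false`). // AIP-160 syntax, illustrative only
//		PageSize(50)
//	err := call.Pages(ctx, func(page *GoogleLongrunningListOperationsResponse) error {
//		for _, op := range page.Operations {
//			log.Printf("operation %s done=%v", op.Name, op.Done)
//		}
//		return nil // a non-nil error here halts the iteration
//	})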
+func (c *OperationsListCall) Filter(filter string) *OperationsListCall { + c.urlParams_.Set("filter", filter) return c } -// MaxResults sets the optional parameter "maxResults": Maximum number -// of items plus prefixes to return in a single page of responses. As -// duplicate prefixes are omitted, fewer total results may be returned -// than requested. The service will use this parameter or 1,000 items, -// whichever is smaller. -func (c *ObjectsWatchAllCall) MaxResults(maxResults int64) *ObjectsWatchAllCall { - c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) +// PageSize sets the optional parameter "pageSize": Maximum number of +// items to return in a single page of responses. Fewer total results +// may be returned than requested. The service uses this parameter or +// 100 items, whichever is smaller. +func (c *OperationsListCall) PageSize(pageSize int64) *OperationsListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) return c } // PageToken sets the optional parameter "pageToken": A // previously-returned page token representing part of the larger set of // results to view. -func (c *ObjectsWatchAllCall) PageToken(pageToken string) *ObjectsWatchAllCall { +func (c *OperationsListCall) PageToken(pageToken string) *OperationsListCall { c.urlParams_.Set("pageToken", pageToken) return c } -// Prefix sets the optional parameter "prefix": Filter results to -// objects whose names begin with this prefix. -func (c *ObjectsWatchAllCall) Prefix(prefix string) *ObjectsWatchAllCall { - c.urlParams_.Set("prefix", prefix) - return c -} - -// Projection sets the optional parameter "projection": Set of -// properties to return. Defaults to noAcl. -// -// Possible values: -// -// "full" - Include all properties. -// "noAcl" - Omit the owner, acl property. -func (c *ObjectsWatchAllCall) Projection(projection string) *ObjectsWatchAllCall { - c.urlParams_.Set("projection", projection) - return c -} - -// StartOffset sets the optional parameter "startOffset": Filter results -// to objects whose names are lexicographically equal to or after -// startOffset. If endOffset is also set, the objects listed will have -// names between startOffset (inclusive) and endOffset (exclusive). -func (c *ObjectsWatchAllCall) StartOffset(startOffset string) *ObjectsWatchAllCall { - c.urlParams_.Set("startOffset", startOffset) - return c -} - -// UserProject sets the optional parameter "userProject": The project to -// be billed for this request. Required for Requester Pays buckets. -func (c *ObjectsWatchAllCall) UserProject(userProject string) *ObjectsWatchAllCall { - c.urlParams_.Set("userProject", userProject) - return c -} - -// Versions sets the optional parameter "versions": If true, lists all -// versions of an object as distinct results. The default is false. For -// more information, see Object Versioning. -func (c *ObjectsWatchAllCall) Versions(versions bool) *ObjectsWatchAllCall { - c.urlParams_.Set("versions", fmt.Sprint(versions)) - return c -} - // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *ObjectsWatchAllCall) Fields(s ...googleapi.Field) *ObjectsWatchAllCall { +func (c *OperationsListCall) Fields(s ...googleapi.Field) *OperationsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. 
This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *OperationsListCall) IfNoneMatch(entityTag string) *OperationsListCall { + c.ifNoneMatch_ = entityTag + return c +} + // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *ObjectsWatchAllCall) Context(ctx context.Context) *ObjectsWatchAllCall { +func (c *OperationsListCall) Context(ctx context.Context) *OperationsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *ObjectsWatchAllCall) Header() http.Header { +func (c *OperationsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *ObjectsWatchAllCall) doRequest(alt string) (*http.Response, error) { +func (c *OperationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.channel) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/watch") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/operations") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } @@ -12134,14 +16056,15 @@ func (c *ObjectsWatchAllCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "storage.objects.watchAll" call. -// Exactly one of *Channel or error will be non-nil. Any non-2xx status -// code is an error. Response headers are in either -// *Channel.ServerResponse.Header or (if a response was returned at all) -// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to -// check whether the returned error was because http.StatusNotModified -// was returned. -func (c *ObjectsWatchAllCall) Do(opts ...googleapi.CallOption) (*Channel, error) { +// Do executes the "storage.buckets.operations.list" call. +// Exactly one of *GoogleLongrunningListOperationsResponse or error will +// be non-nil. Any non-2xx status code is an error. Response headers are +// in either +// *GoogleLongrunningListOperationsResponse.ServerResponse.Header or (if +// a response was returned at all) in error.(*googleapi.Error).Header. +// Use googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *OperationsListCall) Do(opts ...googleapi.CallOption) (*GoogleLongrunningListOperationsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -12160,7 +16083,7 @@ func (c *ObjectsWatchAllCall) Do(opts ...googleapi.CallOption) (*Channel, error) if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &Channel{ + ret := &GoogleLongrunningListOperationsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -12172,38 +16095,27 @@ func (c *ObjectsWatchAllCall) Do(opts ...googleapi.CallOption) (*Channel, error) } return ret, nil // { - // "description": "Watch for changes on all objects in a bucket.", - // "httpMethod": "POST", - // "id": "storage.objects.watchAll", + // "description": "Lists operations that match the specified filter in the request.", + // "httpMethod": "GET", + // "id": "storage.buckets.operations.list", // "parameterOrder": [ // "bucket" // ], // "parameters": { // "bucket": { - // "description": "Name of the bucket in which to look for objects.", + // "description": "Name of the bucket in which to look for operations.", // "location": "path", // "required": true, // "type": "string" // }, - // "delimiter": { - // "description": "Returns results in a directory-like mode. items will contain only objects whose names, aside from the prefix, do not contain delimiter. Objects whose names, aside from the prefix, contain delimiter will have their name, truncated after the delimiter, returned in prefixes. Duplicate prefixes are omitted.", - // "location": "query", - // "type": "string" - // }, - // "endOffset": { - // "description": "Filter results to objects whose names are lexicographically before endOffset. If startOffset is also set, the objects listed will have names between startOffset (inclusive) and endOffset (exclusive).", + // "filter": { + // "description": "A filter to narrow down results to a preferred subset. The filtering language is documented in more detail in [AIP-160](https://google.aip.dev/160).", // "location": "query", // "type": "string" // }, - // "includeTrailingDelimiter": { - // "description": "If true, objects that end in exactly one instance of delimiter will have their metadata included in items in addition to prefixes.", - // "location": "query", - // "type": "boolean" - // }, - // "maxResults": { - // "default": "1000", - // "description": "Maximum number of items plus prefixes to return in a single page of responses. As duplicate prefixes are omitted, fewer total results may be returned than requested. The service will use this parameter or 1,000 items, whichever is smaller.", - // "format": "uint32", + // "pageSize": { + // "description": "Maximum number of items to return in a single page of responses. Fewer total results may be returned than requested. The service uses this parameter or 100 items, whichever is smaller.", + // "format": "int32", // "location": "query", // "minimum": "0", // "type": "integer" @@ -12212,48 +16124,11 @@ func (c *ObjectsWatchAllCall) Do(opts ...googleapi.CallOption) (*Channel, error) // "description": "A previously-returned page token representing part of the larger set of results to view.", // "location": "query", // "type": "string" - // }, - // "prefix": { - // "description": "Filter results to objects whose names begin with this prefix.", - // "location": "query", - // "type": "string" - // }, - // "projection": { - // "description": "Set of properties to return. 
Defaults to noAcl.", - // "enum": [ - // "full", - // "noAcl" - // ], - // "enumDescriptions": [ - // "Include all properties.", - // "Omit the owner, acl property." - // ], - // "location": "query", - // "type": "string" - // }, - // "startOffset": { - // "description": "Filter results to objects whose names are lexicographically equal to or after startOffset. If endOffset is also set, the objects listed will have names between startOffset (inclusive) and endOffset (exclusive).", - // "location": "query", - // "type": "string" - // }, - // "userProject": { - // "description": "The project to be billed for this request. Required for Requester Pays buckets.", - // "location": "query", - // "type": "string" - // }, - // "versions": { - // "description": "If true, lists all versions of an object as distinct results. The default is false. For more information, see Object Versioning.", - // "location": "query", - // "type": "boolean" // } // }, - // "path": "b/{bucket}/o/watch", - // "request": { - // "$ref": "Channel", - // "parameterName": "resource" - // }, + // "path": "b/{bucket}/operations", // "response": { - // "$ref": "Channel" + // "$ref": "GoogleLongrunningListOperationsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -12261,12 +16136,32 @@ func (c *ObjectsWatchAllCall) Do(opts ...googleapi.CallOption) (*Channel, error) // "https://www.googleapis.com/auth/devstorage.full_control", // "https://www.googleapis.com/auth/devstorage.read_only", // "https://www.googleapis.com/auth/devstorage.read_write" - // ], - // "supportsSubscription": true + // ] // } } +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *OperationsListCall) Pages(ctx context.Context, f func(*GoogleLongrunningListOperationsResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + // method id "storage.projects.hmacKeys.create": type ProjectsHmacKeysCreateCall struct { diff --git a/vendor/google.golang.org/api/transport/grpc/dial.go b/vendor/google.golang.org/api/transport/grpc/dial.go index e1403e08ee..87a22f7586 100644 --- a/vendor/google.golang.org/api/transport/grpc/dial.go +++ b/vendor/google.golang.org/api/transport/grpc/dial.go @@ -14,10 +14,12 @@ import ( "net" "os" "strings" + "time" "cloud.google.com/go/compute/metadata" "go.opencensus.io/plugin/ocgrpc" "golang.org/x/oauth2" + "golang.org/x/time/rate" "google.golang.org/api/internal" "google.golang.org/api/option" "google.golang.org/grpc" @@ -35,12 +37,12 @@ const disableDirectPath = "GOOGLE_CLOUD_DISABLE_DIRECT_PATH" // Check env to decide if using google-c2p resolver for DirectPath traffic. const enableDirectPathXds = "GOOGLE_CLOUD_ENABLE_DIRECT_PATH_XDS" -// Set at init time by dial_appengine.go. If nil, we're not on App Engine. -var appengineDialerHook func(context.Context) grpc.DialOption - // Set at init time by dial_socketopt.go. If nil, socketopt is not supported. var timeoutDialerOption grpc.DialOption +// Log rate limiter +var logRateLimiter = rate.Sometimes{Interval: 1 * time.Second} + // Dial returns a GRPC connection for use communicating with a Google cloud // service, configured with the given ClientOptions. 
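// A minimal, hedged usage sketch (editorial illustration, not generated or
// upstream code): callers pass standard option.ClientOption values, and the
// scope below is only a placeholder.
//
//	conn, err := Dial(ctx,
//		option.WithScopes("https://www.googleapis.com/auth/devstorage.read_only"),
//	)
//	if err != nil {
//		return err // or handle it
//	}
//	defer conn.Close()
//
// Note on logRateLimiter above: rate.Sometimes from golang.org/x/time/rate runs
// the function passed to Do at most once per Interval, so the DirectPath
// misconfiguration warnings below are emitted no more than roughly once per
// second rather than on every dial attempt.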
func Dial(ctx context.Context, opts ...option.ClientOption) (*grpc.ClientConn, error) { @@ -156,6 +158,9 @@ func dial(ctx context.Context, insecure bool, o *internal.DialSettings) (*grpc.C ) // Attempt Direct Path: + logRateLimiter.Do(func() { + logDirectPathMisconfig(endpoint, creds.TokenSource, o) + }) if isDirectPathEnabled(endpoint, o) && isTokenSourceDirectPathCompatible(creds.TokenSource, o) && metadata.OnGCE() { // Overwrite all of the previously specific DialOptions, DirectPath uses its own set of credentials and certificates. grpcOpts = []grpc.DialOption{ @@ -186,12 +191,6 @@ func dial(ctx context.Context, insecure bool, o *internal.DialSettings) (*grpc.C } } - if appengineDialerHook != nil { - // Use the Socket API on App Engine. - // appengine dialer will override socketopt dialer - grpcOpts = append(grpcOpts, appengineDialerHook(ctx)) - } - // Add tracing, but before the other options, so that clients can override the // gRPC stats handler. // This assumes that gRPC options are processed in order, left to right. @@ -305,6 +304,24 @@ func checkDirectPathEndPoint(endpoint string) bool { return true } +func logDirectPathMisconfig(endpoint string, ts oauth2.TokenSource, o *internal.DialSettings) { + if isDirectPathXdsUsed(o) { + // Case 1: does not enable DirectPath + if !isDirectPathEnabled(endpoint, o) { + log.Println("WARNING: DirectPath is misconfigured. Please set the EnableDirectPath option along with the EnableDirectPathXds option.") + } else { + // Case 2: credential is not correctly set + if !isTokenSourceDirectPathCompatible(ts, o) { + log.Println("WARNING: DirectPath is misconfigured. Please make sure the token source is fetched from GCE metadata server and the default service account is used.") + } + // Case 3: not running on GCE + if !metadata.OnGCE() { + log.Println("WARNING: DirectPath is misconfigured. DirectPath is only available in a GCE environment.") + } + } + } +} + func processAndValidateOpts(opts []option.ClientOption) (*internal.DialSettings, error) { var o internal.DialSettings for _, opt := range opts { diff --git a/vendor/google.golang.org/api/transport/grpc/dial_appengine.go b/vendor/google.golang.org/api/transport/grpc/dial_appengine.go deleted file mode 100644 index fd3dc0565d..0000000000 --- a/vendor/google.golang.org/api/transport/grpc/dial_appengine.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2016 Google LLC. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build appengine -// +build appengine - -package grpc - -import ( - "context" - "net" - "time" - - "google.golang.org/appengine" - "google.golang.org/appengine/socket" - "google.golang.org/grpc" -) - -func init() { - // NOTE: dev_appserver doesn't currently support SSL. - // When it does, this code can be removed. - if appengine.IsDevAppServer() { - return - } - - appengineDialerHook = func(ctx context.Context) grpc.DialOption { - return grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) { - return socket.DialTimeout(ctx, "tcp", addr, timeout) - }) - } -} diff --git a/vendor/google.golang.org/api/transport/http/dial.go b/vendor/google.golang.org/api/transport/http/dial.go index eca0c3ba79..a07362ffdb 100644 --- a/vendor/google.golang.org/api/transport/http/dial.go +++ b/vendor/google.golang.org/api/transport/http/dial.go @@ -145,22 +145,13 @@ func (t *parameterTransport) RoundTrip(req *http.Request) (*http.Response, error return rt.RoundTrip(&newReq) } -// Set at init time by dial_appengine.go. 
If nil, we're not on App Engine. -var appengineUrlfetchHook func(context.Context) http.RoundTripper - -// defaultBaseTransport returns the base HTTP transport. -// On App Engine, this is urlfetch.Transport. -// Otherwise, use a default transport, taking most defaults from -// http.DefaultTransport. +// defaultBaseTransport returns the base HTTP transport. It uses a default +// transport, taking most defaults from http.DefaultTransport. // If TLSCertificate is available, set TLSClientConfig as well. func defaultBaseTransport(ctx context.Context, clientCertSource cert.Source, dialTLSContext func(context.Context, string, string) (net.Conn, error)) http.RoundTripper { - if appengineUrlfetchHook != nil { - return appengineUrlfetchHook(ctx) - } - // Copy http.DefaultTransport except for MaxIdleConnsPerHost setting, - // which is increased due to reported performance issues under load in the GCS - // client. Transport.Clone is only available in Go 1.13 and up. + // which is increased due to reported performance issues under load in the + // GCS client. Transport.Clone is only available in Go 1.13 and up. trans := clonedTransport(http.DefaultTransport) if trans == nil { trans = fallbackBaseTransport() diff --git a/vendor/google.golang.org/api/transport/http/dial_appengine.go b/vendor/google.golang.org/api/transport/http/dial_appengine.go deleted file mode 100644 index f064e133f7..0000000000 --- a/vendor/google.golang.org/api/transport/http/dial_appengine.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2016 Google LLC. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build appengine -// +build appengine - -package http - -import ( - "context" - "net/http" - - "google.golang.org/appengine/urlfetch" -) - -func init() { - appengineUrlfetchHook = func(ctx context.Context) http.RoundTripper { - return &urlfetch.Transport{Context: ctx} - } -} diff --git a/vendor/google.golang.org/appengine/internal/socket/socket_service.pb.go b/vendor/google.golang.org/appengine/internal/socket/socket_service.pb.go deleted file mode 100644 index 4ec872e460..0000000000 --- a/vendor/google.golang.org/appengine/internal/socket/socket_service.pb.go +++ /dev/null @@ -1,2822 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google.golang.org/appengine/internal/socket/socket_service.proto - -package socket - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type RemoteSocketServiceError_ErrorCode int32 - -const ( - RemoteSocketServiceError_SYSTEM_ERROR RemoteSocketServiceError_ErrorCode = 1 - RemoteSocketServiceError_GAI_ERROR RemoteSocketServiceError_ErrorCode = 2 - RemoteSocketServiceError_FAILURE RemoteSocketServiceError_ErrorCode = 4 - RemoteSocketServiceError_PERMISSION_DENIED RemoteSocketServiceError_ErrorCode = 5 - RemoteSocketServiceError_INVALID_REQUEST RemoteSocketServiceError_ErrorCode = 6 - RemoteSocketServiceError_SOCKET_CLOSED RemoteSocketServiceError_ErrorCode = 7 -) - -var RemoteSocketServiceError_ErrorCode_name = map[int32]string{ - 1: "SYSTEM_ERROR", - 2: "GAI_ERROR", - 4: "FAILURE", - 5: "PERMISSION_DENIED", - 6: "INVALID_REQUEST", - 7: "SOCKET_CLOSED", -} -var RemoteSocketServiceError_ErrorCode_value = map[string]int32{ - "SYSTEM_ERROR": 1, - "GAI_ERROR": 2, - "FAILURE": 4, - "PERMISSION_DENIED": 5, - "INVALID_REQUEST": 6, - "SOCKET_CLOSED": 7, -} - -func (x RemoteSocketServiceError_ErrorCode) Enum() *RemoteSocketServiceError_ErrorCode { - p := new(RemoteSocketServiceError_ErrorCode) - *p = x - return p -} -func (x RemoteSocketServiceError_ErrorCode) String() string { - return proto.EnumName(RemoteSocketServiceError_ErrorCode_name, int32(x)) -} -func (x *RemoteSocketServiceError_ErrorCode) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(RemoteSocketServiceError_ErrorCode_value, data, "RemoteSocketServiceError_ErrorCode") - if err != nil { - return err - } - *x = RemoteSocketServiceError_ErrorCode(value) - return nil -} -func (RemoteSocketServiceError_ErrorCode) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{0, 0} -} - -type RemoteSocketServiceError_SystemError int32 - -const ( - RemoteSocketServiceError_SYS_SUCCESS RemoteSocketServiceError_SystemError = 0 - RemoteSocketServiceError_SYS_EPERM RemoteSocketServiceError_SystemError = 1 - RemoteSocketServiceError_SYS_ENOENT RemoteSocketServiceError_SystemError = 2 - RemoteSocketServiceError_SYS_ESRCH RemoteSocketServiceError_SystemError = 3 - RemoteSocketServiceError_SYS_EINTR RemoteSocketServiceError_SystemError = 4 - RemoteSocketServiceError_SYS_EIO RemoteSocketServiceError_SystemError = 5 - RemoteSocketServiceError_SYS_ENXIO RemoteSocketServiceError_SystemError = 6 - RemoteSocketServiceError_SYS_E2BIG RemoteSocketServiceError_SystemError = 7 - RemoteSocketServiceError_SYS_ENOEXEC RemoteSocketServiceError_SystemError = 8 - RemoteSocketServiceError_SYS_EBADF RemoteSocketServiceError_SystemError = 9 - RemoteSocketServiceError_SYS_ECHILD RemoteSocketServiceError_SystemError = 10 - RemoteSocketServiceError_SYS_EAGAIN RemoteSocketServiceError_SystemError = 11 - RemoteSocketServiceError_SYS_EWOULDBLOCK RemoteSocketServiceError_SystemError = 11 - RemoteSocketServiceError_SYS_ENOMEM RemoteSocketServiceError_SystemError = 12 - RemoteSocketServiceError_SYS_EACCES RemoteSocketServiceError_SystemError = 13 - RemoteSocketServiceError_SYS_EFAULT RemoteSocketServiceError_SystemError = 14 - RemoteSocketServiceError_SYS_ENOTBLK RemoteSocketServiceError_SystemError = 15 - RemoteSocketServiceError_SYS_EBUSY RemoteSocketServiceError_SystemError = 16 - RemoteSocketServiceError_SYS_EEXIST RemoteSocketServiceError_SystemError = 17 - RemoteSocketServiceError_SYS_EXDEV RemoteSocketServiceError_SystemError = 18 - RemoteSocketServiceError_SYS_ENODEV RemoteSocketServiceError_SystemError = 19 - RemoteSocketServiceError_SYS_ENOTDIR 
RemoteSocketServiceError_SystemError = 20 - RemoteSocketServiceError_SYS_EISDIR RemoteSocketServiceError_SystemError = 21 - RemoteSocketServiceError_SYS_EINVAL RemoteSocketServiceError_SystemError = 22 - RemoteSocketServiceError_SYS_ENFILE RemoteSocketServiceError_SystemError = 23 - RemoteSocketServiceError_SYS_EMFILE RemoteSocketServiceError_SystemError = 24 - RemoteSocketServiceError_SYS_ENOTTY RemoteSocketServiceError_SystemError = 25 - RemoteSocketServiceError_SYS_ETXTBSY RemoteSocketServiceError_SystemError = 26 - RemoteSocketServiceError_SYS_EFBIG RemoteSocketServiceError_SystemError = 27 - RemoteSocketServiceError_SYS_ENOSPC RemoteSocketServiceError_SystemError = 28 - RemoteSocketServiceError_SYS_ESPIPE RemoteSocketServiceError_SystemError = 29 - RemoteSocketServiceError_SYS_EROFS RemoteSocketServiceError_SystemError = 30 - RemoteSocketServiceError_SYS_EMLINK RemoteSocketServiceError_SystemError = 31 - RemoteSocketServiceError_SYS_EPIPE RemoteSocketServiceError_SystemError = 32 - RemoteSocketServiceError_SYS_EDOM RemoteSocketServiceError_SystemError = 33 - RemoteSocketServiceError_SYS_ERANGE RemoteSocketServiceError_SystemError = 34 - RemoteSocketServiceError_SYS_EDEADLK RemoteSocketServiceError_SystemError = 35 - RemoteSocketServiceError_SYS_EDEADLOCK RemoteSocketServiceError_SystemError = 35 - RemoteSocketServiceError_SYS_ENAMETOOLONG RemoteSocketServiceError_SystemError = 36 - RemoteSocketServiceError_SYS_ENOLCK RemoteSocketServiceError_SystemError = 37 - RemoteSocketServiceError_SYS_ENOSYS RemoteSocketServiceError_SystemError = 38 - RemoteSocketServiceError_SYS_ENOTEMPTY RemoteSocketServiceError_SystemError = 39 - RemoteSocketServiceError_SYS_ELOOP RemoteSocketServiceError_SystemError = 40 - RemoteSocketServiceError_SYS_ENOMSG RemoteSocketServiceError_SystemError = 42 - RemoteSocketServiceError_SYS_EIDRM RemoteSocketServiceError_SystemError = 43 - RemoteSocketServiceError_SYS_ECHRNG RemoteSocketServiceError_SystemError = 44 - RemoteSocketServiceError_SYS_EL2NSYNC RemoteSocketServiceError_SystemError = 45 - RemoteSocketServiceError_SYS_EL3HLT RemoteSocketServiceError_SystemError = 46 - RemoteSocketServiceError_SYS_EL3RST RemoteSocketServiceError_SystemError = 47 - RemoteSocketServiceError_SYS_ELNRNG RemoteSocketServiceError_SystemError = 48 - RemoteSocketServiceError_SYS_EUNATCH RemoteSocketServiceError_SystemError = 49 - RemoteSocketServiceError_SYS_ENOCSI RemoteSocketServiceError_SystemError = 50 - RemoteSocketServiceError_SYS_EL2HLT RemoteSocketServiceError_SystemError = 51 - RemoteSocketServiceError_SYS_EBADE RemoteSocketServiceError_SystemError = 52 - RemoteSocketServiceError_SYS_EBADR RemoteSocketServiceError_SystemError = 53 - RemoteSocketServiceError_SYS_EXFULL RemoteSocketServiceError_SystemError = 54 - RemoteSocketServiceError_SYS_ENOANO RemoteSocketServiceError_SystemError = 55 - RemoteSocketServiceError_SYS_EBADRQC RemoteSocketServiceError_SystemError = 56 - RemoteSocketServiceError_SYS_EBADSLT RemoteSocketServiceError_SystemError = 57 - RemoteSocketServiceError_SYS_EBFONT RemoteSocketServiceError_SystemError = 59 - RemoteSocketServiceError_SYS_ENOSTR RemoteSocketServiceError_SystemError = 60 - RemoteSocketServiceError_SYS_ENODATA RemoteSocketServiceError_SystemError = 61 - RemoteSocketServiceError_SYS_ETIME RemoteSocketServiceError_SystemError = 62 - RemoteSocketServiceError_SYS_ENOSR RemoteSocketServiceError_SystemError = 63 - RemoteSocketServiceError_SYS_ENONET RemoteSocketServiceError_SystemError = 64 - RemoteSocketServiceError_SYS_ENOPKG 
RemoteSocketServiceError_SystemError = 65 - RemoteSocketServiceError_SYS_EREMOTE RemoteSocketServiceError_SystemError = 66 - RemoteSocketServiceError_SYS_ENOLINK RemoteSocketServiceError_SystemError = 67 - RemoteSocketServiceError_SYS_EADV RemoteSocketServiceError_SystemError = 68 - RemoteSocketServiceError_SYS_ESRMNT RemoteSocketServiceError_SystemError = 69 - RemoteSocketServiceError_SYS_ECOMM RemoteSocketServiceError_SystemError = 70 - RemoteSocketServiceError_SYS_EPROTO RemoteSocketServiceError_SystemError = 71 - RemoteSocketServiceError_SYS_EMULTIHOP RemoteSocketServiceError_SystemError = 72 - RemoteSocketServiceError_SYS_EDOTDOT RemoteSocketServiceError_SystemError = 73 - RemoteSocketServiceError_SYS_EBADMSG RemoteSocketServiceError_SystemError = 74 - RemoteSocketServiceError_SYS_EOVERFLOW RemoteSocketServiceError_SystemError = 75 - RemoteSocketServiceError_SYS_ENOTUNIQ RemoteSocketServiceError_SystemError = 76 - RemoteSocketServiceError_SYS_EBADFD RemoteSocketServiceError_SystemError = 77 - RemoteSocketServiceError_SYS_EREMCHG RemoteSocketServiceError_SystemError = 78 - RemoteSocketServiceError_SYS_ELIBACC RemoteSocketServiceError_SystemError = 79 - RemoteSocketServiceError_SYS_ELIBBAD RemoteSocketServiceError_SystemError = 80 - RemoteSocketServiceError_SYS_ELIBSCN RemoteSocketServiceError_SystemError = 81 - RemoteSocketServiceError_SYS_ELIBMAX RemoteSocketServiceError_SystemError = 82 - RemoteSocketServiceError_SYS_ELIBEXEC RemoteSocketServiceError_SystemError = 83 - RemoteSocketServiceError_SYS_EILSEQ RemoteSocketServiceError_SystemError = 84 - RemoteSocketServiceError_SYS_ERESTART RemoteSocketServiceError_SystemError = 85 - RemoteSocketServiceError_SYS_ESTRPIPE RemoteSocketServiceError_SystemError = 86 - RemoteSocketServiceError_SYS_EUSERS RemoteSocketServiceError_SystemError = 87 - RemoteSocketServiceError_SYS_ENOTSOCK RemoteSocketServiceError_SystemError = 88 - RemoteSocketServiceError_SYS_EDESTADDRREQ RemoteSocketServiceError_SystemError = 89 - RemoteSocketServiceError_SYS_EMSGSIZE RemoteSocketServiceError_SystemError = 90 - RemoteSocketServiceError_SYS_EPROTOTYPE RemoteSocketServiceError_SystemError = 91 - RemoteSocketServiceError_SYS_ENOPROTOOPT RemoteSocketServiceError_SystemError = 92 - RemoteSocketServiceError_SYS_EPROTONOSUPPORT RemoteSocketServiceError_SystemError = 93 - RemoteSocketServiceError_SYS_ESOCKTNOSUPPORT RemoteSocketServiceError_SystemError = 94 - RemoteSocketServiceError_SYS_EOPNOTSUPP RemoteSocketServiceError_SystemError = 95 - RemoteSocketServiceError_SYS_ENOTSUP RemoteSocketServiceError_SystemError = 95 - RemoteSocketServiceError_SYS_EPFNOSUPPORT RemoteSocketServiceError_SystemError = 96 - RemoteSocketServiceError_SYS_EAFNOSUPPORT RemoteSocketServiceError_SystemError = 97 - RemoteSocketServiceError_SYS_EADDRINUSE RemoteSocketServiceError_SystemError = 98 - RemoteSocketServiceError_SYS_EADDRNOTAVAIL RemoteSocketServiceError_SystemError = 99 - RemoteSocketServiceError_SYS_ENETDOWN RemoteSocketServiceError_SystemError = 100 - RemoteSocketServiceError_SYS_ENETUNREACH RemoteSocketServiceError_SystemError = 101 - RemoteSocketServiceError_SYS_ENETRESET RemoteSocketServiceError_SystemError = 102 - RemoteSocketServiceError_SYS_ECONNABORTED RemoteSocketServiceError_SystemError = 103 - RemoteSocketServiceError_SYS_ECONNRESET RemoteSocketServiceError_SystemError = 104 - RemoteSocketServiceError_SYS_ENOBUFS RemoteSocketServiceError_SystemError = 105 - RemoteSocketServiceError_SYS_EISCONN RemoteSocketServiceError_SystemError = 106 - 
RemoteSocketServiceError_SYS_ENOTCONN RemoteSocketServiceError_SystemError = 107 - RemoteSocketServiceError_SYS_ESHUTDOWN RemoteSocketServiceError_SystemError = 108 - RemoteSocketServiceError_SYS_ETOOMANYREFS RemoteSocketServiceError_SystemError = 109 - RemoteSocketServiceError_SYS_ETIMEDOUT RemoteSocketServiceError_SystemError = 110 - RemoteSocketServiceError_SYS_ECONNREFUSED RemoteSocketServiceError_SystemError = 111 - RemoteSocketServiceError_SYS_EHOSTDOWN RemoteSocketServiceError_SystemError = 112 - RemoteSocketServiceError_SYS_EHOSTUNREACH RemoteSocketServiceError_SystemError = 113 - RemoteSocketServiceError_SYS_EALREADY RemoteSocketServiceError_SystemError = 114 - RemoteSocketServiceError_SYS_EINPROGRESS RemoteSocketServiceError_SystemError = 115 - RemoteSocketServiceError_SYS_ESTALE RemoteSocketServiceError_SystemError = 116 - RemoteSocketServiceError_SYS_EUCLEAN RemoteSocketServiceError_SystemError = 117 - RemoteSocketServiceError_SYS_ENOTNAM RemoteSocketServiceError_SystemError = 118 - RemoteSocketServiceError_SYS_ENAVAIL RemoteSocketServiceError_SystemError = 119 - RemoteSocketServiceError_SYS_EISNAM RemoteSocketServiceError_SystemError = 120 - RemoteSocketServiceError_SYS_EREMOTEIO RemoteSocketServiceError_SystemError = 121 - RemoteSocketServiceError_SYS_EDQUOT RemoteSocketServiceError_SystemError = 122 - RemoteSocketServiceError_SYS_ENOMEDIUM RemoteSocketServiceError_SystemError = 123 - RemoteSocketServiceError_SYS_EMEDIUMTYPE RemoteSocketServiceError_SystemError = 124 - RemoteSocketServiceError_SYS_ECANCELED RemoteSocketServiceError_SystemError = 125 - RemoteSocketServiceError_SYS_ENOKEY RemoteSocketServiceError_SystemError = 126 - RemoteSocketServiceError_SYS_EKEYEXPIRED RemoteSocketServiceError_SystemError = 127 - RemoteSocketServiceError_SYS_EKEYREVOKED RemoteSocketServiceError_SystemError = 128 - RemoteSocketServiceError_SYS_EKEYREJECTED RemoteSocketServiceError_SystemError = 129 - RemoteSocketServiceError_SYS_EOWNERDEAD RemoteSocketServiceError_SystemError = 130 - RemoteSocketServiceError_SYS_ENOTRECOVERABLE RemoteSocketServiceError_SystemError = 131 - RemoteSocketServiceError_SYS_ERFKILL RemoteSocketServiceError_SystemError = 132 -) - -var RemoteSocketServiceError_SystemError_name = map[int32]string{ - 0: "SYS_SUCCESS", - 1: "SYS_EPERM", - 2: "SYS_ENOENT", - 3: "SYS_ESRCH", - 4: "SYS_EINTR", - 5: "SYS_EIO", - 6: "SYS_ENXIO", - 7: "SYS_E2BIG", - 8: "SYS_ENOEXEC", - 9: "SYS_EBADF", - 10: "SYS_ECHILD", - 11: "SYS_EAGAIN", - // Duplicate value: 11: "SYS_EWOULDBLOCK", - 12: "SYS_ENOMEM", - 13: "SYS_EACCES", - 14: "SYS_EFAULT", - 15: "SYS_ENOTBLK", - 16: "SYS_EBUSY", - 17: "SYS_EEXIST", - 18: "SYS_EXDEV", - 19: "SYS_ENODEV", - 20: "SYS_ENOTDIR", - 21: "SYS_EISDIR", - 22: "SYS_EINVAL", - 23: "SYS_ENFILE", - 24: "SYS_EMFILE", - 25: "SYS_ENOTTY", - 26: "SYS_ETXTBSY", - 27: "SYS_EFBIG", - 28: "SYS_ENOSPC", - 29: "SYS_ESPIPE", - 30: "SYS_EROFS", - 31: "SYS_EMLINK", - 32: "SYS_EPIPE", - 33: "SYS_EDOM", - 34: "SYS_ERANGE", - 35: "SYS_EDEADLK", - // Duplicate value: 35: "SYS_EDEADLOCK", - 36: "SYS_ENAMETOOLONG", - 37: "SYS_ENOLCK", - 38: "SYS_ENOSYS", - 39: "SYS_ENOTEMPTY", - 40: "SYS_ELOOP", - 42: "SYS_ENOMSG", - 43: "SYS_EIDRM", - 44: "SYS_ECHRNG", - 45: "SYS_EL2NSYNC", - 46: "SYS_EL3HLT", - 47: "SYS_EL3RST", - 48: "SYS_ELNRNG", - 49: "SYS_EUNATCH", - 50: "SYS_ENOCSI", - 51: "SYS_EL2HLT", - 52: "SYS_EBADE", - 53: "SYS_EBADR", - 54: "SYS_EXFULL", - 55: "SYS_ENOANO", - 56: "SYS_EBADRQC", - 57: "SYS_EBADSLT", - 59: "SYS_EBFONT", - 60: "SYS_ENOSTR", - 61: "SYS_ENODATA", - 62: 
"SYS_ETIME", - 63: "SYS_ENOSR", - 64: "SYS_ENONET", - 65: "SYS_ENOPKG", - 66: "SYS_EREMOTE", - 67: "SYS_ENOLINK", - 68: "SYS_EADV", - 69: "SYS_ESRMNT", - 70: "SYS_ECOMM", - 71: "SYS_EPROTO", - 72: "SYS_EMULTIHOP", - 73: "SYS_EDOTDOT", - 74: "SYS_EBADMSG", - 75: "SYS_EOVERFLOW", - 76: "SYS_ENOTUNIQ", - 77: "SYS_EBADFD", - 78: "SYS_EREMCHG", - 79: "SYS_ELIBACC", - 80: "SYS_ELIBBAD", - 81: "SYS_ELIBSCN", - 82: "SYS_ELIBMAX", - 83: "SYS_ELIBEXEC", - 84: "SYS_EILSEQ", - 85: "SYS_ERESTART", - 86: "SYS_ESTRPIPE", - 87: "SYS_EUSERS", - 88: "SYS_ENOTSOCK", - 89: "SYS_EDESTADDRREQ", - 90: "SYS_EMSGSIZE", - 91: "SYS_EPROTOTYPE", - 92: "SYS_ENOPROTOOPT", - 93: "SYS_EPROTONOSUPPORT", - 94: "SYS_ESOCKTNOSUPPORT", - 95: "SYS_EOPNOTSUPP", - // Duplicate value: 95: "SYS_ENOTSUP", - 96: "SYS_EPFNOSUPPORT", - 97: "SYS_EAFNOSUPPORT", - 98: "SYS_EADDRINUSE", - 99: "SYS_EADDRNOTAVAIL", - 100: "SYS_ENETDOWN", - 101: "SYS_ENETUNREACH", - 102: "SYS_ENETRESET", - 103: "SYS_ECONNABORTED", - 104: "SYS_ECONNRESET", - 105: "SYS_ENOBUFS", - 106: "SYS_EISCONN", - 107: "SYS_ENOTCONN", - 108: "SYS_ESHUTDOWN", - 109: "SYS_ETOOMANYREFS", - 110: "SYS_ETIMEDOUT", - 111: "SYS_ECONNREFUSED", - 112: "SYS_EHOSTDOWN", - 113: "SYS_EHOSTUNREACH", - 114: "SYS_EALREADY", - 115: "SYS_EINPROGRESS", - 116: "SYS_ESTALE", - 117: "SYS_EUCLEAN", - 118: "SYS_ENOTNAM", - 119: "SYS_ENAVAIL", - 120: "SYS_EISNAM", - 121: "SYS_EREMOTEIO", - 122: "SYS_EDQUOT", - 123: "SYS_ENOMEDIUM", - 124: "SYS_EMEDIUMTYPE", - 125: "SYS_ECANCELED", - 126: "SYS_ENOKEY", - 127: "SYS_EKEYEXPIRED", - 128: "SYS_EKEYREVOKED", - 129: "SYS_EKEYREJECTED", - 130: "SYS_EOWNERDEAD", - 131: "SYS_ENOTRECOVERABLE", - 132: "SYS_ERFKILL", -} -var RemoteSocketServiceError_SystemError_value = map[string]int32{ - "SYS_SUCCESS": 0, - "SYS_EPERM": 1, - "SYS_ENOENT": 2, - "SYS_ESRCH": 3, - "SYS_EINTR": 4, - "SYS_EIO": 5, - "SYS_ENXIO": 6, - "SYS_E2BIG": 7, - "SYS_ENOEXEC": 8, - "SYS_EBADF": 9, - "SYS_ECHILD": 10, - "SYS_EAGAIN": 11, - "SYS_EWOULDBLOCK": 11, - "SYS_ENOMEM": 12, - "SYS_EACCES": 13, - "SYS_EFAULT": 14, - "SYS_ENOTBLK": 15, - "SYS_EBUSY": 16, - "SYS_EEXIST": 17, - "SYS_EXDEV": 18, - "SYS_ENODEV": 19, - "SYS_ENOTDIR": 20, - "SYS_EISDIR": 21, - "SYS_EINVAL": 22, - "SYS_ENFILE": 23, - "SYS_EMFILE": 24, - "SYS_ENOTTY": 25, - "SYS_ETXTBSY": 26, - "SYS_EFBIG": 27, - "SYS_ENOSPC": 28, - "SYS_ESPIPE": 29, - "SYS_EROFS": 30, - "SYS_EMLINK": 31, - "SYS_EPIPE": 32, - "SYS_EDOM": 33, - "SYS_ERANGE": 34, - "SYS_EDEADLK": 35, - "SYS_EDEADLOCK": 35, - "SYS_ENAMETOOLONG": 36, - "SYS_ENOLCK": 37, - "SYS_ENOSYS": 38, - "SYS_ENOTEMPTY": 39, - "SYS_ELOOP": 40, - "SYS_ENOMSG": 42, - "SYS_EIDRM": 43, - "SYS_ECHRNG": 44, - "SYS_EL2NSYNC": 45, - "SYS_EL3HLT": 46, - "SYS_EL3RST": 47, - "SYS_ELNRNG": 48, - "SYS_EUNATCH": 49, - "SYS_ENOCSI": 50, - "SYS_EL2HLT": 51, - "SYS_EBADE": 52, - "SYS_EBADR": 53, - "SYS_EXFULL": 54, - "SYS_ENOANO": 55, - "SYS_EBADRQC": 56, - "SYS_EBADSLT": 57, - "SYS_EBFONT": 59, - "SYS_ENOSTR": 60, - "SYS_ENODATA": 61, - "SYS_ETIME": 62, - "SYS_ENOSR": 63, - "SYS_ENONET": 64, - "SYS_ENOPKG": 65, - "SYS_EREMOTE": 66, - "SYS_ENOLINK": 67, - "SYS_EADV": 68, - "SYS_ESRMNT": 69, - "SYS_ECOMM": 70, - "SYS_EPROTO": 71, - "SYS_EMULTIHOP": 72, - "SYS_EDOTDOT": 73, - "SYS_EBADMSG": 74, - "SYS_EOVERFLOW": 75, - "SYS_ENOTUNIQ": 76, - "SYS_EBADFD": 77, - "SYS_EREMCHG": 78, - "SYS_ELIBACC": 79, - "SYS_ELIBBAD": 80, - "SYS_ELIBSCN": 81, - "SYS_ELIBMAX": 82, - "SYS_ELIBEXEC": 83, - "SYS_EILSEQ": 84, - "SYS_ERESTART": 85, - "SYS_ESTRPIPE": 86, - "SYS_EUSERS": 87, - "SYS_ENOTSOCK": 88, - 
"SYS_EDESTADDRREQ": 89, - "SYS_EMSGSIZE": 90, - "SYS_EPROTOTYPE": 91, - "SYS_ENOPROTOOPT": 92, - "SYS_EPROTONOSUPPORT": 93, - "SYS_ESOCKTNOSUPPORT": 94, - "SYS_EOPNOTSUPP": 95, - "SYS_ENOTSUP": 95, - "SYS_EPFNOSUPPORT": 96, - "SYS_EAFNOSUPPORT": 97, - "SYS_EADDRINUSE": 98, - "SYS_EADDRNOTAVAIL": 99, - "SYS_ENETDOWN": 100, - "SYS_ENETUNREACH": 101, - "SYS_ENETRESET": 102, - "SYS_ECONNABORTED": 103, - "SYS_ECONNRESET": 104, - "SYS_ENOBUFS": 105, - "SYS_EISCONN": 106, - "SYS_ENOTCONN": 107, - "SYS_ESHUTDOWN": 108, - "SYS_ETOOMANYREFS": 109, - "SYS_ETIMEDOUT": 110, - "SYS_ECONNREFUSED": 111, - "SYS_EHOSTDOWN": 112, - "SYS_EHOSTUNREACH": 113, - "SYS_EALREADY": 114, - "SYS_EINPROGRESS": 115, - "SYS_ESTALE": 116, - "SYS_EUCLEAN": 117, - "SYS_ENOTNAM": 118, - "SYS_ENAVAIL": 119, - "SYS_EISNAM": 120, - "SYS_EREMOTEIO": 121, - "SYS_EDQUOT": 122, - "SYS_ENOMEDIUM": 123, - "SYS_EMEDIUMTYPE": 124, - "SYS_ECANCELED": 125, - "SYS_ENOKEY": 126, - "SYS_EKEYEXPIRED": 127, - "SYS_EKEYREVOKED": 128, - "SYS_EKEYREJECTED": 129, - "SYS_EOWNERDEAD": 130, - "SYS_ENOTRECOVERABLE": 131, - "SYS_ERFKILL": 132, -} - -func (x RemoteSocketServiceError_SystemError) Enum() *RemoteSocketServiceError_SystemError { - p := new(RemoteSocketServiceError_SystemError) - *p = x - return p -} -func (x RemoteSocketServiceError_SystemError) String() string { - return proto.EnumName(RemoteSocketServiceError_SystemError_name, int32(x)) -} -func (x *RemoteSocketServiceError_SystemError) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(RemoteSocketServiceError_SystemError_value, data, "RemoteSocketServiceError_SystemError") - if err != nil { - return err - } - *x = RemoteSocketServiceError_SystemError(value) - return nil -} -func (RemoteSocketServiceError_SystemError) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{0, 1} -} - -type CreateSocketRequest_SocketFamily int32 - -const ( - CreateSocketRequest_IPv4 CreateSocketRequest_SocketFamily = 1 - CreateSocketRequest_IPv6 CreateSocketRequest_SocketFamily = 2 -) - -var CreateSocketRequest_SocketFamily_name = map[int32]string{ - 1: "IPv4", - 2: "IPv6", -} -var CreateSocketRequest_SocketFamily_value = map[string]int32{ - "IPv4": 1, - "IPv6": 2, -} - -func (x CreateSocketRequest_SocketFamily) Enum() *CreateSocketRequest_SocketFamily { - p := new(CreateSocketRequest_SocketFamily) - *p = x - return p -} -func (x CreateSocketRequest_SocketFamily) String() string { - return proto.EnumName(CreateSocketRequest_SocketFamily_name, int32(x)) -} -func (x *CreateSocketRequest_SocketFamily) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(CreateSocketRequest_SocketFamily_value, data, "CreateSocketRequest_SocketFamily") - if err != nil { - return err - } - *x = CreateSocketRequest_SocketFamily(value) - return nil -} -func (CreateSocketRequest_SocketFamily) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{2, 0} -} - -type CreateSocketRequest_SocketProtocol int32 - -const ( - CreateSocketRequest_TCP CreateSocketRequest_SocketProtocol = 1 - CreateSocketRequest_UDP CreateSocketRequest_SocketProtocol = 2 -) - -var CreateSocketRequest_SocketProtocol_name = map[int32]string{ - 1: "TCP", - 2: "UDP", -} -var CreateSocketRequest_SocketProtocol_value = map[string]int32{ - "TCP": 1, - "UDP": 2, -} - -func (x CreateSocketRequest_SocketProtocol) Enum() *CreateSocketRequest_SocketProtocol { - p := new(CreateSocketRequest_SocketProtocol) - *p = x - return p -} -func (x 
CreateSocketRequest_SocketProtocol) String() string { - return proto.EnumName(CreateSocketRequest_SocketProtocol_name, int32(x)) -} -func (x *CreateSocketRequest_SocketProtocol) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(CreateSocketRequest_SocketProtocol_value, data, "CreateSocketRequest_SocketProtocol") - if err != nil { - return err - } - *x = CreateSocketRequest_SocketProtocol(value) - return nil -} -func (CreateSocketRequest_SocketProtocol) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{2, 1} -} - -type SocketOption_SocketOptionLevel int32 - -const ( - SocketOption_SOCKET_SOL_IP SocketOption_SocketOptionLevel = 0 - SocketOption_SOCKET_SOL_SOCKET SocketOption_SocketOptionLevel = 1 - SocketOption_SOCKET_SOL_TCP SocketOption_SocketOptionLevel = 6 - SocketOption_SOCKET_SOL_UDP SocketOption_SocketOptionLevel = 17 -) - -var SocketOption_SocketOptionLevel_name = map[int32]string{ - 0: "SOCKET_SOL_IP", - 1: "SOCKET_SOL_SOCKET", - 6: "SOCKET_SOL_TCP", - 17: "SOCKET_SOL_UDP", -} -var SocketOption_SocketOptionLevel_value = map[string]int32{ - "SOCKET_SOL_IP": 0, - "SOCKET_SOL_SOCKET": 1, - "SOCKET_SOL_TCP": 6, - "SOCKET_SOL_UDP": 17, -} - -func (x SocketOption_SocketOptionLevel) Enum() *SocketOption_SocketOptionLevel { - p := new(SocketOption_SocketOptionLevel) - *p = x - return p -} -func (x SocketOption_SocketOptionLevel) String() string { - return proto.EnumName(SocketOption_SocketOptionLevel_name, int32(x)) -} -func (x *SocketOption_SocketOptionLevel) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(SocketOption_SocketOptionLevel_value, data, "SocketOption_SocketOptionLevel") - if err != nil { - return err - } - *x = SocketOption_SocketOptionLevel(value) - return nil -} -func (SocketOption_SocketOptionLevel) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{10, 0} -} - -type SocketOption_SocketOptionName int32 - -const ( - SocketOption_SOCKET_SO_DEBUG SocketOption_SocketOptionName = 1 - SocketOption_SOCKET_SO_REUSEADDR SocketOption_SocketOptionName = 2 - SocketOption_SOCKET_SO_TYPE SocketOption_SocketOptionName = 3 - SocketOption_SOCKET_SO_ERROR SocketOption_SocketOptionName = 4 - SocketOption_SOCKET_SO_DONTROUTE SocketOption_SocketOptionName = 5 - SocketOption_SOCKET_SO_BROADCAST SocketOption_SocketOptionName = 6 - SocketOption_SOCKET_SO_SNDBUF SocketOption_SocketOptionName = 7 - SocketOption_SOCKET_SO_RCVBUF SocketOption_SocketOptionName = 8 - SocketOption_SOCKET_SO_KEEPALIVE SocketOption_SocketOptionName = 9 - SocketOption_SOCKET_SO_OOBINLINE SocketOption_SocketOptionName = 10 - SocketOption_SOCKET_SO_LINGER SocketOption_SocketOptionName = 13 - SocketOption_SOCKET_SO_RCVTIMEO SocketOption_SocketOptionName = 20 - SocketOption_SOCKET_SO_SNDTIMEO SocketOption_SocketOptionName = 21 - SocketOption_SOCKET_IP_TOS SocketOption_SocketOptionName = 1 - SocketOption_SOCKET_IP_TTL SocketOption_SocketOptionName = 2 - SocketOption_SOCKET_IP_HDRINCL SocketOption_SocketOptionName = 3 - SocketOption_SOCKET_IP_OPTIONS SocketOption_SocketOptionName = 4 - SocketOption_SOCKET_TCP_NODELAY SocketOption_SocketOptionName = 1 - SocketOption_SOCKET_TCP_MAXSEG SocketOption_SocketOptionName = 2 - SocketOption_SOCKET_TCP_CORK SocketOption_SocketOptionName = 3 - SocketOption_SOCKET_TCP_KEEPIDLE SocketOption_SocketOptionName = 4 - SocketOption_SOCKET_TCP_KEEPINTVL SocketOption_SocketOptionName = 5 - SocketOption_SOCKET_TCP_KEEPCNT SocketOption_SocketOptionName = 6 - 
SocketOption_SOCKET_TCP_SYNCNT SocketOption_SocketOptionName = 7 - SocketOption_SOCKET_TCP_LINGER2 SocketOption_SocketOptionName = 8 - SocketOption_SOCKET_TCP_DEFER_ACCEPT SocketOption_SocketOptionName = 9 - SocketOption_SOCKET_TCP_WINDOW_CLAMP SocketOption_SocketOptionName = 10 - SocketOption_SOCKET_TCP_INFO SocketOption_SocketOptionName = 11 - SocketOption_SOCKET_TCP_QUICKACK SocketOption_SocketOptionName = 12 -) - -var SocketOption_SocketOptionName_name = map[int32]string{ - 1: "SOCKET_SO_DEBUG", - 2: "SOCKET_SO_REUSEADDR", - 3: "SOCKET_SO_TYPE", - 4: "SOCKET_SO_ERROR", - 5: "SOCKET_SO_DONTROUTE", - 6: "SOCKET_SO_BROADCAST", - 7: "SOCKET_SO_SNDBUF", - 8: "SOCKET_SO_RCVBUF", - 9: "SOCKET_SO_KEEPALIVE", - 10: "SOCKET_SO_OOBINLINE", - 13: "SOCKET_SO_LINGER", - 20: "SOCKET_SO_RCVTIMEO", - 21: "SOCKET_SO_SNDTIMEO", - // Duplicate value: 1: "SOCKET_IP_TOS", - // Duplicate value: 2: "SOCKET_IP_TTL", - // Duplicate value: 3: "SOCKET_IP_HDRINCL", - // Duplicate value: 4: "SOCKET_IP_OPTIONS", - // Duplicate value: 1: "SOCKET_TCP_NODELAY", - // Duplicate value: 2: "SOCKET_TCP_MAXSEG", - // Duplicate value: 3: "SOCKET_TCP_CORK", - // Duplicate value: 4: "SOCKET_TCP_KEEPIDLE", - // Duplicate value: 5: "SOCKET_TCP_KEEPINTVL", - // Duplicate value: 6: "SOCKET_TCP_KEEPCNT", - // Duplicate value: 7: "SOCKET_TCP_SYNCNT", - // Duplicate value: 8: "SOCKET_TCP_LINGER2", - // Duplicate value: 9: "SOCKET_TCP_DEFER_ACCEPT", - // Duplicate value: 10: "SOCKET_TCP_WINDOW_CLAMP", - 11: "SOCKET_TCP_INFO", - 12: "SOCKET_TCP_QUICKACK", -} -var SocketOption_SocketOptionName_value = map[string]int32{ - "SOCKET_SO_DEBUG": 1, - "SOCKET_SO_REUSEADDR": 2, - "SOCKET_SO_TYPE": 3, - "SOCKET_SO_ERROR": 4, - "SOCKET_SO_DONTROUTE": 5, - "SOCKET_SO_BROADCAST": 6, - "SOCKET_SO_SNDBUF": 7, - "SOCKET_SO_RCVBUF": 8, - "SOCKET_SO_KEEPALIVE": 9, - "SOCKET_SO_OOBINLINE": 10, - "SOCKET_SO_LINGER": 13, - "SOCKET_SO_RCVTIMEO": 20, - "SOCKET_SO_SNDTIMEO": 21, - "SOCKET_IP_TOS": 1, - "SOCKET_IP_TTL": 2, - "SOCKET_IP_HDRINCL": 3, - "SOCKET_IP_OPTIONS": 4, - "SOCKET_TCP_NODELAY": 1, - "SOCKET_TCP_MAXSEG": 2, - "SOCKET_TCP_CORK": 3, - "SOCKET_TCP_KEEPIDLE": 4, - "SOCKET_TCP_KEEPINTVL": 5, - "SOCKET_TCP_KEEPCNT": 6, - "SOCKET_TCP_SYNCNT": 7, - "SOCKET_TCP_LINGER2": 8, - "SOCKET_TCP_DEFER_ACCEPT": 9, - "SOCKET_TCP_WINDOW_CLAMP": 10, - "SOCKET_TCP_INFO": 11, - "SOCKET_TCP_QUICKACK": 12, -} - -func (x SocketOption_SocketOptionName) Enum() *SocketOption_SocketOptionName { - p := new(SocketOption_SocketOptionName) - *p = x - return p -} -func (x SocketOption_SocketOptionName) String() string { - return proto.EnumName(SocketOption_SocketOptionName_name, int32(x)) -} -func (x *SocketOption_SocketOptionName) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(SocketOption_SocketOptionName_value, data, "SocketOption_SocketOptionName") - if err != nil { - return err - } - *x = SocketOption_SocketOptionName(value) - return nil -} -func (SocketOption_SocketOptionName) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{10, 1} -} - -type ShutDownRequest_How int32 - -const ( - ShutDownRequest_SOCKET_SHUT_RD ShutDownRequest_How = 1 - ShutDownRequest_SOCKET_SHUT_WR ShutDownRequest_How = 2 - ShutDownRequest_SOCKET_SHUT_RDWR ShutDownRequest_How = 3 -) - -var ShutDownRequest_How_name = map[int32]string{ - 1: "SOCKET_SHUT_RD", - 2: "SOCKET_SHUT_WR", - 3: "SOCKET_SHUT_RDWR", -} -var ShutDownRequest_How_value = map[string]int32{ - "SOCKET_SHUT_RD": 1, - "SOCKET_SHUT_WR": 2, - "SOCKET_SHUT_RDWR": 3, 
-} - -func (x ShutDownRequest_How) Enum() *ShutDownRequest_How { - p := new(ShutDownRequest_How) - *p = x - return p -} -func (x ShutDownRequest_How) String() string { - return proto.EnumName(ShutDownRequest_How_name, int32(x)) -} -func (x *ShutDownRequest_How) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(ShutDownRequest_How_value, data, "ShutDownRequest_How") - if err != nil { - return err - } - *x = ShutDownRequest_How(value) - return nil -} -func (ShutDownRequest_How) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{21, 0} -} - -type ReceiveRequest_Flags int32 - -const ( - ReceiveRequest_MSG_OOB ReceiveRequest_Flags = 1 - ReceiveRequest_MSG_PEEK ReceiveRequest_Flags = 2 -) - -var ReceiveRequest_Flags_name = map[int32]string{ - 1: "MSG_OOB", - 2: "MSG_PEEK", -} -var ReceiveRequest_Flags_value = map[string]int32{ - "MSG_OOB": 1, - "MSG_PEEK": 2, -} - -func (x ReceiveRequest_Flags) Enum() *ReceiveRequest_Flags { - p := new(ReceiveRequest_Flags) - *p = x - return p -} -func (x ReceiveRequest_Flags) String() string { - return proto.EnumName(ReceiveRequest_Flags_name, int32(x)) -} -func (x *ReceiveRequest_Flags) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(ReceiveRequest_Flags_value, data, "ReceiveRequest_Flags") - if err != nil { - return err - } - *x = ReceiveRequest_Flags(value) - return nil -} -func (ReceiveRequest_Flags) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{27, 0} -} - -type PollEvent_PollEventFlag int32 - -const ( - PollEvent_SOCKET_POLLNONE PollEvent_PollEventFlag = 0 - PollEvent_SOCKET_POLLIN PollEvent_PollEventFlag = 1 - PollEvent_SOCKET_POLLPRI PollEvent_PollEventFlag = 2 - PollEvent_SOCKET_POLLOUT PollEvent_PollEventFlag = 4 - PollEvent_SOCKET_POLLERR PollEvent_PollEventFlag = 8 - PollEvent_SOCKET_POLLHUP PollEvent_PollEventFlag = 16 - PollEvent_SOCKET_POLLNVAL PollEvent_PollEventFlag = 32 - PollEvent_SOCKET_POLLRDNORM PollEvent_PollEventFlag = 64 - PollEvent_SOCKET_POLLRDBAND PollEvent_PollEventFlag = 128 - PollEvent_SOCKET_POLLWRNORM PollEvent_PollEventFlag = 256 - PollEvent_SOCKET_POLLWRBAND PollEvent_PollEventFlag = 512 - PollEvent_SOCKET_POLLMSG PollEvent_PollEventFlag = 1024 - PollEvent_SOCKET_POLLREMOVE PollEvent_PollEventFlag = 4096 - PollEvent_SOCKET_POLLRDHUP PollEvent_PollEventFlag = 8192 -) - -var PollEvent_PollEventFlag_name = map[int32]string{ - 0: "SOCKET_POLLNONE", - 1: "SOCKET_POLLIN", - 2: "SOCKET_POLLPRI", - 4: "SOCKET_POLLOUT", - 8: "SOCKET_POLLERR", - 16: "SOCKET_POLLHUP", - 32: "SOCKET_POLLNVAL", - 64: "SOCKET_POLLRDNORM", - 128: "SOCKET_POLLRDBAND", - 256: "SOCKET_POLLWRNORM", - 512: "SOCKET_POLLWRBAND", - 1024: "SOCKET_POLLMSG", - 4096: "SOCKET_POLLREMOVE", - 8192: "SOCKET_POLLRDHUP", -} -var PollEvent_PollEventFlag_value = map[string]int32{ - "SOCKET_POLLNONE": 0, - "SOCKET_POLLIN": 1, - "SOCKET_POLLPRI": 2, - "SOCKET_POLLOUT": 4, - "SOCKET_POLLERR": 8, - "SOCKET_POLLHUP": 16, - "SOCKET_POLLNVAL": 32, - "SOCKET_POLLRDNORM": 64, - "SOCKET_POLLRDBAND": 128, - "SOCKET_POLLWRNORM": 256, - "SOCKET_POLLWRBAND": 512, - "SOCKET_POLLMSG": 1024, - "SOCKET_POLLREMOVE": 4096, - "SOCKET_POLLRDHUP": 8192, -} - -func (x PollEvent_PollEventFlag) Enum() *PollEvent_PollEventFlag { - p := new(PollEvent_PollEventFlag) - *p = x - return p -} -func (x PollEvent_PollEventFlag) String() string { - return proto.EnumName(PollEvent_PollEventFlag_name, int32(x)) -} -func (x *PollEvent_PollEventFlag) UnmarshalJSON(data 
[]byte) error { - value, err := proto.UnmarshalJSONEnum(PollEvent_PollEventFlag_value, data, "PollEvent_PollEventFlag") - if err != nil { - return err - } - *x = PollEvent_PollEventFlag(value) - return nil -} -func (PollEvent_PollEventFlag) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{29, 0} -} - -type ResolveReply_ErrorCode int32 - -const ( - ResolveReply_SOCKET_EAI_ADDRFAMILY ResolveReply_ErrorCode = 1 - ResolveReply_SOCKET_EAI_AGAIN ResolveReply_ErrorCode = 2 - ResolveReply_SOCKET_EAI_BADFLAGS ResolveReply_ErrorCode = 3 - ResolveReply_SOCKET_EAI_FAIL ResolveReply_ErrorCode = 4 - ResolveReply_SOCKET_EAI_FAMILY ResolveReply_ErrorCode = 5 - ResolveReply_SOCKET_EAI_MEMORY ResolveReply_ErrorCode = 6 - ResolveReply_SOCKET_EAI_NODATA ResolveReply_ErrorCode = 7 - ResolveReply_SOCKET_EAI_NONAME ResolveReply_ErrorCode = 8 - ResolveReply_SOCKET_EAI_SERVICE ResolveReply_ErrorCode = 9 - ResolveReply_SOCKET_EAI_SOCKTYPE ResolveReply_ErrorCode = 10 - ResolveReply_SOCKET_EAI_SYSTEM ResolveReply_ErrorCode = 11 - ResolveReply_SOCKET_EAI_BADHINTS ResolveReply_ErrorCode = 12 - ResolveReply_SOCKET_EAI_PROTOCOL ResolveReply_ErrorCode = 13 - ResolveReply_SOCKET_EAI_OVERFLOW ResolveReply_ErrorCode = 14 - ResolveReply_SOCKET_EAI_MAX ResolveReply_ErrorCode = 15 -) - -var ResolveReply_ErrorCode_name = map[int32]string{ - 1: "SOCKET_EAI_ADDRFAMILY", - 2: "SOCKET_EAI_AGAIN", - 3: "SOCKET_EAI_BADFLAGS", - 4: "SOCKET_EAI_FAIL", - 5: "SOCKET_EAI_FAMILY", - 6: "SOCKET_EAI_MEMORY", - 7: "SOCKET_EAI_NODATA", - 8: "SOCKET_EAI_NONAME", - 9: "SOCKET_EAI_SERVICE", - 10: "SOCKET_EAI_SOCKTYPE", - 11: "SOCKET_EAI_SYSTEM", - 12: "SOCKET_EAI_BADHINTS", - 13: "SOCKET_EAI_PROTOCOL", - 14: "SOCKET_EAI_OVERFLOW", - 15: "SOCKET_EAI_MAX", -} -var ResolveReply_ErrorCode_value = map[string]int32{ - "SOCKET_EAI_ADDRFAMILY": 1, - "SOCKET_EAI_AGAIN": 2, - "SOCKET_EAI_BADFLAGS": 3, - "SOCKET_EAI_FAIL": 4, - "SOCKET_EAI_FAMILY": 5, - "SOCKET_EAI_MEMORY": 6, - "SOCKET_EAI_NODATA": 7, - "SOCKET_EAI_NONAME": 8, - "SOCKET_EAI_SERVICE": 9, - "SOCKET_EAI_SOCKTYPE": 10, - "SOCKET_EAI_SYSTEM": 11, - "SOCKET_EAI_BADHINTS": 12, - "SOCKET_EAI_PROTOCOL": 13, - "SOCKET_EAI_OVERFLOW": 14, - "SOCKET_EAI_MAX": 15, -} - -func (x ResolveReply_ErrorCode) Enum() *ResolveReply_ErrorCode { - p := new(ResolveReply_ErrorCode) - *p = x - return p -} -func (x ResolveReply_ErrorCode) String() string { - return proto.EnumName(ResolveReply_ErrorCode_name, int32(x)) -} -func (x *ResolveReply_ErrorCode) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(ResolveReply_ErrorCode_value, data, "ResolveReply_ErrorCode") - if err != nil { - return err - } - *x = ResolveReply_ErrorCode(value) - return nil -} -func (ResolveReply_ErrorCode) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{33, 0} -} - -type RemoteSocketServiceError struct { - SystemError *int32 `protobuf:"varint,1,opt,name=system_error,json=systemError,def=0" json:"system_error,omitempty"` - ErrorDetail *string `protobuf:"bytes,2,opt,name=error_detail,json=errorDetail" json:"error_detail,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *RemoteSocketServiceError) Reset() { *m = RemoteSocketServiceError{} } -func (m *RemoteSocketServiceError) String() string { return proto.CompactTextString(m) } -func (*RemoteSocketServiceError) ProtoMessage() {} -func (*RemoteSocketServiceError) Descriptor() ([]byte, 
[]int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{0} -} -func (m *RemoteSocketServiceError) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_RemoteSocketServiceError.Unmarshal(m, b) -} -func (m *RemoteSocketServiceError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_RemoteSocketServiceError.Marshal(b, m, deterministic) -} -func (dst *RemoteSocketServiceError) XXX_Merge(src proto.Message) { - xxx_messageInfo_RemoteSocketServiceError.Merge(dst, src) -} -func (m *RemoteSocketServiceError) XXX_Size() int { - return xxx_messageInfo_RemoteSocketServiceError.Size(m) -} -func (m *RemoteSocketServiceError) XXX_DiscardUnknown() { - xxx_messageInfo_RemoteSocketServiceError.DiscardUnknown(m) -} - -var xxx_messageInfo_RemoteSocketServiceError proto.InternalMessageInfo - -const Default_RemoteSocketServiceError_SystemError int32 = 0 - -func (m *RemoteSocketServiceError) GetSystemError() int32 { - if m != nil && m.SystemError != nil { - return *m.SystemError - } - return Default_RemoteSocketServiceError_SystemError -} - -func (m *RemoteSocketServiceError) GetErrorDetail() string { - if m != nil && m.ErrorDetail != nil { - return *m.ErrorDetail - } - return "" -} - -type AddressPort struct { - Port *int32 `protobuf:"varint,1,req,name=port" json:"port,omitempty"` - PackedAddress []byte `protobuf:"bytes,2,opt,name=packed_address,json=packedAddress" json:"packed_address,omitempty"` - HostnameHint *string `protobuf:"bytes,3,opt,name=hostname_hint,json=hostnameHint" json:"hostname_hint,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AddressPort) Reset() { *m = AddressPort{} } -func (m *AddressPort) String() string { return proto.CompactTextString(m) } -func (*AddressPort) ProtoMessage() {} -func (*AddressPort) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{1} -} -func (m *AddressPort) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_AddressPort.Unmarshal(m, b) -} -func (m *AddressPort) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_AddressPort.Marshal(b, m, deterministic) -} -func (dst *AddressPort) XXX_Merge(src proto.Message) { - xxx_messageInfo_AddressPort.Merge(dst, src) -} -func (m *AddressPort) XXX_Size() int { - return xxx_messageInfo_AddressPort.Size(m) -} -func (m *AddressPort) XXX_DiscardUnknown() { - xxx_messageInfo_AddressPort.DiscardUnknown(m) -} - -var xxx_messageInfo_AddressPort proto.InternalMessageInfo - -func (m *AddressPort) GetPort() int32 { - if m != nil && m.Port != nil { - return *m.Port - } - return 0 -} - -func (m *AddressPort) GetPackedAddress() []byte { - if m != nil { - return m.PackedAddress - } - return nil -} - -func (m *AddressPort) GetHostnameHint() string { - if m != nil && m.HostnameHint != nil { - return *m.HostnameHint - } - return "" -} - -type CreateSocketRequest struct { - Family *CreateSocketRequest_SocketFamily `protobuf:"varint,1,req,name=family,enum=appengine.CreateSocketRequest_SocketFamily" json:"family,omitempty"` - Protocol *CreateSocketRequest_SocketProtocol `protobuf:"varint,2,req,name=protocol,enum=appengine.CreateSocketRequest_SocketProtocol" json:"protocol,omitempty"` - SocketOptions []*SocketOption `protobuf:"bytes,3,rep,name=socket_options,json=socketOptions" json:"socket_options,omitempty"` - ProxyExternalIp *AddressPort `protobuf:"bytes,4,opt,name=proxy_external_ip,json=proxyExternalIp" 
json:"proxy_external_ip,omitempty"` - ListenBacklog *int32 `protobuf:"varint,5,opt,name=listen_backlog,json=listenBacklog,def=0" json:"listen_backlog,omitempty"` - RemoteIp *AddressPort `protobuf:"bytes,6,opt,name=remote_ip,json=remoteIp" json:"remote_ip,omitempty"` - AppId *string `protobuf:"bytes,9,opt,name=app_id,json=appId" json:"app_id,omitempty"` - ProjectId *int64 `protobuf:"varint,10,opt,name=project_id,json=projectId" json:"project_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CreateSocketRequest) Reset() { *m = CreateSocketRequest{} } -func (m *CreateSocketRequest) String() string { return proto.CompactTextString(m) } -func (*CreateSocketRequest) ProtoMessage() {} -func (*CreateSocketRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{2} -} -func (m *CreateSocketRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CreateSocketRequest.Unmarshal(m, b) -} -func (m *CreateSocketRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CreateSocketRequest.Marshal(b, m, deterministic) -} -func (dst *CreateSocketRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_CreateSocketRequest.Merge(dst, src) -} -func (m *CreateSocketRequest) XXX_Size() int { - return xxx_messageInfo_CreateSocketRequest.Size(m) -} -func (m *CreateSocketRequest) XXX_DiscardUnknown() { - xxx_messageInfo_CreateSocketRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_CreateSocketRequest proto.InternalMessageInfo - -const Default_CreateSocketRequest_ListenBacklog int32 = 0 - -func (m *CreateSocketRequest) GetFamily() CreateSocketRequest_SocketFamily { - if m != nil && m.Family != nil { - return *m.Family - } - return CreateSocketRequest_IPv4 -} - -func (m *CreateSocketRequest) GetProtocol() CreateSocketRequest_SocketProtocol { - if m != nil && m.Protocol != nil { - return *m.Protocol - } - return CreateSocketRequest_TCP -} - -func (m *CreateSocketRequest) GetSocketOptions() []*SocketOption { - if m != nil { - return m.SocketOptions - } - return nil -} - -func (m *CreateSocketRequest) GetProxyExternalIp() *AddressPort { - if m != nil { - return m.ProxyExternalIp - } - return nil -} - -func (m *CreateSocketRequest) GetListenBacklog() int32 { - if m != nil && m.ListenBacklog != nil { - return *m.ListenBacklog - } - return Default_CreateSocketRequest_ListenBacklog -} - -func (m *CreateSocketRequest) GetRemoteIp() *AddressPort { - if m != nil { - return m.RemoteIp - } - return nil -} - -func (m *CreateSocketRequest) GetAppId() string { - if m != nil && m.AppId != nil { - return *m.AppId - } - return "" -} - -func (m *CreateSocketRequest) GetProjectId() int64 { - if m != nil && m.ProjectId != nil { - return *m.ProjectId - } - return 0 -} - -type CreateSocketReply struct { - SocketDescriptor *string `protobuf:"bytes,1,opt,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` - ServerAddress *AddressPort `protobuf:"bytes,3,opt,name=server_address,json=serverAddress" json:"server_address,omitempty"` - ProxyExternalIp *AddressPort `protobuf:"bytes,4,opt,name=proxy_external_ip,json=proxyExternalIp" json:"proxy_external_ip,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CreateSocketReply) Reset() { *m = CreateSocketReply{} } -func (m *CreateSocketReply) String() string { 
return proto.CompactTextString(m) } -func (*CreateSocketReply) ProtoMessage() {} -func (*CreateSocketReply) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{3} -} - -var extRange_CreateSocketReply = []proto.ExtensionRange{ - {Start: 1000, End: 536870911}, -} - -func (*CreateSocketReply) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_CreateSocketReply -} -func (m *CreateSocketReply) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CreateSocketReply.Unmarshal(m, b) -} -func (m *CreateSocketReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CreateSocketReply.Marshal(b, m, deterministic) -} -func (dst *CreateSocketReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_CreateSocketReply.Merge(dst, src) -} -func (m *CreateSocketReply) XXX_Size() int { - return xxx_messageInfo_CreateSocketReply.Size(m) -} -func (m *CreateSocketReply) XXX_DiscardUnknown() { - xxx_messageInfo_CreateSocketReply.DiscardUnknown(m) -} - -var xxx_messageInfo_CreateSocketReply proto.InternalMessageInfo - -func (m *CreateSocketReply) GetSocketDescriptor() string { - if m != nil && m.SocketDescriptor != nil { - return *m.SocketDescriptor - } - return "" -} - -func (m *CreateSocketReply) GetServerAddress() *AddressPort { - if m != nil { - return m.ServerAddress - } - return nil -} - -func (m *CreateSocketReply) GetProxyExternalIp() *AddressPort { - if m != nil { - return m.ProxyExternalIp - } - return nil -} - -type BindRequest struct { - SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` - ProxyExternalIp *AddressPort `protobuf:"bytes,2,req,name=proxy_external_ip,json=proxyExternalIp" json:"proxy_external_ip,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *BindRequest) Reset() { *m = BindRequest{} } -func (m *BindRequest) String() string { return proto.CompactTextString(m) } -func (*BindRequest) ProtoMessage() {} -func (*BindRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{4} -} -func (m *BindRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_BindRequest.Unmarshal(m, b) -} -func (m *BindRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_BindRequest.Marshal(b, m, deterministic) -} -func (dst *BindRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_BindRequest.Merge(dst, src) -} -func (m *BindRequest) XXX_Size() int { - return xxx_messageInfo_BindRequest.Size(m) -} -func (m *BindRequest) XXX_DiscardUnknown() { - xxx_messageInfo_BindRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_BindRequest proto.InternalMessageInfo - -func (m *BindRequest) GetSocketDescriptor() string { - if m != nil && m.SocketDescriptor != nil { - return *m.SocketDescriptor - } - return "" -} - -func (m *BindRequest) GetProxyExternalIp() *AddressPort { - if m != nil { - return m.ProxyExternalIp - } - return nil -} - -type BindReply struct { - ProxyExternalIp *AddressPort `protobuf:"bytes,1,opt,name=proxy_external_ip,json=proxyExternalIp" json:"proxy_external_ip,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *BindReply) Reset() { *m = BindReply{} } -func (m *BindReply) String() string { return proto.CompactTextString(m) } -func (*BindReply) ProtoMessage() {} -func 
(*BindReply) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{5} -} -func (m *BindReply) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_BindReply.Unmarshal(m, b) -} -func (m *BindReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_BindReply.Marshal(b, m, deterministic) -} -func (dst *BindReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_BindReply.Merge(dst, src) -} -func (m *BindReply) XXX_Size() int { - return xxx_messageInfo_BindReply.Size(m) -} -func (m *BindReply) XXX_DiscardUnknown() { - xxx_messageInfo_BindReply.DiscardUnknown(m) -} - -var xxx_messageInfo_BindReply proto.InternalMessageInfo - -func (m *BindReply) GetProxyExternalIp() *AddressPort { - if m != nil { - return m.ProxyExternalIp - } - return nil -} - -type GetSocketNameRequest struct { - SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetSocketNameRequest) Reset() { *m = GetSocketNameRequest{} } -func (m *GetSocketNameRequest) String() string { return proto.CompactTextString(m) } -func (*GetSocketNameRequest) ProtoMessage() {} -func (*GetSocketNameRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{6} -} -func (m *GetSocketNameRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetSocketNameRequest.Unmarshal(m, b) -} -func (m *GetSocketNameRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetSocketNameRequest.Marshal(b, m, deterministic) -} -func (dst *GetSocketNameRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetSocketNameRequest.Merge(dst, src) -} -func (m *GetSocketNameRequest) XXX_Size() int { - return xxx_messageInfo_GetSocketNameRequest.Size(m) -} -func (m *GetSocketNameRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetSocketNameRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_GetSocketNameRequest proto.InternalMessageInfo - -func (m *GetSocketNameRequest) GetSocketDescriptor() string { - if m != nil && m.SocketDescriptor != nil { - return *m.SocketDescriptor - } - return "" -} - -type GetSocketNameReply struct { - ProxyExternalIp *AddressPort `protobuf:"bytes,2,opt,name=proxy_external_ip,json=proxyExternalIp" json:"proxy_external_ip,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetSocketNameReply) Reset() { *m = GetSocketNameReply{} } -func (m *GetSocketNameReply) String() string { return proto.CompactTextString(m) } -func (*GetSocketNameReply) ProtoMessage() {} -func (*GetSocketNameReply) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{7} -} -func (m *GetSocketNameReply) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetSocketNameReply.Unmarshal(m, b) -} -func (m *GetSocketNameReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetSocketNameReply.Marshal(b, m, deterministic) -} -func (dst *GetSocketNameReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetSocketNameReply.Merge(dst, src) -} -func (m *GetSocketNameReply) XXX_Size() int { - return xxx_messageInfo_GetSocketNameReply.Size(m) -} -func (m *GetSocketNameReply) XXX_DiscardUnknown() { - 
xxx_messageInfo_GetSocketNameReply.DiscardUnknown(m) -} - -var xxx_messageInfo_GetSocketNameReply proto.InternalMessageInfo - -func (m *GetSocketNameReply) GetProxyExternalIp() *AddressPort { - if m != nil { - return m.ProxyExternalIp - } - return nil -} - -type GetPeerNameRequest struct { - SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetPeerNameRequest) Reset() { *m = GetPeerNameRequest{} } -func (m *GetPeerNameRequest) String() string { return proto.CompactTextString(m) } -func (*GetPeerNameRequest) ProtoMessage() {} -func (*GetPeerNameRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{8} -} -func (m *GetPeerNameRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetPeerNameRequest.Unmarshal(m, b) -} -func (m *GetPeerNameRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetPeerNameRequest.Marshal(b, m, deterministic) -} -func (dst *GetPeerNameRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetPeerNameRequest.Merge(dst, src) -} -func (m *GetPeerNameRequest) XXX_Size() int { - return xxx_messageInfo_GetPeerNameRequest.Size(m) -} -func (m *GetPeerNameRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetPeerNameRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_GetPeerNameRequest proto.InternalMessageInfo - -func (m *GetPeerNameRequest) GetSocketDescriptor() string { - if m != nil && m.SocketDescriptor != nil { - return *m.SocketDescriptor - } - return "" -} - -type GetPeerNameReply struct { - PeerIp *AddressPort `protobuf:"bytes,2,opt,name=peer_ip,json=peerIp" json:"peer_ip,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetPeerNameReply) Reset() { *m = GetPeerNameReply{} } -func (m *GetPeerNameReply) String() string { return proto.CompactTextString(m) } -func (*GetPeerNameReply) ProtoMessage() {} -func (*GetPeerNameReply) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{9} -} -func (m *GetPeerNameReply) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetPeerNameReply.Unmarshal(m, b) -} -func (m *GetPeerNameReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetPeerNameReply.Marshal(b, m, deterministic) -} -func (dst *GetPeerNameReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetPeerNameReply.Merge(dst, src) -} -func (m *GetPeerNameReply) XXX_Size() int { - return xxx_messageInfo_GetPeerNameReply.Size(m) -} -func (m *GetPeerNameReply) XXX_DiscardUnknown() { - xxx_messageInfo_GetPeerNameReply.DiscardUnknown(m) -} - -var xxx_messageInfo_GetPeerNameReply proto.InternalMessageInfo - -func (m *GetPeerNameReply) GetPeerIp() *AddressPort { - if m != nil { - return m.PeerIp - } - return nil -} - -type SocketOption struct { - Level *SocketOption_SocketOptionLevel `protobuf:"varint,1,req,name=level,enum=appengine.SocketOption_SocketOptionLevel" json:"level,omitempty"` - Option *SocketOption_SocketOptionName `protobuf:"varint,2,req,name=option,enum=appengine.SocketOption_SocketOptionName" json:"option,omitempty"` - Value []byte `protobuf:"bytes,3,req,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache 
int32 `json:"-"` -} - -func (m *SocketOption) Reset() { *m = SocketOption{} } -func (m *SocketOption) String() string { return proto.CompactTextString(m) } -func (*SocketOption) ProtoMessage() {} -func (*SocketOption) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{10} -} -func (m *SocketOption) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SocketOption.Unmarshal(m, b) -} -func (m *SocketOption) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SocketOption.Marshal(b, m, deterministic) -} -func (dst *SocketOption) XXX_Merge(src proto.Message) { - xxx_messageInfo_SocketOption.Merge(dst, src) -} -func (m *SocketOption) XXX_Size() int { - return xxx_messageInfo_SocketOption.Size(m) -} -func (m *SocketOption) XXX_DiscardUnknown() { - xxx_messageInfo_SocketOption.DiscardUnknown(m) -} - -var xxx_messageInfo_SocketOption proto.InternalMessageInfo - -func (m *SocketOption) GetLevel() SocketOption_SocketOptionLevel { - if m != nil && m.Level != nil { - return *m.Level - } - return SocketOption_SOCKET_SOL_IP -} - -func (m *SocketOption) GetOption() SocketOption_SocketOptionName { - if m != nil && m.Option != nil { - return *m.Option - } - return SocketOption_SOCKET_SO_DEBUG -} - -func (m *SocketOption) GetValue() []byte { - if m != nil { - return m.Value - } - return nil -} - -type SetSocketOptionsRequest struct { - SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` - Options []*SocketOption `protobuf:"bytes,2,rep,name=options" json:"options,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SetSocketOptionsRequest) Reset() { *m = SetSocketOptionsRequest{} } -func (m *SetSocketOptionsRequest) String() string { return proto.CompactTextString(m) } -func (*SetSocketOptionsRequest) ProtoMessage() {} -func (*SetSocketOptionsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{11} -} -func (m *SetSocketOptionsRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SetSocketOptionsRequest.Unmarshal(m, b) -} -func (m *SetSocketOptionsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SetSocketOptionsRequest.Marshal(b, m, deterministic) -} -func (dst *SetSocketOptionsRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_SetSocketOptionsRequest.Merge(dst, src) -} -func (m *SetSocketOptionsRequest) XXX_Size() int { - return xxx_messageInfo_SetSocketOptionsRequest.Size(m) -} -func (m *SetSocketOptionsRequest) XXX_DiscardUnknown() { - xxx_messageInfo_SetSocketOptionsRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_SetSocketOptionsRequest proto.InternalMessageInfo - -func (m *SetSocketOptionsRequest) GetSocketDescriptor() string { - if m != nil && m.SocketDescriptor != nil { - return *m.SocketDescriptor - } - return "" -} - -func (m *SetSocketOptionsRequest) GetOptions() []*SocketOption { - if m != nil { - return m.Options - } - return nil -} - -type SetSocketOptionsReply struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SetSocketOptionsReply) Reset() { *m = SetSocketOptionsReply{} } -func (m *SetSocketOptionsReply) String() string { return proto.CompactTextString(m) } -func (*SetSocketOptionsReply) ProtoMessage() {} -func (*SetSocketOptionsReply) Descriptor() 
([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{12} -} -func (m *SetSocketOptionsReply) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SetSocketOptionsReply.Unmarshal(m, b) -} -func (m *SetSocketOptionsReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SetSocketOptionsReply.Marshal(b, m, deterministic) -} -func (dst *SetSocketOptionsReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_SetSocketOptionsReply.Merge(dst, src) -} -func (m *SetSocketOptionsReply) XXX_Size() int { - return xxx_messageInfo_SetSocketOptionsReply.Size(m) -} -func (m *SetSocketOptionsReply) XXX_DiscardUnknown() { - xxx_messageInfo_SetSocketOptionsReply.DiscardUnknown(m) -} - -var xxx_messageInfo_SetSocketOptionsReply proto.InternalMessageInfo - -type GetSocketOptionsRequest struct { - SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` - Options []*SocketOption `protobuf:"bytes,2,rep,name=options" json:"options,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetSocketOptionsRequest) Reset() { *m = GetSocketOptionsRequest{} } -func (m *GetSocketOptionsRequest) String() string { return proto.CompactTextString(m) } -func (*GetSocketOptionsRequest) ProtoMessage() {} -func (*GetSocketOptionsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{13} -} -func (m *GetSocketOptionsRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetSocketOptionsRequest.Unmarshal(m, b) -} -func (m *GetSocketOptionsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetSocketOptionsRequest.Marshal(b, m, deterministic) -} -func (dst *GetSocketOptionsRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetSocketOptionsRequest.Merge(dst, src) -} -func (m *GetSocketOptionsRequest) XXX_Size() int { - return xxx_messageInfo_GetSocketOptionsRequest.Size(m) -} -func (m *GetSocketOptionsRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetSocketOptionsRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_GetSocketOptionsRequest proto.InternalMessageInfo - -func (m *GetSocketOptionsRequest) GetSocketDescriptor() string { - if m != nil && m.SocketDescriptor != nil { - return *m.SocketDescriptor - } - return "" -} - -func (m *GetSocketOptionsRequest) GetOptions() []*SocketOption { - if m != nil { - return m.Options - } - return nil -} - -type GetSocketOptionsReply struct { - Options []*SocketOption `protobuf:"bytes,2,rep,name=options" json:"options,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetSocketOptionsReply) Reset() { *m = GetSocketOptionsReply{} } -func (m *GetSocketOptionsReply) String() string { return proto.CompactTextString(m) } -func (*GetSocketOptionsReply) ProtoMessage() {} -func (*GetSocketOptionsReply) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{14} -} -func (m *GetSocketOptionsReply) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetSocketOptionsReply.Unmarshal(m, b) -} -func (m *GetSocketOptionsReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetSocketOptionsReply.Marshal(b, m, deterministic) -} -func (dst *GetSocketOptionsReply) XXX_Merge(src proto.Message) { - 
xxx_messageInfo_GetSocketOptionsReply.Merge(dst, src) -} -func (m *GetSocketOptionsReply) XXX_Size() int { - return xxx_messageInfo_GetSocketOptionsReply.Size(m) -} -func (m *GetSocketOptionsReply) XXX_DiscardUnknown() { - xxx_messageInfo_GetSocketOptionsReply.DiscardUnknown(m) -} - -var xxx_messageInfo_GetSocketOptionsReply proto.InternalMessageInfo - -func (m *GetSocketOptionsReply) GetOptions() []*SocketOption { - if m != nil { - return m.Options - } - return nil -} - -type ConnectRequest struct { - SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` - RemoteIp *AddressPort `protobuf:"bytes,2,req,name=remote_ip,json=remoteIp" json:"remote_ip,omitempty"` - TimeoutSeconds *float64 `protobuf:"fixed64,3,opt,name=timeout_seconds,json=timeoutSeconds,def=-1" json:"timeout_seconds,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ConnectRequest) Reset() { *m = ConnectRequest{} } -func (m *ConnectRequest) String() string { return proto.CompactTextString(m) } -func (*ConnectRequest) ProtoMessage() {} -func (*ConnectRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{15} -} -func (m *ConnectRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ConnectRequest.Unmarshal(m, b) -} -func (m *ConnectRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ConnectRequest.Marshal(b, m, deterministic) -} -func (dst *ConnectRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ConnectRequest.Merge(dst, src) -} -func (m *ConnectRequest) XXX_Size() int { - return xxx_messageInfo_ConnectRequest.Size(m) -} -func (m *ConnectRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ConnectRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ConnectRequest proto.InternalMessageInfo - -const Default_ConnectRequest_TimeoutSeconds float64 = -1 - -func (m *ConnectRequest) GetSocketDescriptor() string { - if m != nil && m.SocketDescriptor != nil { - return *m.SocketDescriptor - } - return "" -} - -func (m *ConnectRequest) GetRemoteIp() *AddressPort { - if m != nil { - return m.RemoteIp - } - return nil -} - -func (m *ConnectRequest) GetTimeoutSeconds() float64 { - if m != nil && m.TimeoutSeconds != nil { - return *m.TimeoutSeconds - } - return Default_ConnectRequest_TimeoutSeconds -} - -type ConnectReply struct { - ProxyExternalIp *AddressPort `protobuf:"bytes,1,opt,name=proxy_external_ip,json=proxyExternalIp" json:"proxy_external_ip,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ConnectReply) Reset() { *m = ConnectReply{} } -func (m *ConnectReply) String() string { return proto.CompactTextString(m) } -func (*ConnectReply) ProtoMessage() {} -func (*ConnectReply) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{16} -} - -var extRange_ConnectReply = []proto.ExtensionRange{ - {Start: 1000, End: 536870911}, -} - -func (*ConnectReply) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_ConnectReply -} -func (m *ConnectReply) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ConnectReply.Unmarshal(m, b) -} -func (m *ConnectReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ConnectReply.Marshal(b, m, deterministic) -} -func (dst 
*ConnectReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_ConnectReply.Merge(dst, src) -} -func (m *ConnectReply) XXX_Size() int { - return xxx_messageInfo_ConnectReply.Size(m) -} -func (m *ConnectReply) XXX_DiscardUnknown() { - xxx_messageInfo_ConnectReply.DiscardUnknown(m) -} - -var xxx_messageInfo_ConnectReply proto.InternalMessageInfo - -func (m *ConnectReply) GetProxyExternalIp() *AddressPort { - if m != nil { - return m.ProxyExternalIp - } - return nil -} - -type ListenRequest struct { - SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` - Backlog *int32 `protobuf:"varint,2,req,name=backlog" json:"backlog,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ListenRequest) Reset() { *m = ListenRequest{} } -func (m *ListenRequest) String() string { return proto.CompactTextString(m) } -func (*ListenRequest) ProtoMessage() {} -func (*ListenRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{17} -} -func (m *ListenRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ListenRequest.Unmarshal(m, b) -} -func (m *ListenRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ListenRequest.Marshal(b, m, deterministic) -} -func (dst *ListenRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListenRequest.Merge(dst, src) -} -func (m *ListenRequest) XXX_Size() int { - return xxx_messageInfo_ListenRequest.Size(m) -} -func (m *ListenRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ListenRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ListenRequest proto.InternalMessageInfo - -func (m *ListenRequest) GetSocketDescriptor() string { - if m != nil && m.SocketDescriptor != nil { - return *m.SocketDescriptor - } - return "" -} - -func (m *ListenRequest) GetBacklog() int32 { - if m != nil && m.Backlog != nil { - return *m.Backlog - } - return 0 -} - -type ListenReply struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ListenReply) Reset() { *m = ListenReply{} } -func (m *ListenReply) String() string { return proto.CompactTextString(m) } -func (*ListenReply) ProtoMessage() {} -func (*ListenReply) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{18} -} -func (m *ListenReply) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ListenReply.Unmarshal(m, b) -} -func (m *ListenReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ListenReply.Marshal(b, m, deterministic) -} -func (dst *ListenReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListenReply.Merge(dst, src) -} -func (m *ListenReply) XXX_Size() int { - return xxx_messageInfo_ListenReply.Size(m) -} -func (m *ListenReply) XXX_DiscardUnknown() { - xxx_messageInfo_ListenReply.DiscardUnknown(m) -} - -var xxx_messageInfo_ListenReply proto.InternalMessageInfo - -type AcceptRequest struct { - SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` - TimeoutSeconds *float64 `protobuf:"fixed64,2,opt,name=timeout_seconds,json=timeoutSeconds,def=-1" json:"timeout_seconds,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AcceptRequest) Reset() { *m = 
AcceptRequest{} } -func (m *AcceptRequest) String() string { return proto.CompactTextString(m) } -func (*AcceptRequest) ProtoMessage() {} -func (*AcceptRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{19} -} -func (m *AcceptRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_AcceptRequest.Unmarshal(m, b) -} -func (m *AcceptRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_AcceptRequest.Marshal(b, m, deterministic) -} -func (dst *AcceptRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_AcceptRequest.Merge(dst, src) -} -func (m *AcceptRequest) XXX_Size() int { - return xxx_messageInfo_AcceptRequest.Size(m) -} -func (m *AcceptRequest) XXX_DiscardUnknown() { - xxx_messageInfo_AcceptRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_AcceptRequest proto.InternalMessageInfo - -const Default_AcceptRequest_TimeoutSeconds float64 = -1 - -func (m *AcceptRequest) GetSocketDescriptor() string { - if m != nil && m.SocketDescriptor != nil { - return *m.SocketDescriptor - } - return "" -} - -func (m *AcceptRequest) GetTimeoutSeconds() float64 { - if m != nil && m.TimeoutSeconds != nil { - return *m.TimeoutSeconds - } - return Default_AcceptRequest_TimeoutSeconds -} - -type AcceptReply struct { - NewSocketDescriptor []byte `protobuf:"bytes,2,opt,name=new_socket_descriptor,json=newSocketDescriptor" json:"new_socket_descriptor,omitempty"` - RemoteAddress *AddressPort `protobuf:"bytes,3,opt,name=remote_address,json=remoteAddress" json:"remote_address,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AcceptReply) Reset() { *m = AcceptReply{} } -func (m *AcceptReply) String() string { return proto.CompactTextString(m) } -func (*AcceptReply) ProtoMessage() {} -func (*AcceptReply) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{20} -} -func (m *AcceptReply) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_AcceptReply.Unmarshal(m, b) -} -func (m *AcceptReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_AcceptReply.Marshal(b, m, deterministic) -} -func (dst *AcceptReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_AcceptReply.Merge(dst, src) -} -func (m *AcceptReply) XXX_Size() int { - return xxx_messageInfo_AcceptReply.Size(m) -} -func (m *AcceptReply) XXX_DiscardUnknown() { - xxx_messageInfo_AcceptReply.DiscardUnknown(m) -} - -var xxx_messageInfo_AcceptReply proto.InternalMessageInfo - -func (m *AcceptReply) GetNewSocketDescriptor() []byte { - if m != nil { - return m.NewSocketDescriptor - } - return nil -} - -func (m *AcceptReply) GetRemoteAddress() *AddressPort { - if m != nil { - return m.RemoteAddress - } - return nil -} - -type ShutDownRequest struct { - SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` - How *ShutDownRequest_How `protobuf:"varint,2,req,name=how,enum=appengine.ShutDownRequest_How" json:"how,omitempty"` - SendOffset *int64 `protobuf:"varint,3,req,name=send_offset,json=sendOffset" json:"send_offset,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ShutDownRequest) Reset() { *m = ShutDownRequest{} } -func (m *ShutDownRequest) String() string { return proto.CompactTextString(m) } -func (*ShutDownRequest) ProtoMessage() {} 
-func (*ShutDownRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{21} -} -func (m *ShutDownRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ShutDownRequest.Unmarshal(m, b) -} -func (m *ShutDownRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ShutDownRequest.Marshal(b, m, deterministic) -} -func (dst *ShutDownRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ShutDownRequest.Merge(dst, src) -} -func (m *ShutDownRequest) XXX_Size() int { - return xxx_messageInfo_ShutDownRequest.Size(m) -} -func (m *ShutDownRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ShutDownRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ShutDownRequest proto.InternalMessageInfo - -func (m *ShutDownRequest) GetSocketDescriptor() string { - if m != nil && m.SocketDescriptor != nil { - return *m.SocketDescriptor - } - return "" -} - -func (m *ShutDownRequest) GetHow() ShutDownRequest_How { - if m != nil && m.How != nil { - return *m.How - } - return ShutDownRequest_SOCKET_SHUT_RD -} - -func (m *ShutDownRequest) GetSendOffset() int64 { - if m != nil && m.SendOffset != nil { - return *m.SendOffset - } - return 0 -} - -type ShutDownReply struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ShutDownReply) Reset() { *m = ShutDownReply{} } -func (m *ShutDownReply) String() string { return proto.CompactTextString(m) } -func (*ShutDownReply) ProtoMessage() {} -func (*ShutDownReply) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{22} -} -func (m *ShutDownReply) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ShutDownReply.Unmarshal(m, b) -} -func (m *ShutDownReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ShutDownReply.Marshal(b, m, deterministic) -} -func (dst *ShutDownReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_ShutDownReply.Merge(dst, src) -} -func (m *ShutDownReply) XXX_Size() int { - return xxx_messageInfo_ShutDownReply.Size(m) -} -func (m *ShutDownReply) XXX_DiscardUnknown() { - xxx_messageInfo_ShutDownReply.DiscardUnknown(m) -} - -var xxx_messageInfo_ShutDownReply proto.InternalMessageInfo - -type CloseRequest struct { - SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` - SendOffset *int64 `protobuf:"varint,2,opt,name=send_offset,json=sendOffset,def=-1" json:"send_offset,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CloseRequest) Reset() { *m = CloseRequest{} } -func (m *CloseRequest) String() string { return proto.CompactTextString(m) } -func (*CloseRequest) ProtoMessage() {} -func (*CloseRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{23} -} -func (m *CloseRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CloseRequest.Unmarshal(m, b) -} -func (m *CloseRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CloseRequest.Marshal(b, m, deterministic) -} -func (dst *CloseRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_CloseRequest.Merge(dst, src) -} -func (m *CloseRequest) XXX_Size() int { - return xxx_messageInfo_CloseRequest.Size(m) -} -func (m *CloseRequest) XXX_DiscardUnknown() { - 
xxx_messageInfo_CloseRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_CloseRequest proto.InternalMessageInfo - -const Default_CloseRequest_SendOffset int64 = -1 - -func (m *CloseRequest) GetSocketDescriptor() string { - if m != nil && m.SocketDescriptor != nil { - return *m.SocketDescriptor - } - return "" -} - -func (m *CloseRequest) GetSendOffset() int64 { - if m != nil && m.SendOffset != nil { - return *m.SendOffset - } - return Default_CloseRequest_SendOffset -} - -type CloseReply struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CloseReply) Reset() { *m = CloseReply{} } -func (m *CloseReply) String() string { return proto.CompactTextString(m) } -func (*CloseReply) ProtoMessage() {} -func (*CloseReply) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{24} -} -func (m *CloseReply) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CloseReply.Unmarshal(m, b) -} -func (m *CloseReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CloseReply.Marshal(b, m, deterministic) -} -func (dst *CloseReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_CloseReply.Merge(dst, src) -} -func (m *CloseReply) XXX_Size() int { - return xxx_messageInfo_CloseReply.Size(m) -} -func (m *CloseReply) XXX_DiscardUnknown() { - xxx_messageInfo_CloseReply.DiscardUnknown(m) -} - -var xxx_messageInfo_CloseReply proto.InternalMessageInfo - -type SendRequest struct { - SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` - Data []byte `protobuf:"bytes,2,req,name=data" json:"data,omitempty"` - StreamOffset *int64 `protobuf:"varint,3,req,name=stream_offset,json=streamOffset" json:"stream_offset,omitempty"` - Flags *int32 `protobuf:"varint,4,opt,name=flags,def=0" json:"flags,omitempty"` - SendTo *AddressPort `protobuf:"bytes,5,opt,name=send_to,json=sendTo" json:"send_to,omitempty"` - TimeoutSeconds *float64 `protobuf:"fixed64,6,opt,name=timeout_seconds,json=timeoutSeconds,def=-1" json:"timeout_seconds,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SendRequest) Reset() { *m = SendRequest{} } -func (m *SendRequest) String() string { return proto.CompactTextString(m) } -func (*SendRequest) ProtoMessage() {} -func (*SendRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{25} -} -func (m *SendRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SendRequest.Unmarshal(m, b) -} -func (m *SendRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SendRequest.Marshal(b, m, deterministic) -} -func (dst *SendRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_SendRequest.Merge(dst, src) -} -func (m *SendRequest) XXX_Size() int { - return xxx_messageInfo_SendRequest.Size(m) -} -func (m *SendRequest) XXX_DiscardUnknown() { - xxx_messageInfo_SendRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_SendRequest proto.InternalMessageInfo - -const Default_SendRequest_Flags int32 = 0 -const Default_SendRequest_TimeoutSeconds float64 = -1 - -func (m *SendRequest) GetSocketDescriptor() string { - if m != nil && m.SocketDescriptor != nil { - return *m.SocketDescriptor - } - return "" -} - -func (m *SendRequest) GetData() []byte { - if m != nil { - return m.Data - } - return nil -} - -func (m 
*SendRequest) GetStreamOffset() int64 { - if m != nil && m.StreamOffset != nil { - return *m.StreamOffset - } - return 0 -} - -func (m *SendRequest) GetFlags() int32 { - if m != nil && m.Flags != nil { - return *m.Flags - } - return Default_SendRequest_Flags -} - -func (m *SendRequest) GetSendTo() *AddressPort { - if m != nil { - return m.SendTo - } - return nil -} - -func (m *SendRequest) GetTimeoutSeconds() float64 { - if m != nil && m.TimeoutSeconds != nil { - return *m.TimeoutSeconds - } - return Default_SendRequest_TimeoutSeconds -} - -type SendReply struct { - DataSent *int32 `protobuf:"varint,1,opt,name=data_sent,json=dataSent" json:"data_sent,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SendReply) Reset() { *m = SendReply{} } -func (m *SendReply) String() string { return proto.CompactTextString(m) } -func (*SendReply) ProtoMessage() {} -func (*SendReply) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{26} -} -func (m *SendReply) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SendReply.Unmarshal(m, b) -} -func (m *SendReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SendReply.Marshal(b, m, deterministic) -} -func (dst *SendReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_SendReply.Merge(dst, src) -} -func (m *SendReply) XXX_Size() int { - return xxx_messageInfo_SendReply.Size(m) -} -func (m *SendReply) XXX_DiscardUnknown() { - xxx_messageInfo_SendReply.DiscardUnknown(m) -} - -var xxx_messageInfo_SendReply proto.InternalMessageInfo - -func (m *SendReply) GetDataSent() int32 { - if m != nil && m.DataSent != nil { - return *m.DataSent - } - return 0 -} - -type ReceiveRequest struct { - SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` - DataSize *int32 `protobuf:"varint,2,req,name=data_size,json=dataSize" json:"data_size,omitempty"` - Flags *int32 `protobuf:"varint,3,opt,name=flags,def=0" json:"flags,omitempty"` - TimeoutSeconds *float64 `protobuf:"fixed64,5,opt,name=timeout_seconds,json=timeoutSeconds,def=-1" json:"timeout_seconds,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ReceiveRequest) Reset() { *m = ReceiveRequest{} } -func (m *ReceiveRequest) String() string { return proto.CompactTextString(m) } -func (*ReceiveRequest) ProtoMessage() {} -func (*ReceiveRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{27} -} -func (m *ReceiveRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ReceiveRequest.Unmarshal(m, b) -} -func (m *ReceiveRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ReceiveRequest.Marshal(b, m, deterministic) -} -func (dst *ReceiveRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ReceiveRequest.Merge(dst, src) -} -func (m *ReceiveRequest) XXX_Size() int { - return xxx_messageInfo_ReceiveRequest.Size(m) -} -func (m *ReceiveRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ReceiveRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ReceiveRequest proto.InternalMessageInfo - -const Default_ReceiveRequest_Flags int32 = 0 -const Default_ReceiveRequest_TimeoutSeconds float64 = -1 - -func (m *ReceiveRequest) GetSocketDescriptor() string { - if m != nil && m.SocketDescriptor != nil { - return 
*m.SocketDescriptor - } - return "" -} - -func (m *ReceiveRequest) GetDataSize() int32 { - if m != nil && m.DataSize != nil { - return *m.DataSize - } - return 0 -} - -func (m *ReceiveRequest) GetFlags() int32 { - if m != nil && m.Flags != nil { - return *m.Flags - } - return Default_ReceiveRequest_Flags -} - -func (m *ReceiveRequest) GetTimeoutSeconds() float64 { - if m != nil && m.TimeoutSeconds != nil { - return *m.TimeoutSeconds - } - return Default_ReceiveRequest_TimeoutSeconds -} - -type ReceiveReply struct { - StreamOffset *int64 `protobuf:"varint,2,opt,name=stream_offset,json=streamOffset" json:"stream_offset,omitempty"` - Data []byte `protobuf:"bytes,3,opt,name=data" json:"data,omitempty"` - ReceivedFrom *AddressPort `protobuf:"bytes,4,opt,name=received_from,json=receivedFrom" json:"received_from,omitempty"` - BufferSize *int32 `protobuf:"varint,5,opt,name=buffer_size,json=bufferSize" json:"buffer_size,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ReceiveReply) Reset() { *m = ReceiveReply{} } -func (m *ReceiveReply) String() string { return proto.CompactTextString(m) } -func (*ReceiveReply) ProtoMessage() {} -func (*ReceiveReply) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{28} -} -func (m *ReceiveReply) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ReceiveReply.Unmarshal(m, b) -} -func (m *ReceiveReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ReceiveReply.Marshal(b, m, deterministic) -} -func (dst *ReceiveReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_ReceiveReply.Merge(dst, src) -} -func (m *ReceiveReply) XXX_Size() int { - return xxx_messageInfo_ReceiveReply.Size(m) -} -func (m *ReceiveReply) XXX_DiscardUnknown() { - xxx_messageInfo_ReceiveReply.DiscardUnknown(m) -} - -var xxx_messageInfo_ReceiveReply proto.InternalMessageInfo - -func (m *ReceiveReply) GetStreamOffset() int64 { - if m != nil && m.StreamOffset != nil { - return *m.StreamOffset - } - return 0 -} - -func (m *ReceiveReply) GetData() []byte { - if m != nil { - return m.Data - } - return nil -} - -func (m *ReceiveReply) GetReceivedFrom() *AddressPort { - if m != nil { - return m.ReceivedFrom - } - return nil -} - -func (m *ReceiveReply) GetBufferSize() int32 { - if m != nil && m.BufferSize != nil { - return *m.BufferSize - } - return 0 -} - -type PollEvent struct { - SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` - RequestedEvents *int32 `protobuf:"varint,2,req,name=requested_events,json=requestedEvents" json:"requested_events,omitempty"` - ObservedEvents *int32 `protobuf:"varint,3,req,name=observed_events,json=observedEvents" json:"observed_events,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PollEvent) Reset() { *m = PollEvent{} } -func (m *PollEvent) String() string { return proto.CompactTextString(m) } -func (*PollEvent) ProtoMessage() {} -func (*PollEvent) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{29} -} -func (m *PollEvent) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PollEvent.Unmarshal(m, b) -} -func (m *PollEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PollEvent.Marshal(b, m, deterministic) -} -func (dst *PollEvent) 
XXX_Merge(src proto.Message) { - xxx_messageInfo_PollEvent.Merge(dst, src) -} -func (m *PollEvent) XXX_Size() int { - return xxx_messageInfo_PollEvent.Size(m) -} -func (m *PollEvent) XXX_DiscardUnknown() { - xxx_messageInfo_PollEvent.DiscardUnknown(m) -} - -var xxx_messageInfo_PollEvent proto.InternalMessageInfo - -func (m *PollEvent) GetSocketDescriptor() string { - if m != nil && m.SocketDescriptor != nil { - return *m.SocketDescriptor - } - return "" -} - -func (m *PollEvent) GetRequestedEvents() int32 { - if m != nil && m.RequestedEvents != nil { - return *m.RequestedEvents - } - return 0 -} - -func (m *PollEvent) GetObservedEvents() int32 { - if m != nil && m.ObservedEvents != nil { - return *m.ObservedEvents - } - return 0 -} - -type PollRequest struct { - Events []*PollEvent `protobuf:"bytes,1,rep,name=events" json:"events,omitempty"` - TimeoutSeconds *float64 `protobuf:"fixed64,2,opt,name=timeout_seconds,json=timeoutSeconds,def=-1" json:"timeout_seconds,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PollRequest) Reset() { *m = PollRequest{} } -func (m *PollRequest) String() string { return proto.CompactTextString(m) } -func (*PollRequest) ProtoMessage() {} -func (*PollRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{30} -} -func (m *PollRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PollRequest.Unmarshal(m, b) -} -func (m *PollRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PollRequest.Marshal(b, m, deterministic) -} -func (dst *PollRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_PollRequest.Merge(dst, src) -} -func (m *PollRequest) XXX_Size() int { - return xxx_messageInfo_PollRequest.Size(m) -} -func (m *PollRequest) XXX_DiscardUnknown() { - xxx_messageInfo_PollRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_PollRequest proto.InternalMessageInfo - -const Default_PollRequest_TimeoutSeconds float64 = -1 - -func (m *PollRequest) GetEvents() []*PollEvent { - if m != nil { - return m.Events - } - return nil -} - -func (m *PollRequest) GetTimeoutSeconds() float64 { - if m != nil && m.TimeoutSeconds != nil { - return *m.TimeoutSeconds - } - return Default_PollRequest_TimeoutSeconds -} - -type PollReply struct { - Events []*PollEvent `protobuf:"bytes,2,rep,name=events" json:"events,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PollReply) Reset() { *m = PollReply{} } -func (m *PollReply) String() string { return proto.CompactTextString(m) } -func (*PollReply) ProtoMessage() {} -func (*PollReply) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{31} -} -func (m *PollReply) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PollReply.Unmarshal(m, b) -} -func (m *PollReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PollReply.Marshal(b, m, deterministic) -} -func (dst *PollReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_PollReply.Merge(dst, src) -} -func (m *PollReply) XXX_Size() int { - return xxx_messageInfo_PollReply.Size(m) -} -func (m *PollReply) XXX_DiscardUnknown() { - xxx_messageInfo_PollReply.DiscardUnknown(m) -} - -var xxx_messageInfo_PollReply proto.InternalMessageInfo - -func (m *PollReply) GetEvents() []*PollEvent { - if m != nil { - return m.Events - } - return nil 
-} - -type ResolveRequest struct { - Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` - AddressFamilies []CreateSocketRequest_SocketFamily `protobuf:"varint,2,rep,name=address_families,json=addressFamilies,enum=appengine.CreateSocketRequest_SocketFamily" json:"address_families,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ResolveRequest) Reset() { *m = ResolveRequest{} } -func (m *ResolveRequest) String() string { return proto.CompactTextString(m) } -func (*ResolveRequest) ProtoMessage() {} -func (*ResolveRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{32} -} -func (m *ResolveRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ResolveRequest.Unmarshal(m, b) -} -func (m *ResolveRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ResolveRequest.Marshal(b, m, deterministic) -} -func (dst *ResolveRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResolveRequest.Merge(dst, src) -} -func (m *ResolveRequest) XXX_Size() int { - return xxx_messageInfo_ResolveRequest.Size(m) -} -func (m *ResolveRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ResolveRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ResolveRequest proto.InternalMessageInfo - -func (m *ResolveRequest) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *ResolveRequest) GetAddressFamilies() []CreateSocketRequest_SocketFamily { - if m != nil { - return m.AddressFamilies - } - return nil -} - -type ResolveReply struct { - PackedAddress [][]byte `protobuf:"bytes,2,rep,name=packed_address,json=packedAddress" json:"packed_address,omitempty"` - CanonicalName *string `protobuf:"bytes,3,opt,name=canonical_name,json=canonicalName" json:"canonical_name,omitempty"` - Aliases []string `protobuf:"bytes,4,rep,name=aliases" json:"aliases,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ResolveReply) Reset() { *m = ResolveReply{} } -func (m *ResolveReply) String() string { return proto.CompactTextString(m) } -func (*ResolveReply) ProtoMessage() {} -func (*ResolveReply) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{33} -} -func (m *ResolveReply) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ResolveReply.Unmarshal(m, b) -} -func (m *ResolveReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ResolveReply.Marshal(b, m, deterministic) -} -func (dst *ResolveReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResolveReply.Merge(dst, src) -} -func (m *ResolveReply) XXX_Size() int { - return xxx_messageInfo_ResolveReply.Size(m) -} -func (m *ResolveReply) XXX_DiscardUnknown() { - xxx_messageInfo_ResolveReply.DiscardUnknown(m) -} - -var xxx_messageInfo_ResolveReply proto.InternalMessageInfo - -func (m *ResolveReply) GetPackedAddress() [][]byte { - if m != nil { - return m.PackedAddress - } - return nil -} - -func (m *ResolveReply) GetCanonicalName() string { - if m != nil && m.CanonicalName != nil { - return *m.CanonicalName - } - return "" -} - -func (m *ResolveReply) GetAliases() []string { - if m != nil { - return m.Aliases - } - return nil -} - -func init() { - proto.RegisterType((*RemoteSocketServiceError)(nil), "appengine.RemoteSocketServiceError") - 
proto.RegisterType((*AddressPort)(nil), "appengine.AddressPort") - proto.RegisterType((*CreateSocketRequest)(nil), "appengine.CreateSocketRequest") - proto.RegisterType((*CreateSocketReply)(nil), "appengine.CreateSocketReply") - proto.RegisterType((*BindRequest)(nil), "appengine.BindRequest") - proto.RegisterType((*BindReply)(nil), "appengine.BindReply") - proto.RegisterType((*GetSocketNameRequest)(nil), "appengine.GetSocketNameRequest") - proto.RegisterType((*GetSocketNameReply)(nil), "appengine.GetSocketNameReply") - proto.RegisterType((*GetPeerNameRequest)(nil), "appengine.GetPeerNameRequest") - proto.RegisterType((*GetPeerNameReply)(nil), "appengine.GetPeerNameReply") - proto.RegisterType((*SocketOption)(nil), "appengine.SocketOption") - proto.RegisterType((*SetSocketOptionsRequest)(nil), "appengine.SetSocketOptionsRequest") - proto.RegisterType((*SetSocketOptionsReply)(nil), "appengine.SetSocketOptionsReply") - proto.RegisterType((*GetSocketOptionsRequest)(nil), "appengine.GetSocketOptionsRequest") - proto.RegisterType((*GetSocketOptionsReply)(nil), "appengine.GetSocketOptionsReply") - proto.RegisterType((*ConnectRequest)(nil), "appengine.ConnectRequest") - proto.RegisterType((*ConnectReply)(nil), "appengine.ConnectReply") - proto.RegisterType((*ListenRequest)(nil), "appengine.ListenRequest") - proto.RegisterType((*ListenReply)(nil), "appengine.ListenReply") - proto.RegisterType((*AcceptRequest)(nil), "appengine.AcceptRequest") - proto.RegisterType((*AcceptReply)(nil), "appengine.AcceptReply") - proto.RegisterType((*ShutDownRequest)(nil), "appengine.ShutDownRequest") - proto.RegisterType((*ShutDownReply)(nil), "appengine.ShutDownReply") - proto.RegisterType((*CloseRequest)(nil), "appengine.CloseRequest") - proto.RegisterType((*CloseReply)(nil), "appengine.CloseReply") - proto.RegisterType((*SendRequest)(nil), "appengine.SendRequest") - proto.RegisterType((*SendReply)(nil), "appengine.SendReply") - proto.RegisterType((*ReceiveRequest)(nil), "appengine.ReceiveRequest") - proto.RegisterType((*ReceiveReply)(nil), "appengine.ReceiveReply") - proto.RegisterType((*PollEvent)(nil), "appengine.PollEvent") - proto.RegisterType((*PollRequest)(nil), "appengine.PollRequest") - proto.RegisterType((*PollReply)(nil), "appengine.PollReply") - proto.RegisterType((*ResolveRequest)(nil), "appengine.ResolveRequest") - proto.RegisterType((*ResolveReply)(nil), "appengine.ResolveReply") -} - -func init() { - proto.RegisterFile("google.golang.org/appengine/internal/socket/socket_service.proto", fileDescriptor_socket_service_b5f8f233dc327808) -} - -var fileDescriptor_socket_service_b5f8f233dc327808 = []byte{ - // 3088 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0x5f, 0x77, 0xe3, 0xc6, - 0x75, 0x37, 0x48, 0xfd, 0xe3, 0x90, 0x94, 0xee, 0x62, 0xa5, 0x5d, 0x25, 0x6e, 0x12, 0x05, 0x8e, - 0x1b, 0x25, 0x8e, 0x77, 0x6d, 0x39, 0x4d, 0x9b, 0xa4, 0x49, 0x16, 0x04, 0x86, 0x24, 0x4c, 0x00, - 0x03, 0xcd, 0x0c, 0x25, 0xd1, 0x6d, 0x8a, 0xd0, 0x22, 0xa4, 0x65, 0x4c, 0x11, 0x0c, 0xc9, 0xdd, - 0xf5, 0xba, 0x69, 0xaa, 0xfe, 0x39, 0xfd, 0x12, 0x7d, 0xe8, 0x73, 0x3f, 0x43, 0x4f, 0x4f, 0x5f, - 0xfa, 0xec, 0xc7, 0x7e, 0x84, 0x9e, 0xbe, 0xb4, 0x9f, 0xa1, 0x67, 0x06, 0xe0, 0x60, 0xc8, 0xd5, - 0xae, 0x77, 0x75, 0x72, 0x4e, 0x9e, 0xa4, 0xfb, 0xbb, 0x77, 0xee, 0xff, 0x99, 0xb9, 0x03, 0xa2, - 0x47, 0x97, 0x69, 0x7a, 0x39, 0x4a, 0x1e, 0x5c, 0xa6, 0xa3, 0xfe, 0xf8, 0xf2, 0x41, 0x3a, 0xbd, - 0x7c, 0xd8, 0x9f, 0x4c, 0x92, 0xf1, 0xe5, 0x70, 0x9c, 0x3c, 0x1c, 0x8e, 0xe7, 0xc9, 
0x74, 0xdc, - 0x1f, 0x3d, 0x9c, 0xa5, 0xe7, 0x9f, 0x25, 0xf3, 0xfc, 0x4f, 0x3c, 0x4b, 0xa6, 0x4f, 0x87, 0xe7, - 0xc9, 0x83, 0xc9, 0x34, 0x9d, 0xa7, 0x66, 0x45, 0xc9, 0x5b, 0xff, 0xbc, 0x8b, 0xf6, 0x69, 0x72, - 0x95, 0xce, 0x13, 0x26, 0x25, 0x59, 0x26, 0x88, 0xa7, 0xd3, 0x74, 0x6a, 0x7e, 0x07, 0xd5, 0x66, - 0xcf, 0x67, 0xf3, 0xe4, 0x2a, 0x4e, 0x04, 0xbd, 0x6f, 0x1c, 0x18, 0x87, 0xeb, 0x3f, 0x31, 0x3e, - 0xa0, 0xd5, 0x0c, 0xce, 0xa4, 0xbe, 0x8d, 0x6a, 0x92, 0x1d, 0x0f, 0x92, 0x79, 0x7f, 0x38, 0xda, - 0x2f, 0x1d, 0x18, 0x87, 0x15, 0x5a, 0x95, 0x98, 0x2b, 0x21, 0xeb, 0x73, 0x54, 0x91, 0xb2, 0x4e, - 0x3a, 0x48, 0x4c, 0x40, 0x35, 0xd6, 0x63, 0x1c, 0x07, 0x31, 0xa6, 0x94, 0x50, 0x30, 0xcc, 0x3a, - 0xaa, 0xb4, 0x6c, 0x2f, 0x27, 0x4b, 0x66, 0x15, 0x6d, 0x36, 0x6d, 0xcf, 0xef, 0x52, 0x0c, 0x6b, - 0xe6, 0x1e, 0xba, 0x13, 0x61, 0x1a, 0x78, 0x8c, 0x79, 0x24, 0x8c, 0x5d, 0x1c, 0x7a, 0xd8, 0x85, - 0x75, 0xf3, 0x2e, 0xda, 0xf1, 0xc2, 0x13, 0xdb, 0xf7, 0xdc, 0x98, 0xe2, 0xe3, 0x2e, 0x66, 0x1c, - 0x36, 0xcc, 0x3b, 0xa8, 0xce, 0x88, 0xd3, 0xc1, 0x3c, 0x76, 0x7c, 0xc2, 0xb0, 0x0b, 0x9b, 0xd6, - 0xbf, 0x99, 0xa8, 0xca, 0x34, 0x67, 0x77, 0x50, 0x95, 0xf5, 0x58, 0xcc, 0xba, 0x8e, 0x83, 0x19, - 0x83, 0xb7, 0x84, 0x6d, 0x01, 0x60, 0x61, 0x04, 0x0c, 0x73, 0x1b, 0x21, 0x49, 0x86, 0x04, 0x87, - 0x1c, 0x4a, 0x8a, 0xcd, 0xa8, 0xd3, 0x86, 0xb2, 0x22, 0xbd, 0x90, 0x53, 0x58, 0x13, 0x9e, 0x66, - 0x24, 0x81, 0x75, 0xc5, 0x0b, 0xcf, 0x3c, 0x02, 0x1b, 0x8a, 0x3c, 0x6a, 0x78, 0x2d, 0xd8, 0x5c, - 0x18, 0x16, 0x8a, 0xcf, 0xb0, 0x03, 0x5b, 0x8a, 0xdf, 0xb0, 0xdd, 0x26, 0x54, 0x94, 0x61, 0xa7, - 0xed, 0xf9, 0x2e, 0x20, 0x45, 0xdb, 0x2d, 0xdb, 0x0b, 0xa1, 0x2a, 0x02, 0x96, 0xf4, 0x29, 0xe9, - 0xfa, 0x6e, 0xc3, 0x27, 0x4e, 0x07, 0xaa, 0x9a, 0xb7, 0x01, 0x0e, 0xa0, 0x56, 0x2c, 0x12, 0xd1, - 0x41, 0x5d, 0xd1, 0x4d, 0xbb, 0xeb, 0x73, 0xd8, 0xd6, 0x9c, 0xe0, 0x0d, 0xbf, 0x03, 0x3b, 0x85, - 0x13, 0x5d, 0xd6, 0x03, 0x50, 0xf2, 0xf8, 0xcc, 0x63, 0x1c, 0xee, 0x28, 0xf6, 0x99, 0x8b, 0x4f, - 0xc0, 0xd4, 0xcc, 0x09, 0xfa, 0xae, 0xae, 0xce, 0xf5, 0x28, 0xec, 0x2a, 0x01, 0x8f, 0x09, 0x7a, - 0xaf, 0xa0, 0x45, 0xa9, 0xe0, 0x5e, 0xa1, 0xa0, 0xe9, 0xf9, 0x18, 0xee, 0x2b, 0x3a, 0x90, 0xf4, - 0xbe, 0x66, 0x80, 0xf3, 0x1e, 0x7c, 0x4d, 0x19, 0xe0, 0x67, 0xbc, 0xc1, 0x7a, 0xf0, 0x75, 0xe5, - 0x50, 0x53, 0x24, 0xf5, 0x6d, 0x4d, 0x9e, 0x45, 0x0e, 0xfc, 0x91, 0xa2, 0x59, 0xe4, 0x45, 0x18, - 0xbe, 0xa1, 0xc4, 0x29, 0x69, 0x32, 0xf8, 0x66, 0x61, 0xce, 0xf7, 0xc2, 0x0e, 0x7c, 0xab, 0xa8, - 0xbd, 0x90, 0x3e, 0x30, 0x6b, 0x68, 0x4b, 0x92, 0x2e, 0x09, 0xe0, 0xdb, 0x4a, 0x98, 0xda, 0x61, - 0x0b, 0x83, 0xa5, 0x7c, 0x71, 0xb1, 0xed, 0xfa, 0x1d, 0x78, 0x47, 0x76, 0x9b, 0x02, 0x44, 0x3d, - 0xde, 0x31, 0x77, 0x11, 0x64, 0xfe, 0xd8, 0x01, 0xe6, 0x84, 0xf8, 0x24, 0x6c, 0xc1, 0x77, 0x34, - 0x2f, 0x7d, 0xa7, 0x03, 0xef, 0xea, 0x5e, 0xf7, 0x18, 0xfc, 0xb1, 0x52, 0x14, 0x12, 0x8e, 0x83, - 0x88, 0xf7, 0xe0, 0xbb, 0xca, 0x33, 0x9f, 0x90, 0x08, 0x0e, 0xf5, 0x3a, 0xb3, 0x16, 0x7c, 0xbf, - 0x68, 0x43, 0x97, 0x06, 0xf0, 0x9e, 0xd6, 0x3b, 0x34, 0x6c, 0xc1, 0x0f, 0xf2, 0x1d, 0x16, 0x63, - 0xff, 0x28, 0x64, 0xbd, 0xd0, 0x81, 0xf7, 0x95, 0x84, 0xff, 0x51, 0xdb, 0xe7, 0xf0, 0x40, 0xa3, - 0x29, 0xe3, 0xf0, 0xb0, 0xa0, 0x43, 0xa1, 0xe1, 0x03, 0x15, 0x6c, 0x37, 0xb4, 0xb9, 0xd3, 0x86, - 0x0f, 0x35, 0x0f, 0x1c, 0xe6, 0xc1, 0x51, 0xb1, 0xe0, 0x48, 0x28, 0xfc, 0x48, 0xef, 0x66, 0x0c, - 0x3f, 0xd4, 0x49, 0x0a, 0x7f, 0xa2, 0xa4, 0xcf, 0x9a, 0x5d, 0xdf, 0x87, 0x1f, 0x69, 0xda, 0xec, - 0x90, 0xc0, 0x9f, 0x2a, 0x73, 0x42, 0xfc, 0xd8, 0x81, 0x3f, 0xd3, 0x01, 0xe6, 0x73, 0xf8, 0xb1, - 0x5a, 0xd1, 
0x68, 0x92, 0x90, 0xc3, 0x4f, 0xf5, 0x1c, 0x72, 0x0a, 0x7f, 0xae, 0xb5, 0xa2, 0x6b, - 0x73, 0x1b, 0x7e, 0xa6, 0x3c, 0xe0, 0x5e, 0x80, 0xe1, 0xe7, 0xc5, 0xe6, 0x24, 0x8c, 0xc2, 0x2f, - 0xb4, 0xe5, 0x21, 0xe6, 0xf0, 0x48, 0xa3, 0xa3, 0x4e, 0x0b, 0x6c, 0xa5, 0x8e, 0xe2, 0x80, 0x70, - 0x0c, 0x0d, 0x4d, 0xbf, 0xec, 0x1d, 0x47, 0x35, 0x8b, 0xed, 0x9e, 0x80, 0x5b, 0x34, 0x1e, 0x0d, - 0x42, 0x0e, 0x58, 0x99, 0x73, 0x48, 0x10, 0x40, 0x53, 0xb1, 0x23, 0x4a, 0x38, 0x81, 0x96, 0xaa, - 0x78, 0xd0, 0xf5, 0xb9, 0xd7, 0x26, 0x11, 0xb4, 0x8b, 0xf6, 0x22, 0xdc, 0x25, 0x1c, 0x3c, 0x3d, - 0x05, 0xa2, 0xe8, 0x1f, 0xab, 0x45, 0xe4, 0x04, 0xd3, 0xa6, 0x4f, 0x4e, 0xa1, 0xa3, 0x0a, 0x1d, - 0x12, 0xde, 0x0d, 0xbd, 0x63, 0xf0, 0x8b, 0x3c, 0xd9, 0x6e, 0xd3, 0x85, 0x40, 0x0f, 0xc4, 0x69, - 0xb7, 0x20, 0x54, 0x80, 0xef, 0x35, 0x6c, 0xc7, 0x01, 0xa2, 0x03, 0x0d, 0xdb, 0x85, 0x48, 0x07, - 0x98, 0x13, 0xc2, 0xb1, 0x0e, 0x04, 0xf6, 0x19, 0xd0, 0xa2, 0xbf, 0xbc, 0x86, 0x3c, 0xcc, 0x58, - 0xb1, 0xd1, 0x7d, 0x86, 0x8f, 0x81, 0x2b, 0x09, 0x8a, 0x19, 0xb7, 0x29, 0x87, 0xae, 0x42, 0x18, - 0xa7, 0x72, 0xbb, 0x9d, 0xa8, 0x35, 0x5d, 0x86, 0x29, 0x83, 0x53, 0x3d, 0x18, 0x71, 0x8a, 0xc3, - 0x99, 0xda, 0x4e, 0xae, 0xd0, 0xe2, 0xba, 0x94, 0xe2, 0x63, 0xe8, 0x29, 0xb9, 0x80, 0xb5, 0x98, - 0xf7, 0x09, 0x86, 0x4f, 0x4c, 0x13, 0x6d, 0x17, 0xe9, 0xe5, 0xbd, 0x08, 0xc3, 0x5f, 0xa8, 0xf3, - 0x32, 0x24, 0x12, 0x25, 0x11, 0x87, 0xbf, 0x34, 0xef, 0xa3, 0xbb, 0x85, 0x60, 0x48, 0x58, 0x37, - 0x8a, 0x08, 0xe5, 0xf0, 0x4b, 0xc5, 0x10, 0x86, 0x79, 0xc1, 0xf8, 0x2b, 0xa5, 0x9a, 0x44, 0xc2, - 0xad, 0x6e, 0x14, 0x41, 0xac, 0x1f, 0x7b, 0xac, 0x2b, 0x80, 0x85, 0x9f, 0x51, 0xb3, 0x58, 0xfa, - 0x2b, 0x85, 0xda, 0x1a, 0xda, 0x57, 0x0a, 0x45, 0x3c, 0x5e, 0xd8, 0x65, 0x18, 0x3e, 0x15, 0x77, - 0x9c, 0xc2, 0x42, 0xc2, 0xed, 0x13, 0xdb, 0xf3, 0xe1, 0xbc, 0x48, 0x08, 0xe6, 0x2e, 0x39, 0x0d, - 0x61, 0x50, 0x04, 0x85, 0x79, 0x37, 0xa4, 0xd8, 0x76, 0xda, 0x90, 0x14, 0xc7, 0x07, 0xe6, 0x14, - 0x33, 0xcc, 0xe1, 0x42, 0x99, 0x76, 0x48, 0x18, 0xda, 0x0d, 0x42, 0x39, 0x76, 0xe1, 0x52, 0x99, - 0x16, 0x68, 0x26, 0xf9, 0x58, 0x8b, 0xa5, 0xd1, 0x6d, 0x32, 0x18, 0x2a, 0xc0, 0x63, 0x42, 0x0c, - 0x7e, 0xad, 0x97, 0x45, 0x22, 0x9f, 0x29, 0x83, 0xac, 0xdd, 0xcd, 0x1c, 0x1b, 0x29, 0x83, 0x9c, - 0x90, 0xc0, 0x0e, 0x7b, 0x14, 0x37, 0x19, 0x5c, 0x29, 0x41, 0xb1, 0x07, 0x5d, 0xd2, 0xe5, 0x30, - 0x5e, 0xf2, 0x8c, 0xe2, 0x66, 0x57, 0xdc, 0xd2, 0xa9, 0x12, 0x6c, 0x13, 0x96, 0x69, 0x9c, 0x28, - 0x41, 0x01, 0x2d, 0x62, 0xfd, 0x8d, 0x72, 0xc6, 0xf6, 0x29, 0xb6, 0xdd, 0x1e, 0x4c, 0x55, 0x4a, - 0xbc, 0x30, 0xa2, 0xa4, 0x45, 0xc5, 0xa5, 0x3e, 0x2b, 0xb6, 0x23, 0xb7, 0x7d, 0x0c, 0xf3, 0xe2, - 0x38, 0x73, 0x7c, 0x6c, 0x87, 0xf0, 0x44, 0x2f, 0x61, 0x68, 0x07, 0xf0, 0xb4, 0x00, 0xb2, 0xe4, - 0x3f, 0xd3, 0xae, 0x32, 0x21, 0xf0, 0xb9, 0x72, 0x31, 0x3b, 0x11, 0x3c, 0x02, 0xcf, 0x95, 0x88, - 0x7b, 0xdc, 0x25, 0x1c, 0xbe, 0xd0, 0xce, 0xf1, 0x00, 0xbb, 0x5e, 0x37, 0x80, 0xbf, 0x56, 0xde, - 0x65, 0x80, 0x6c, 0xcd, 0xdf, 0x2a, 0x39, 0xc7, 0x0e, 0x1d, 0xec, 0x63, 0x17, 0xfe, 0x46, 0x3b, - 0x7f, 0x3a, 0xb8, 0x07, 0xbf, 0x53, 0xeb, 0x3a, 0xb8, 0x87, 0xcf, 0x22, 0x8f, 0x62, 0x17, 0xfe, - 0xd6, 0xdc, 0x2d, 0x40, 0x8a, 0x4f, 0x48, 0x07, 0xbb, 0x70, 0x6d, 0x98, 0x7b, 0x79, 0xa2, 0x24, - 0xfa, 0x31, 0x76, 0x44, 0xad, 0xff, 0xce, 0x30, 0xef, 0x2e, 0x1a, 0xf7, 0x34, 0xc4, 0x54, 0x5c, - 0x51, 0xf0, 0xf7, 0x86, 0xb9, 0x9f, 0xb7, 0x79, 0x48, 0x38, 0xc5, 0x8e, 0x38, 0x48, 0xec, 0x86, - 0x8f, 0xe1, 0x1f, 0x0c, 0x13, 0x16, 0xe7, 0x44, 0xb3, 0xe3, 0xf9, 0x3e, 0xfc, 0xa3, 0xf1, 0xf5, - 0x12, 0x18, 0xd6, 0x15, 0xaa, 0xda, 
0x83, 0xc1, 0x34, 0x99, 0xcd, 0xa2, 0x74, 0x3a, 0x37, 0x4d, - 0xb4, 0x36, 0x49, 0xa7, 0xf3, 0x7d, 0xe3, 0xa0, 0x74, 0xb8, 0x4e, 0xe5, 0xff, 0xe6, 0xbb, 0x68, - 0x7b, 0xd2, 0x3f, 0xff, 0x2c, 0x19, 0xc4, 0xfd, 0x4c, 0x52, 0xce, 0x7f, 0x35, 0x5a, 0xcf, 0xd0, - 0x7c, 0xb9, 0xf9, 0x0e, 0xaa, 0x3f, 0x4e, 0x67, 0xf3, 0x71, 0xff, 0x2a, 0x89, 0x1f, 0x0f, 0xc7, - 0xf3, 0xfd, 0xb2, 0x9c, 0x12, 0x6b, 0x0b, 0xb0, 0x3d, 0x1c, 0xcf, 0xad, 0x7f, 0x5a, 0x43, 0x77, - 0x9d, 0x69, 0xd2, 0x5f, 0x0c, 0xa3, 0x34, 0xf9, 0xcd, 0x93, 0x64, 0x36, 0x37, 0x1d, 0xb4, 0x71, - 0xd1, 0xbf, 0x1a, 0x8e, 0x9e, 0x4b, 0xcb, 0xdb, 0x47, 0xef, 0x3d, 0x50, 0x03, 0xec, 0x83, 0x1b, - 0xe4, 0x1f, 0x64, 0x54, 0x53, 0x2e, 0xa1, 0xf9, 0x52, 0xd3, 0x43, 0x5b, 0x72, 0xfa, 0x3d, 0x4f, - 0xc5, 0x88, 0x2a, 0xd4, 0xbc, 0xff, 0x5a, 0x6a, 0xa2, 0x7c, 0x11, 0x55, 0xcb, 0xcd, 0x9f, 0xa3, - 0xed, 0x7c, 0xae, 0x4e, 0x27, 0xf3, 0x61, 0x3a, 0x9e, 0xed, 0x97, 0x0f, 0xca, 0x87, 0xd5, 0xa3, - 0xfb, 0x9a, 0xc2, 0x6c, 0x31, 0x91, 0x7c, 0x5a, 0x9f, 0x69, 0xd4, 0xcc, 0x6c, 0xa0, 0x3b, 0x93, - 0x69, 0xfa, 0xf9, 0xf3, 0x38, 0xf9, 0x3c, 0x9b, 0xd6, 0xe3, 0xe1, 0x64, 0x7f, 0xed, 0xc0, 0x38, - 0xac, 0x1e, 0xdd, 0xd3, 0x54, 0x68, 0xa9, 0xa7, 0x3b, 0x72, 0x01, 0xce, 0xe5, 0xbd, 0x89, 0x79, - 0x88, 0xb6, 0x47, 0xc3, 0xd9, 0x3c, 0x19, 0xc7, 0x9f, 0xf6, 0xcf, 0x3f, 0x1b, 0xa5, 0x97, 0xfb, - 0xeb, 0x8b, 0xe9, 0xbc, 0x9e, 0x31, 0x1a, 0x19, 0x6e, 0x7e, 0x84, 0x2a, 0x53, 0x39, 0xe1, 0x0b, - 0x2b, 0x1b, 0xaf, 0xb4, 0xb2, 0x95, 0x09, 0x7a, 0x13, 0x73, 0x0f, 0x6d, 0xf4, 0x27, 0x93, 0x78, - 0x38, 0xd8, 0xaf, 0xc8, 0x42, 0xad, 0xf7, 0x27, 0x13, 0x6f, 0x60, 0x7e, 0x03, 0xa1, 0xc9, 0x34, - 0xfd, 0x75, 0x72, 0x3e, 0x17, 0x2c, 0x74, 0x60, 0x1c, 0x96, 0x69, 0x25, 0x47, 0xbc, 0x81, 0x65, - 0xa1, 0x9a, 0x9e, 0x7b, 0x73, 0x0b, 0xad, 0x79, 0xd1, 0xd3, 0x1f, 0x82, 0x91, 0xff, 0xf7, 0x23, - 0x28, 0x59, 0x16, 0xda, 0x5e, 0x4e, 0xac, 0xb9, 0x89, 0xca, 0xdc, 0x89, 0xc0, 0x10, 0xff, 0x74, - 0xdd, 0x08, 0x4a, 0xd6, 0x97, 0x06, 0xba, 0xb3, 0x5c, 0x91, 0xc9, 0xe8, 0xb9, 0xf9, 0x1e, 0xba, - 0x93, 0xa7, 0x7d, 0x90, 0xcc, 0xce, 0xa7, 0xc3, 0xc9, 0x3c, 0x7f, 0x93, 0x54, 0x28, 0x64, 0x0c, - 0x57, 0xe1, 0xe6, 0xcf, 0xd0, 0xb6, 0x78, 0xf4, 0x24, 0x53, 0xd5, 0x97, 0xe5, 0x57, 0x86, 0x5e, - 0xcf, 0xa4, 0x17, 0xfd, 0xfa, 0x7b, 0x28, 0xd1, 0xf7, 0x2b, 0x5b, 0xff, 0xb3, 0x09, 0xd7, 0xd7, - 0xd7, 0xd7, 0x25, 0xeb, 0x77, 0xa8, 0xda, 0x18, 0x8e, 0x07, 0x8b, 0x86, 0x7e, 0x49, 0x24, 0xa5, - 0x1b, 0x23, 0xb9, 0xd1, 0x15, 0xd1, 0xc1, 0xaf, 0xef, 0x8a, 0x45, 0x50, 0x25, 0xb3, 0x2f, 0xf2, - 0x78, 0xa3, 0x42, 0xe3, 0x8d, 0x62, 0xb3, 0x1c, 0xb4, 0xdb, 0x4a, 0xe6, 0x59, 0x75, 0xc2, 0xfe, - 0x55, 0x72, 0x9b, 0xc8, 0xac, 0x33, 0x64, 0xae, 0x28, 0x79, 0xa9, 0x7b, 0xa5, 0x37, 0x73, 0xcf, - 0x96, 0x9a, 0xa3, 0x24, 0x99, 0xde, 0xda, 0x39, 0x07, 0xc1, 0x92, 0x0a, 0xe1, 0xda, 0x43, 0xb4, - 0x39, 0x49, 0x92, 0xe9, 0x57, 0x3b, 0xb4, 0x21, 0xc4, 0xbc, 0x89, 0xf5, 0xe5, 0xe6, 0x62, 0x47, - 0x64, 0x7b, 0xdf, 0xfc, 0x05, 0x5a, 0x1f, 0x25, 0x4f, 0x93, 0x51, 0x7e, 0x92, 0x7d, 0xef, 0x25, - 0x27, 0xc6, 0x12, 0xe1, 0x8b, 0x05, 0x34, 0x5b, 0x67, 0x3e, 0x42, 0x1b, 0xd9, 0xa1, 0x93, 0x1f, - 0x62, 0x87, 0xaf, 0xa3, 0x41, 0x46, 0x90, 0xaf, 0x33, 0x77, 0xd1, 0xfa, 0xd3, 0xfe, 0xe8, 0x49, - 0xb2, 0x5f, 0x3e, 0x28, 0x1d, 0xd6, 0x68, 0x46, 0x58, 0x09, 0xba, 0xf3, 0x82, 0x4d, 0xed, 0x41, - 0xcd, 0x88, 0x1f, 0x7b, 0x11, 0xbc, 0x25, 0x67, 0x95, 0x02, 0xca, 0xfe, 0x05, 0x43, 0xce, 0x16, - 0x05, 0x2c, 0xb6, 0xf3, 0xc6, 0x0a, 0x26, 0x76, 0xf6, 0x1d, 0xeb, 0xdf, 0xd7, 0x11, 0xac, 0x7a, - 0x26, 0x6f, 0xbb, 0x85, 0x60, 0xec, 0xe2, 0x46, 0xb7, 0x05, 
0x86, 0x1c, 0xc9, 0x14, 0x48, 0xc5, - 0x94, 0x28, 0xc6, 0x23, 0x28, 0x2d, 0xa9, 0x8d, 0xe5, 0x95, 0x5a, 0x5e, 0xd6, 0x90, 0x7d, 0x47, - 0x58, 0x5b, 0xd6, 0xe0, 0x92, 0x90, 0x53, 0xd2, 0xe5, 0x18, 0xd6, 0x97, 0x19, 0x0d, 0x4a, 0x6c, - 0xd7, 0xb1, 0xe5, 0x07, 0x04, 0x31, 0x74, 0x28, 0x06, 0x0b, 0xdd, 0x46, 0xb7, 0x09, 0x9b, 0xcb, - 0x28, 0x75, 0x4e, 0x04, 0xba, 0xb5, 0xac, 0xa4, 0x83, 0x71, 0x64, 0xfb, 0xde, 0x09, 0x86, 0xca, - 0x32, 0x83, 0x90, 0x86, 0x17, 0xfa, 0x5e, 0x88, 0x01, 0x2d, 0xeb, 0xf1, 0xbd, 0xb0, 0x85, 0x29, - 0xd4, 0xcd, 0x7b, 0xc8, 0x5c, 0xd2, 0x2e, 0x86, 0x25, 0x02, 0xbb, 0xcb, 0x38, 0x0b, 0xdd, 0x0c, - 0xdf, 0xd3, 0x6a, 0xe2, 0x45, 0x31, 0x27, 0x0c, 0x8c, 0x15, 0x88, 0xfb, 0x50, 0xd2, 0xca, 0xe4, - 0x45, 0x71, 0x5b, 0x8c, 0x9a, 0x8e, 0x0f, 0xe5, 0x65, 0x98, 0x44, 0xdc, 0x23, 0x21, 0x83, 0x35, - 0xcd, 0x16, 0x77, 0xa2, 0x58, 0x3c, 0xef, 0x7d, 0xbb, 0x07, 0x86, 0x26, 0x2e, 0xf0, 0xc0, 0x3e, - 0x63, 0xb8, 0x05, 0x25, 0x2d, 0xdb, 0x02, 0x76, 0x08, 0xed, 0x40, 0x59, 0x0b, 0x5b, 0x80, 0x22, - 0x21, 0x9e, 0xeb, 0x63, 0x58, 0x33, 0xf7, 0xd1, 0xee, 0x2a, 0x23, 0xe4, 0x27, 0x3e, 0xac, 0xaf, - 0x98, 0x15, 0x1c, 0x27, 0x14, 0x65, 0x58, 0x36, 0x2b, 0x9e, 0xb0, 0x21, 0x87, 0xcd, 0x15, 0xf1, - 0x2c, 0x81, 0x47, 0xb0, 0x65, 0xbe, 0x8d, 0xee, 0x6b, 0xb8, 0x8b, 0x9b, 0x98, 0xc6, 0xb6, 0xe3, - 0xe0, 0x88, 0x43, 0x65, 0x85, 0x79, 0xea, 0x85, 0x2e, 0x39, 0x8d, 0x1d, 0xdf, 0x0e, 0x22, 0x40, - 0x2b, 0x81, 0x78, 0x61, 0x93, 0x40, 0x75, 0x25, 0x90, 0xe3, 0xae, 0xe7, 0x74, 0x6c, 0xa7, 0x03, - 0x35, 0x39, 0x11, 0x3d, 0x47, 0xf7, 0xd9, 0xe2, 0xc8, 0xca, 0xaf, 0xf3, 0x5b, 0x1d, 0xea, 0x1f, - 0xa2, 0xcd, 0xc5, 0xec, 0x50, 0x7a, 0xf5, 0xec, 0xb0, 0x90, 0xb3, 0xee, 0xa3, 0xbd, 0x17, 0x4d, - 0x4f, 0x46, 0xcf, 0x85, 0x4f, 0xad, 0x3f, 0x90, 0x4f, 0x1f, 0xa3, 0xbd, 0xd6, 0x4d, 0x3e, 0xdd, - 0x46, 0xd7, 0xbf, 0x18, 0x68, 0xdb, 0x49, 0xc7, 0xe3, 0xe4, 0x7c, 0x7e, 0x2b, 0xf7, 0x97, 0xe6, - 0x9c, 0x57, 0xdf, 0x8f, 0xc5, 0x9c, 0xf3, 0x1e, 0xda, 0x99, 0x0f, 0xaf, 0x92, 0xf4, 0xc9, 0x3c, - 0x9e, 0x25, 0xe7, 0xe9, 0x78, 0x90, 0xcd, 0x09, 0xc6, 0x4f, 0x4a, 0xef, 0x7f, 0x48, 0xb7, 0x73, - 0x16, 0xcb, 0x38, 0xd6, 0x2f, 0x51, 0x4d, 0x39, 0xf8, 0x7b, 0xba, 0x48, 0xf5, 0x21, 0xe1, 0x04, - 0xd5, 0x7d, 0x39, 0xb9, 0xdd, 0x2a, 0xfc, 0x7d, 0xb4, 0xb9, 0x98, 0x04, 0x4b, 0x72, 0x3e, 0x5f, - 0x90, 0x56, 0x1d, 0x55, 0x17, 0x7a, 0x45, 0xbb, 0x0c, 0x51, 0xdd, 0x3e, 0x3f, 0x4f, 0x26, 0xb7, - 0xcb, 0xf2, 0x0d, 0x09, 0x2b, 0xbd, 0x34, 0x61, 0xd7, 0x06, 0xaa, 0x2e, 0x6c, 0x89, 0x84, 0x1d, - 0xa1, 0xbd, 0x71, 0xf2, 0x2c, 0x7e, 0xd1, 0x5a, 0xf6, 0x66, 0xb8, 0x3b, 0x4e, 0x9e, 0xb1, 0x1b, - 0x06, 0xb9, 0xbc, 0xac, 0xaf, 0x39, 0xc8, 0x65, 0xd2, 0x39, 0x64, 0xfd, 0x97, 0x81, 0x76, 0xd8, - 0xe3, 0x27, 0x73, 0x37, 0x7d, 0x76, 0xbb, 0xbc, 0x7e, 0x80, 0xca, 0x8f, 0xd3, 0x67, 0xf9, 0x6d, - 0xfb, 0x4d, 0xbd, 0x8b, 0x97, 0xb5, 0x3e, 0x68, 0xa7, 0xcf, 0xa8, 0x10, 0x35, 0xbf, 0x85, 0xaa, - 0xb3, 0x64, 0x3c, 0x88, 0xd3, 0x8b, 0x8b, 0x59, 0x32, 0x97, 0xd7, 0x6c, 0x99, 0x22, 0x01, 0x11, - 0x89, 0x58, 0x0e, 0x2a, 0xb7, 0xd3, 0x67, 0xfa, 0x45, 0xd6, 0xee, 0xf2, 0x98, 0xba, 0xcb, 0xf7, - 0xa8, 0xc0, 0x4e, 0xc5, 0x85, 0xa7, 0xdd, 0x1b, 0x99, 0xdc, 0x29, 0x85, 0xb2, 0xb5, 0x83, 0xea, - 0x85, 0x07, 0xa2, 0xae, 0xbf, 0x42, 0x35, 0x67, 0x94, 0xce, 0x6e, 0x35, 0xed, 0x98, 0xef, 0x2c, - 0xfb, 0x2c, 0xea, 0x51, 0x96, 0x25, 0xd5, 0xfd, 0xae, 0x21, 0x94, 0x5b, 0x10, 0xf6, 0xfe, 0xcf, - 0x40, 0x55, 0x96, 0xdc, 0x72, 0xa8, 0xbd, 0x87, 0xd6, 0x06, 0xfd, 0x79, 0x5f, 0xa6, 0xb5, 0xd6, - 0x28, 0x6d, 0x19, 0x54, 0xd2, 0xe2, 0x9d, 0x38, 0x9b, 0x4f, 0x93, 0xfe, 0xd5, 0x72, 
0xf6, 0x6a, - 0x19, 0x98, 0xf9, 0x61, 0xde, 0x47, 0xeb, 0x17, 0xa3, 0xfe, 0xe5, 0x4c, 0x0e, 0xe4, 0xf2, 0xc9, - 0x93, 0xd1, 0x62, 0x3e, 0x93, 0x51, 0xcc, 0x53, 0xf9, 0x1a, 0x7a, 0xc5, 0x7c, 0x26, 0xc4, 0x78, - 0x7a, 0x53, 0x37, 0x6f, 0xbc, 0xb4, 0x9b, 0x0f, 0x51, 0x25, 0x8b, 0x57, 0xb4, 0xf2, 0xdb, 0xa8, - 0x22, 0x1c, 0x8e, 0x67, 0xc9, 0x78, 0x9e, 0xfd, 0x30, 0x42, 0xb7, 0x04, 0xc0, 0x92, 0xf1, 0xdc, - 0xfa, 0x4f, 0x03, 0x6d, 0xd3, 0xe4, 0x3c, 0x19, 0x3e, 0xbd, 0x5d, 0x35, 0x94, 0xf2, 0xe1, 0x17, - 0x49, 0xbe, 0x9b, 0x33, 0xe5, 0xc3, 0x2f, 0x92, 0x22, 0xfa, 0xf2, 0x4a, 0xf4, 0x37, 0x04, 0xb3, - 0xfe, 0xd2, 0x60, 0x2c, 0xb4, 0xde, 0x94, 0xab, 0xaa, 0x68, 0x33, 0x60, 0x2d, 0x31, 0xa8, 0x80, - 0x61, 0xd6, 0xd0, 0x96, 0x20, 0x22, 0x8c, 0x3b, 0x50, 0xb2, 0xfe, 0xd5, 0x40, 0x35, 0x15, 0x86, - 0x08, 0xfa, 0x85, 0xea, 0xc8, 0x3e, 0x59, 0xa9, 0xce, 0xa2, 0xb4, 0xc2, 0x3d, 0xbd, 0xb4, 0x3f, - 0x45, 0xf5, 0x69, 0xa6, 0x6c, 0x10, 0x5f, 0x4c, 0xd3, 0xab, 0xaf, 0x78, 0x4e, 0xd5, 0x16, 0xc2, - 0xcd, 0x69, 0x7a, 0x25, 0xf6, 0xd4, 0xa7, 0x4f, 0x2e, 0x2e, 0x92, 0x69, 0x96, 0x13, 0xf9, 0xd6, - 0xa5, 0x28, 0x83, 0x44, 0x56, 0xac, 0x2f, 0xcb, 0xa8, 0x12, 0xa5, 0xa3, 0x11, 0x7e, 0x9a, 0x8c, - 0xdf, 0x30, 0xdb, 0xdf, 0x43, 0x30, 0xcd, 0xaa, 0x94, 0x0c, 0xe2, 0x44, 0xac, 0x9f, 0xe5, 0x49, - 0xdf, 0x51, 0xb8, 0x54, 0x3b, 0x33, 0xbf, 0x8b, 0x76, 0xd2, 0x4f, 0xe5, 0x4b, 0x51, 0x49, 0x96, - 0xa5, 0xe4, 0xf6, 0x02, 0xce, 0x04, 0xad, 0xff, 0x28, 0xa1, 0xba, 0x72, 0x47, 0x24, 0x5a, 0x9b, - 0x35, 0x22, 0xe2, 0xfb, 0x21, 0x09, 0x31, 0xbc, 0xa5, 0x4d, 0x6e, 0x02, 0xf4, 0xc2, 0xa5, 0x13, - 0x40, 0x40, 0x11, 0xf5, 0x96, 0x46, 0x5e, 0x81, 0x91, 0x2e, 0x87, 0xb5, 0x15, 0x0c, 0x53, 0x0a, - 0x5b, 0x2b, 0x58, 0xbb, 0x1b, 0x01, 0xac, 0xda, 0x3d, 0xb1, 0x7d, 0x38, 0xd0, 0x26, 0x2c, 0x01, - 0x52, 0x37, 0x24, 0x34, 0x80, 0x47, 0xe6, 0xbd, 0x15, 0xb8, 0x61, 0x87, 0xf2, 0x1b, 0xd3, 0x32, - 0x7e, 0x4a, 0xa5, 0xf8, 0x75, 0xe9, 0x05, 0x3c, 0x93, 0x5f, 0x93, 0x1f, 0x9f, 0x0a, 0x3c, 0x60, - 0x2d, 0xb8, 0xde, 0x5a, 0x55, 0x8e, 0x03, 0x72, 0x82, 0xe1, 0xfa, 0x40, 0x7e, 0xc0, 0xd2, 0x8d, - 0x0a, 0xb7, 0xaf, 0x1f, 0x59, 0x8f, 0x51, 0x55, 0x24, 0x70, 0xb1, 0x7f, 0x7e, 0x80, 0x36, 0xf2, - 0x84, 0x1b, 0x72, 0x9e, 0xd8, 0xd5, 0xda, 0x46, 0x25, 0x9a, 0xe6, 0x32, 0x6f, 0x76, 0x4b, 0xfd, - 0x38, 0xeb, 0x9c, 0xac, 0xc5, 0x0b, 0x3b, 0xa5, 0xaf, 0xb6, 0x63, 0xfd, 0x56, 0xec, 0xf3, 0x59, - 0x3a, 0x2a, 0xf6, 0xb9, 0x89, 0xd6, 0xc6, 0xfd, 0xab, 0x24, 0x6f, 0x36, 0xf9, 0xbf, 0x79, 0x82, - 0x20, 0xbf, 0xbb, 0x62, 0xf9, 0x31, 0x6a, 0x98, 0x64, 0xda, 0xdf, 0xf0, 0x4b, 0xd6, 0x4e, 0xae, - 0xa4, 0x99, 0xeb, 0xb0, 0xfe, 0xbb, 0x2c, 0xf6, 0x67, 0x6e, 0x5e, 0x38, 0x7f, 0xd3, 0xc7, 0xb8, - 0xf2, 0x8b, 0x1f, 0xe3, 0xde, 0x45, 0xdb, 0xe7, 0xfd, 0x71, 0x3a, 0x1e, 0x9e, 0xf7, 0x47, 0xb1, - 0xf4, 0x36, 0xfb, 0x1a, 0x57, 0x57, 0xa8, 0x7c, 0x96, 0xed, 0xa3, 0xcd, 0xfe, 0x68, 0xd8, 0x9f, - 0x25, 0xe2, 0xa0, 0x2d, 0x1f, 0x56, 0xe8, 0x82, 0xb4, 0xfe, 0xb7, 0xa4, 0xff, 0xa0, 0xfb, 0x35, - 0xb4, 0x97, 0x17, 0x10, 0xdb, 0x5e, 0x2c, 0x5e, 0x69, 0x4d, 0x3b, 0xf0, 0x7c, 0xf1, 0x80, 0x28, - 0xae, 0x2e, 0xc9, 0x92, 0xbf, 0x65, 0x96, 0xb4, 0x09, 0x5b, 0xa0, 0x0d, 0xdb, 0x6d, 0xfa, 0x76, - 0x8b, 0x2d, 0x3d, 0xe3, 0x04, 0xa3, 0x69, 0x7b, 0x7e, 0xf6, 0x0b, 0xf0, 0x12, 0x28, 0x55, 0xaf, - 0xaf, 0xc0, 0x01, 0x0e, 0x08, 0xed, 0x2d, 0xbd, 0x1d, 0x04, 0x9c, 0xff, 0x1c, 0xb4, 0xf9, 0x02, - 0x1c, 0xda, 0x01, 0x86, 0x2d, 0xed, 0x49, 0x21, 0x60, 0x86, 0xe9, 0x89, 0xe7, 0x2c, 0xbf, 0xe1, - 0x24, 0x4e, 0x9c, 0x8e, 0x7c, 0x68, 0xa2, 0x15, 0x3d, 0xd9, 0xef, 0xd8, 0x4b, 0x6f, 0x86, 0x3c, - 0xa2, 0xb6, 
0x17, 0x72, 0x06, 0xb5, 0x15, 0x86, 0xfc, 0xdd, 0xc1, 0x21, 0x3e, 0xd4, 0x57, 0x18, - 0xea, 0x37, 0x9d, 0x6d, 0x6d, 0x0f, 0xcb, 0xb8, 0xec, 0x33, 0xd8, 0x69, 0x6c, 0x7d, 0xb2, 0x91, - 0x9d, 0x5a, 0xff, 0x1f, 0x00, 0x00, 0xff, 0xff, 0x31, 0x03, 0x4e, 0xbd, 0xfd, 0x1f, 0x00, 0x00, -} diff --git a/vendor/google.golang.org/appengine/internal/socket/socket_service.proto b/vendor/google.golang.org/appengine/internal/socket/socket_service.proto deleted file mode 100644 index 2fcc7953dc..0000000000 --- a/vendor/google.golang.org/appengine/internal/socket/socket_service.proto +++ /dev/null @@ -1,460 +0,0 @@ -syntax = "proto2"; -option go_package = "socket"; - -package appengine; - -message RemoteSocketServiceError { - enum ErrorCode { - SYSTEM_ERROR = 1; - GAI_ERROR = 2; - FAILURE = 4; - PERMISSION_DENIED = 5; - INVALID_REQUEST = 6; - SOCKET_CLOSED = 7; - } - - enum SystemError { - option allow_alias = true; - - SYS_SUCCESS = 0; - SYS_EPERM = 1; - SYS_ENOENT = 2; - SYS_ESRCH = 3; - SYS_EINTR = 4; - SYS_EIO = 5; - SYS_ENXIO = 6; - SYS_E2BIG = 7; - SYS_ENOEXEC = 8; - SYS_EBADF = 9; - SYS_ECHILD = 10; - SYS_EAGAIN = 11; - SYS_EWOULDBLOCK = 11; - SYS_ENOMEM = 12; - SYS_EACCES = 13; - SYS_EFAULT = 14; - SYS_ENOTBLK = 15; - SYS_EBUSY = 16; - SYS_EEXIST = 17; - SYS_EXDEV = 18; - SYS_ENODEV = 19; - SYS_ENOTDIR = 20; - SYS_EISDIR = 21; - SYS_EINVAL = 22; - SYS_ENFILE = 23; - SYS_EMFILE = 24; - SYS_ENOTTY = 25; - SYS_ETXTBSY = 26; - SYS_EFBIG = 27; - SYS_ENOSPC = 28; - SYS_ESPIPE = 29; - SYS_EROFS = 30; - SYS_EMLINK = 31; - SYS_EPIPE = 32; - SYS_EDOM = 33; - SYS_ERANGE = 34; - SYS_EDEADLK = 35; - SYS_EDEADLOCK = 35; - SYS_ENAMETOOLONG = 36; - SYS_ENOLCK = 37; - SYS_ENOSYS = 38; - SYS_ENOTEMPTY = 39; - SYS_ELOOP = 40; - SYS_ENOMSG = 42; - SYS_EIDRM = 43; - SYS_ECHRNG = 44; - SYS_EL2NSYNC = 45; - SYS_EL3HLT = 46; - SYS_EL3RST = 47; - SYS_ELNRNG = 48; - SYS_EUNATCH = 49; - SYS_ENOCSI = 50; - SYS_EL2HLT = 51; - SYS_EBADE = 52; - SYS_EBADR = 53; - SYS_EXFULL = 54; - SYS_ENOANO = 55; - SYS_EBADRQC = 56; - SYS_EBADSLT = 57; - SYS_EBFONT = 59; - SYS_ENOSTR = 60; - SYS_ENODATA = 61; - SYS_ETIME = 62; - SYS_ENOSR = 63; - SYS_ENONET = 64; - SYS_ENOPKG = 65; - SYS_EREMOTE = 66; - SYS_ENOLINK = 67; - SYS_EADV = 68; - SYS_ESRMNT = 69; - SYS_ECOMM = 70; - SYS_EPROTO = 71; - SYS_EMULTIHOP = 72; - SYS_EDOTDOT = 73; - SYS_EBADMSG = 74; - SYS_EOVERFLOW = 75; - SYS_ENOTUNIQ = 76; - SYS_EBADFD = 77; - SYS_EREMCHG = 78; - SYS_ELIBACC = 79; - SYS_ELIBBAD = 80; - SYS_ELIBSCN = 81; - SYS_ELIBMAX = 82; - SYS_ELIBEXEC = 83; - SYS_EILSEQ = 84; - SYS_ERESTART = 85; - SYS_ESTRPIPE = 86; - SYS_EUSERS = 87; - SYS_ENOTSOCK = 88; - SYS_EDESTADDRREQ = 89; - SYS_EMSGSIZE = 90; - SYS_EPROTOTYPE = 91; - SYS_ENOPROTOOPT = 92; - SYS_EPROTONOSUPPORT = 93; - SYS_ESOCKTNOSUPPORT = 94; - SYS_EOPNOTSUPP = 95; - SYS_ENOTSUP = 95; - SYS_EPFNOSUPPORT = 96; - SYS_EAFNOSUPPORT = 97; - SYS_EADDRINUSE = 98; - SYS_EADDRNOTAVAIL = 99; - SYS_ENETDOWN = 100; - SYS_ENETUNREACH = 101; - SYS_ENETRESET = 102; - SYS_ECONNABORTED = 103; - SYS_ECONNRESET = 104; - SYS_ENOBUFS = 105; - SYS_EISCONN = 106; - SYS_ENOTCONN = 107; - SYS_ESHUTDOWN = 108; - SYS_ETOOMANYREFS = 109; - SYS_ETIMEDOUT = 110; - SYS_ECONNREFUSED = 111; - SYS_EHOSTDOWN = 112; - SYS_EHOSTUNREACH = 113; - SYS_EALREADY = 114; - SYS_EINPROGRESS = 115; - SYS_ESTALE = 116; - SYS_EUCLEAN = 117; - SYS_ENOTNAM = 118; - SYS_ENAVAIL = 119; - SYS_EISNAM = 120; - SYS_EREMOTEIO = 121; - SYS_EDQUOT = 122; - SYS_ENOMEDIUM = 123; - SYS_EMEDIUMTYPE = 124; - SYS_ECANCELED = 125; - SYS_ENOKEY = 126; - SYS_EKEYEXPIRED = 
127; - SYS_EKEYREVOKED = 128; - SYS_EKEYREJECTED = 129; - SYS_EOWNERDEAD = 130; - SYS_ENOTRECOVERABLE = 131; - SYS_ERFKILL = 132; - } - - optional int32 system_error = 1 [default=0]; - optional string error_detail = 2; -} - -message AddressPort { - required int32 port = 1; - optional bytes packed_address = 2; - - optional string hostname_hint = 3; -} - - - -message CreateSocketRequest { - enum SocketFamily { - IPv4 = 1; - IPv6 = 2; - } - - enum SocketProtocol { - TCP = 1; - UDP = 2; - } - - required SocketFamily family = 1; - required SocketProtocol protocol = 2; - - repeated SocketOption socket_options = 3; - - optional AddressPort proxy_external_ip = 4; - - optional int32 listen_backlog = 5 [default=0]; - - optional AddressPort remote_ip = 6; - - optional string app_id = 9; - - optional int64 project_id = 10; -} - -message CreateSocketReply { - optional string socket_descriptor = 1; - - optional AddressPort server_address = 3; - - optional AddressPort proxy_external_ip = 4; - - extensions 1000 to max; -} - - - -message BindRequest { - required string socket_descriptor = 1; - required AddressPort proxy_external_ip = 2; -} - -message BindReply { - optional AddressPort proxy_external_ip = 1; -} - - - -message GetSocketNameRequest { - required string socket_descriptor = 1; -} - -message GetSocketNameReply { - optional AddressPort proxy_external_ip = 2; -} - - - -message GetPeerNameRequest { - required string socket_descriptor = 1; -} - -message GetPeerNameReply { - optional AddressPort peer_ip = 2; -} - - -message SocketOption { - - enum SocketOptionLevel { - SOCKET_SOL_IP = 0; - SOCKET_SOL_SOCKET = 1; - SOCKET_SOL_TCP = 6; - SOCKET_SOL_UDP = 17; - } - - enum SocketOptionName { - option allow_alias = true; - - SOCKET_SO_DEBUG = 1; - SOCKET_SO_REUSEADDR = 2; - SOCKET_SO_TYPE = 3; - SOCKET_SO_ERROR = 4; - SOCKET_SO_DONTROUTE = 5; - SOCKET_SO_BROADCAST = 6; - SOCKET_SO_SNDBUF = 7; - SOCKET_SO_RCVBUF = 8; - SOCKET_SO_KEEPALIVE = 9; - SOCKET_SO_OOBINLINE = 10; - SOCKET_SO_LINGER = 13; - SOCKET_SO_RCVTIMEO = 20; - SOCKET_SO_SNDTIMEO = 21; - - SOCKET_IP_TOS = 1; - SOCKET_IP_TTL = 2; - SOCKET_IP_HDRINCL = 3; - SOCKET_IP_OPTIONS = 4; - - SOCKET_TCP_NODELAY = 1; - SOCKET_TCP_MAXSEG = 2; - SOCKET_TCP_CORK = 3; - SOCKET_TCP_KEEPIDLE = 4; - SOCKET_TCP_KEEPINTVL = 5; - SOCKET_TCP_KEEPCNT = 6; - SOCKET_TCP_SYNCNT = 7; - SOCKET_TCP_LINGER2 = 8; - SOCKET_TCP_DEFER_ACCEPT = 9; - SOCKET_TCP_WINDOW_CLAMP = 10; - SOCKET_TCP_INFO = 11; - SOCKET_TCP_QUICKACK = 12; - } - - required SocketOptionLevel level = 1; - required SocketOptionName option = 2; - required bytes value = 3; -} - - -message SetSocketOptionsRequest { - required string socket_descriptor = 1; - repeated SocketOption options = 2; -} - -message SetSocketOptionsReply { -} - -message GetSocketOptionsRequest { - required string socket_descriptor = 1; - repeated SocketOption options = 2; -} - -message GetSocketOptionsReply { - repeated SocketOption options = 2; -} - - -message ConnectRequest { - required string socket_descriptor = 1; - required AddressPort remote_ip = 2; - optional double timeout_seconds = 3 [default=-1]; -} - -message ConnectReply { - optional AddressPort proxy_external_ip = 1; - - extensions 1000 to max; -} - - -message ListenRequest { - required string socket_descriptor = 1; - required int32 backlog = 2; -} - -message ListenReply { -} - - -message AcceptRequest { - required string socket_descriptor = 1; - optional double timeout_seconds = 2 [default=-1]; -} - -message AcceptReply { - optional bytes new_socket_descriptor = 2; - optional 
AddressPort remote_address = 3; -} - - - -message ShutDownRequest { - enum How { - SOCKET_SHUT_RD = 1; - SOCKET_SHUT_WR = 2; - SOCKET_SHUT_RDWR = 3; - } - required string socket_descriptor = 1; - required How how = 2; - required int64 send_offset = 3; -} - -message ShutDownReply { -} - - - -message CloseRequest { - required string socket_descriptor = 1; - optional int64 send_offset = 2 [default=-1]; -} - -message CloseReply { -} - - - -message SendRequest { - required string socket_descriptor = 1; - required bytes data = 2 [ctype=CORD]; - required int64 stream_offset = 3; - optional int32 flags = 4 [default=0]; - optional AddressPort send_to = 5; - optional double timeout_seconds = 6 [default=-1]; -} - -message SendReply { - optional int32 data_sent = 1; -} - - -message ReceiveRequest { - enum Flags { - MSG_OOB = 1; - MSG_PEEK = 2; - } - required string socket_descriptor = 1; - required int32 data_size = 2; - optional int32 flags = 3 [default=0]; - optional double timeout_seconds = 5 [default=-1]; -} - -message ReceiveReply { - optional int64 stream_offset = 2; - optional bytes data = 3 [ctype=CORD]; - optional AddressPort received_from = 4; - optional int32 buffer_size = 5; -} - - - -message PollEvent { - - enum PollEventFlag { - SOCKET_POLLNONE = 0; - SOCKET_POLLIN = 1; - SOCKET_POLLPRI = 2; - SOCKET_POLLOUT = 4; - SOCKET_POLLERR = 8; - SOCKET_POLLHUP = 16; - SOCKET_POLLNVAL = 32; - SOCKET_POLLRDNORM = 64; - SOCKET_POLLRDBAND = 128; - SOCKET_POLLWRNORM = 256; - SOCKET_POLLWRBAND = 512; - SOCKET_POLLMSG = 1024; - SOCKET_POLLREMOVE = 4096; - SOCKET_POLLRDHUP = 8192; - }; - - required string socket_descriptor = 1; - required int32 requested_events = 2; - required int32 observed_events = 3; -} - -message PollRequest { - repeated PollEvent events = 1; - optional double timeout_seconds = 2 [default=-1]; -} - -message PollReply { - repeated PollEvent events = 2; -} - -message ResolveRequest { - required string name = 1; - repeated CreateSocketRequest.SocketFamily address_families = 2; -} - -message ResolveReply { - enum ErrorCode { - SOCKET_EAI_ADDRFAMILY = 1; - SOCKET_EAI_AGAIN = 2; - SOCKET_EAI_BADFLAGS = 3; - SOCKET_EAI_FAIL = 4; - SOCKET_EAI_FAMILY = 5; - SOCKET_EAI_MEMORY = 6; - SOCKET_EAI_NODATA = 7; - SOCKET_EAI_NONAME = 8; - SOCKET_EAI_SERVICE = 9; - SOCKET_EAI_SOCKTYPE = 10; - SOCKET_EAI_SYSTEM = 11; - SOCKET_EAI_BADHINTS = 12; - SOCKET_EAI_PROTOCOL = 13; - SOCKET_EAI_OVERFLOW = 14; - SOCKET_EAI_MAX = 15; - }; - - repeated bytes packed_address = 2; - optional string canonical_name = 3; - repeated string aliases = 4; -} diff --git a/vendor/google.golang.org/appengine/socket/doc.go b/vendor/google.golang.org/appengine/socket/doc.go deleted file mode 100644 index 3de46df826..0000000000 --- a/vendor/google.golang.org/appengine/socket/doc.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2012 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -// Package socket provides outbound network sockets. -// -// This package is only required in the classic App Engine environment. -// Applications running only in App Engine "flexible environment" should -// use the standard library's net package. 
-package socket diff --git a/vendor/google.golang.org/appengine/socket/socket_classic.go b/vendor/google.golang.org/appengine/socket/socket_classic.go deleted file mode 100644 index 0ad50e2d36..0000000000 --- a/vendor/google.golang.org/appengine/socket/socket_classic.go +++ /dev/null @@ -1,290 +0,0 @@ -// Copyright 2012 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -// +build appengine - -package socket - -import ( - "fmt" - "io" - "net" - "strconv" - "time" - - "github.com/golang/protobuf/proto" - "golang.org/x/net/context" - "google.golang.org/appengine/internal" - - pb "google.golang.org/appengine/internal/socket" -) - -// Dial connects to the address addr on the network protocol. -// The address format is host:port, where host may be a hostname or an IP address. -// Known protocols are "tcp" and "udp". -// The returned connection satisfies net.Conn, and is valid while ctx is valid; -// if the connection is to be used after ctx becomes invalid, invoke SetContext -// with the new context. -func Dial(ctx context.Context, protocol, addr string) (*Conn, error) { - return DialTimeout(ctx, protocol, addr, 0) -} - -var ipFamilies = []pb.CreateSocketRequest_SocketFamily{ - pb.CreateSocketRequest_IPv4, - pb.CreateSocketRequest_IPv6, -} - -// DialTimeout is like Dial but takes a timeout. -// The timeout includes name resolution, if required. -func DialTimeout(ctx context.Context, protocol, addr string, timeout time.Duration) (*Conn, error) { - dialCtx := ctx // Used for dialing and name resolution, but not stored in the *Conn. - if timeout > 0 { - var cancel context.CancelFunc - dialCtx, cancel = context.WithTimeout(ctx, timeout) - defer cancel() - } - - host, portStr, err := net.SplitHostPort(addr) - if err != nil { - return nil, err - } - port, err := strconv.Atoi(portStr) - if err != nil { - return nil, fmt.Errorf("socket: bad port %q: %v", portStr, err) - } - - var prot pb.CreateSocketRequest_SocketProtocol - switch protocol { - case "tcp": - prot = pb.CreateSocketRequest_TCP - case "udp": - prot = pb.CreateSocketRequest_UDP - default: - return nil, fmt.Errorf("socket: unknown protocol %q", protocol) - } - - packedAddrs, resolved, err := resolve(dialCtx, ipFamilies, host) - if err != nil { - return nil, fmt.Errorf("socket: failed resolving %q: %v", host, err) - } - if len(packedAddrs) == 0 { - return nil, fmt.Errorf("no addresses for %q", host) - } - - packedAddr := packedAddrs[0] // use first address - fam := pb.CreateSocketRequest_IPv4 - if len(packedAddr) == net.IPv6len { - fam = pb.CreateSocketRequest_IPv6 - } - - req := &pb.CreateSocketRequest{ - Family: fam.Enum(), - Protocol: prot.Enum(), - RemoteIp: &pb.AddressPort{ - Port: proto.Int32(int32(port)), - PackedAddress: packedAddr, - }, - } - if resolved { - req.RemoteIp.HostnameHint = &host - } - res := &pb.CreateSocketReply{} - if err := internal.Call(dialCtx, "remote_socket", "CreateSocket", req, res); err != nil { - return nil, err - } - - return &Conn{ - ctx: ctx, - desc: res.GetSocketDescriptor(), - prot: prot, - local: res.ProxyExternalIp, - remote: req.RemoteIp, - }, nil -} - -// LookupIP returns the given host's IP addresses. 
-func LookupIP(ctx context.Context, host string) (addrs []net.IP, err error) { - packedAddrs, _, err := resolve(ctx, ipFamilies, host) - if err != nil { - return nil, fmt.Errorf("socket: failed resolving %q: %v", host, err) - } - addrs = make([]net.IP, len(packedAddrs)) - for i, pa := range packedAddrs { - addrs[i] = net.IP(pa) - } - return addrs, nil -} - -func resolve(ctx context.Context, fams []pb.CreateSocketRequest_SocketFamily, host string) ([][]byte, bool, error) { - // Check if it's an IP address. - if ip := net.ParseIP(host); ip != nil { - if ip := ip.To4(); ip != nil { - return [][]byte{ip}, false, nil - } - return [][]byte{ip}, false, nil - } - - req := &pb.ResolveRequest{ - Name: &host, - AddressFamilies: fams, - } - res := &pb.ResolveReply{} - if err := internal.Call(ctx, "remote_socket", "Resolve", req, res); err != nil { - // XXX: need to map to pb.ResolveReply_ErrorCode? - return nil, false, err - } - return res.PackedAddress, true, nil -} - -// withDeadline is like context.WithDeadline, except it ignores the zero deadline. -func withDeadline(parent context.Context, deadline time.Time) (context.Context, context.CancelFunc) { - if deadline.IsZero() { - return parent, func() {} - } - return context.WithDeadline(parent, deadline) -} - -// Conn represents a socket connection. -// It implements net.Conn. -type Conn struct { - ctx context.Context - desc string - offset int64 - - prot pb.CreateSocketRequest_SocketProtocol - local, remote *pb.AddressPort - - readDeadline, writeDeadline time.Time // optional -} - -// SetContext sets the context that is used by this Conn. -// It is usually used only when using a Conn that was created in a different context, -// such as when a connection is created during a warmup request but used while -// servicing a user request. 
-func (cn *Conn) SetContext(ctx context.Context) { - cn.ctx = ctx -} - -func (cn *Conn) Read(b []byte) (n int, err error) { - const maxRead = 1 << 20 - if len(b) > maxRead { - b = b[:maxRead] - } - - req := &pb.ReceiveRequest{ - SocketDescriptor: &cn.desc, - DataSize: proto.Int32(int32(len(b))), - } - res := &pb.ReceiveReply{} - if !cn.readDeadline.IsZero() { - req.TimeoutSeconds = proto.Float64(cn.readDeadline.Sub(time.Now()).Seconds()) - } - ctx, cancel := withDeadline(cn.ctx, cn.readDeadline) - defer cancel() - if err := internal.Call(ctx, "remote_socket", "Receive", req, res); err != nil { - return 0, err - } - if len(res.Data) == 0 { - return 0, io.EOF - } - if len(res.Data) > len(b) { - return 0, fmt.Errorf("socket: internal error: read too much data: %d > %d", len(res.Data), len(b)) - } - return copy(b, res.Data), nil -} - -func (cn *Conn) Write(b []byte) (n int, err error) { - const lim = 1 << 20 // max per chunk - - for n < len(b) { - chunk := b[n:] - if len(chunk) > lim { - chunk = chunk[:lim] - } - - req := &pb.SendRequest{ - SocketDescriptor: &cn.desc, - Data: chunk, - StreamOffset: &cn.offset, - } - res := &pb.SendReply{} - if !cn.writeDeadline.IsZero() { - req.TimeoutSeconds = proto.Float64(cn.writeDeadline.Sub(time.Now()).Seconds()) - } - ctx, cancel := withDeadline(cn.ctx, cn.writeDeadline) - defer cancel() - if err = internal.Call(ctx, "remote_socket", "Send", req, res); err != nil { - // assume zero bytes were sent in this RPC - break - } - n += int(res.GetDataSent()) - cn.offset += int64(res.GetDataSent()) - } - - return -} - -func (cn *Conn) Close() error { - req := &pb.CloseRequest{ - SocketDescriptor: &cn.desc, - } - res := &pb.CloseReply{} - if err := internal.Call(cn.ctx, "remote_socket", "Close", req, res); err != nil { - return err - } - cn.desc = "CLOSED" - return nil -} - -func addr(prot pb.CreateSocketRequest_SocketProtocol, ap *pb.AddressPort) net.Addr { - if ap == nil { - return nil - } - switch prot { - case pb.CreateSocketRequest_TCP: - return &net.TCPAddr{ - IP: net.IP(ap.PackedAddress), - Port: int(*ap.Port), - } - case pb.CreateSocketRequest_UDP: - return &net.UDPAddr{ - IP: net.IP(ap.PackedAddress), - Port: int(*ap.Port), - } - } - panic("unknown protocol " + prot.String()) -} - -func (cn *Conn) LocalAddr() net.Addr { return addr(cn.prot, cn.local) } -func (cn *Conn) RemoteAddr() net.Addr { return addr(cn.prot, cn.remote) } - -func (cn *Conn) SetDeadline(t time.Time) error { - cn.readDeadline = t - cn.writeDeadline = t - return nil -} - -func (cn *Conn) SetReadDeadline(t time.Time) error { - cn.readDeadline = t - return nil -} - -func (cn *Conn) SetWriteDeadline(t time.Time) error { - cn.writeDeadline = t - return nil -} - -// KeepAlive signals that the connection is still in use. -// It may be called to prevent the socket being closed due to inactivity. -func (cn *Conn) KeepAlive() error { - req := &pb.GetSocketNameRequest{ - SocketDescriptor: &cn.desc, - } - res := &pb.GetSocketNameReply{} - return internal.Call(cn.ctx, "remote_socket", "GetSocketName", req, res) -} - -func init() { - internal.RegisterErrorCodeMap("remote_socket", pb.RemoteSocketServiceError_ErrorCode_name) -} diff --git a/vendor/google.golang.org/appengine/socket/socket_vm.go b/vendor/google.golang.org/appengine/socket/socket_vm.go deleted file mode 100644 index c804169a1c..0000000000 --- a/vendor/google.golang.org/appengine/socket/socket_vm.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2015 Google Inc. All rights reserved. 
-// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -// +build !appengine - -package socket - -import ( - "net" - "time" - - "golang.org/x/net/context" -) - -// Dial connects to the address addr on the network protocol. -// The address format is host:port, where host may be a hostname or an IP address. -// Known protocols are "tcp" and "udp". -// The returned connection satisfies net.Conn, and is valid while ctx is valid; -// if the connection is to be used after ctx becomes invalid, invoke SetContext -// with the new context. -func Dial(ctx context.Context, protocol, addr string) (*Conn, error) { - conn, err := net.Dial(protocol, addr) - if err != nil { - return nil, err - } - return &Conn{conn}, nil -} - -// DialTimeout is like Dial but takes a timeout. -// The timeout includes name resolution, if required. -func DialTimeout(ctx context.Context, protocol, addr string, timeout time.Duration) (*Conn, error) { - conn, err := net.DialTimeout(protocol, addr, timeout) - if err != nil { - return nil, err - } - return &Conn{conn}, nil -} - -// LookupIP returns the given host's IP addresses. -func LookupIP(ctx context.Context, host string) (addrs []net.IP, err error) { - return net.LookupIP(host) -} - -// Conn represents a socket connection. -// It implements net.Conn. -type Conn struct { - net.Conn -} - -// SetContext sets the context that is used by this Conn. -// It is usually used only when using a Conn that was created in a different context, -// such as when a connection is created during a warmup request but used while -// servicing a user request. -func (cn *Conn) SetContext(ctx context.Context) { - // This function is not required in App Engine "flexible environment". -} - -// KeepAlive signals that the connection is still in use. -// It may be called to prevent the socket being closed due to inactivity. -func (cn *Conn) KeepAlive() error { - // This function is not required in App Engine "flexible environment". - return nil -} diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go index dbe2e2d0c6..6ce01ac9a6 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.21.9 +// protoc v3.21.12 // source: google/api/field_behavior.proto package annotations @@ -78,6 +78,19 @@ const ( // a non-empty value will be returned. The user will not be aware of what // non-empty value to expect. FieldBehavior_NON_EMPTY_DEFAULT FieldBehavior = 7 + // Denotes that the field in a resource (a message annotated with + // google.api.resource) is used in the resource name to uniquely identify the + // resource. For AIP-compliant APIs, this should only be applied to the + // `name` field on the resource. + // + // This behavior should not be applied to references to other resources within + // the message. + // + // The identifier field of resources often have different field behavior + // depending on the request it is embedded in (e.g. for Create methods name + // is optional and unused, while for Update methods it is required). Instead + // of method-specific annotations, only `IDENTIFIER` is required. + FieldBehavior_IDENTIFIER FieldBehavior = 8 ) // Enum value maps for FieldBehavior. 
@@ -91,6 +104,7 @@ var ( 5: "IMMUTABLE", 6: "UNORDERED_LIST", 7: "NON_EMPTY_DEFAULT", + 8: "IDENTIFIER", } FieldBehavior_value = map[string]int32{ "FIELD_BEHAVIOR_UNSPECIFIED": 0, @@ -101,6 +115,7 @@ var ( "IMMUTABLE": 5, "UNORDERED_LIST": 6, "NON_EMPTY_DEFAULT": 7, + "IDENTIFIER": 8, } ) @@ -169,7 +184,7 @@ var file_google_api_field_behavior_proto_rawDesc = []byte{ 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2a, - 0xa6, 0x01, 0x0a, 0x0d, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, + 0xb6, 0x01, 0x0a, 0x0d, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x12, 0x1e, 0x0a, 0x1a, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x42, 0x45, 0x48, 0x41, 0x56, 0x49, 0x4f, 0x52, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12, @@ -179,7 +194,8 @@ var file_google_api_field_behavior_proto_rawDesc = []byte{ 0x0a, 0x09, 0x49, 0x4d, 0x4d, 0x55, 0x54, 0x41, 0x42, 0x4c, 0x45, 0x10, 0x05, 0x12, 0x12, 0x0a, 0x0e, 0x55, 0x4e, 0x4f, 0x52, 0x44, 0x45, 0x52, 0x45, 0x44, 0x5f, 0x4c, 0x49, 0x53, 0x54, 0x10, 0x06, 0x12, 0x15, 0x0a, 0x11, 0x4e, 0x4f, 0x4e, 0x5f, 0x45, 0x4d, 0x50, 0x54, 0x59, 0x5f, 0x44, - 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x10, 0x07, 0x3a, 0x60, 0x0a, 0x0e, 0x66, 0x69, 0x65, 0x6c, + 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x10, 0x07, 0x12, 0x0e, 0x0a, 0x0a, 0x49, 0x44, 0x45, 0x4e, + 0x54, 0x49, 0x46, 0x49, 0x45, 0x52, 0x10, 0x08, 0x3a, 0x60, 0x0a, 0x0e, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9c, 0x08, 0x20, 0x03, 0x28, 0x0e, diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go new file mode 100644 index 0000000000..d02e6bbc89 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go @@ -0,0 +1,295 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.26.0 +// protoc v3.21.12 +// source: google/api/field_info.proto + +package annotations + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The standard format of a field value. The supported formats are all backed +// by either an RFC defined by the IETF or a Google-defined AIP. +type FieldInfo_Format int32 + +const ( + // Default, unspecified value. + FieldInfo_FORMAT_UNSPECIFIED FieldInfo_Format = 0 + // Universally Unique Identifier, version 4, value as defined by + // https://datatracker.ietf.org/doc/html/rfc4122. The value may be + // normalized to entirely lowercase letters. For example, the value + // `F47AC10B-58CC-0372-8567-0E02B2C3D479` would be normalized to + // `f47ac10b-58cc-0372-8567-0e02b2c3d479`. + FieldInfo_UUID4 FieldInfo_Format = 1 + // Internet Protocol v4 value as defined by [RFC + // 791](https://datatracker.ietf.org/doc/html/rfc791). The value may be + // condensed, with leading zeros in each octet stripped. For example, + // `001.022.233.040` would be condensed to `1.22.233.40`. + FieldInfo_IPV4 FieldInfo_Format = 2 + // Internet Protocol v6 value as defined by [RFC + // 2460](https://datatracker.ietf.org/doc/html/rfc2460). The value may be + // normalized to entirely lowercase letters, and zero-padded partial and + // empty octets. For example, the value `2001:DB8::` would be normalized to + // `2001:0db8:0:0`. + FieldInfo_IPV6 FieldInfo_Format = 3 + // An IP address in either v4 or v6 format as described by the individual + // values defined herein. See the comments on the IPV4 and IPV6 types for + // allowed normalizations of each. + FieldInfo_IPV4_OR_IPV6 FieldInfo_Format = 4 +) + +// Enum value maps for FieldInfo_Format. +var ( + FieldInfo_Format_name = map[int32]string{ + 0: "FORMAT_UNSPECIFIED", + 1: "UUID4", + 2: "IPV4", + 3: "IPV6", + 4: "IPV4_OR_IPV6", + } + FieldInfo_Format_value = map[string]int32{ + "FORMAT_UNSPECIFIED": 0, + "UUID4": 1, + "IPV4": 2, + "IPV6": 3, + "IPV4_OR_IPV6": 4, + } +) + +func (x FieldInfo_Format) Enum() *FieldInfo_Format { + p := new(FieldInfo_Format) + *p = x + return p +} + +func (x FieldInfo_Format) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FieldInfo_Format) Descriptor() protoreflect.EnumDescriptor { + return file_google_api_field_info_proto_enumTypes[0].Descriptor() +} + +func (FieldInfo_Format) Type() protoreflect.EnumType { + return &file_google_api_field_info_proto_enumTypes[0] +} + +func (x FieldInfo_Format) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use FieldInfo_Format.Descriptor instead. +func (FieldInfo_Format) EnumDescriptor() ([]byte, []int) { + return file_google_api_field_info_proto_rawDescGZIP(), []int{0, 0} +} + +// Rich semantic information of an API field beyond basic typing. +type FieldInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The standard format of a field value. This does not explicitly configure + // any API consumer, just documents the API's format for the field it is + // applied to. 
+ Format FieldInfo_Format `protobuf:"varint,1,opt,name=format,proto3,enum=google.api.FieldInfo_Format" json:"format,omitempty"` +} + +func (x *FieldInfo) Reset() { + *x = FieldInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_field_info_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FieldInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FieldInfo) ProtoMessage() {} + +func (x *FieldInfo) ProtoReflect() protoreflect.Message { + mi := &file_google_api_field_info_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FieldInfo.ProtoReflect.Descriptor instead. +func (*FieldInfo) Descriptor() ([]byte, []int) { + return file_google_api_field_info_proto_rawDescGZIP(), []int{0} +} + +func (x *FieldInfo) GetFormat() FieldInfo_Format { + if x != nil { + return x.Format + } + return FieldInfo_FORMAT_UNSPECIFIED +} + +var file_google_api_field_info_proto_extTypes = []protoimpl.ExtensionInfo{ + { + ExtendedType: (*descriptorpb.FieldOptions)(nil), + ExtensionType: (*FieldInfo)(nil), + Field: 291403980, + Name: "google.api.field_info", + Tag: "bytes,291403980,opt,name=field_info", + Filename: "google/api/field_info.proto", + }, +} + +// Extension fields to descriptorpb.FieldOptions. +var ( + // Rich semantic descriptor of an API field beyond the basic typing. + // + // Examples: + // + // string request_id = 1 [(google.api.field_info).format = UUID4]; + // string old_ip_address = 2 [(google.api.field_info).format = IPV4]; + // string new_ip_address = 3 [(google.api.field_info).format = IPV6]; + // string actual_ip_address = 4 [ + // (google.api.field_info).format = IPV4_OR_IPV6 + // ]; + // + // optional google.api.FieldInfo field_info = 291403980; + E_FieldInfo = &file_google_api_field_info_proto_extTypes[0] +) + +var File_google_api_field_info_proto protoreflect.FileDescriptor + +var file_google_api_field_info_proto_rawDesc = []byte{ + 0x0a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, + 0x6c, 0x64, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x94, 0x01, 0x0a, 0x09, + 0x46, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x34, 0x0a, 0x06, 0x66, 0x6f, 0x72, + 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, + 0x2e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, + 0x51, 0x0a, 0x06, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x16, 0x0a, 0x12, 0x46, 0x4f, 0x52, + 0x4d, 0x41, 0x54, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, + 0x00, 0x12, 0x09, 0x0a, 0x05, 0x55, 0x55, 0x49, 0x44, 0x34, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, + 0x49, 0x50, 0x56, 0x34, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x49, 0x50, 0x56, 0x36, 0x10, 0x03, + 0x12, 0x10, 0x0a, 0x0c, 0x49, 0x50, 0x56, 0x34, 0x5f, 0x4f, 0x52, 0x5f, 0x49, 0x50, 0x56, 0x36, + 0x10, 0x04, 0x3a, 
0x57, 0x0a, 0x0a, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x69, 0x6e, 0x66, 0x6f, + 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, + 0xcc, 0xf1, 0xf9, 0x8a, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, + 0x52, 0x09, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x42, 0x6c, 0x0a, 0x0e, 0x63, + 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x0e, 0x46, + 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, + 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, + 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, +} + +var ( + file_google_api_field_info_proto_rawDescOnce sync.Once + file_google_api_field_info_proto_rawDescData = file_google_api_field_info_proto_rawDesc +) + +func file_google_api_field_info_proto_rawDescGZIP() []byte { + file_google_api_field_info_proto_rawDescOnce.Do(func() { + file_google_api_field_info_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_field_info_proto_rawDescData) + }) + return file_google_api_field_info_proto_rawDescData +} + +var file_google_api_field_info_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_google_api_field_info_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_api_field_info_proto_goTypes = []interface{}{ + (FieldInfo_Format)(0), // 0: google.api.FieldInfo.Format + (*FieldInfo)(nil), // 1: google.api.FieldInfo + (*descriptorpb.FieldOptions)(nil), // 2: google.protobuf.FieldOptions +} +var file_google_api_field_info_proto_depIdxs = []int32{ + 0, // 0: google.api.FieldInfo.format:type_name -> google.api.FieldInfo.Format + 2, // 1: google.api.field_info:extendee -> google.protobuf.FieldOptions + 1, // 2: google.api.field_info:type_name -> google.api.FieldInfo + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 2, // [2:3] is the sub-list for extension type_name + 1, // [1:2] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_google_api_field_info_proto_init() } +func file_google_api_field_info_proto_init() { + if File_google_api_field_info_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_api_field_info_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FieldInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_api_field_info_proto_rawDesc, + NumEnums: 1, + NumMessages: 1, + NumExtensions: 1, + NumServices: 0, + }, + GoTypes: file_google_api_field_info_proto_goTypes, + DependencyIndexes: file_google_api_field_info_proto_depIdxs, + EnumInfos: 
file_google_api_field_info_proto_enumTypes, + MessageInfos: file_google_api_field_info_proto_msgTypes, + ExtensionInfos: file_google_api_field_info_proto_extTypes, + }.Build() + File_google_api_field_info_proto = out.File + file_google_api_field_info_proto_rawDesc = nil + file_google_api_field_info_proto_goTypes = nil + file_google_api_field_info_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/genproto/googleapis/iam/v1/alias.go b/vendor/google.golang.org/genproto/googleapis/iam/v1/alias.go deleted file mode 100644 index 9fb745926a..0000000000 --- a/vendor/google.golang.org/genproto/googleapis/iam/v1/alias.go +++ /dev/null @@ -1,208 +0,0 @@ -// Copyright 2022 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by aliasgen. DO NOT EDIT. - -// Package iam aliases all exported identifiers in package -// "cloud.google.com/go/iam/apiv1/iampb". -// -// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb. -// Please read https://github.com/googleapis/google-cloud-go/blob/main/migration.md -// for more details. -package iam - -import ( - src "cloud.google.com/go/iam/apiv1/iampb" - grpc "google.golang.org/grpc" -) - -// Deprecated: Please use consts in: cloud.google.com/go/iam/apiv1/iampb -const ( - AuditConfigDelta_ACTION_UNSPECIFIED = src.AuditConfigDelta_ACTION_UNSPECIFIED - AuditConfigDelta_ADD = src.AuditConfigDelta_ADD - AuditConfigDelta_REMOVE = src.AuditConfigDelta_REMOVE - AuditLogConfig_ADMIN_READ = src.AuditLogConfig_ADMIN_READ - AuditLogConfig_DATA_READ = src.AuditLogConfig_DATA_READ - AuditLogConfig_DATA_WRITE = src.AuditLogConfig_DATA_WRITE - AuditLogConfig_LOG_TYPE_UNSPECIFIED = src.AuditLogConfig_LOG_TYPE_UNSPECIFIED - BindingDelta_ACTION_UNSPECIFIED = src.BindingDelta_ACTION_UNSPECIFIED - BindingDelta_ADD = src.BindingDelta_ADD - BindingDelta_REMOVE = src.BindingDelta_REMOVE -) - -// Deprecated: Please use vars in: cloud.google.com/go/iam/apiv1/iampb -var ( - AuditConfigDelta_Action_name = src.AuditConfigDelta_Action_name - AuditConfigDelta_Action_value = src.AuditConfigDelta_Action_value - AuditLogConfig_LogType_name = src.AuditLogConfig_LogType_name - AuditLogConfig_LogType_value = src.AuditLogConfig_LogType_value - BindingDelta_Action_name = src.BindingDelta_Action_name - BindingDelta_Action_value = src.BindingDelta_Action_value - File_google_iam_v1_iam_policy_proto = src.File_google_iam_v1_iam_policy_proto - File_google_iam_v1_options_proto = src.File_google_iam_v1_options_proto - File_google_iam_v1_policy_proto = src.File_google_iam_v1_policy_proto -) - -// Specifies the audit configuration for a service. The configuration -// determines which permission types are logged, and what identities, if any, -// are exempted from logging. An AuditConfig must have one or more -// AuditLogConfigs. 
If there are AuditConfigs for both `allServices` and a -// specific service, the union of the two AuditConfigs is used for that -// service: the log_types specified in each AuditConfig are enabled, and the -// exempted_members in each AuditLogConfig are exempted. Example Policy with -// multiple AuditConfigs: { "audit_configs": [ { "service": "allServices", -// "audit_log_configs": [ { "log_type": "DATA_READ", "exempted_members": [ -// "user:jose@example.com" ] }, { "log_type": "DATA_WRITE" }, { "log_type": -// "ADMIN_READ" } ] }, { "service": "sampleservice.googleapis.com", -// "audit_log_configs": [ { "log_type": "DATA_READ" }, { "log_type": -// "DATA_WRITE", "exempted_members": [ "user:aliya@example.com" ] } ] } ] } For -// sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ -// logging. It also exempts jose@example.com from DATA_READ logging, and -// aliya@example.com from DATA_WRITE logging. -// -// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb -type AuditConfig = src.AuditConfig - -// One delta entry for AuditConfig. Each individual change (only one -// exempted_member in each entry) to a AuditConfig will be a separate entry. -// -// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb -type AuditConfigDelta = src.AuditConfigDelta - -// The type of action performed on an audit configuration in a policy. -// -// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb -type AuditConfigDelta_Action = src.AuditConfigDelta_Action - -// Provides the configuration for logging a type of permissions. Example: { -// "audit_log_configs": [ { "log_type": "DATA_READ", "exempted_members": [ -// "user:jose@example.com" ] }, { "log_type": "DATA_WRITE" } ] } This enables -// 'DATA_READ' and 'DATA_WRITE' logging, while exempting jose@example.com from -// DATA_READ logging. -// -// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb -type AuditLogConfig = src.AuditLogConfig - -// The list of valid permission types for which logging can be configured. -// Admin writes are always logged, and are not configurable. -// -// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb -type AuditLogConfig_LogType = src.AuditLogConfig_LogType - -// Associates `members`, or principals, with a `role`. -// -// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb -type Binding = src.Binding - -// One delta entry for Binding. Each individual change (only one member in -// each entry) to a binding will be a separate entry. -// -// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb -type BindingDelta = src.BindingDelta - -// The type of action performed on a Binding in a policy. -// -// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb -type BindingDelta_Action = src.BindingDelta_Action - -// Request message for `GetIamPolicy` method. -// -// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb -type GetIamPolicyRequest = src.GetIamPolicyRequest - -// Encapsulates settings provided to GetIamPolicy. -// -// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb -type GetPolicyOptions = src.GetPolicyOptions - -// IAMPolicyClient is the client API for IAMPolicy service. For semantics -// around ctx use and closing/ending streaming RPCs, please refer to -// https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
-// -// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb -type IAMPolicyClient = src.IAMPolicyClient - -// IAMPolicyServer is the server API for IAMPolicy service. -// -// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb -type IAMPolicyServer = src.IAMPolicyServer - -// An Identity and Access Management (IAM) policy, which specifies access -// controls for Google Cloud resources. A `Policy` is a collection of -// `bindings`. A `binding` binds one or more `members`, or principals, to a -// single `role`. Principals can be user accounts, service accounts, Google -// groups, and domains (such as G Suite). A `role` is a named list of -// permissions; each `role` can be an IAM predefined role or a user-created -// custom role. For some types of Google Cloud resources, a `binding` can also -// specify a `condition`, which is a logical expression that allows access to a -// resource only if the expression evaluates to `true`. A condition can add -// constraints based on attributes of the request, the resource, or both. To -// learn which resources support conditions in their IAM policies, see the [IAM -// documentation](https://cloud.google.com/iam/help/conditions/resource-policies). -// **JSON example:** { "bindings": [ { "role": -// "roles/resourcemanager.organizationAdmin", "members": [ -// "user:mike@example.com", "group:admins@example.com", "domain:google.com", -// "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": -// "roles/resourcemanager.organizationViewer", "members": [ -// "user:eve@example.com" ], "condition": { "title": "expirable access", -// "description": "Does not grant access after Sep 2020", "expression": -// "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": -// "BwWWja0YfJA=", "version": 3 } **YAML example:** bindings: - members: - -// user:mike@example.com - group:admins@example.com - domain:google.com - -// serviceAccount:my-project-id@appspot.gserviceaccount.com role: -// roles/resourcemanager.organizationAdmin - members: - user:eve@example.com -// role: roles/resourcemanager.organizationViewer condition: title: expirable -// access description: Does not grant access after Sep 2020 expression: -// request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= -// version: 3 For a description of IAM and its features, see the [IAM -// documentation](https://cloud.google.com/iam/docs/). -// -// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb -type Policy = src.Policy - -// The difference delta between two policies. -// -// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb -type PolicyDelta = src.PolicyDelta - -// Request message for `SetIamPolicy` method. -// -// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb -type SetIamPolicyRequest = src.SetIamPolicyRequest - -// Request message for `TestIamPermissions` method. -// -// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb -type TestIamPermissionsRequest = src.TestIamPermissionsRequest - -// Response message for `TestIamPermissions` method. -// -// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb -type TestIamPermissionsResponse = src.TestIamPermissionsResponse - -// UnimplementedIAMPolicyServer can be embedded to have forward compatible -// implementations. 
-// -// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb -type UnimplementedIAMPolicyServer = src.UnimplementedIAMPolicyServer - -// Deprecated: Please use funcs in: cloud.google.com/go/iam/apiv1/iampb -func NewIAMPolicyClient(cc grpc.ClientConnInterface) IAMPolicyClient { - return src.NewIAMPolicyClient(cc) -} - -// Deprecated: Please use funcs in: cloud.google.com/go/iam/apiv1/iampb -func RegisterIAMPolicyServer(s *grpc.Server, srv IAMPolicyServer) { - src.RegisterIAMPolicyServer(s, srv) -} diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md index 0e6ae69a58..ab0fbb79b8 100644 --- a/vendor/google.golang.org/grpc/README.md +++ b/vendor/google.golang.org/grpc/README.md @@ -1,8 +1,8 @@ # gRPC-Go -[![Build Status](https://travis-ci.org/grpc/grpc-go.svg)](https://travis-ci.org/grpc/grpc-go) [![GoDoc](https://pkg.go.dev/badge/google.golang.org/grpc)][API] [![GoReportCard](https://goreportcard.com/badge/grpc/grpc-go)](https://goreportcard.com/report/github.com/grpc/grpc-go) +[![codecov](https://codecov.io/gh/grpc/grpc-go/graph/badge.svg)](https://codecov.io/gh/grpc/grpc-go) The [Go][] implementation of [gRPC][]: A high performance, open source, general RPC framework that puts mobile and HTTP/2 first. For more information see the @@ -14,21 +14,14 @@ RPC framework that puts mobile and HTTP/2 first. For more information see the ## Installation -With [Go module][] support (Go 1.11+), simply add the following import +Simply add the following import to your code, and then `go [build|run|test]` +will automatically fetch the necessary dependencies: + ```go import "google.golang.org/grpc" ``` -to your code, and then `go [build|run|test]` will automatically fetch the -necessary dependencies. - -Otherwise, to install the `grpc-go` package, run the following command: - -```console -$ go get -u google.golang.org/grpc -``` - > **Note:** If you are trying to access `grpc-go` from **China**, see the > [FAQ](#FAQ) below. @@ -56,15 +49,6 @@ To build Go code, there are several options: - Set up a VPN and access google.golang.org through that. -- Without Go module support: `git clone` the repo manually: - - ```sh - git clone https://github.com/grpc/grpc-go.git $GOPATH/src/google.golang.org/grpc - ``` - - You will need to do the same for all of grpc's dependencies in `golang.org`, - e.g. `golang.org/x/net`. - - With Go module support: it is possible to use the `replace` feature of `go mod` to create aliases for golang.org packages. In your project's directory: @@ -76,33 +60,13 @@ To build Go code, there are several options: ``` Again, this will need to be done for all transitive dependencies hosted on - golang.org as well. For details, refer to [golang/go issue #28652](https://github.com/golang/go/issues/28652). + golang.org as well. For details, refer to [golang/go issue + #28652](https://github.com/golang/go/issues/28652). ### Compiling error, undefined: grpc.SupportPackageIsVersion -#### If you are using Go modules: - -Ensure your gRPC-Go version is `require`d at the appropriate version in -the same module containing the generated `.pb.go` files. For example, -`SupportPackageIsVersion6` needs `v1.27.0`, so in your `go.mod` file: - -```go -module - -require ( - google.golang.org/grpc v1.27.0 -) -``` - -#### If you are *not* using Go modules: - -Update the `proto` package, gRPC package, and rebuild the `.proto` files: - -```sh -go get -u github.com/golang/protobuf/{proto,protoc-gen-go} -go get -u google.golang.org/grpc -protoc --go_out=plugins=grpc:. 
*.proto -``` +Please update to the latest version of gRPC-Go using +`go get google.golang.org/grpc`. ### How to turn on logging @@ -121,9 +85,11 @@ possible reasons, including: 1. mis-configured transport credentials, connection failed on handshaking 1. bytes disrupted, possibly by a proxy in between 1. server shutdown - 1. Keepalive parameters caused connection shutdown, for example if you have configured - your server to terminate connections regularly to [trigger DNS lookups](https://github.com/grpc/grpc-go/issues/3170#issuecomment-552517779). - If this is the case, you may want to increase your [MaxConnectionAgeGrace](https://pkg.go.dev/google.golang.org/grpc/keepalive?tab=doc#ServerParameters), + 1. Keepalive parameters caused connection shutdown, for example if you have + configured your server to terminate connections regularly to [trigger DNS + lookups](https://github.com/grpc/grpc-go/issues/3170#issuecomment-552517779). + If this is the case, you may want to increase your + [MaxConnectionAgeGrace](https://pkg.go.dev/google.golang.org/grpc/keepalive?tab=doc#ServerParameters), to allow longer RPC calls to finish. It can be tricky to debug this because the error happens on the client side but diff --git a/vendor/google.golang.org/grpc/attributes/attributes.go b/vendor/google.golang.org/grpc/attributes/attributes.go index 3efca45914..52d530d7ad 100644 --- a/vendor/google.golang.org/grpc/attributes/attributes.go +++ b/vendor/google.golang.org/grpc/attributes/attributes.go @@ -34,26 +34,26 @@ import ( // key/value pairs. Keys must be hashable, and users should define their own // types for keys. Values should not be modified after they are added to an // Attributes or if they were received from one. If values implement 'Equal(o -// interface{}) bool', it will be called by (*Attributes).Equal to determine -// whether two values with the same key should be considered equal. +// any) bool', it will be called by (*Attributes).Equal to determine whether +// two values with the same key should be considered equal. type Attributes struct { - m map[interface{}]interface{} + m map[any]any } // New returns a new Attributes containing the key/value pair. -func New(key, value interface{}) *Attributes { - return &Attributes{m: map[interface{}]interface{}{key: value}} +func New(key, value any) *Attributes { + return &Attributes{m: map[any]any{key: value}} } // WithValue returns a new Attributes containing the previous keys and values // and the new key/value pair. If the same key appears multiple times, the // last value overwrites all previous values for that key. To remove an // existing key, use a nil value. value should not be modified later. -func (a *Attributes) WithValue(key, value interface{}) *Attributes { +func (a *Attributes) WithValue(key, value any) *Attributes { if a == nil { return New(key, value) } - n := &Attributes{m: make(map[interface{}]interface{}, len(a.m)+1)} + n := &Attributes{m: make(map[any]any, len(a.m)+1)} for k, v := range a.m { n.m[k] = v } @@ -63,20 +63,19 @@ func (a *Attributes) WithValue(key, value interface{}) *Attributes { // Value returns the value associated with these attributes for key, or nil if // no value is associated with key. The returned value should not be modified. -func (a *Attributes) Value(key interface{}) interface{} { +func (a *Attributes) Value(key any) any { if a == nil { return nil } return a.m[key] } -// Equal returns whether a and o are equivalent. 
If 'Equal(o interface{}) -// bool' is implemented for a value in the attributes, it is called to -// determine if the value matches the one stored in the other attributes. If -// Equal is not implemented, standard equality is used to determine if the two -// values are equal. Note that some types (e.g. maps) aren't comparable by -// default, so they must be wrapped in a struct, or in an alias type, with Equal -// defined. +// Equal returns whether a and o are equivalent. If 'Equal(o any) bool' is +// implemented for a value in the attributes, it is called to determine if the +// value matches the one stored in the other attributes. If Equal is not +// implemented, standard equality is used to determine if the two values are +// equal. Note that some types (e.g. maps) aren't comparable by default, so +// they must be wrapped in a struct, or in an alias type, with Equal defined. func (a *Attributes) Equal(o *Attributes) bool { if a == nil && o == nil { return true @@ -93,7 +92,7 @@ func (a *Attributes) Equal(o *Attributes) bool { // o missing element of a return false } - if eq, ok := v.(interface{ Equal(o interface{}) bool }); ok { + if eq, ok := v.(interface{ Equal(o any) bool }); ok { if !eq.Equal(ov) { return false } @@ -112,19 +111,31 @@ func (a *Attributes) String() string { sb.WriteString("{") first := true for k, v := range a.m { - var key, val string - if str, ok := k.(interface{ String() string }); ok { - key = str.String() - } - if str, ok := v.(interface{ String() string }); ok { - val = str.String() - } if !first { sb.WriteString(", ") } - sb.WriteString(fmt.Sprintf("%q: %q, ", key, val)) + sb.WriteString(fmt.Sprintf("%q: %q ", str(k), str(v))) first = false } sb.WriteString("}") return sb.String() } + +func str(x any) (s string) { + if v, ok := x.(fmt.Stringer); ok { + return fmt.Sprint(v) + } else if v, ok := x.(string); ok { + return v + } + return fmt.Sprintf("<%p>", x) +} + +// MarshalJSON helps implement the json.Marshaler interface, thereby rendering +// the Attributes correctly when printing (via pretty.JSON) structs containing +// Attributes as fields. +// +// Is it impossible to unmarshal attributes from a JSON representation and this +// method is meant only for debugging purposes. +func (a *Attributes) MarshalJSON() ([]byte, error) { + return []byte(a.String()), nil +} diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go index 8f00523c0e..d79560a2e2 100644 --- a/vendor/google.golang.org/grpc/balancer/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -30,6 +30,7 @@ import ( "google.golang.org/grpc/channelz" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal" "google.golang.org/grpc/metadata" "google.golang.org/grpc/resolver" @@ -39,6 +40,8 @@ import ( var ( // m is a map from name to balancer builder. m = make(map[string]Builder) + + logger = grpclog.Component("balancer") ) // Register registers the balancer builder to the balancer map. b.Name @@ -51,6 +54,12 @@ var ( // an init() function), and is not thread-safe. If multiple Balancers are // registered with the same name, the one registered last will take effect. func Register(b Builder) { + if strings.ToLower(b.Name()) != b.Name() { + // TODO: Skip the use of strings.ToLower() to index the map after v1.59 + // is released to switch to case sensitive balancer registry. 
Also, + // remove this warning and update the docstrings for Register and Get. + logger.Warningf("Balancer registered with name %q. grpc-go will be switching to case sensitive balancer registries soon", b.Name()) + } m[strings.ToLower(b.Name())] = b } @@ -70,6 +79,12 @@ func init() { // Note that the compare is done in a case-insensitive fashion. // If no builder is register with the name, nil will be returned. func Get(name string) Builder { + if strings.ToLower(name) != name { + // TODO: Skip the use of strings.ToLower() to index the map after v1.59 + // is released to switch to case sensitive balancer registry. Also, + // remove this warning and update the docstrings for Register and Get. + logger.Warningf("Balancer retrieved for name %q. grpc-go will be switching to case sensitive balancer registries soon", name) + } if b, ok := m[strings.ToLower(name)]; ok { return b } @@ -105,8 +120,8 @@ type SubConn interface { // // This will trigger a state transition for the SubConn. // - // Deprecated: This method is now part of the ClientConn interface and will - // eventually be removed from here. + // Deprecated: this method will be removed. Create new SubConns for new + // addresses instead. UpdateAddresses([]resolver.Address) // Connect starts the connecting for this SubConn. Connect() @@ -115,6 +130,13 @@ type SubConn interface { // creates a new one and returns it. Returns a close function which must // be called when the Producer is no longer needed. GetOrBuildProducer(ProducerBuilder) (p Producer, close func()) + // Shutdown shuts down the SubConn gracefully. Any started RPCs will be + // allowed to complete. No future calls should be made on the SubConn. + // One final state update will be delivered to the StateListener (or + // UpdateSubConnState; deprecated) with ConnectivityState of Shutdown to + // indicate the shutdown operation. This may be delivered before + // in-progress RPCs are complete and the actual connection is closed. + Shutdown() } // NewSubConnOptions contains options to create new SubConn. @@ -129,6 +151,11 @@ type NewSubConnOptions struct { // HealthCheckEnabled indicates whether health check service should be // enabled on this SubConn HealthCheckEnabled bool + // StateListener is called when the state of the subconn changes. If nil, + // Balancer.UpdateSubConnState will be called instead. Will never be + // invoked until after Connect() is called on the SubConn created with + // these options. + StateListener func(SubConnState) } // State contains the balancer's state relevant to the gRPC ClientConn. @@ -150,16 +177,24 @@ type ClientConn interface { // NewSubConn is called by balancer to create a new SubConn. // It doesn't block and wait for the connections to be established. // Behaviors of the SubConn can be controlled by options. + // + // Deprecated: please be aware that in a future version, SubConns will only + // support one address per SubConn. NewSubConn([]resolver.Address, NewSubConnOptions) (SubConn, error) // RemoveSubConn removes the SubConn from ClientConn. // The SubConn will be shutdown. + // + // Deprecated: use SubConn.Shutdown instead. RemoveSubConn(SubConn) // UpdateAddresses updates the addresses used in the passed in SubConn. // gRPC checks if the currently connected address is still in the new list. // If so, the connection will be kept. Else, the connection will be // gracefully closed, and a new connection will be created. // - // This will trigger a state transition for the SubConn. + // This may trigger a state transition for the SubConn. 
+ // + // Deprecated: this method will be removed. Create new SubConns for new + // addresses instead. UpdateAddresses(SubConn, []resolver.Address) // UpdateState notifies gRPC that the balancer's internal state has @@ -250,7 +285,7 @@ type DoneInfo struct { // trailing metadata. // // The only supported type now is *orca_v3.LoadReport. - ServerLoad interface{} + ServerLoad any } var ( @@ -343,9 +378,13 @@ type Balancer interface { ResolverError(error) // UpdateSubConnState is called by gRPC when the state of a SubConn // changes. + // + // Deprecated: Use NewSubConnOptions.StateListener when creating the + // SubConn instead. UpdateSubConnState(SubConn, SubConnState) - // Close closes the balancer. The balancer is not required to call - // ClientConn.RemoveSubConn for its existing SubConns. + // Close closes the balancer. The balancer is not currently required to + // call SubConn.Shutdown for its existing SubConns; however, this will be + // required in a future release, so it is recommended. Close() } @@ -390,15 +429,14 @@ var ErrBadResolverState = errors.New("bad resolver state") type ProducerBuilder interface { // Build creates a Producer. The first parameter is always a // grpc.ClientConnInterface (a type to allow creating RPCs/streams on the - // associated SubConn), but is declared as interface{} to avoid a - // dependency cycle. Should also return a close function that will be - // called when all references to the Producer have been given up. - Build(grpcClientConnInterface interface{}) (p Producer, close func()) + // associated SubConn), but is declared as `any` to avoid a dependency + // cycle. Should also return a close function that will be called when all + // references to the Producer have been given up. + Build(grpcClientConnInterface any) (p Producer, close func()) } // A Producer is a type shared among potentially many consumers. It is // associated with a SubConn, and an implementation will typically contain // other methods to provide additional functionality, e.g. configuration or // subscription registration. -type Producer interface { -} +type Producer any diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go index 3929c26d31..a7f1eeec8e 100644 --- a/vendor/google.golang.org/grpc/balancer/base/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go @@ -105,7 +105,12 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { addrsSet.Set(a, nil) if _, ok := b.subConns.Get(a); !ok { // a is a new address (not existing in b.subConns). - sc, err := b.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{HealthCheckEnabled: b.config.HealthCheck}) + var sc balancer.SubConn + opts := balancer.NewSubConnOptions{ + HealthCheckEnabled: b.config.HealthCheck, + StateListener: func(scs balancer.SubConnState) { b.updateSubConnState(sc, scs) }, + } + sc, err := b.cc.NewSubConn([]resolver.Address{a}, opts) if err != nil { logger.Warningf("base.baseBalancer: failed to create new SubConn: %v", err) continue @@ -121,10 +126,10 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { sc := sci.(balancer.SubConn) // a was removed by resolver. if _, ok := addrsSet.Get(a); !ok { - b.cc.RemoveSubConn(sc) + sc.Shutdown() b.subConns.Delete(a) // Keep the state of this sc in b.scStates until sc's state becomes Shutdown. - // The entry will be deleted in UpdateSubConnState. + // The entry will be deleted in updateSubConnState. 
} } // If resolver state contains no addresses, return an error so ClientConn @@ -177,7 +182,12 @@ func (b *baseBalancer) regeneratePicker() { b.picker = b.pickerBuilder.Build(PickerBuildInfo{ReadySCs: readySCs}) } +// UpdateSubConnState is a nop because a StateListener is always set in NewSubConn. func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + logger.Errorf("base.baseBalancer: UpdateSubConnState(%v, %+v) called unexpectedly", sc, state) +} + +func (b *baseBalancer) updateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { s := state.ConnectivityState if logger.V(2) { logger.Infof("base.baseBalancer: handle SubConn state change: %p, %v", sc, s) @@ -204,8 +214,8 @@ func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Su case connectivity.Idle: sc.Connect() case connectivity.Shutdown: - // When an address was removed by resolver, b called RemoveSubConn but - // kept the sc's state in scStates. Remove state for this sc here. + // When an address was removed by resolver, b called Shutdown but kept + // the sc's state in scStates. Remove state for this sc here. delete(b.scStates, sc) case connectivity.TransientFailure: // Save error to be reported via picker. @@ -226,7 +236,7 @@ func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Su } // Close is a nop because base balancer doesn't have internal state to clean up, -// and it doesn't need to call RemoveSubConn for the SubConns. +// and it doesn't need to call Shutdown for the SubConns. func (b *baseBalancer) Close() { } diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go index f070878bd9..f354530289 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go @@ -19,7 +19,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.31.0 // protoc v4.22.0 // source: grpc/lb/v1/load_balancer.proto diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go index 6d698229a3..86ba65be4c 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go @@ -32,14 +32,18 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" grpclbstate "google.golang.org/grpc/balancer/grpclb/state" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/backoff" + internalgrpclog "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/internal/resolver/dns" "google.golang.org/grpc/resolver" + "google.golang.org/grpc/resolver/manual" durationpb "github.com/golang/protobuf/ptypes/duration" lbpb "google.golang.org/grpc/balancer/grpclb/grpc_lb_v1" @@ -132,7 +136,11 @@ func (b *lbBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) bal // This generates a manual resolver builder with a fixed scheme. This // scheme will be used to dial to remote LB, so we can send filtered // address updates to remote LB ClientConn using this manual resolver. 
- r := &lbManualResolver{scheme: "grpclb-internal", ccb: cc} + mr := manual.NewBuilderWithScheme("grpclb-internal") + // ResolveNow() on this manual resolver is forwarded to the parent + // ClientConn, so when grpclb client loses contact with the remote balancer, + // the parent ClientConn's resolver will re-resolve. + mr.ResolveNowCallback = cc.ResolveNow lb := &lbBalancer{ cc: newLBCacheClientConn(cc), @@ -142,23 +150,24 @@ func (b *lbBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) bal fallbackTimeout: b.fallbackTimeout, doneCh: make(chan struct{}), - manualResolver: r, + manualResolver: mr, subConns: make(map[resolver.Address]balancer.SubConn), scStates: make(map[balancer.SubConn]connectivity.State), - picker: &errPicker{err: balancer.ErrNoSubConnAvailable}, + picker: base.NewErrPicker(balancer.ErrNoSubConnAvailable), clientStats: newRPCStats(), backoff: backoff.DefaultExponential, // TODO: make backoff configurable. } + lb.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[grpclb %p] ", lb)) var err error if opt.CredsBundle != nil { lb.grpclbClientConnCreds, err = opt.CredsBundle.NewWithMode(internal.CredsBundleModeBalancer) if err != nil { - logger.Warningf("lbBalancer: client connection creds NewWithMode failed: %v", err) + lb.logger.Warningf("Failed to create credentials used for connecting to grpclb: %v", err) } lb.grpclbBackendCreds, err = opt.CredsBundle.NewWithMode(internal.CredsBundleModeBackendFromBalancer) if err != nil { - logger.Warningf("lbBalancer: backend creds NewWithMode failed: %v", err) + lb.logger.Warningf("Failed to create credentials used for connecting to backends returned by grpclb: %v", err) } } @@ -170,6 +179,7 @@ type lbBalancer struct { dialTarget string // user's dial target target string // same as dialTarget unless overridden in service config opt balancer.BuildOptions + logger *internalgrpclog.PrefixLogger usePickFirst bool @@ -188,7 +198,7 @@ type lbBalancer struct { // manualResolver is used in the remote LB ClientConn inside grpclb. When // resolved address updates are received by grpclb, filtered updates will be // send to remote LB ClientConn through this resolver. - manualResolver *lbManualResolver + manualResolver *manual.Resolver // The ClientConn to talk to the remote balancer. ccRemoteLB *remoteBalancerCCWrapper // backoff for calling remote balancer. @@ -213,7 +223,7 @@ type lbBalancer struct { backendAddrsWithoutMetadata []resolver.Address // Roundrobin functionalities. state connectivity.State - subConns map[resolver.Address]balancer.SubConn // Used to new/remove SubConn. + subConns map[resolver.Address]balancer.SubConn // Used to new/shutdown SubConn. scStates map[balancer.SubConn]connectivity.State // Used to filter READY SubConns. picker balancer.Picker // Support fallback to resolved backend addresses if there's no response @@ -236,12 +246,12 @@ type lbBalancer struct { // Caller must hold lb.mu. 
func (lb *lbBalancer) regeneratePicker(resetDrop bool) { if lb.state == connectivity.TransientFailure { - lb.picker = &errPicker{err: fmt.Errorf("all SubConns are in TransientFailure, last connection error: %v", lb.connErr)} + lb.picker = base.NewErrPicker(fmt.Errorf("all SubConns are in TransientFailure, last connection error: %v", lb.connErr)) return } if lb.state == connectivity.Connecting { - lb.picker = &errPicker{err: balancer.ErrNoSubConnAvailable} + lb.picker = base.NewErrPicker(balancer.ErrNoSubConnAvailable) return } @@ -268,7 +278,7 @@ func (lb *lbBalancer) regeneratePicker(resetDrop bool) { // // This doesn't seem to be necessary after the connecting check above. // Kept for safety. - lb.picker = &errPicker{err: balancer.ErrNoSubConnAvailable} + lb.picker = base.NewErrPicker(balancer.ErrNoSubConnAvailable) return } if lb.inFallback { @@ -290,7 +300,7 @@ func (lb *lbBalancer) regeneratePicker(resetDrop bool) { // aggregateSubConnStats calculate the aggregated state of SubConns in // lb.SubConns. These SubConns are subconns in use (when switching between // fallback and grpclb). lb.scState contains states for all SubConns, including -// those in cache (SubConns are cached for 10 seconds after remove). +// those in cache (SubConns are cached for 10 seconds after shutdown). // // The aggregated state is: // - If at least one SubConn in Ready, the aggregated state is Ready; @@ -319,18 +329,24 @@ func (lb *lbBalancer) aggregateSubConnStates() connectivity.State { return connectivity.TransientFailure } +// UpdateSubConnState is unused; NewSubConn's options always specifies +// updateSubConnState as the listener. func (lb *lbBalancer) UpdateSubConnState(sc balancer.SubConn, scs balancer.SubConnState) { + lb.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", sc, scs) +} + +func (lb *lbBalancer) updateSubConnState(sc balancer.SubConn, scs balancer.SubConnState) { s := scs.ConnectivityState - if logger.V(2) { - logger.Infof("lbBalancer: handle SubConn state change: %p, %v", sc, s) + if lb.logger.V(2) { + lb.logger.Infof("SubConn state change: %p, %v", sc, s) } lb.mu.Lock() defer lb.mu.Unlock() oldS, ok := lb.scStates[sc] if !ok { - if logger.V(2) { - logger.Infof("lbBalancer: got state changes for an unknown SubConn: %p, %v", sc, s) + if lb.logger.V(2) { + lb.logger.Infof("Received state change for an unknown SubConn: %p, %v", sc, s) } return } @@ -339,8 +355,8 @@ func (lb *lbBalancer) UpdateSubConnState(sc balancer.SubConn, scs balancer.SubCo case connectivity.Idle: sc.Connect() case connectivity.Shutdown: - // When an address was removed by resolver, b called RemoveSubConn but - // kept the sc's state in scStates. Remove state for this sc here. + // When an address was removed by resolver, b called Shutdown but kept + // the sc's state in scStates. Remove state for this sc here. delete(lb.scStates, sc) case connectivity.TransientFailure: lb.connErr = scs.ConnectionError @@ -373,8 +389,13 @@ func (lb *lbBalancer) updateStateAndPicker(forceRegeneratePicker bool, resetDrop if forceRegeneratePicker || (lb.state != oldAggrState) { lb.regeneratePicker(resetDrop) } + var cc balancer.ClientConn = lb.cc + if lb.usePickFirst { + // Bypass the caching layer that would wrap the picker. 
+ cc = lb.cc.ClientConn + } - lb.cc.UpdateState(balancer.State{ConnectivityState: lb.state, Picker: lb.picker}) + cc.UpdateState(balancer.State{ConnectivityState: lb.state, Picker: lb.picker}) } // fallbackToBackendsAfter blocks for fallbackTimeout and falls back to use @@ -430,8 +451,8 @@ func (lb *lbBalancer) handleServiceConfig(gc *grpclbServiceConfig) { if lb.usePickFirst == newUsePickFirst { return } - if logger.V(2) { - logger.Infof("lbBalancer: switching mode, new usePickFirst: %+v", newUsePickFirst) + if lb.logger.V(2) { + lb.logger.Infof("Switching mode. Is pick_first used for backends? %v", newUsePickFirst) } lb.refreshSubConns(lb.backendAddrs, lb.inFallback, newUsePickFirst) } @@ -442,23 +463,15 @@ func (lb *lbBalancer) ResolverError(error) { } func (lb *lbBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error { - if logger.V(2) { - logger.Infof("lbBalancer: UpdateClientConnState: %+v", ccs) + if lb.logger.V(2) { + lb.logger.Infof("UpdateClientConnState: %s", pretty.ToJSON(ccs)) } gc, _ := ccs.BalancerConfig.(*grpclbServiceConfig) lb.handleServiceConfig(gc) - addrs := ccs.ResolverState.Addresses + backendAddrs := ccs.ResolverState.Addresses - var remoteBalancerAddrs, backendAddrs []resolver.Address - for _, a := range addrs { - if a.Type == resolver.GRPCLB { - a.Type = resolver.Backend - remoteBalancerAddrs = append(remoteBalancerAddrs, a) - } else { - backendAddrs = append(backendAddrs, a) - } - } + var remoteBalancerAddrs []resolver.Address if sd := grpclbstate.Get(ccs.ResolverState); sd != nil { // Override any balancer addresses provided via // ccs.ResolverState.Addresses. @@ -479,7 +492,9 @@ func (lb *lbBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error } else if lb.ccRemoteLB == nil { // First time receiving resolved addresses, create a cc to remote // balancers. - lb.newRemoteBalancerCCWrapper() + if err := lb.newRemoteBalancerCCWrapper(); err != nil { + return err + } // Start the fallback goroutine. go lb.fallbackToBackendsAfter(lb.fallbackTimeout) } diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_picker.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_picker.go index 39bc5cc71e..20c5f2ec39 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_picker.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_picker.go @@ -98,15 +98,6 @@ func (s *rpcStats) knownReceived() { atomic.AddInt64(&s.numCallsFinished, 1) } -type errPicker struct { - // Pick always returns this err. - err error -} - -func (p *errPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { - return balancer.PickResult{}, p.err -} - // rrPicker does roundrobin on subConns. It's typically used when there's no // response from remote balancer, and grpclb falls back to the resolved // backends. 
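Note on the address plumbing changed above: grpclb's UpdateClientConnState now takes backend addresses straight from ResolverState.Addresses and reads balancer addresses only from the grpclbstate attribute; the old resolver.GRPCLB address type is no longer consulted (and is removed elsewhere in this diff). As a minimal sketch of what that means for a resolver feeding this balancer, assuming the Set helper and BalancerAddresses field of the google.golang.org/grpc/balancer/grpclb/state package, with the updateFromDNS name and addresses purely illustrative:

package example

import (
	grpclbstate "google.golang.org/grpc/balancer/grpclb/state"
	"google.golang.org/grpc/resolver"
)

// updateFromDNS is a hypothetical resolver helper: backend addresses go into
// the ordinary address list, while balancer (grpclb server) addresses travel
// in the grpclb state attribute that lbBalancer.UpdateClientConnState reads.
func updateFromDNS(cc resolver.ClientConn, backends, balancers []resolver.Address) {
	s := resolver.State{Addresses: backends}
	if len(balancers) > 0 {
		s = grpclbstate.Set(s, &grpclbstate.State{BalancerAddresses: balancers})
	}
	cc.UpdateState(s) // error intentionally ignored in this sketch
}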
diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go index e56006d713..c8fe1edd8e 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go @@ -27,11 +27,8 @@ import ( "time" "github.com/golang/protobuf/proto" - timestamppb "github.com/golang/protobuf/ptypes/timestamp" - "github.com/google/go-cmp/cmp" "google.golang.org/grpc" "google.golang.org/grpc/balancer" - lbpb "google.golang.org/grpc/balancer/grpclb/grpc_lb_v1" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal/backoff" @@ -39,13 +36,28 @@ import ( "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" "google.golang.org/grpc/resolver" + + timestamppb "github.com/golang/protobuf/ptypes/timestamp" + lbpb "google.golang.org/grpc/balancer/grpclb/grpc_lb_v1" ) +func serverListEqual(a, b []*lbpb.Server) bool { + if len(a) != len(b) { + return false + } + for i := 0; i < len(a); i++ { + if !proto.Equal(a[i], b[i]) { + return false + } + } + return true +} + // processServerList updates balancer's internal state, create/remove SubConns // and regenerates picker using the received serverList. func (lb *lbBalancer) processServerList(l *lbpb.ServerList) { - if logger.V(2) { - logger.Infof("lbBalancer: processing server list: %+v", l) + if lb.logger.V(2) { + lb.logger.Infof("Processing server list: %#v", l) } lb.mu.Lock() defer lb.mu.Unlock() @@ -55,9 +67,9 @@ func (lb *lbBalancer) processServerList(l *lbpb.ServerList) { lb.serverListReceived = true // If the new server list == old server list, do nothing. - if cmp.Equal(lb.fullServerList, l.Servers, cmp.Comparer(proto.Equal)) { - if logger.V(2) { - logger.Infof("lbBalancer: new serverlist same as the previous one, ignoring") + if serverListEqual(lb.fullServerList, l.Servers) { + if lb.logger.V(2) { + lb.logger.Infof("Ignoring new server list as it is the same as the previous one") } return } @@ -78,9 +90,8 @@ func (lb *lbBalancer) processServerList(l *lbpb.ServerList) { ipStr = fmt.Sprintf("[%s]", ipStr) } addr := imetadata.Set(resolver.Address{Addr: fmt.Sprintf("%s:%d", ipStr, s.Port)}, md) - if logger.V(2) { - logger.Infof("lbBalancer: server list entry[%d]: ipStr:|%s|, port:|%d|, load balancer token:|%v|", - i, ipStr, s.Port, s.LoadBalanceToken) + if lb.logger.V(2) { + lb.logger.Infof("Server list entry:|%d|, ipStr:|%s|, port:|%d|, load balancer token:|%v|", i, ipStr, s.Port, s.LoadBalanceToken) } backendAddrs = append(backendAddrs, addr) } @@ -113,7 +124,6 @@ func (lb *lbBalancer) refreshSubConns(backendAddrs []resolver.Address, fallback } balancingPolicyChanged := lb.usePickFirst != pickFirst - oldUsePickFirst := lb.usePickFirst lb.usePickFirst = pickFirst if fallbackModeChanged || balancingPolicyChanged { @@ -123,13 +133,7 @@ func (lb *lbBalancer) refreshSubConns(backendAddrs []resolver.Address, fallback // For fallback mode switching with pickfirst, we want to recreate the // SubConn because the creds could be different. for a, sc := range lb.subConns { - if oldUsePickFirst { - // If old SubConn were created for pickfirst, bypass cache and - // remove directly. 
- lb.cc.cc.RemoveSubConn(sc) - } else { - lb.cc.RemoveSubConn(sc) - } + sc.Shutdown() delete(lb.subConns, a) } } @@ -144,18 +148,19 @@ func (lb *lbBalancer) refreshSubConns(backendAddrs []resolver.Address, fallback } if sc != nil { if len(backendAddrs) == 0 { - lb.cc.cc.RemoveSubConn(sc) + sc.Shutdown() delete(lb.subConns, scKey) return } - lb.cc.cc.UpdateAddresses(sc, backendAddrs) + lb.cc.ClientConn.UpdateAddresses(sc, backendAddrs) sc.Connect() return } + opts.StateListener = func(scs balancer.SubConnState) { lb.updateSubConnState(sc, scs) } // This bypasses the cc wrapper with SubConn cache. - sc, err := lb.cc.cc.NewSubConn(backendAddrs, opts) + sc, err := lb.cc.ClientConn.NewSubConn(backendAddrs, opts) if err != nil { - logger.Warningf("grpclb: failed to create new SubConn: %v", err) + lb.logger.Warningf("Failed to create new SubConn: %v", err) return } sc.Connect() @@ -176,9 +181,11 @@ func (lb *lbBalancer) refreshSubConns(backendAddrs []resolver.Address, fallback if _, ok := lb.subConns[addrWithoutAttrs]; !ok { // Use addrWithMD to create the SubConn. + var sc balancer.SubConn + opts.StateListener = func(scs balancer.SubConnState) { lb.updateSubConnState(sc, scs) } sc, err := lb.cc.NewSubConn([]resolver.Address{addr}, opts) if err != nil { - logger.Warningf("grpclb: failed to create new SubConn: %v", err) + lb.logger.Warningf("Failed to create new SubConn: %v", err) continue } lb.subConns[addrWithoutAttrs] = sc // Use the addr without MD as key for the map. @@ -194,7 +201,7 @@ func (lb *lbBalancer) refreshSubConns(backendAddrs []resolver.Address, fallback for a, sc := range lb.subConns { // a was removed by resolver. if _, ok := addrsSet[a]; !ok { - lb.cc.RemoveSubConn(sc) + sc.Shutdown() delete(lb.subConns, a) // Keep the state of this sc in b.scStates until sc's state becomes Shutdown. // The entry will be deleted in UpdateSubConnState. @@ -221,7 +228,7 @@ type remoteBalancerCCWrapper struct { wg sync.WaitGroup } -func (lb *lbBalancer) newRemoteBalancerCCWrapper() { +func (lb *lbBalancer) newRemoteBalancerCCWrapper() error { var dopts []grpc.DialOption if creds := lb.opt.DialCreds; creds != nil { dopts = append(dopts, grpc.WithTransportCredentials(creds)) @@ -252,9 +259,10 @@ func (lb *lbBalancer) newRemoteBalancerCCWrapper() { // // The grpclb server addresses will set field ServerName, and creds will // receive ServerName as authority. - cc, err := grpc.DialContext(context.Background(), lb.manualResolver.Scheme()+":///grpclb.subClientConn", dopts...) + target := lb.manualResolver.Scheme() + ":///grpclb.subClientConn" + cc, err := grpc.Dial(target, dopts...) if err != nil { - logger.Fatalf("failed to dial: %v", err) + return fmt.Errorf("grpc.Dial(%s): %v", target, err) } ccw := &remoteBalancerCCWrapper{ cc: cc, @@ -265,6 +273,7 @@ func (lb *lbBalancer) newRemoteBalancerCCWrapper() { lb.ccRemoteLB = ccw ccw.wg.Add(1) go ccw.watchRemoteBalancer() + return nil } // close closed the ClientConn to remote balancer, and waits until all @@ -412,14 +421,14 @@ func (ccw *remoteBalancerCCWrapper) watchRemoteBalancer() { default: if err != nil { if err == errServerTerminatedConnection { - logger.Info(err) + ccw.lb.logger.Infof("Call to remote balancer failed: %v", err) } else { - logger.Warning(err) + ccw.lb.logger.Warningf("Call to remote balancer failed: %v", err) } } } // Trigger a re-resolve when the stream errors. 
- ccw.lb.cc.cc.ResolveNow(resolver.ResolveNowOptions{}) + ccw.lb.cc.ClientConn.ResolveNow(resolver.ResolveNowOptions{}) ccw.lb.mu.Lock() ccw.lb.remoteBalancerConnected = false diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_util.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_util.go index 373f04b98d..c0f762c0c0 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_util.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_util.go @@ -27,75 +27,15 @@ import ( "google.golang.org/grpc/resolver" ) -// The parent ClientConn should re-resolve when grpclb loses connection to the -// remote balancer. When the ClientConn inside grpclb gets a TransientFailure, -// it calls lbManualResolver.ResolveNow(), which calls parent ClientConn's -// ResolveNow, and eventually results in re-resolve happening in parent -// ClientConn's resolver (DNS for example). -// -// parent -// ClientConn -// +-----------------------------------------------------------------+ -// | parent +---------------------------------+ | -// | DNS ClientConn | grpclb | | -// | resolver balancerWrapper | | | -// | + + | grpclb grpclb | | -// | | | | ManualResolver ClientConn | | -// | | | | + + | | -// | | | | | | Transient | | -// | | | | | | Failure | | -// | | | | | <--------- | | | -// | | | <--------------- | ResolveNow | | | -// | | <--------- | ResolveNow | | | | | -// | | ResolveNow | | | | | | -// | | | | | | | | -// | + + | + + | | -// | +---------------------------------+ | -// +-----------------------------------------------------------------+ - -// lbManualResolver is used by the ClientConn inside grpclb. It's a manual -// resolver with a special ResolveNow() function. -// -// When ResolveNow() is called, it calls ResolveNow() on the parent ClientConn, -// so when grpclb client lose contact with remote balancers, the parent -// ClientConn's resolver will re-resolve. -type lbManualResolver struct { - scheme string - ccr resolver.ClientConn - - ccb balancer.ClientConn -} - -func (r *lbManualResolver) Build(_ resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) { - r.ccr = cc - return r, nil -} - -func (r *lbManualResolver) Scheme() string { - return r.scheme -} - -// ResolveNow calls resolveNow on the parent ClientConn. -func (r *lbManualResolver) ResolveNow(o resolver.ResolveNowOptions) { - r.ccb.ResolveNow(o) -} - -// Close is a noop for Resolver. -func (*lbManualResolver) Close() {} - -// UpdateState calls cc.UpdateState. -func (r *lbManualResolver) UpdateState(s resolver.State) { - r.ccr.UpdateState(s) -} - const subConnCacheTime = time.Second * 10 // lbCacheClientConn is a wrapper balancer.ClientConn with a SubConn cache. -// SubConns will be kept in cache for subConnCacheTime before being removed. +// SubConns will be kept in cache for subConnCacheTime before being shut down. // -// Its new and remove methods are updated to do cache first. +// Its NewSubconn and SubConn.Shutdown methods are updated to do cache first. 
type lbCacheClientConn struct { - cc balancer.ClientConn + balancer.ClientConn + timeout time.Duration mu sync.Mutex @@ -113,7 +53,7 @@ type subConnCacheEntry struct { func newLBCacheClientConn(cc balancer.ClientConn) *lbCacheClientConn { return &lbCacheClientConn{ - cc: cc, + ClientConn: cc, timeout: subConnCacheTime, subConnCache: make(map[resolver.Address]*subConnCacheEntry), subConnToAddr: make(map[balancer.SubConn]resolver.Address), @@ -137,16 +77,27 @@ func (ccc *lbCacheClientConn) NewSubConn(addrs []resolver.Address, opts balancer return entry.sc, nil } - scNew, err := ccc.cc.NewSubConn(addrs, opts) + scNew, err := ccc.ClientConn.NewSubConn(addrs, opts) if err != nil { return nil, err } + scNew = &lbCacheSubConn{SubConn: scNew, ccc: ccc} ccc.subConnToAddr[scNew] = addrWithoutAttrs return scNew, nil } func (ccc *lbCacheClientConn) RemoveSubConn(sc balancer.SubConn) { + logger.Errorf("RemoveSubConn(%v) called unexpectedly", sc) +} + +type lbCacheSubConn struct { + balancer.SubConn + ccc *lbCacheClientConn +} + +func (sc *lbCacheSubConn) Shutdown() { + ccc := sc.ccc ccc.mu.Lock() defer ccc.mu.Unlock() addr, ok := ccc.subConnToAddr[sc] @@ -156,11 +107,11 @@ func (ccc *lbCacheClientConn) RemoveSubConn(sc balancer.SubConn) { if entry, ok := ccc.subConnCache[addr]; ok { if entry.sc != sc { - // This could happen if NewSubConn was called multiple times for the - // same address, and those SubConns are all removed. We remove sc - // immediately here. + // This could happen if NewSubConn was called multiple times for + // the same address, and those SubConns are all shut down. We + // remove sc immediately here. delete(ccc.subConnToAddr, sc) - ccc.cc.RemoveSubConn(sc) + sc.SubConn.Shutdown() } return } @@ -176,7 +127,7 @@ func (ccc *lbCacheClientConn) RemoveSubConn(sc balancer.SubConn) { if entry.abortDeleting { return } - ccc.cc.RemoveSubConn(sc) + sc.SubConn.Shutdown() delete(ccc.subConnToAddr, sc) delete(ccc.subConnCache, addr) }) @@ -195,14 +146,28 @@ func (ccc *lbCacheClientConn) RemoveSubConn(sc balancer.SubConn) { } func (ccc *lbCacheClientConn) UpdateState(s balancer.State) { - ccc.cc.UpdateState(s) + s.Picker = &lbCachePicker{Picker: s.Picker} + ccc.ClientConn.UpdateState(s) } func (ccc *lbCacheClientConn) close() { ccc.mu.Lock() - // Only cancel all existing timers. There's no need to remove SubConns. + defer ccc.mu.Unlock() + // Only cancel all existing timers. There's no need to shut down SubConns. for _, entry := range ccc.subConnCache { entry.cancel() } - ccc.mu.Unlock() +} + +type lbCachePicker struct { + balancer.Picker +} + +func (cp *lbCachePicker) Pick(i balancer.PickInfo) (balancer.PickResult, error) { + res, err := cp.Picker.Pick(i) + if err != nil { + return res, err + } + res.SubConn = res.SubConn.(*lbCacheSubConn).SubConn + return res, nil } diff --git a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go index 04b9ad4116..a4411c22bf 100644 --- a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go +++ b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go @@ -99,20 +99,6 @@ func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnStat // lock held. But the lock guards only the scheduling part. The actual // callback is called asynchronously without the lock being held. 
ok := ccb.serializer.Schedule(func(_ context.Context) { - // If the addresses specified in the update contain addresses of type - // "grpclb" and the selected LB policy is not "grpclb", these addresses - // will be filtered out and ccs will be modified with the updated - // address list. - if ccb.curBalancerName != grpclbName { - var addrs []resolver.Address - for _, addr := range ccs.ResolverState.Addresses { - if addr.Type == resolver.GRPCLB { - continue - } - addrs = append(addrs, addr) - } - ccs.ResolverState.Addresses = addrs - } errCh <- ccb.balancer.UpdateClientConnState(*ccs) }) if !ok { @@ -139,7 +125,9 @@ func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnStat func (ccb *ccBalancerWrapper) updateSubConnState(sc balancer.SubConn, s connectivity.State, err error) { ccb.mu.Lock() ccb.serializer.Schedule(func(_ context.Context) { - ccb.balancer.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: s, ConnectionError: err}) + // Even though it is optional for balancers, gracefulswitch ensures + // opts.StateListener is set, so this cannot ever be nil. + sc.(*acBalancerWrapper).stateListener(balancer.SubConnState{ConnectivityState: s, ConnectionError: err}) }) ccb.mu.Unlock() } @@ -221,7 +209,7 @@ func (ccb *ccBalancerWrapper) closeBalancer(m ccbMode) { } ccb.mode = m - done := ccb.serializer.Done + done := ccb.serializer.Done() b := ccb.balancer ok := ccb.serializer.Schedule(func(_ context.Context) { // Close the serializer to ensure that no more calls from gRPC are sent @@ -238,11 +226,9 @@ func (ccb *ccBalancerWrapper) closeBalancer(m ccbMode) { } ccb.mu.Unlock() - // Give enqueued callbacks a chance to finish. + // Give enqueued callbacks a chance to finish before closing the balancer. <-done - // Spawn a goroutine to close the balancer (since it may block trying to - // cleanup all allocated resources) and return early. - go b.Close() + b.Close() } // exitIdleMode is invoked by grpc when the channel exits idle mode either @@ -314,29 +300,19 @@ func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer channelz.Warningf(logger, ccb.cc.channelzID, "acBalancerWrapper: NewSubConn: failed to newAddrConn: %v", err) return nil, err } - acbw := &acBalancerWrapper{ac: ac, producers: make(map[balancer.ProducerBuilder]*refCountedProducer)} + acbw := &acBalancerWrapper{ + ccb: ccb, + ac: ac, + producers: make(map[balancer.ProducerBuilder]*refCountedProducer), + stateListener: opts.StateListener, + } ac.acbw = acbw return acbw, nil } func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) { - if ccb.isIdleOrClosed() { - // It it safe to ignore this call when the balancer is closed or in idle - // because the ClientConn takes care of closing the connections. - // - // Not returning early from here when the balancer is closed or in idle - // leads to a deadlock though, because of the following sequence of - // calls when holding cc.mu: - // cc.exitIdleMode --> ccb.enterIdleMode --> gsw.Close --> - // ccb.RemoveAddrConn --> cc.removeAddrConn - return - } - - acbw, ok := sc.(*acBalancerWrapper) - if !ok { - return - } - ccb.cc.removeAddrConn(acbw.ac, errConnDrain) + // The graceful switch balancer will never call this. + logger.Errorf("ccb RemoveSubConn(%v) called unexpectedly, sc") } func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { @@ -380,7 +356,9 @@ func (ccb *ccBalancerWrapper) Target() string { // acBalancerWrapper is a wrapper on top of ac for balancers. 
// It implements balancer.SubConn interface. type acBalancerWrapper struct { - ac *addrConn // read-only + ac *addrConn // read-only + ccb *ccBalancerWrapper // read-only + stateListener func(balancer.SubConnState) mu sync.Mutex producers map[balancer.ProducerBuilder]*refCountedProducer @@ -398,6 +376,23 @@ func (acbw *acBalancerWrapper) Connect() { go acbw.ac.connect() } +func (acbw *acBalancerWrapper) Shutdown() { + ccb := acbw.ccb + if ccb.isIdleOrClosed() { + // It it safe to ignore this call when the balancer is closed or in idle + // because the ClientConn takes care of closing the connections. + // + // Not returning early from here when the balancer is closed or in idle + // leads to a deadlock though, because of the following sequence of + // calls when holding cc.mu: + // cc.exitIdleMode --> ccb.enterIdleMode --> gsw.Close --> + // ccb.RemoveAddrConn --> cc.removeAddrConn + return + } + + ccb.cc.removeAddrConn(acbw.ac, errConnDrain) +} + // NewStream begins a streaming RPC on the addrConn. If the addrConn is not // ready, blocks until it is or ctx expires. Returns an error when the context // expires or the addrConn is shut down. @@ -411,7 +406,7 @@ func (acbw *acBalancerWrapper) NewStream(ctx context.Context, desc *StreamDesc, // Invoke performs a unary RPC. If the addrConn is not ready, returns // errSubConnNotReady. -func (acbw *acBalancerWrapper) Invoke(ctx context.Context, method string, args interface{}, reply interface{}, opts ...CallOption) error { +func (acbw *acBalancerWrapper) Invoke(ctx context.Context, method string, args any, reply any, opts ...CallOption) error { cs, err := acbw.NewStream(ctx, unaryStreamDesc, method, opts...) if err != nil { return err diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go index ec2c2fa14d..5954801122 100644 --- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go +++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -18,7 +18,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.31.0 // protoc v4.22.0 // source: grpc/binlog/v1/binarylog.proto diff --git a/vendor/google.golang.org/grpc/call.go b/vendor/google.golang.org/grpc/call.go index e6a1dc5d75..788c89c16f 100644 --- a/vendor/google.golang.org/grpc/call.go +++ b/vendor/google.golang.org/grpc/call.go @@ -26,12 +26,7 @@ import ( // received. This is typically called by generated code. // // All errors returned by Invoke are compatible with the status package. -func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply interface{}, opts ...CallOption) error { - if err := cc.idlenessMgr.onCallBegin(); err != nil { - return err - } - defer cc.idlenessMgr.onCallEnd() - +func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply any, opts ...CallOption) error { // allow interceptor to see all applicable call options, which means those // configured as defaults from dial option as well as per-call options opts = combine(cc.dopts.callOptions, opts) @@ -61,13 +56,13 @@ func combine(o1 []CallOption, o2 []CallOption) []CallOption { // received. This is typically called by generated code. // // DEPRECATED: Use ClientConn.Invoke instead. 
-func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) error { +func Invoke(ctx context.Context, method string, args, reply any, cc *ClientConn, opts ...CallOption) error { return cc.Invoke(ctx, method, args, reply, opts...) } var unaryStreamDesc = &StreamDesc{ServerStreams: false, ClientStreams: false} -func invoke(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error { +func invoke(ctx context.Context, method string, req, reply any, cc *ClientConn, opts ...CallOption) error { cs, err := newClientStream(ctx, unaryStreamDesc, cc, method, opts...) if err != nil { return err diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index 95a7459b02..429c389e47 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -34,9 +34,12 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/idle" + "google.golang.org/grpc/internal/pretty" iresolver "google.golang.org/grpc/internal/resolver" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" @@ -53,8 +56,6 @@ import ( const ( // minimum time to give a connection to complete minConnectTimeout = 20 * time.Second - // must match grpclbName in grpclb/grpclb.go - grpclbName = "grpclb" ) var ( @@ -137,7 +138,6 @@ func (dcs *defaultConfigSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*ires func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) { cc := &ClientConn{ target: target, - csMgr: &connectivityStateManager{}, conns: make(map[*addrConn]struct{}), dopts: defaultDialOptions(), czData: new(channelzData), @@ -190,6 +190,8 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * // Register ClientConn with channelz. cc.channelzRegistration(target) + cc.csMgr = newConnectivityStateManager(cc.ctx, cc.channelzID) + if err := cc.validateTransportCredentials(); err != nil { return nil, err } @@ -265,7 +267,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * // Configure idleness support with configured idle timeout or default idle // timeout duration. Idleness can be explicitly disabled by the user, by // setting the dial option to 0. - cc.idlenessMgr = newIdlenessManager(cc, cc.dopts.idleTimeout) + cc.idlenessMgr = idle.NewManager(idle.ManagerOptions{Enforcer: (*idler)(cc), Timeout: cc.dopts.idleTimeout, Logger: logger}) // Return early for non-blocking dials. if !cc.dopts.block { @@ -316,6 +318,16 @@ func (cc *ClientConn) addTraceEvent(msg string) { channelz.AddTraceEvent(logger, cc.channelzID, 0, ted) } +type idler ClientConn + +func (i *idler) EnterIdleMode() error { + return (*ClientConn)(i).enterIdleMode() +} + +func (i *idler) ExitIdleMode() error { + return (*ClientConn)(i).exitIdleMode() +} + // exitIdleMode moves the channel out of idle mode by recreating the name // resolver and load balancer. 
func (cc *ClientConn) exitIdleMode() error { @@ -325,8 +337,8 @@ func (cc *ClientConn) exitIdleMode() error { return errConnClosing } if cc.idlenessState != ccIdlenessStateIdle { + channelz.Infof(logger, cc.channelzID, "ClientConn asked to exit idle mode, current mode is %v", cc.idlenessState) cc.mu.Unlock() - logger.Info("ClientConn asked to exit idle mode when not in idle mode") return nil } @@ -349,7 +361,7 @@ func (cc *ClientConn) exitIdleMode() error { cc.idlenessState = ccIdlenessStateExitingIdle exitedIdle := false if cc.blockingpicker == nil { - cc.blockingpicker = newPickerWrapper() + cc.blockingpicker = newPickerWrapper(cc.dopts.copts.StatsHandlers) } else { cc.blockingpicker.exitIdleMode() exitedIdle = true @@ -392,12 +404,13 @@ func (cc *ClientConn) exitIdleMode() error { // name resolver, load balancer and any subchannels. func (cc *ClientConn) enterIdleMode() error { cc.mu.Lock() + defer cc.mu.Unlock() + if cc.conns == nil { - cc.mu.Unlock() return ErrClientConnClosing } if cc.idlenessState != ccIdlenessStateActive { - logger.Error("ClientConn asked to enter idle mode when not active") + channelz.Warningf(logger, cc.channelzID, "ClientConn asked to enter idle mode, current mode is %v", cc.idlenessState) return nil } @@ -418,14 +431,14 @@ func (cc *ClientConn) enterIdleMode() error { cc.balancerWrapper.enterIdleMode() cc.csMgr.updateState(connectivity.Idle) cc.idlenessState = ccIdlenessStateIdle - cc.mu.Unlock() + cc.addTraceEvent("entering idle mode") go func() { - cc.addTraceEvent("entering idle mode") for ac := range conns { ac.tearDown(errConnIdling) } }() + return nil } @@ -474,7 +487,6 @@ func (cc *ClientConn) validateTransportCredentials() error { func (cc *ClientConn) channelzRegistration(target string) { cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target) cc.addTraceEvent("created") - cc.csMgr.channelzID = cc.channelzID } // chainUnaryClientInterceptors chains all unary client interceptors into one. @@ -491,7 +503,7 @@ func chainUnaryClientInterceptors(cc *ClientConn) { } else if len(interceptors) == 1 { chainedInt = interceptors[0] } else { - chainedInt = func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error { + chainedInt = func(ctx context.Context, method string, req, reply any, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error { return interceptors[0](ctx, method, req, reply, cc, getChainUnaryInvoker(interceptors, 0, invoker), opts...) } } @@ -503,7 +515,7 @@ func getChainUnaryInvoker(interceptors []UnaryClientInterceptor, curr int, final if curr == len(interceptors)-1 { return finalInvoker } - return func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error { + return func(ctx context.Context, method string, req, reply any, cc *ClientConn, opts ...CallOption) error { return interceptors[curr+1](ctx, method, req, reply, cc, getChainUnaryInvoker(interceptors, curr+1, finalInvoker), opts...) } } @@ -539,13 +551,27 @@ func getChainStreamer(interceptors []StreamClientInterceptor, curr int, finalStr } } +// newConnectivityStateManager creates an connectivityStateManager with +// the specified id. +func newConnectivityStateManager(ctx context.Context, id *channelz.Identifier) *connectivityStateManager { + return &connectivityStateManager{ + channelzID: id, + pubSub: grpcsync.NewPubSub(ctx), + } +} + // connectivityStateManager keeps the connectivity.State of ClientConn. 
// This struct will eventually be exported so the balancers can access it. +// +// TODO: If possible, get rid of the `connectivityStateManager` type, and +// provide this functionality using the `PubSub`, to avoid keeping track of +// the connectivity state at two places. type connectivityStateManager struct { mu sync.Mutex state connectivity.State notifyChan chan struct{} channelzID *channelz.Identifier + pubSub *grpcsync.PubSub } // updateState updates the connectivity.State of ClientConn. @@ -561,6 +587,8 @@ func (csm *connectivityStateManager) updateState(state connectivity.State) { return } csm.state = state + csm.pubSub.Publish(state) + channelz.Infof(logger, csm.channelzID, "Channel Connectivity change to %v", state) if csm.notifyChan != nil { // There are other goroutines waiting on this channel. @@ -590,7 +618,7 @@ func (csm *connectivityStateManager) getNotifyChan() <-chan struct{} { type ClientConnInterface interface { // Invoke performs a unary RPC and returns after the response is received // into reply. - Invoke(ctx context.Context, method string, args interface{}, reply interface{}, opts ...CallOption) error + Invoke(ctx context.Context, method string, args any, reply any, opts ...CallOption) error // NewStream begins a streaming RPC. NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) } @@ -622,7 +650,7 @@ type ClientConn struct { channelzID *channelz.Identifier // Channelz identifier for the channel. resolverBuilder resolver.Builder // See parseTargetAndFindResolver(). balancerWrapper *ccBalancerWrapper // Uses gracefulswitch.balancer underneath. - idlenessMgr idlenessManager + idlenessMgr idle.Manager // The following provide their own synchronization, and therefore don't // require cc.mu to be held to access them. @@ -668,6 +696,19 @@ const ( ccIdlenessStateExitingIdle ) +func (s ccIdlenessState) String() string { + switch s { + case ccIdlenessStateActive: + return "active" + case ccIdlenessStateIdle: + return "idle" + case ccIdlenessStateExitingIdle: + return "exitingIdle" + default: + return "unknown" + } +} + // WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or // ctx expires. A true value is returned in former case and false in latter. // @@ -759,6 +800,16 @@ func init() { panic(fmt.Sprintf("impossible error parsing empty service config: %v", cfg.Err)) } emptyServiceConfig = cfg.Config.(*ServiceConfig) + + internal.SubscribeToConnectivityStateChanges = func(cc *ClientConn, s grpcsync.Subscriber) func() { + return cc.csMgr.pubSub.Subscribe(s) + } + internal.EnterIdleModeForTesting = func(cc *ClientConn) error { + return cc.enterIdleMode() + } + internal.ExitIdleModeForTesting = func(cc *ClientConn) error { + return cc.exitIdleMode() + } } func (cc *ClientConn) maybeApplyDefaultServiceConfig(addrs []resolver.Address) { @@ -867,6 +918,20 @@ func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivi cc.balancerWrapper.updateSubConnState(sc, s, err) } +// Makes a copy of the input addresses slice and clears out the balancer +// attributes field. Addresses are passed during subconn creation and address +// update operations. In both cases, we will clear the balancer attributes by +// calling this function, and therefore we will be able to use the Equal method +// provided by the resolver.Address type for comparison. 
+func copyAddressesWithoutBalancerAttributes(in []resolver.Address) []resolver.Address { + out := make([]resolver.Address, len(in)) + for i := range in { + out[i] = in[i] + out[i].BalancerAttributes = nil + } + return out +} + // newAddrConn creates an addrConn for addrs and adds it to cc.conns. // // Caller needs to make sure len(addrs) > 0. @@ -874,7 +939,7 @@ func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSub ac := &addrConn{ state: connectivity.Idle, cc: cc, - addrs: addrs, + addrs: copyAddressesWithoutBalancerAttributes(addrs), scopts: opts, dopts: cc.dopts, czData: new(channelzData), @@ -995,8 +1060,9 @@ func equalAddresses(a, b []resolver.Address) bool { // connections or connection attempts. func (ac *addrConn) updateAddrs(addrs []resolver.Address) { ac.mu.Lock() - channelz.Infof(logger, ac.channelzID, "addrConn: updateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs) + channelz.Infof(logger, ac.channelzID, "addrConn: updateAddrs curAddr: %v, addrs: %v", pretty.ToJSON(ac.curAddr), pretty.ToJSON(addrs)) + addrs = copyAddressesWithoutBalancerAttributes(addrs) if equalAddresses(ac.addrs, addrs) { ac.mu.Unlock() return @@ -1031,8 +1097,8 @@ func (ac *addrConn) updateAddrs(addrs []resolver.Address) { ac.cancel() ac.ctx, ac.cancel = context.WithCancel(ac.cc.ctx) - // We have to defer here because GracefulClose => Close => onClose, which - // requires locking ac.mu. + // We have to defer here because GracefulClose => onClose, which requires + // locking ac.mu. if ac.transport != nil { defer ac.transport.GracefulClose() ac.transport = nil @@ -1137,23 +1203,13 @@ func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSel } var newBalancerName string - if cc.sc != nil && cc.sc.lbConfig != nil { + if cc.sc == nil || (cc.sc.lbConfig == nil && cc.sc.LB == nil) { + // No service config or no LB policy specified in config. + newBalancerName = PickFirstBalancerName + } else if cc.sc.lbConfig != nil { newBalancerName = cc.sc.lbConfig.name - } else { - var isGRPCLB bool - for _, a := range addrs { - if a.Type == resolver.GRPCLB { - isGRPCLB = true - break - } - } - if isGRPCLB { - newBalancerName = grpclbName - } else if cc.sc != nil && cc.sc.LB != nil { - newBalancerName = *cc.sc.LB - } else { - newBalancerName = PickFirstBalancerName - } + } else { // cc.sc.LB != nil + newBalancerName = *cc.sc.LB } cc.balancerWrapper.switchTo(newBalancerName) } @@ -1192,7 +1248,10 @@ func (cc *ClientConn) ResetConnectBackoff() { // Close tears down the ClientConn and all underlying connections. func (cc *ClientConn) Close() error { - defer cc.cancel() + defer func() { + cc.cancel() + <-cc.csMgr.pubSub.Done() + }() cc.mu.Lock() if cc.conns == nil { @@ -1226,7 +1285,7 @@ func (cc *ClientConn) Close() error { rWrapper.close() } if idlenessMgr != nil { - idlenessMgr.close() + idlenessMgr.Close() } for ac := range conns { @@ -1336,12 +1395,14 @@ func (ac *addrConn) resetTransport() { if err := ac.tryAllAddrs(acCtx, addrs, connectDeadline); err != nil { ac.cc.resolveNow(resolver.ResolveNowOptions{}) - // After exhausting all addresses, the addrConn enters - // TRANSIENT_FAILURE. + ac.mu.Lock() if acCtx.Err() != nil { + // addrConn was torn down. + ac.mu.Unlock() return } - ac.mu.Lock() + // After exhausting all addresses, the addrConn enters + // TRANSIENT_FAILURE. ac.updateConnectivityState(connectivity.TransientFailure, err) // Backoff. @@ -1537,7 +1598,7 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) { // Set up the health check helper functions. 
currentTr := ac.transport - newStream := func(method string) (interface{}, error) { + newStream := func(method string) (any, error) { ac.mu.Lock() if ac.transport != currentTr { ac.mu.Unlock() @@ -1625,16 +1686,7 @@ func (ac *addrConn) tearDown(err error) { ac.updateConnectivityState(connectivity.Shutdown, nil) ac.cancel() ac.curAddr = resolver.Address{} - if err == errConnDrain && curTr != nil { - // GracefulClose(...) may be executed multiple times when - // i) receiving multiple GoAway frames from the server; or - // ii) there are concurrent name resolver/Balancer triggered - // address removal and GoAway. - // We have to unlock and re-lock here because GracefulClose => Close => onClose, which requires locking ac.mu. - ac.mu.Unlock() - curTr.GracefulClose() - ac.mu.Lock() - } + channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ Desc: "Subchannel deleted", Severity: channelz.CtInfo, @@ -1648,6 +1700,29 @@ func (ac *addrConn) tearDown(err error) { // being deleted right away. channelz.RemoveEntry(ac.channelzID) ac.mu.Unlock() + + // We have to release the lock before the call to GracefulClose/Close here + // because both of them call onClose(), which requires locking ac.mu. + if curTr != nil { + if err == errConnDrain { + // Close the transport gracefully when the subConn is being shutdown. + // + // GracefulClose() may be executed multiple times if: + // - multiple GoAway frames are received from the server + // - there are concurrent name resolver or balancer triggered + // address removal and GoAway + curTr.GracefulClose() + } else { + // Hard close the transport when the channel is entering idle or is + // being shutdown. In the case where the channel is being shutdown, + // closing of transports is also taken care of by cancelation of cc.ctx. + // But in the case where the channel is entering idle, we need to + // explicitly close the transports here. Instead of distinguishing + // between these two cases, it is simpler to close the transport + // unconditionally here. + curTr.Close(err) + } + } } func (ac *addrConn) getState() connectivity.State { @@ -1807,19 +1882,70 @@ func (cc *ClientConn) parseTargetAndFindResolver() error { } // parseTarget uses RFC 3986 semantics to parse the given target into a -// resolver.Target struct containing scheme, authority and url. Query -// params are stripped from the endpoint. +// resolver.Target struct containing url. Query params are stripped from the +// endpoint. func parseTarget(target string) (resolver.Target, error) { u, err := url.Parse(target) if err != nil { return resolver.Target{}, err } - return resolver.Target{ - Scheme: u.Scheme, - Authority: u.Host, - URL: *u, - }, nil + return resolver.Target{URL: *u}, nil +} + +func encodeAuthority(authority string) string { + const upperhex = "0123456789ABCDEF" + + // Return for characters that must be escaped as per + // Valid chars are mentioned here: + // https://datatracker.ietf.org/doc/html/rfc3986#section-3.2 + shouldEscape := func(c byte) bool { + // Alphanum are always allowed. + if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' { + return false + } + switch c { + case '-', '_', '.', '~': // Unreserved characters + return false + case '!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=': // Subdelim characters + return false + case ':', '[', ']', '@': // Authority related delimeters + return false + } + // Everything else must be escaped. 
+ return true + } + + hexCount := 0 + for i := 0; i < len(authority); i++ { + c := authority[i] + if shouldEscape(c) { + hexCount++ + } + } + + if hexCount == 0 { + return authority + } + + required := len(authority) + 2*hexCount + t := make([]byte, required) + + j := 0 + // This logic is a barebones version of escape in the go net/url library. + for i := 0; i < len(authority); i++ { + switch c := authority[i]; { + case shouldEscape(c): + t[j] = '%' + t[j+1] = upperhex[c>>4] + t[j+2] = upperhex[c&15] + j += 3 + default: + t[j] = authority[i] + j++ + } + } + return string(t) } // Determine channel authority. The order of precedence is as follows: @@ -1872,7 +1998,11 @@ func (cc *ClientConn) determineAuthority() error { // the channel authority given the user's dial target. For resolvers // which don't implement this interface, we will use the endpoint from // "scheme://authority/endpoint" as the default authority. - cc.authority = endpoint + // Escape the endpoint to handle use cases where the endpoint + // might not be a valid authority by default. + // For example an endpoint which has multiple paths like + // 'a/b/c', which is not a valid authority by default. + cc.authority = encodeAuthority(endpoint) } channelz.Infof(logger, cc.channelzID, "Channel authority set to %q", cc.authority) return nil diff --git a/vendor/google.golang.org/grpc/codec.go b/vendor/google.golang.org/grpc/codec.go index 1297765478..411e3dfd47 100644 --- a/vendor/google.golang.org/grpc/codec.go +++ b/vendor/google.golang.org/grpc/codec.go @@ -27,8 +27,8 @@ import ( // omits the name/string, which vary between the two and are not needed for // anything besides the registry in the encoding package. type baseCodec interface { - Marshal(v interface{}) ([]byte, error) - Unmarshal(data []byte, v interface{}) error + Marshal(v any) ([]byte, error) + Unmarshal(data []byte, v any) error } var _ baseCodec = Codec(nil) @@ -41,9 +41,9 @@ var _ baseCodec = encoding.Codec(nil) // Deprecated: use encoding.Codec instead. type Codec interface { // Marshal returns the wire format of v. - Marshal(v interface{}) ([]byte, error) + Marshal(v any) ([]byte, error) // Unmarshal parses the wire format into v. - Unmarshal(data []byte, v interface{}) error + Unmarshal(data []byte, v any) error // String returns the name of the Codec implementation. This is unused by // gRPC. String() string diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go b/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go index 150ae55767..0854e7af65 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go @@ -25,8 +25,8 @@ import ( "fmt" "io" "net" - "sync" + "golang.org/x/sync/semaphore" grpc "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" @@ -35,15 +35,13 @@ import ( "google.golang.org/grpc/credentials/alts/internal/conn" altsgrpc "google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp" altspb "google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp" + "google.golang.org/grpc/internal/envconfig" ) const ( // The maximum byte size of receive frames. frameLimit = 64 * 1024 // 64 KB rekeyRecordProtocolName = "ALTSRP_GCM_AES128_REKEY" - // maxPendingHandshakes represents the maximum number of concurrent - // handshakes. 
- maxPendingHandshakes = 100 ) var ( @@ -59,9 +57,9 @@ var ( return conn.NewAES128GCMRekey(s, keyData) }, } - // control number of concurrent created (but not closed) handshakers. - mu sync.Mutex - concurrentHandshakes = int64(0) + // control number of concurrent created (but not closed) handshakes. + clientHandshakes = semaphore.NewWeighted(int64(envconfig.ALTSMaxConcurrentHandshakes)) + serverHandshakes = semaphore.NewWeighted(int64(envconfig.ALTSMaxConcurrentHandshakes)) // errDropped occurs when maxPendingHandshakes is reached. errDropped = errors.New("maximum number of concurrent ALTS handshakes is reached") // errOutOfBound occurs when the handshake service returns a consumed @@ -77,30 +75,6 @@ func init() { } } -func acquire() bool { - mu.Lock() - // If we need n to be configurable, we can pass it as an argument. - n := int64(1) - success := maxPendingHandshakes-concurrentHandshakes >= n - if success { - concurrentHandshakes += n - } - mu.Unlock() - return success -} - -func release() { - mu.Lock() - // If we need n to be configurable, we can pass it as an argument. - n := int64(1) - concurrentHandshakes -= n - if concurrentHandshakes < 0 { - mu.Unlock() - panic("bad release") - } - mu.Unlock() -} - // ClientHandshakerOptions contains the client handshaker options that can // provided by the caller. type ClientHandshakerOptions struct { @@ -134,10 +108,6 @@ func DefaultServerHandshakerOptions() *ServerHandshakerOptions { return &ServerHandshakerOptions{} } -// TODO: add support for future local and remote endpoint in both client options -// and server options (server options struct does not exist now. When -// caller can provide endpoints, it should be created. - // altsHandshaker is used to complete an ALTS handshake between client and // server. This handshaker talks to the ALTS handshaker service in the metadata // server. @@ -185,10 +155,10 @@ func NewServerHandshaker(ctx context.Context, conn *grpc.ClientConn, c net.Conn, // ClientHandshake starts and completes a client ALTS handshake for GCP. Once // done, ClientHandshake returns a secure connection. func (h *altsHandshaker) ClientHandshake(ctx context.Context) (net.Conn, credentials.AuthInfo, error) { - if !acquire() { + if !clientHandshakes.TryAcquire(1) { return nil, nil, errDropped } - defer release() + defer clientHandshakes.Release(1) if h.side != core.ClientSide { return nil, nil, errors.New("only handshakers created using NewClientHandshaker can perform a client handshaker") @@ -238,10 +208,10 @@ func (h *altsHandshaker) ClientHandshake(ctx context.Context) (net.Conn, credent // ServerHandshake starts and completes a server ALTS handshake for GCP. Once // done, ServerHandshake returns a secure connection. func (h *altsHandshaker) ServerHandshake(ctx context.Context) (net.Conn, credentials.AuthInfo, error) { - if !acquire() { + if !serverHandshakes.TryAcquire(1) { return nil, nil, errDropped } - defer release() + defer serverHandshakes.Release(1) if h.side != core.ServerSide { return nil, nil, errors.New("only handshakers created using NewServerHandshaker can perform a server handshaker") @@ -264,8 +234,6 @@ func (h *altsHandshaker) ServerHandshake(ctx context.Context) (net.Conn, credent } // Prepare server parameters. - // TODO: currently only ALTS parameters are provided. Might need to use - // more options in the future. 
params := make(map[int32]*altspb.ServerHandshakeParameters) params[int32(altspb.HandshakeProtocol_ALTS)] = &altspb.ServerHandshakeParameters{ RecordProtocols: recordProtocols, @@ -391,3 +359,10 @@ func (h *altsHandshaker) Close() { h.stream.CloseSend() } } + +// ResetConcurrentHandshakeSemaphoreForTesting resets the handshake semaphores +// to allow numberOfAllowedHandshakes concurrent handshakes each. +func ResetConcurrentHandshakeSemaphoreForTesting(numberOfAllowedHandshakes int64) { + clientHandshakes = semaphore.NewWeighted(numberOfAllowedHandshakes) + serverHandshakes = semaphore.NewWeighted(numberOfAllowedHandshakes) +} diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go index 83e3bae37b..c7cf1810a1 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.31.0 // protoc v4.22.0 // source: grpc/gcp/altscontext.proto diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go index 0b0093328b..81d0f11408 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.31.0 // protoc v4.22.0 // source: grpc/gcp/handshaker.proto diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go index c2e564c7de..69f0947582 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.31.0 // protoc v4.22.0 // source: grpc/gcp/transport_security_common.proto diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go index 15a3d5102a..cfc9fd85e8 100644 --- a/vendor/google.golang.org/grpc/dialoptions.go +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -78,6 +78,7 @@ type dialOptions struct { defaultServiceConfigRawJSON *string resolvers []resolver.Builder idleTimeout time.Duration + recvBufferPool SharedBufferPool } // DialOption configures how we set up the connection. @@ -138,6 +139,20 @@ func newJoinDialOption(opts ...DialOption) DialOption { return &joinDialOption{opts: opts} } +// WithSharedWriteBuffer allows reusing per-connection transport write buffer. +// If this option is set to true every connection will release the buffer after +// flushing the data on the wire. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. 
+func WithSharedWriteBuffer(val bool) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.SharedWriteBuffer = val + }) +} + // WithWriteBufferSize determines how much data can be batched before doing a // write on the wire. The corresponding memory allocation for this buffer will // be twice the size to keep syscalls low. The default value for this buffer is @@ -628,6 +643,8 @@ func defaultDialOptions() dialOptions { ReadBufferSize: defaultReadBufSize, UseProxy: true, }, + recvBufferPool: nopBufferPool{}, + idleTimeout: 30 * time.Minute, } } @@ -664,8 +681,8 @@ func WithResolvers(rs ...resolver.Builder) DialOption { // channel will exit idle mode when the Connect() method is called or when an // RPC is initiated. // -// By default this feature is disabled, which can also be explicitly configured -// by passing zero to this function. +// A default timeout of 30 minutes will be used if this dial option is not set +// at dial time and idleness can be disabled by passing a timeout of zero. // // # Experimental // @@ -676,3 +693,24 @@ func WithIdleTimeout(d time.Duration) DialOption { o.idleTimeout = d }) } + +// WithRecvBufferPool returns a DialOption that configures the ClientConn +// to use the provided shared buffer pool for parsing incoming messages. Depending +// on the application's workload, this could result in reduced memory allocation. +// +// If you are unsure about how to implement a memory pool but want to utilize one, +// begin with grpc.NewSharedBufferPool. +// +// Note: The shared buffer pool feature will not be active if any of the following +// options are used: WithStatsHandler, EnableTracing, or binary logging. In such +// cases, the shared buffer pool will be ignored. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WithRecvBufferPool(bufferPool SharedBufferPool) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.recvBufferPool = bufferPool + }) +} diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go index 07a5861352..5ebf88d714 100644 --- a/vendor/google.golang.org/grpc/encoding/encoding.go +++ b/vendor/google.golang.org/grpc/encoding/encoding.go @@ -38,6 +38,10 @@ const Identity = "identity" // Compressor is used for compressing and decompressing when sending or // receiving messages. +// +// If a Compressor implements `DecompressedSize(compressedBytes []byte) int`, +// gRPC will invoke it to determine the size of the buffer allocated for the +// result of decompression. A return value of -1 indicates unknown size. type Compressor interface { // Compress writes the data written to wc to w after compressing it. If an // error occurs while initializing the compressor, that error is returned @@ -51,15 +55,6 @@ type Compressor interface { // coding header. The result must be static; the result cannot change // between calls. Name() string - // If a Compressor implements - // DecompressedSize(compressedBytes []byte) int, gRPC will call it - // to determine the size of the buffer allocated for the result of decompression. - // Return -1 to indicate unknown size. - // - // Experimental - // - // Notice: This API is EXPERIMENTAL and may be changed or removed in a - // later release. } var registeredCompressor = make(map[string]Compressor) @@ -90,9 +85,9 @@ func GetCompressor(name string) Compressor { // methods can be called from concurrent goroutines. 
type Codec interface { // Marshal returns the wire format of v. - Marshal(v interface{}) ([]byte, error) + Marshal(v any) ([]byte, error) // Unmarshal parses the wire format into v. - Unmarshal(data []byte, v interface{}) error + Unmarshal(data []byte, v any) error // Name returns the name of the Codec implementation. The returned string // will be used as part of content type in transmission. The result must be // static; the result cannot change between calls. diff --git a/vendor/google.golang.org/grpc/encoding/proto/proto.go b/vendor/google.golang.org/grpc/encoding/proto/proto.go index 3009b35afe..0ee3d3bae9 100644 --- a/vendor/google.golang.org/grpc/encoding/proto/proto.go +++ b/vendor/google.golang.org/grpc/encoding/proto/proto.go @@ -37,7 +37,7 @@ func init() { // codec is a Codec implementation with protobuf. It is the default codec for gRPC. type codec struct{} -func (codec) Marshal(v interface{}) ([]byte, error) { +func (codec) Marshal(v any) ([]byte, error) { vv, ok := v.(proto.Message) if !ok { return nil, fmt.Errorf("failed to marshal, message is %T, want proto.Message", v) @@ -45,7 +45,7 @@ func (codec) Marshal(v interface{}) ([]byte, error) { return proto.Marshal(vv) } -func (codec) Unmarshal(data []byte, v interface{}) error { +func (codec) Unmarshal(data []byte, v any) error { vv, ok := v.(proto.Message) if !ok { return fmt.Errorf("failed to unmarshal, message is %T, want proto.Message", v) diff --git a/vendor/google.golang.org/grpc/grpclog/component.go b/vendor/google.golang.org/grpc/grpclog/component.go index 8358dd6e2a..ac73c9ced2 100644 --- a/vendor/google.golang.org/grpc/grpclog/component.go +++ b/vendor/google.golang.org/grpc/grpclog/component.go @@ -31,71 +31,71 @@ type componentData struct { var cache = map[string]*componentData{} -func (c *componentData) InfoDepth(depth int, args ...interface{}) { - args = append([]interface{}{"[" + string(c.name) + "]"}, args...) +func (c *componentData) InfoDepth(depth int, args ...any) { + args = append([]any{"[" + string(c.name) + "]"}, args...) grpclog.InfoDepth(depth+1, args...) } -func (c *componentData) WarningDepth(depth int, args ...interface{}) { - args = append([]interface{}{"[" + string(c.name) + "]"}, args...) +func (c *componentData) WarningDepth(depth int, args ...any) { + args = append([]any{"[" + string(c.name) + "]"}, args...) grpclog.WarningDepth(depth+1, args...) } -func (c *componentData) ErrorDepth(depth int, args ...interface{}) { - args = append([]interface{}{"[" + string(c.name) + "]"}, args...) +func (c *componentData) ErrorDepth(depth int, args ...any) { + args = append([]any{"[" + string(c.name) + "]"}, args...) grpclog.ErrorDepth(depth+1, args...) } -func (c *componentData) FatalDepth(depth int, args ...interface{}) { - args = append([]interface{}{"[" + string(c.name) + "]"}, args...) +func (c *componentData) FatalDepth(depth int, args ...any) { + args = append([]any{"[" + string(c.name) + "]"}, args...) grpclog.FatalDepth(depth+1, args...) } -func (c *componentData) Info(args ...interface{}) { +func (c *componentData) Info(args ...any) { c.InfoDepth(1, args...) } -func (c *componentData) Warning(args ...interface{}) { +func (c *componentData) Warning(args ...any) { c.WarningDepth(1, args...) } -func (c *componentData) Error(args ...interface{}) { +func (c *componentData) Error(args ...any) { c.ErrorDepth(1, args...) } -func (c *componentData) Fatal(args ...interface{}) { +func (c *componentData) Fatal(args ...any) { c.FatalDepth(1, args...) 
} -func (c *componentData) Infof(format string, args ...interface{}) { +func (c *componentData) Infof(format string, args ...any) { c.InfoDepth(1, fmt.Sprintf(format, args...)) } -func (c *componentData) Warningf(format string, args ...interface{}) { +func (c *componentData) Warningf(format string, args ...any) { c.WarningDepth(1, fmt.Sprintf(format, args...)) } -func (c *componentData) Errorf(format string, args ...interface{}) { +func (c *componentData) Errorf(format string, args ...any) { c.ErrorDepth(1, fmt.Sprintf(format, args...)) } -func (c *componentData) Fatalf(format string, args ...interface{}) { +func (c *componentData) Fatalf(format string, args ...any) { c.FatalDepth(1, fmt.Sprintf(format, args...)) } -func (c *componentData) Infoln(args ...interface{}) { +func (c *componentData) Infoln(args ...any) { c.InfoDepth(1, args...) } -func (c *componentData) Warningln(args ...interface{}) { +func (c *componentData) Warningln(args ...any) { c.WarningDepth(1, args...) } -func (c *componentData) Errorln(args ...interface{}) { +func (c *componentData) Errorln(args ...any) { c.ErrorDepth(1, args...) } -func (c *componentData) Fatalln(args ...interface{}) { +func (c *componentData) Fatalln(args ...any) { c.FatalDepth(1, args...) } diff --git a/vendor/google.golang.org/grpc/grpclog/grpclog.go b/vendor/google.golang.org/grpc/grpclog/grpclog.go index c8bb2be34b..16928c9cb9 100644 --- a/vendor/google.golang.org/grpc/grpclog/grpclog.go +++ b/vendor/google.golang.org/grpc/grpclog/grpclog.go @@ -42,53 +42,53 @@ func V(l int) bool { } // Info logs to the INFO log. -func Info(args ...interface{}) { +func Info(args ...any) { grpclog.Logger.Info(args...) } // Infof logs to the INFO log. Arguments are handled in the manner of fmt.Printf. -func Infof(format string, args ...interface{}) { +func Infof(format string, args ...any) { grpclog.Logger.Infof(format, args...) } // Infoln logs to the INFO log. Arguments are handled in the manner of fmt.Println. -func Infoln(args ...interface{}) { +func Infoln(args ...any) { grpclog.Logger.Infoln(args...) } // Warning logs to the WARNING log. -func Warning(args ...interface{}) { +func Warning(args ...any) { grpclog.Logger.Warning(args...) } // Warningf logs to the WARNING log. Arguments are handled in the manner of fmt.Printf. -func Warningf(format string, args ...interface{}) { +func Warningf(format string, args ...any) { grpclog.Logger.Warningf(format, args...) } // Warningln logs to the WARNING log. Arguments are handled in the manner of fmt.Println. -func Warningln(args ...interface{}) { +func Warningln(args ...any) { grpclog.Logger.Warningln(args...) } // Error logs to the ERROR log. -func Error(args ...interface{}) { +func Error(args ...any) { grpclog.Logger.Error(args...) } // Errorf logs to the ERROR log. Arguments are handled in the manner of fmt.Printf. -func Errorf(format string, args ...interface{}) { +func Errorf(format string, args ...any) { grpclog.Logger.Errorf(format, args...) } // Errorln logs to the ERROR log. Arguments are handled in the manner of fmt.Println. -func Errorln(args ...interface{}) { +func Errorln(args ...any) { grpclog.Logger.Errorln(args...) } // Fatal logs to the FATAL log. Arguments are handled in the manner of fmt.Print. // It calls os.Exit() with exit code 1. -func Fatal(args ...interface{}) { +func Fatal(args ...any) { grpclog.Logger.Fatal(args...) // Make sure fatal logs will exit. os.Exit(1) @@ -96,7 +96,7 @@ func Fatal(args ...interface{}) { // Fatalf logs to the FATAL log. 
Arguments are handled in the manner of fmt.Printf. // It calls os.Exit() with exit code 1. -func Fatalf(format string, args ...interface{}) { +func Fatalf(format string, args ...any) { grpclog.Logger.Fatalf(format, args...) // Make sure fatal logs will exit. os.Exit(1) @@ -104,7 +104,7 @@ func Fatalf(format string, args ...interface{}) { // Fatalln logs to the FATAL log. Arguments are handled in the manner of fmt.Println. // It calle os.Exit()) with exit code 1. -func Fatalln(args ...interface{}) { +func Fatalln(args ...any) { grpclog.Logger.Fatalln(args...) // Make sure fatal logs will exit. os.Exit(1) @@ -113,20 +113,20 @@ func Fatalln(args ...interface{}) { // Print prints to the logger. Arguments are handled in the manner of fmt.Print. // // Deprecated: use Info. -func Print(args ...interface{}) { +func Print(args ...any) { grpclog.Logger.Info(args...) } // Printf prints to the logger. Arguments are handled in the manner of fmt.Printf. // // Deprecated: use Infof. -func Printf(format string, args ...interface{}) { +func Printf(format string, args ...any) { grpclog.Logger.Infof(format, args...) } // Println prints to the logger. Arguments are handled in the manner of fmt.Println. // // Deprecated: use Infoln. -func Println(args ...interface{}) { +func Println(args ...any) { grpclog.Logger.Infoln(args...) } diff --git a/vendor/google.golang.org/grpc/grpclog/logger.go b/vendor/google.golang.org/grpc/grpclog/logger.go index ef06a4822b..b1674d8267 100644 --- a/vendor/google.golang.org/grpc/grpclog/logger.go +++ b/vendor/google.golang.org/grpc/grpclog/logger.go @@ -24,12 +24,12 @@ import "google.golang.org/grpc/internal/grpclog" // // Deprecated: use LoggerV2. type Logger interface { - Fatal(args ...interface{}) - Fatalf(format string, args ...interface{}) - Fatalln(args ...interface{}) - Print(args ...interface{}) - Printf(format string, args ...interface{}) - Println(args ...interface{}) + Fatal(args ...any) + Fatalf(format string, args ...any) + Fatalln(args ...any) + Print(args ...any) + Printf(format string, args ...any) + Println(args ...any) } // SetLogger sets the logger that is used in grpc. Call only from @@ -45,39 +45,39 @@ type loggerWrapper struct { Logger } -func (g *loggerWrapper) Info(args ...interface{}) { +func (g *loggerWrapper) Info(args ...any) { g.Logger.Print(args...) } -func (g *loggerWrapper) Infoln(args ...interface{}) { +func (g *loggerWrapper) Infoln(args ...any) { g.Logger.Println(args...) } -func (g *loggerWrapper) Infof(format string, args ...interface{}) { +func (g *loggerWrapper) Infof(format string, args ...any) { g.Logger.Printf(format, args...) } -func (g *loggerWrapper) Warning(args ...interface{}) { +func (g *loggerWrapper) Warning(args ...any) { g.Logger.Print(args...) } -func (g *loggerWrapper) Warningln(args ...interface{}) { +func (g *loggerWrapper) Warningln(args ...any) { g.Logger.Println(args...) } -func (g *loggerWrapper) Warningf(format string, args ...interface{}) { +func (g *loggerWrapper) Warningf(format string, args ...any) { g.Logger.Printf(format, args...) } -func (g *loggerWrapper) Error(args ...interface{}) { +func (g *loggerWrapper) Error(args ...any) { g.Logger.Print(args...) } -func (g *loggerWrapper) Errorln(args ...interface{}) { +func (g *loggerWrapper) Errorln(args ...any) { g.Logger.Println(args...) } -func (g *loggerWrapper) Errorf(format string, args ...interface{}) { +func (g *loggerWrapper) Errorf(format string, args ...any) { g.Logger.Printf(format, args...) 
} diff --git a/vendor/google.golang.org/grpc/grpclog/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/loggerv2.go index 5de66e40d3..ecfd36d713 100644 --- a/vendor/google.golang.org/grpc/grpclog/loggerv2.go +++ b/vendor/google.golang.org/grpc/grpclog/loggerv2.go @@ -33,35 +33,35 @@ import ( // LoggerV2 does underlying logging work for grpclog. type LoggerV2 interface { // Info logs to INFO log. Arguments are handled in the manner of fmt.Print. - Info(args ...interface{}) + Info(args ...any) // Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. - Infoln(args ...interface{}) + Infoln(args ...any) // Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf. - Infof(format string, args ...interface{}) + Infof(format string, args ...any) // Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print. - Warning(args ...interface{}) + Warning(args ...any) // Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println. - Warningln(args ...interface{}) + Warningln(args ...any) // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. - Warningf(format string, args ...interface{}) + Warningf(format string, args ...any) // Error logs to ERROR log. Arguments are handled in the manner of fmt.Print. - Error(args ...interface{}) + Error(args ...any) // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. - Errorln(args ...interface{}) + Errorln(args ...any) // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. - Errorf(format string, args ...interface{}) + Errorf(format string, args ...any) // Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print. // gRPC ensures that all Fatal logs will exit with os.Exit(1). // Implementations may also call os.Exit() with a non-zero exit code. - Fatal(args ...interface{}) + Fatal(args ...any) // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println. // gRPC ensures that all Fatal logs will exit with os.Exit(1). // Implementations may also call os.Exit() with a non-zero exit code. - Fatalln(args ...interface{}) + Fatalln(args ...any) // Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. // gRPC ensures that all Fatal logs will exit with os.Exit(1). // Implementations may also call os.Exit() with a non-zero exit code. - Fatalf(format string, args ...interface{}) + Fatalf(format string, args ...any) // V reports whether verbosity level l is at least the requested verbose level. 
V(l int) bool } @@ -182,53 +182,53 @@ func (g *loggerT) output(severity int, s string) { g.m[severity].Output(2, string(b)) } -func (g *loggerT) Info(args ...interface{}) { +func (g *loggerT) Info(args ...any) { g.output(infoLog, fmt.Sprint(args...)) } -func (g *loggerT) Infoln(args ...interface{}) { +func (g *loggerT) Infoln(args ...any) { g.output(infoLog, fmt.Sprintln(args...)) } -func (g *loggerT) Infof(format string, args ...interface{}) { +func (g *loggerT) Infof(format string, args ...any) { g.output(infoLog, fmt.Sprintf(format, args...)) } -func (g *loggerT) Warning(args ...interface{}) { +func (g *loggerT) Warning(args ...any) { g.output(warningLog, fmt.Sprint(args...)) } -func (g *loggerT) Warningln(args ...interface{}) { +func (g *loggerT) Warningln(args ...any) { g.output(warningLog, fmt.Sprintln(args...)) } -func (g *loggerT) Warningf(format string, args ...interface{}) { +func (g *loggerT) Warningf(format string, args ...any) { g.output(warningLog, fmt.Sprintf(format, args...)) } -func (g *loggerT) Error(args ...interface{}) { +func (g *loggerT) Error(args ...any) { g.output(errorLog, fmt.Sprint(args...)) } -func (g *loggerT) Errorln(args ...interface{}) { +func (g *loggerT) Errorln(args ...any) { g.output(errorLog, fmt.Sprintln(args...)) } -func (g *loggerT) Errorf(format string, args ...interface{}) { +func (g *loggerT) Errorf(format string, args ...any) { g.output(errorLog, fmt.Sprintf(format, args...)) } -func (g *loggerT) Fatal(args ...interface{}) { +func (g *loggerT) Fatal(args ...any) { g.output(fatalLog, fmt.Sprint(args...)) os.Exit(1) } -func (g *loggerT) Fatalln(args ...interface{}) { +func (g *loggerT) Fatalln(args ...any) { g.output(fatalLog, fmt.Sprintln(args...)) os.Exit(1) } -func (g *loggerT) Fatalf(format string, args ...interface{}) { +func (g *loggerT) Fatalf(format string, args ...any) { g.output(fatalLog, fmt.Sprintf(format, args...)) os.Exit(1) } @@ -248,11 +248,11 @@ func (g *loggerT) V(l int) bool { type DepthLoggerV2 interface { LoggerV2 // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println. - InfoDepth(depth int, args ...interface{}) + InfoDepth(depth int, args ...any) // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println. - WarningDepth(depth int, args ...interface{}) + WarningDepth(depth int, args ...any) // ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println. - ErrorDepth(depth int, args ...interface{}) + ErrorDepth(depth int, args ...any) // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println. 
- FatalDepth(depth int, args ...interface{}) + FatalDepth(depth int, args ...any) } diff --git a/vendor/google.golang.org/grpc/health/client.go b/vendor/google.golang.org/grpc/health/client.go index b5bee48380..740745c45f 100644 --- a/vendor/google.golang.org/grpc/health/client.go +++ b/vendor/google.golang.org/grpc/health/client.go @@ -56,7 +56,7 @@ const healthCheckMethod = "/grpc.health.v1.Health/Watch" // This function implements the protocol defined at: // https://github.com/grpc/grpc/blob/master/doc/health-checking.md -func clientHealthCheck(ctx context.Context, newStream func(string) (interface{}, error), setConnectivityState func(connectivity.State, error), service string) error { +func clientHealthCheck(ctx context.Context, newStream func(string) (any, error), setConnectivityState func(connectivity.State, error), service string) error { tryCnt := 0 retryConnection: diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go index 142d35f753..24299efd63 100644 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.31.0 // protoc v4.22.0 // source: grpc/health/v1/health.proto diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go index a01a1b4d54..4439cda0f3 100644 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go @@ -44,8 +44,15 @@ const ( // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. type HealthClient interface { - // If the requested service is unknown, the call will fail with status - // NOT_FOUND. + // Check gets the health of the specified service. If the requested service + // is unknown, the call will fail with status NOT_FOUND. If the caller does + // not specify a service name, the server should respond with its overall + // health status. + // + // Clients should set a deadline when calling Check, and can declare the + // server unhealthy if they do not receive a timely response. + // + // Check implementations should be idempotent and side effect free. Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) // Performs a watch for the serving status of the requested service. // The server will immediately send back a message indicating the current @@ -118,8 +125,15 @@ func (x *healthWatchClient) Recv() (*HealthCheckResponse, error) { // All implementations should embed UnimplementedHealthServer // for forward compatibility type HealthServer interface { - // If the requested service is unknown, the call will fail with status - // NOT_FOUND. + // Check gets the health of the specified service. If the requested service + // is unknown, the call will fail with status NOT_FOUND. If the caller does + // not specify a service name, the server should respond with its overall + // health status. + // + // Clients should set a deadline when calling Check, and can declare the + // server unhealthy if they do not receive a timely response. + // + // Check implementations should be idempotent and side effect free. 
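The expanded Check documentation above recommends a per-call deadline; a rough client-side sketch (package, function name, and the 2-second deadline are illustrative, and an existing *grpc.ClientConn is assumed):

    package example

    import (
        "context"
        "fmt"
        "time"

        "google.golang.org/grpc"
        healthpb "google.golang.org/grpc/health/grpc_health_v1"
    )

    // checkOverallHealth asks for the server's overall status (empty Service
    // name) and treats a missing or late response as unhealthy, per the docs.
    func checkOverallHealth(ctx context.Context, conn *grpc.ClientConn) error {
        ctx, cancel := context.WithTimeout(ctx, 2*time.Second)
        defer cancel()
        resp, err := healthpb.NewHealthClient(conn).Check(ctx, &healthpb.HealthCheckRequest{})
        if err != nil {
            return err
        }
        if resp.GetStatus() != healthpb.HealthCheckResponse_SERVING {
            return fmt.Errorf("server reported %v", resp.GetStatus())
        }
        return nil
    }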
Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) // Performs a watch for the serving status of the requested service. // The server will immediately send back a message indicating the current diff --git a/vendor/google.golang.org/grpc/interceptor.go b/vendor/google.golang.org/grpc/interceptor.go index bb96ef57be..877d78fc3d 100644 --- a/vendor/google.golang.org/grpc/interceptor.go +++ b/vendor/google.golang.org/grpc/interceptor.go @@ -23,7 +23,7 @@ import ( ) // UnaryInvoker is called by UnaryClientInterceptor to complete RPCs. -type UnaryInvoker func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error +type UnaryInvoker func(ctx context.Context, method string, req, reply any, cc *ClientConn, opts ...CallOption) error // UnaryClientInterceptor intercepts the execution of a unary RPC on the client. // Unary interceptors can be specified as a DialOption, using @@ -40,7 +40,7 @@ type UnaryInvoker func(ctx context.Context, method string, req, reply interface{ // defaults from the ClientConn as well as per-call options. // // The returned error must be compatible with the status package. -type UnaryClientInterceptor func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error +type UnaryClientInterceptor func(ctx context.Context, method string, req, reply any, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error // Streamer is called by StreamClientInterceptor to create a ClientStream. type Streamer func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) @@ -66,7 +66,7 @@ type StreamClientInterceptor func(ctx context.Context, desc *StreamDesc, cc *Cli // server side. All per-rpc information may be mutated by the interceptor. type UnaryServerInfo struct { // Server is the service implementation the user provides. This is read-only. - Server interface{} + Server any // FullMethod is the full RPC method string, i.e., /package.service/method. FullMethod string } @@ -78,13 +78,13 @@ type UnaryServerInfo struct { // status package, or be one of the context errors. Otherwise, gRPC will use // codes.Unknown as the status code and err.Error() as the status message of the // RPC. -type UnaryHandler func(ctx context.Context, req interface{}) (interface{}, error) +type UnaryHandler func(ctx context.Context, req any) (any, error) // UnaryServerInterceptor provides a hook to intercept the execution of a unary RPC on the server. info // contains all the information of this RPC the interceptor can operate on. And handler is the wrapper // of the service method implementation. It is the responsibility of the interceptor to invoke handler // to complete the RPC. -type UnaryServerInterceptor func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (resp interface{}, err error) +type UnaryServerInterceptor func(ctx context.Context, req any, info *UnaryServerInfo, handler UnaryHandler) (resp any, err error) // StreamServerInfo consists of various information about a streaming RPC on // server side. All per-rpc information may be mutated by the interceptor. @@ -101,4 +101,4 @@ type StreamServerInfo struct { // info contains all the information of this RPC the interceptor can operate on. And handler is the // service method implementation. It is the responsibility of the interceptor to invoke handler to // complete the RPC. 
-type StreamServerInterceptor func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error +type StreamServerInterceptor func(srv any, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error diff --git a/vendor/google.golang.org/grpc/internal/backoff/backoff.go b/vendor/google.golang.org/grpc/internal/backoff/backoff.go index 5fc0ee3da5..fed1c011a3 100644 --- a/vendor/google.golang.org/grpc/internal/backoff/backoff.go +++ b/vendor/google.golang.org/grpc/internal/backoff/backoff.go @@ -23,6 +23,8 @@ package backoff import ( + "context" + "errors" "time" grpcbackoff "google.golang.org/grpc/backoff" @@ -71,3 +73,37 @@ func (bc Exponential) Backoff(retries int) time.Duration { } return time.Duration(backoff) } + +// ErrResetBackoff is the error to be returned by the function executed by RunF, +// to instruct the latter to reset its backoff state. +var ErrResetBackoff = errors.New("reset backoff state") + +// RunF provides a convenient way to run a function f repeatedly until the +// context expires or f returns a non-nil error that is not ErrResetBackoff. +// When f returns ErrResetBackoff, RunF continues to run f, but resets its +// backoff state before doing so. backoff accepts an integer representing the +// number of retries, and returns the amount of time to backoff. +func RunF(ctx context.Context, f func() error, backoff func(int) time.Duration) { + attempt := 0 + timer := time.NewTimer(0) + for ctx.Err() == nil { + select { + case <-timer.C: + case <-ctx.Done(): + timer.Stop() + return + } + + err := f() + if errors.Is(err, ErrResetBackoff) { + timer.Reset(0) + attempt = 0 + continue + } + if err != nil { + return + } + timer.Reset(backoff(attempt)) + attempt++ + } +} diff --git a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go index 08666f62a7..3c594e6e4e 100644 --- a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go +++ b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go @@ -200,8 +200,8 @@ func (gsb *Balancer) ExitIdle() { } } -// UpdateSubConnState forwards the update to the appropriate child. -func (gsb *Balancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { +// updateSubConnState forwards the update to the appropriate child. +func (gsb *Balancer) updateSubConnState(sc balancer.SubConn, state balancer.SubConnState, cb func(balancer.SubConnState)) { gsb.currentMu.Lock() defer gsb.currentMu.Unlock() gsb.mu.Lock() @@ -214,13 +214,26 @@ func (gsb *Balancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubC } else if gsb.balancerPending != nil && gsb.balancerPending.subconns[sc] { balToUpdate = gsb.balancerPending } - gsb.mu.Unlock() if balToUpdate == nil { // SubConn belonged to a stale lb policy that has not yet fully closed, // or the balancer was already closed. + gsb.mu.Unlock() return } - balToUpdate.UpdateSubConnState(sc, state) + if state.ConnectivityState == connectivity.Shutdown { + delete(balToUpdate.subconns, sc) + } + gsb.mu.Unlock() + if cb != nil { + cb(state) + } else { + balToUpdate.UpdateSubConnState(sc, state) + } +} + +// UpdateSubConnState forwards the update to the appropriate child. +func (gsb *Balancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + gsb.updateSubConnState(sc, state, nil) } // Close closes any active child balancers. 
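Interceptor implementations simply adopt the same `any` parameters; a hedged sketch of a client-side unary interceptor matching the updated UnaryClientInterceptor signature above (the name and logging behavior are illustrative):

    package example

    import (
        "context"
        "log"
        "time"

        "google.golang.org/grpc"
    )

    // timingInterceptor logs the duration of each unary RPC. It would be
    // installed at dial time via grpc.WithUnaryInterceptor(timingInterceptor).
    func timingInterceptor(ctx context.Context, method string, req, reply any,
        cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
        start := time.Now()
        err := invoker(ctx, method, req, reply, cc, opts...)
        log.Printf("%s took %v (err=%v)", method, time.Since(start), err)
        return err
    }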
@@ -242,7 +255,7 @@ func (gsb *Balancer) Close() { // // It implements the balancer.ClientConn interface and is passed down in that // capacity to the wrapped balancer. It maintains a set of subConns created by -// the wrapped balancer and calls from the latter to create/update/remove +// the wrapped balancer and calls from the latter to create/update/shutdown // SubConns update this set before being forwarded to the parent ClientConn. // State updates from the wrapped balancer can result in invocation of the // graceful switch logic. @@ -254,21 +267,10 @@ type balancerWrapper struct { subconns map[balancer.SubConn]bool // subconns created by this balancer } -func (bw *balancerWrapper) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { - if state.ConnectivityState == connectivity.Shutdown { - bw.gsb.mu.Lock() - delete(bw.subconns, sc) - bw.gsb.mu.Unlock() - } - // There is no need to protect this read with a mutex, as the write to the - // Balancer field happens in SwitchTo, which completes before this can be - // called. - bw.Balancer.UpdateSubConnState(sc, state) -} - -// Close closes the underlying LB policy and removes the subconns it created. bw -// must not be referenced via balancerCurrent or balancerPending in gsb when -// called. gsb.mu must not be held. Does not panic with a nil receiver. +// Close closes the underlying LB policy and shuts down the subconns it +// created. bw must not be referenced via balancerCurrent or balancerPending in +// gsb when called. gsb.mu must not be held. Does not panic with a nil +// receiver. func (bw *balancerWrapper) Close() { // before Close is called. if bw == nil { @@ -281,7 +283,7 @@ func (bw *balancerWrapper) Close() { bw.Balancer.Close() bw.gsb.mu.Lock() for sc := range bw.subconns { - bw.gsb.cc.RemoveSubConn(sc) + sc.Shutdown() } bw.gsb.mu.Unlock() } @@ -335,13 +337,16 @@ func (bw *balancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.Ne } bw.gsb.mu.Unlock() + var sc balancer.SubConn + oldListener := opts.StateListener + opts.StateListener = func(state balancer.SubConnState) { bw.gsb.updateSubConnState(sc, state, oldListener) } sc, err := bw.gsb.cc.NewSubConn(addrs, opts) if err != nil { return nil, err } bw.gsb.mu.Lock() if !bw.gsb.balancerCurrentOrPending(bw) { // balancer was closed during this call - bw.gsb.cc.RemoveSubConn(sc) + sc.Shutdown() bw.gsb.mu.Unlock() return nil, fmt.Errorf("%T at address %p that called NewSubConn is deleted", bw, bw) } @@ -360,13 +365,9 @@ func (bw *balancerWrapper) ResolveNow(opts resolver.ResolveNowOptions) { } func (bw *balancerWrapper) RemoveSubConn(sc balancer.SubConn) { - bw.gsb.mu.Lock() - if !bw.gsb.balancerCurrentOrPending(bw) { - bw.gsb.mu.Unlock() - return - } - bw.gsb.mu.Unlock() - bw.gsb.cc.RemoveSubConn(sc) + // Note: existing third party balancers may call this, so it must remain + // until RemoveSubConn is fully removed. + sc.Shutdown() } func (bw *balancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { diff --git a/vendor/google.golang.org/grpc/internal/balancerload/load.go b/vendor/google.golang.org/grpc/internal/balancerload/load.go index 3a905d9665..94a08d6875 100644 --- a/vendor/google.golang.org/grpc/internal/balancerload/load.go +++ b/vendor/google.golang.org/grpc/internal/balancerload/load.go @@ -25,7 +25,7 @@ import ( // Parser converts loads from metadata into a concrete type. type Parser interface { // Parse parses loads from metadata. 
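The gracefulswitch changes above lean on the SubConn StateListener mechanism: state updates are delivered through a callback supplied at SubConn creation rather than through Balancer.UpdateSubConnState. A rough sketch of that pattern for a hypothetical custom balancer (helper name is illustrative):

    package balancerexample

    import (
        "log"

        "google.golang.org/grpc/balancer"
        "google.golang.org/grpc/connectivity"
        "google.golang.org/grpc/resolver"
    )

    // newTrackedSubConn creates a SubConn whose state changes arrive via the
    // StateListener callback set in NewSubConnOptions.
    func newTrackedSubConn(cc balancer.ClientConn, addrs []resolver.Address) (balancer.SubConn, error) {
        return cc.NewSubConn(addrs, balancer.NewSubConnOptions{
            StateListener: func(s balancer.SubConnState) {
                log.Printf("subconn state: %v", s.ConnectivityState)
                if s.ConnectivityState == connectivity.Shutdown {
                    // Shutdown is terminal; drop any bookkeeping for this SubConn.
                }
            },
        })
    }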
- Parse(md metadata.MD) interface{} + Parse(md metadata.MD) any } var parser Parser @@ -38,7 +38,7 @@ func SetParser(lr Parser) { } // Parse calls parser.Read(). -func Parse(md metadata.MD) interface{} { +func Parse(md metadata.MD) any { if parser == nil { return nil } diff --git a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go index 6c3f632215..0f31274a3c 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go @@ -230,7 +230,7 @@ type ClientMessage struct { OnClientSide bool // Message can be a proto.Message or []byte. Other messages formats are not // supported. - Message interface{} + Message any } func (c *ClientMessage) toProto() *binlogpb.GrpcLogEntry { @@ -270,7 +270,7 @@ type ServerMessage struct { OnClientSide bool // Message can be a proto.Message or []byte. Other messages formats are not // supported. - Message interface{} + Message any } func (c *ServerMessage) toProto() *binlogpb.GrpcLogEntry { diff --git a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go index 81c2f5fd76..4399c3df49 100644 --- a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go +++ b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go @@ -28,25 +28,25 @@ import "sync" // the underlying mutex used for synchronization. // // Unbounded supports values of any type to be stored in it by using a channel -// of `interface{}`. This means that a call to Put() incurs an extra memory -// allocation, and also that users need a type assertion while reading. For -// performance critical code paths, using Unbounded is strongly discouraged and -// defining a new type specific implementation of this buffer is preferred. See +// of `any`. This means that a call to Put() incurs an extra memory allocation, +// and also that users need a type assertion while reading. For performance +// critical code paths, using Unbounded is strongly discouraged and defining a +// new type specific implementation of this buffer is preferred. See // internal/transport/transport.go for an example of this. type Unbounded struct { - c chan interface{} + c chan any closed bool mu sync.Mutex - backlog []interface{} + backlog []any } // NewUnbounded returns a new instance of Unbounded. func NewUnbounded() *Unbounded { - return &Unbounded{c: make(chan interface{}, 1)} + return &Unbounded{c: make(chan any, 1)} } // Put adds t to the unbounded buffer. -func (b *Unbounded) Put(t interface{}) { +func (b *Unbounded) Put(t any) { b.mu.Lock() defer b.mu.Unlock() if b.closed { @@ -89,7 +89,7 @@ func (b *Unbounded) Load() { // // If the unbounded buffer is closed, the read channel returned by this method // is closed. -func (b *Unbounded) Get() <-chan interface{} { +func (b *Unbounded) Get() <-chan any { return b.c } diff --git a/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go index 777cbcd792..5395e77529 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/funcs.go +++ b/vendor/google.golang.org/grpc/internal/channelz/funcs.go @@ -24,9 +24,7 @@ package channelz import ( - "context" "errors" - "fmt" "sort" "sync" "sync/atomic" @@ -40,8 +38,11 @@ const ( ) var ( - db dbWrapper - idGen idGenerator + // IDGen is the global channelz entity ID generator. It should not be used + // outside this package except by tests. 
+ IDGen IDGenerator + + db dbWrapper // EntryPerPage defines the number of channelz entries to be shown on a web page. EntryPerPage = int64(50) curState int32 @@ -52,14 +53,14 @@ var ( func TurnOn() { if !IsOn() { db.set(newChannelMap()) - idGen.reset() + IDGen.Reset() atomic.StoreInt32(&curState, 1) } } // IsOn returns whether channelz data collection is on. func IsOn() bool { - return atomic.CompareAndSwapInt32(&curState, 1, 1) + return atomic.LoadInt32(&curState) == 1 } // SetMaxTraceEntry sets maximum number of trace entry per entity (i.e. channel/subchannel). @@ -97,43 +98,6 @@ func (d *dbWrapper) get() *channelMap { return d.DB } -// NewChannelzStorageForTesting initializes channelz data storage and id -// generator for testing purposes. -// -// Returns a cleanup function to be invoked by the test, which waits for up to -// 10s for all channelz state to be reset by the grpc goroutines when those -// entities get closed. This cleanup function helps with ensuring that tests -// don't mess up each other. -func NewChannelzStorageForTesting() (cleanup func() error) { - db.set(newChannelMap()) - idGen.reset() - - return func() error { - cm := db.get() - if cm == nil { - return nil - } - - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - ticker := time.NewTicker(10 * time.Millisecond) - defer ticker.Stop() - for { - cm.mu.RLock() - topLevelChannels, servers, channels, subChannels, listenSockets, normalSockets := len(cm.topLevelChannels), len(cm.servers), len(cm.channels), len(cm.subChannels), len(cm.listenSockets), len(cm.normalSockets) - cm.mu.RUnlock() - - if err := ctx.Err(); err != nil { - return fmt.Errorf("after 10s the channelz map has not been cleaned up yet, topchannels: %d, servers: %d, channels: %d, subchannels: %d, listen sockets: %d, normal sockets: %d", topLevelChannels, servers, channels, subChannels, listenSockets, normalSockets) - } - if topLevelChannels == 0 && servers == 0 && channels == 0 && subChannels == 0 && listenSockets == 0 && normalSockets == 0 { - return nil - } - <-ticker.C - } - } -} - // GetTopChannels returns a slice of top channel's ChannelMetric, along with a // boolean indicating whether there's more top channels to be queried for. // @@ -193,7 +157,7 @@ func GetServer(id int64) *ServerMetric { // // If channelz is not turned ON, the channelz database is not mutated. func RegisterChannel(c Channel, pid *Identifier, ref string) *Identifier { - id := idGen.genID() + id := IDGen.genID() var parent int64 isTopChannel := true if pid != nil { @@ -229,7 +193,7 @@ func RegisterSubChannel(c Channel, pid *Identifier, ref string) (*Identifier, er if pid == nil { return nil, errors.New("a SubChannel's parent id cannot be nil") } - id := idGen.genID() + id := IDGen.genID() if !IsOn() { return newIdentifer(RefSubChannel, id, pid), nil } @@ -251,7 +215,7 @@ func RegisterSubChannel(c Channel, pid *Identifier, ref string) (*Identifier, er // // If channelz is not turned ON, the channelz database is not mutated. 
func RegisterServer(s Server, ref string) *Identifier { - id := idGen.genID() + id := IDGen.genID() if !IsOn() { return newIdentifer(RefServer, id, nil) } @@ -277,7 +241,7 @@ func RegisterListenSocket(s Socket, pid *Identifier, ref string) (*Identifier, e if pid == nil { return nil, errors.New("a ListenSocket's parent id cannot be 0") } - id := idGen.genID() + id := IDGen.genID() if !IsOn() { return newIdentifer(RefListenSocket, id, pid), nil } @@ -297,7 +261,7 @@ func RegisterNormalSocket(s Socket, pid *Identifier, ref string) (*Identifier, e if pid == nil { return nil, errors.New("a NormalSocket's parent id cannot be 0") } - id := idGen.genID() + id := IDGen.genID() if !IsOn() { return newIdentifer(RefNormalSocket, id, pid), nil } @@ -776,14 +740,17 @@ func (c *channelMap) GetServer(id int64) *ServerMetric { return sm } -type idGenerator struct { +// IDGenerator is an incrementing atomic that tracks IDs for channelz entities. +type IDGenerator struct { id int64 } -func (i *idGenerator) reset() { +// Reset resets the generated ID back to zero. Should only be used at +// initialization or by tests sensitive to the ID number. +func (i *IDGenerator) Reset() { atomic.StoreInt64(&i.id, 0) } -func (i *idGenerator) genID() int64 { +func (i *IDGenerator) genID() int64 { return atomic.AddInt64(&i.id, 1) } diff --git a/vendor/google.golang.org/grpc/internal/channelz/logging.go b/vendor/google.golang.org/grpc/internal/channelz/logging.go index 8e13a3d2ce..f89e6f77bb 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/logging.go +++ b/vendor/google.golang.org/grpc/internal/channelz/logging.go @@ -31,7 +31,7 @@ func withParens(id *Identifier) string { } // Info logs and adds a trace event if channelz is on. -func Info(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { +func Info(l grpclog.DepthLoggerV2, id *Identifier, args ...any) { AddTraceEvent(l, id, 1, &TraceEventDesc{ Desc: fmt.Sprint(args...), Severity: CtInfo, @@ -39,7 +39,7 @@ func Info(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { } // Infof logs and adds a trace event if channelz is on. -func Infof(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) { +func Infof(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...any) { AddTraceEvent(l, id, 1, &TraceEventDesc{ Desc: fmt.Sprintf(format, args...), Severity: CtInfo, @@ -47,7 +47,7 @@ func Infof(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...inter } // Warning logs and adds a trace event if channelz is on. -func Warning(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { +func Warning(l grpclog.DepthLoggerV2, id *Identifier, args ...any) { AddTraceEvent(l, id, 1, &TraceEventDesc{ Desc: fmt.Sprint(args...), Severity: CtWarning, @@ -55,7 +55,7 @@ func Warning(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { } // Warningf logs and adds a trace event if channelz is on. -func Warningf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) { +func Warningf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...any) { AddTraceEvent(l, id, 1, &TraceEventDesc{ Desc: fmt.Sprintf(format, args...), Severity: CtWarning, @@ -63,7 +63,7 @@ func Warningf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...in } // Error logs and adds a trace event if channelz is on. 
-func Error(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { +func Error(l grpclog.DepthLoggerV2, id *Identifier, args ...any) { AddTraceEvent(l, id, 1, &TraceEventDesc{ Desc: fmt.Sprint(args...), Severity: CtError, @@ -71,7 +71,7 @@ func Error(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { } // Errorf logs and adds a trace event if channelz is on. -func Errorf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) { +func Errorf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...any) { AddTraceEvent(l, id, 1, &TraceEventDesc{ Desc: fmt.Sprintf(format, args...), Severity: CtError, diff --git a/vendor/google.golang.org/grpc/internal/channelz/types.go b/vendor/google.golang.org/grpc/internal/channelz/types.go index 7b2f350e2e..1d4020f537 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/types.go +++ b/vendor/google.golang.org/grpc/internal/channelz/types.go @@ -628,6 +628,7 @@ type tracedChannel interface { type channelTrace struct { cm *channelMap + clearCalled bool createdTime time.Time eventCount int64 mu sync.Mutex @@ -656,6 +657,10 @@ func (c *channelTrace) append(e *TraceEvent) { } func (c *channelTrace) clear() { + if c.clearCalled { + return + } + c.clearCalled = true c.mu.Lock() for _, e := range c.events { if e.RefID != 0 { diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_linux.go b/vendor/google.golang.org/grpc/internal/channelz/util_linux.go index 8d194e44e1..98288c3f86 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/util_linux.go +++ b/vendor/google.golang.org/grpc/internal/channelz/util_linux.go @@ -23,7 +23,7 @@ import ( ) // GetSocketOption gets the socket option info of the conn. -func GetSocketOption(socket interface{}) *SocketOptionData { +func GetSocketOption(socket any) *SocketOptionData { c, ok := socket.(syscall.Conn) if !ok { return nil diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go index 837ddc4024..b5568b22e2 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go +++ b/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go @@ -22,6 +22,6 @@ package channelz // GetSocketOption gets the socket option info of the conn. -func GetSocketOption(c interface{}) *SocketOptionData { +func GetSocketOption(c any) *SocketOptionData { return nil } diff --git a/vendor/google.golang.org/grpc/internal/credentials/credentials.go b/vendor/google.golang.org/grpc/internal/credentials/credentials.go index 32c9b59033..9deee7f651 100644 --- a/vendor/google.golang.org/grpc/internal/credentials/credentials.go +++ b/vendor/google.golang.org/grpc/internal/credentials/credentials.go @@ -25,12 +25,12 @@ import ( type requestInfoKey struct{} // NewRequestInfoContext creates a context with ri. -func NewRequestInfoContext(ctx context.Context, ri interface{}) context.Context { +func NewRequestInfoContext(ctx context.Context, ri any) context.Context { return context.WithValue(ctx, requestInfoKey{}, ri) } // RequestInfoFromContext extracts the RequestInfo from ctx. -func RequestInfoFromContext(ctx context.Context) interface{} { +func RequestInfoFromContext(ctx context.Context) any { return ctx.Value(requestInfoKey{}) } @@ -39,11 +39,11 @@ func RequestInfoFromContext(ctx context.Context) interface{} { type clientHandshakeInfoKey struct{} // ClientHandshakeInfoFromContext extracts the ClientHandshakeInfo from ctx. 
-func ClientHandshakeInfoFromContext(ctx context.Context) interface{} { +func ClientHandshakeInfoFromContext(ctx context.Context) any { return ctx.Value(clientHandshakeInfoKey{}) } // NewClientHandshakeInfoContext creates a context with chi. -func NewClientHandshakeInfoContext(ctx context.Context, chi interface{}) context.Context { +func NewClientHandshakeInfoContext(ctx context.Context, chi any) context.Context { return context.WithValue(ctx, clientHandshakeInfoKey{}, chi) } diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go index 80fd5c7d2a..3cf10ddfbd 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -37,9 +37,15 @@ var ( // checking which NACKs configs specifying ring sizes > 8*1024*1024 (~8M). RingHashCap = uint64FromEnv("GRPC_RING_HASH_CAP", 4096, 1, 8*1024*1024) // PickFirstLBConfig is set if we should support configuration of the - // pick_first LB policy, which can be enabled by setting the environment - // variable "GRPC_EXPERIMENTAL_PICKFIRST_LB_CONFIG" to "true". - PickFirstLBConfig = boolFromEnv("GRPC_EXPERIMENTAL_PICKFIRST_LB_CONFIG", false) + // pick_first LB policy. + PickFirstLBConfig = boolFromEnv("GRPC_EXPERIMENTAL_PICKFIRST_LB_CONFIG", true) + // LeastRequestLB is set if we should support the least_request_experimental + // LB policy, which can be enabled by setting the environment variable + // "GRPC_EXPERIMENTAL_ENABLE_LEAST_REQUEST" to "true". + LeastRequestLB = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_LEAST_REQUEST", false) + // ALTSMaxConcurrentHandshakes is the maximum number of concurrent ALTS + // handshakes that can be performed. + ALTSMaxConcurrentHandshakes = uint64FromEnv("GRPC_ALTS_MAX_CONCURRENT_HANDSHAKES", 100, 1, 100) ) func boolFromEnv(envVar string, def bool) bool { diff --git a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go index b68e26a364..bfc45102ab 100644 --- a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go +++ b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go @@ -30,7 +30,7 @@ var Logger LoggerV2 var DepthLogger DepthLoggerV2 // InfoDepth logs to the INFO log at the specified depth. -func InfoDepth(depth int, args ...interface{}) { +func InfoDepth(depth int, args ...any) { if DepthLogger != nil { DepthLogger.InfoDepth(depth, args...) } else { @@ -39,7 +39,7 @@ func InfoDepth(depth int, args ...interface{}) { } // WarningDepth logs to the WARNING log at the specified depth. -func WarningDepth(depth int, args ...interface{}) { +func WarningDepth(depth int, args ...any) { if DepthLogger != nil { DepthLogger.WarningDepth(depth, args...) } else { @@ -48,7 +48,7 @@ func WarningDepth(depth int, args ...interface{}) { } // ErrorDepth logs to the ERROR log at the specified depth. -func ErrorDepth(depth int, args ...interface{}) { +func ErrorDepth(depth int, args ...any) { if DepthLogger != nil { DepthLogger.ErrorDepth(depth, args...) } else { @@ -57,7 +57,7 @@ func ErrorDepth(depth int, args ...interface{}) { } // FatalDepth logs to the FATAL log at the specified depth. -func FatalDepth(depth int, args ...interface{}) { +func FatalDepth(depth int, args ...any) { if DepthLogger != nil { DepthLogger.FatalDepth(depth, args...) } else { @@ -71,35 +71,35 @@ func FatalDepth(depth int, args ...interface{}) { // is defined here to avoid a circular dependency. 
type LoggerV2 interface { // Info logs to INFO log. Arguments are handled in the manner of fmt.Print. - Info(args ...interface{}) + Info(args ...any) // Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. - Infoln(args ...interface{}) + Infoln(args ...any) // Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf. - Infof(format string, args ...interface{}) + Infof(format string, args ...any) // Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print. - Warning(args ...interface{}) + Warning(args ...any) // Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println. - Warningln(args ...interface{}) + Warningln(args ...any) // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. - Warningf(format string, args ...interface{}) + Warningf(format string, args ...any) // Error logs to ERROR log. Arguments are handled in the manner of fmt.Print. - Error(args ...interface{}) + Error(args ...any) // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. - Errorln(args ...interface{}) + Errorln(args ...any) // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. - Errorf(format string, args ...interface{}) + Errorf(format string, args ...any) // Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print. // gRPC ensures that all Fatal logs will exit with os.Exit(1). // Implementations may also call os.Exit() with a non-zero exit code. - Fatal(args ...interface{}) + Fatal(args ...any) // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println. // gRPC ensures that all Fatal logs will exit with os.Exit(1). // Implementations may also call os.Exit() with a non-zero exit code. - Fatalln(args ...interface{}) + Fatalln(args ...any) // Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. // gRPC ensures that all Fatal logs will exit with os.Exit(1). // Implementations may also call os.Exit() with a non-zero exit code. - Fatalf(format string, args ...interface{}) + Fatalf(format string, args ...any) // V reports whether verbosity level l is at least the requested verbose level. V(l int) bool } @@ -116,11 +116,11 @@ type LoggerV2 interface { // later release. type DepthLoggerV2 interface { // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println. - InfoDepth(depth int, args ...interface{}) + InfoDepth(depth int, args ...any) // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println. - WarningDepth(depth int, args ...interface{}) + WarningDepth(depth int, args ...any) // ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println. - ErrorDepth(depth int, args ...interface{}) + ErrorDepth(depth int, args ...any) // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println. - FatalDepth(depth int, args ...interface{}) + FatalDepth(depth int, args ...any) } diff --git a/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go b/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go index 02224b42ca..faa998de76 100644 --- a/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go +++ b/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go @@ -31,7 +31,7 @@ type PrefixLogger struct { } // Infof does info logging. 
-func (pl *PrefixLogger) Infof(format string, args ...interface{}) { +func (pl *PrefixLogger) Infof(format string, args ...any) { if pl != nil { // Handle nil, so the tests can pass in a nil logger. format = pl.prefix + format @@ -42,7 +42,7 @@ func (pl *PrefixLogger) Infof(format string, args ...interface{}) { } // Warningf does warning logging. -func (pl *PrefixLogger) Warningf(format string, args ...interface{}) { +func (pl *PrefixLogger) Warningf(format string, args ...any) { if pl != nil { format = pl.prefix + format pl.logger.WarningDepth(1, fmt.Sprintf(format, args...)) @@ -52,7 +52,7 @@ func (pl *PrefixLogger) Warningf(format string, args ...interface{}) { } // Errorf does error logging. -func (pl *PrefixLogger) Errorf(format string, args ...interface{}) { +func (pl *PrefixLogger) Errorf(format string, args ...any) { if pl != nil { format = pl.prefix + format pl.logger.ErrorDepth(1, fmt.Sprintf(format, args...)) @@ -62,7 +62,7 @@ func (pl *PrefixLogger) Errorf(format string, args ...interface{}) { } // Debugf does info logging at verbose level 2. -func (pl *PrefixLogger) Debugf(format string, args ...interface{}) { +func (pl *PrefixLogger) Debugf(format string, args ...any) { // TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe // rewrite PrefixLogger a little to ensure that we don't use the global // `Logger` here, and instead use the `logger` field. diff --git a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go index d08e3e9076..aa97273e7d 100644 --- a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go +++ b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go @@ -80,6 +80,13 @@ func Uint32() uint32 { return r.Uint32() } +// ExpFloat64 implements rand.ExpFloat64 on the grpcrand global source. +func ExpFloat64() float64 { + mu.Lock() + defer mu.Unlock() + return r.ExpFloat64() +} + // Shuffle implements rand.Shuffle on the grpcrand global source. var Shuffle = func(n int, f func(int, int)) { mu.Lock() diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go index 37b8d4117e..900917dbe6 100644 --- a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go +++ b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go @@ -32,10 +32,10 @@ import ( // // This type is safe for concurrent access. type CallbackSerializer struct { - // Done is closed once the serializer is shut down completely, i.e all + // done is closed once the serializer is shut down completely, i.e all // scheduled callbacks are executed and the serializer has deallocated all // its resources. - Done chan struct{} + done chan struct{} callbacks *buffer.Unbounded closedMu sync.Mutex @@ -48,12 +48,12 @@ type CallbackSerializer struct { // callbacks will be added once this context is canceled, and any pending un-run // callbacks will be executed before the serializer is shut down. func NewCallbackSerializer(ctx context.Context) *CallbackSerializer { - t := &CallbackSerializer{ - Done: make(chan struct{}), + cs := &CallbackSerializer{ + done: make(chan struct{}), callbacks: buffer.NewUnbounded(), } - go t.run(ctx) - return t + go cs.run(ctx) + return cs } // Schedule adds a callback to be scheduled after existing callbacks are run. 
@@ -64,56 +64,62 @@ func NewCallbackSerializer(ctx context.Context) *CallbackSerializer { // Return value indicates if the callback was successfully added to the list of // callbacks to be executed by the serializer. It is not possible to add // callbacks once the context passed to NewCallbackSerializer is cancelled. -func (t *CallbackSerializer) Schedule(f func(ctx context.Context)) bool { - t.closedMu.Lock() - defer t.closedMu.Unlock() +func (cs *CallbackSerializer) Schedule(f func(ctx context.Context)) bool { + cs.closedMu.Lock() + defer cs.closedMu.Unlock() - if t.closed { + if cs.closed { return false } - t.callbacks.Put(f) + cs.callbacks.Put(f) return true } -func (t *CallbackSerializer) run(ctx context.Context) { +func (cs *CallbackSerializer) run(ctx context.Context) { var backlog []func(context.Context) - defer close(t.Done) + defer close(cs.done) for ctx.Err() == nil { select { case <-ctx.Done(): // Do nothing here. Next iteration of the for loop will not happen, // since ctx.Err() would be non-nil. - case callback, ok := <-t.callbacks.Get(): + case callback, ok := <-cs.callbacks.Get(): if !ok { return } - t.callbacks.Load() + cs.callbacks.Load() callback.(func(ctx context.Context))(ctx) } } // Fetch pending callbacks if any, and execute them before returning from - // this method and closing t.Done. - t.closedMu.Lock() - t.closed = true - backlog = t.fetchPendingCallbacks() - t.callbacks.Close() - t.closedMu.Unlock() + // this method and closing cs.done. + cs.closedMu.Lock() + cs.closed = true + backlog = cs.fetchPendingCallbacks() + cs.callbacks.Close() + cs.closedMu.Unlock() for _, b := range backlog { b(ctx) } } -func (t *CallbackSerializer) fetchPendingCallbacks() []func(context.Context) { +func (cs *CallbackSerializer) fetchPendingCallbacks() []func(context.Context) { var backlog []func(context.Context) for { select { - case b := <-t.callbacks.Get(): + case b := <-cs.callbacks.Get(): backlog = append(backlog, b.(func(context.Context))) - t.callbacks.Load() + cs.callbacks.Load() default: return backlog } } } + +// Done returns a channel that is closed after the context passed to +// NewCallbackSerializer is canceled and all callbacks have been executed. +func (cs *CallbackSerializer) Done() <-chan struct{} { + return cs.done +} diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go b/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go new file mode 100644 index 0000000000..aef8cec1ab --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go @@ -0,0 +1,121 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcsync + +import ( + "context" + "sync" +) + +// Subscriber represents an entity that is subscribed to messages published on +// a PubSub. It wraps the callback to be invoked by the PubSub when a new +// message is published. +type Subscriber interface { + // OnMessage is invoked when a new message is published. Implementations + // must not block in this method. 
+ OnMessage(msg any) +} + +// PubSub is a simple one-to-many publish-subscribe system that supports +// messages of arbitrary type. It guarantees that messages are delivered in +// the same order in which they were published. +// +// Publisher invokes the Publish() method to publish new messages, while +// subscribers interested in receiving these messages register a callback +// via the Subscribe() method. +// +// Once a PubSub is stopped, no more messages can be published, but any pending +// published messages will be delivered to the subscribers. Done may be used +// to determine when all published messages have been delivered. +type PubSub struct { + cs *CallbackSerializer + + // Access to the below fields are guarded by this mutex. + mu sync.Mutex + msg any + subscribers map[Subscriber]bool +} + +// NewPubSub returns a new PubSub instance. Users should cancel the +// provided context to shutdown the PubSub. +func NewPubSub(ctx context.Context) *PubSub { + return &PubSub{ + cs: NewCallbackSerializer(ctx), + subscribers: map[Subscriber]bool{}, + } +} + +// Subscribe registers the provided Subscriber to the PubSub. +// +// If the PubSub contains a previously published message, the Subscriber's +// OnMessage() callback will be invoked asynchronously with the existing +// message to begin with, and subsequently for every newly published message. +// +// The caller is responsible for invoking the returned cancel function to +// unsubscribe itself from the PubSub. +func (ps *PubSub) Subscribe(sub Subscriber) (cancel func()) { + ps.mu.Lock() + defer ps.mu.Unlock() + + ps.subscribers[sub] = true + + if ps.msg != nil { + msg := ps.msg + ps.cs.Schedule(func(context.Context) { + ps.mu.Lock() + defer ps.mu.Unlock() + if !ps.subscribers[sub] { + return + } + sub.OnMessage(msg) + }) + } + + return func() { + ps.mu.Lock() + defer ps.mu.Unlock() + delete(ps.subscribers, sub) + } +} + +// Publish publishes the provided message to the PubSub, and invokes +// callbacks registered by subscribers asynchronously. +func (ps *PubSub) Publish(msg any) { + ps.mu.Lock() + defer ps.mu.Unlock() + + ps.msg = msg + for sub := range ps.subscribers { + s := sub + ps.cs.Schedule(func(context.Context) { + ps.mu.Lock() + defer ps.mu.Unlock() + if !ps.subscribers[s] { + return + } + s.OnMessage(msg) + }) + } +} + +// Done returns a channel that is closed after the context passed to NewPubSub +// is canceled and all updates have been sent to subscribers. +func (ps *PubSub) Done() <-chan struct{} { + return ps.cs.Done() +} diff --git a/vendor/google.golang.org/grpc/idle.go b/vendor/google.golang.org/grpc/internal/idle/idle.go similarity index 61% rename from vendor/google.golang.org/grpc/idle.go rename to vendor/google.golang.org/grpc/internal/idle/idle.go index dc3dc72f6b..6c272476e5 100644 --- a/vendor/google.golang.org/grpc/idle.go +++ b/vendor/google.golang.org/grpc/internal/idle/idle.go @@ -16,7 +16,9 @@ * */ -package grpc +// Package idle contains a component for managing idleness (entering and exiting) +// based on RPC activity. +package idle import ( "fmt" @@ -24,6 +26,8 @@ import ( "sync" "sync/atomic" "time" + + "google.golang.org/grpc/grpclog" ) // For overriding in unit tests. @@ -31,31 +35,31 @@ var timeAfterFunc = func(d time.Duration, f func()) *time.Timer { return time.AfterFunc(d, f) } -// idlenessEnforcer is the functionality provided by grpc.ClientConn to enter +// Enforcer is the functionality provided by grpc.ClientConn to enter // and exit from idle mode. 
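Because grpcsync is an internal package, the new PubSub is only reachable from inside the grpc module; purely as an illustration of the Subscriber contract (the watcher type, package, and run function are hypothetical):

    package pubsubexample

    import (
        "context"
        "log"

        "google.golang.org/grpc/internal/grpcsync"
    )

    // stateWatcher implements grpcsync.Subscriber; OnMessage must not block.
    type stateWatcher struct{}

    func (stateWatcher) OnMessage(msg any) {
        log.Printf("received update: %v", msg)
    }

    func run() {
        ctx, cancel := context.WithCancel(context.Background())
        ps := grpcsync.NewPubSub(ctx)

        unsubscribe := ps.Subscribe(stateWatcher{})
        defer unsubscribe()

        ps.Publish("READY") // delivered asynchronously, in publish order

        cancel()    // stop the PubSub; no further messages accepted
        <-ps.Done() // wait until pending deliveries have been made
    }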
-type idlenessEnforcer interface { - exitIdleMode() error - enterIdleMode() error +type Enforcer interface { + ExitIdleMode() error + EnterIdleMode() error } -// idlenessManager defines the functionality required to track RPC activity on a +// Manager defines the functionality required to track RPC activity on a // channel. -type idlenessManager interface { - onCallBegin() error - onCallEnd() - close() +type Manager interface { + OnCallBegin() error + OnCallEnd() + Close() } -type noopIdlenessManager struct{} +type noopManager struct{} -func (noopIdlenessManager) onCallBegin() error { return nil } -func (noopIdlenessManager) onCallEnd() {} -func (noopIdlenessManager) close() {} +func (noopManager) OnCallBegin() error { return nil } +func (noopManager) OnCallEnd() {} +func (noopManager) Close() {} -// idlenessManagerImpl implements the idlenessManager interface. It uses atomic -// operations to synchronize access to shared state and a mutex to guarantee -// mutual exclusion in a critical section. -type idlenessManagerImpl struct { +// manager implements the Manager interface. It uses atomic operations to +// synchronize access to shared state and a mutex to guarantee mutual exclusion +// in a critical section. +type manager struct { // State accessed atomically. lastCallEndTime int64 // Unix timestamp in nanos; time when the most recent RPC completed. activeCallsCount int32 // Count of active RPCs; -math.MaxInt32 means channel is idle or is trying to get there. @@ -64,14 +68,15 @@ type idlenessManagerImpl struct { // Can be accessed without atomics or mutex since these are set at creation // time and read-only after that. - enforcer idlenessEnforcer // Functionality provided by grpc.ClientConn. - timeout int64 // Idle timeout duration nanos stored as an int64. + enforcer Enforcer // Functionality provided by grpc.ClientConn. + timeout int64 // Idle timeout duration nanos stored as an int64. + logger grpclog.LoggerV2 // idleMu is used to guarantee mutual exclusion in two scenarios: // - Opposing intentions: // - a: Idle timeout has fired and handleIdleTimeout() is trying to put // the channel in idle mode because the channel has been inactive. - // - b: At the same time an RPC is made on the channel, and onCallBegin() + // - b: At the same time an RPC is made on the channel, and OnCallBegin() // is trying to prevent the channel from going idle. // - Competing intentions: // - The channel is in idle mode and there are multiple RPCs starting at @@ -83,28 +88,37 @@ type idlenessManagerImpl struct { timer *time.Timer } -// newIdlenessManager creates a new idleness manager implementation for the +// ManagerOptions is a collection of options used by +// NewManager. +type ManagerOptions struct { + Enforcer Enforcer + Timeout time.Duration + Logger grpclog.LoggerV2 +} + +// NewManager creates a new idleness manager implementation for the // given idle timeout. -func newIdlenessManager(enforcer idlenessEnforcer, idleTimeout time.Duration) idlenessManager { - if idleTimeout == 0 { - return noopIdlenessManager{} +func NewManager(opts ManagerOptions) Manager { + if opts.Timeout == 0 { + return noopManager{} } - i := &idlenessManagerImpl{ - enforcer: enforcer, - timeout: int64(idleTimeout), + m := &manager{ + enforcer: opts.Enforcer, + timeout: int64(opts.Timeout), + logger: opts.Logger, } - i.timer = timeAfterFunc(idleTimeout, i.handleIdleTimeout) - return i + m.timer = timeAfterFunc(opts.Timeout, m.handleIdleTimeout) + return m } // resetIdleTimer resets the idle timer to the given duration. 
This method // should only be called from the timer callback. -func (i *idlenessManagerImpl) resetIdleTimer(d time.Duration) { - i.idleMu.Lock() - defer i.idleMu.Unlock() +func (m *manager) resetIdleTimer(d time.Duration) { + m.idleMu.Lock() + defer m.idleMu.Unlock() - if i.timer == nil { + if m.timer == nil { // Only close sets timer to nil. We are done. return } @@ -112,47 +126,47 @@ func (i *idlenessManagerImpl) resetIdleTimer(d time.Duration) { // It is safe to ignore the return value from Reset() because this method is // only ever called from the timer callback, which means the timer has // already fired. - i.timer.Reset(d) + m.timer.Reset(d) } // handleIdleTimeout is the timer callback that is invoked upon expiry of the // configured idle timeout. The channel is considered inactive if there are no // ongoing calls and no RPC activity since the last time the timer fired. -func (i *idlenessManagerImpl) handleIdleTimeout() { - if i.isClosed() { +func (m *manager) handleIdleTimeout() { + if m.isClosed() { return } - if atomic.LoadInt32(&i.activeCallsCount) > 0 { - i.resetIdleTimer(time.Duration(i.timeout)) + if atomic.LoadInt32(&m.activeCallsCount) > 0 { + m.resetIdleTimer(time.Duration(m.timeout)) return } // There has been activity on the channel since we last got here. Reset the // timer and return. - if atomic.LoadInt32(&i.activeSinceLastTimerCheck) == 1 { + if atomic.LoadInt32(&m.activeSinceLastTimerCheck) == 1 { // Set the timer to fire after a duration of idle timeout, calculated // from the time the most recent RPC completed. - atomic.StoreInt32(&i.activeSinceLastTimerCheck, 0) - i.resetIdleTimer(time.Duration(atomic.LoadInt64(&i.lastCallEndTime) + i.timeout - time.Now().UnixNano())) + atomic.StoreInt32(&m.activeSinceLastTimerCheck, 0) + m.resetIdleTimer(time.Duration(atomic.LoadInt64(&m.lastCallEndTime) + m.timeout - time.Now().UnixNano())) return } // This CAS operation is extremely likely to succeed given that there has // been no activity since the last time we were here. Setting the - // activeCallsCount to -math.MaxInt32 indicates to onCallBegin() that the + // activeCallsCount to -math.MaxInt32 indicates to OnCallBegin() that the // channel is either in idle mode or is trying to get there. - if !atomic.CompareAndSwapInt32(&i.activeCallsCount, 0, -math.MaxInt32) { + if !atomic.CompareAndSwapInt32(&m.activeCallsCount, 0, -math.MaxInt32) { // This CAS operation can fail if an RPC started after we checked for // activity at the top of this method, or one was ongoing from before // the last time we were here. In both case, reset the timer and return. - i.resetIdleTimer(time.Duration(i.timeout)) + m.resetIdleTimer(time.Duration(m.timeout)) return } // Now that we've set the active calls count to -math.MaxInt32, it's time to // actually move to idle mode. - if i.tryEnterIdleMode() { + if m.tryEnterIdleMode() { // Successfully entered idle mode. No timer needed until we exit idle. return } @@ -160,8 +174,8 @@ func (i *idlenessManagerImpl) handleIdleTimeout() { // Failed to enter idle mode due to a concurrent RPC that kept the channel // active, or because of an error from the channel. Undo the attempt to // enter idle, and reset the timer to try again later. - atomic.AddInt32(&i.activeCallsCount, math.MaxInt32) - i.resetIdleTimer(time.Duration(i.timeout)) + atomic.AddInt32(&m.activeCallsCount, math.MaxInt32) + m.resetIdleTimer(time.Duration(m.timeout)) } // tryEnterIdleMode instructs the channel to enter idle mode. 
But before @@ -171,15 +185,15 @@ func (i *idlenessManagerImpl) handleIdleTimeout() { // Return value indicates whether or not the channel moved to idle mode. // // Holds idleMu which ensures mutual exclusion with exitIdleMode. -func (i *idlenessManagerImpl) tryEnterIdleMode() bool { - i.idleMu.Lock() - defer i.idleMu.Unlock() +func (m *manager) tryEnterIdleMode() bool { + m.idleMu.Lock() + defer m.idleMu.Unlock() - if atomic.LoadInt32(&i.activeCallsCount) != -math.MaxInt32 { + if atomic.LoadInt32(&m.activeCallsCount) != -math.MaxInt32 { // We raced and lost to a new RPC. Very rare, but stop entering idle. return false } - if atomic.LoadInt32(&i.activeSinceLastTimerCheck) == 1 { + if atomic.LoadInt32(&m.activeSinceLastTimerCheck) == 1 { // An very short RPC could have come in (and also finished) after we // checked for calls count and activity in handleIdleTimeout(), but // before the CAS operation. So, we need to check for activity again. @@ -189,99 +203,99 @@ func (i *idlenessManagerImpl) tryEnterIdleMode() bool { // No new RPCs have come in since we last set the active calls count value // -math.MaxInt32 in the timer callback. And since we have the lock, it is // safe to enter idle mode now. - if err := i.enforcer.enterIdleMode(); err != nil { - logger.Errorf("Failed to enter idle mode: %v", err) + if err := m.enforcer.EnterIdleMode(); err != nil { + m.logger.Errorf("Failed to enter idle mode: %v", err) return false } // Successfully entered idle mode. - i.actuallyIdle = true + m.actuallyIdle = true return true } -// onCallBegin is invoked at the start of every RPC. -func (i *idlenessManagerImpl) onCallBegin() error { - if i.isClosed() { +// OnCallBegin is invoked at the start of every RPC. +func (m *manager) OnCallBegin() error { + if m.isClosed() { return nil } - if atomic.AddInt32(&i.activeCallsCount, 1) > 0 { + if atomic.AddInt32(&m.activeCallsCount, 1) > 0 { // Channel is not idle now. Set the activity bit and allow the call. - atomic.StoreInt32(&i.activeSinceLastTimerCheck, 1) + atomic.StoreInt32(&m.activeSinceLastTimerCheck, 1) return nil } // Channel is either in idle mode or is in the process of moving to idle // mode. Attempt to exit idle mode to allow this RPC. - if err := i.exitIdleMode(); err != nil { + if err := m.exitIdleMode(); err != nil { // Undo the increment to calls count, and return an error causing the // RPC to fail. - atomic.AddInt32(&i.activeCallsCount, -1) + atomic.AddInt32(&m.activeCallsCount, -1) return err } - atomic.StoreInt32(&i.activeSinceLastTimerCheck, 1) + atomic.StoreInt32(&m.activeSinceLastTimerCheck, 1) return nil } // exitIdleMode instructs the channel to exit idle mode. // // Holds idleMu which ensures mutual exclusion with tryEnterIdleMode. -func (i *idlenessManagerImpl) exitIdleMode() error { - i.idleMu.Lock() - defer i.idleMu.Unlock() +func (m *manager) exitIdleMode() error { + m.idleMu.Lock() + defer m.idleMu.Unlock() - if !i.actuallyIdle { + if !m.actuallyIdle { // This can happen in two scenarios: // - handleIdleTimeout() set the calls count to -math.MaxInt32 and called // tryEnterIdleMode(). But before the latter could grab the lock, an RPC - // came in and onCallBegin() noticed that the calls count is negative. + // came in and OnCallBegin() noticed that the calls count is negative. // - Channel is in idle mode, and multiple new RPCs come in at the same - // time, all of them notice a negative calls count in onCallBegin and get + // time, all of them notice a negative calls count in OnCallBegin and get // here. 
The first one to get the lock would got the channel to exit idle. // // Either way, nothing to do here. return nil } - if err := i.enforcer.exitIdleMode(); err != nil { + if err := m.enforcer.ExitIdleMode(); err != nil { return fmt.Errorf("channel failed to exit idle mode: %v", err) } // Undo the idle entry process. This also respects any new RPC attempts. - atomic.AddInt32(&i.activeCallsCount, math.MaxInt32) - i.actuallyIdle = false + atomic.AddInt32(&m.activeCallsCount, math.MaxInt32) + m.actuallyIdle = false // Start a new timer to fire after the configured idle timeout. - i.timer = timeAfterFunc(time.Duration(i.timeout), i.handleIdleTimeout) + m.timer = timeAfterFunc(time.Duration(m.timeout), m.handleIdleTimeout) return nil } -// onCallEnd is invoked at the end of every RPC. -func (i *idlenessManagerImpl) onCallEnd() { - if i.isClosed() { +// OnCallEnd is invoked at the end of every RPC. +func (m *manager) OnCallEnd() { + if m.isClosed() { return } // Record the time at which the most recent call finished. - atomic.StoreInt64(&i.lastCallEndTime, time.Now().UnixNano()) + atomic.StoreInt64(&m.lastCallEndTime, time.Now().UnixNano()) // Decrement the active calls count. This count can temporarily go negative // when the timer callback is in the process of moving the channel to idle // mode, but one or more RPCs come in and complete before the timer callback // can get done with the process of moving to idle mode. - atomic.AddInt32(&i.activeCallsCount, -1) + atomic.AddInt32(&m.activeCallsCount, -1) } -func (i *idlenessManagerImpl) isClosed() bool { - return atomic.LoadInt32(&i.closed) == 1 +func (m *manager) isClosed() bool { + return atomic.LoadInt32(&m.closed) == 1 } -func (i *idlenessManagerImpl) close() { - atomic.StoreInt32(&i.closed, 1) +func (m *manager) Close() { + atomic.StoreInt32(&m.closed, 1) - i.idleMu.Lock() - i.timer.Stop() - i.timer = nil - i.idleMu.Unlock() + m.idleMu.Lock() + m.timer.Stop() + m.timer = nil + m.idleMu.Unlock() } diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go index 42ff39c844..0d94c63e06 100644 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -30,7 +30,7 @@ import ( var ( // WithHealthCheckFunc is set by dialoptions.go - WithHealthCheckFunc interface{} // func (HealthChecker) DialOption + WithHealthCheckFunc any // func (HealthChecker) DialOption // HealthCheckFunc is used to provide client-side LB channel health checking HealthCheckFunc HealthChecker // BalancerUnregister is exported by package balancer to unregister a balancer. @@ -38,8 +38,12 @@ var ( // KeepaliveMinPingTime is the minimum ping interval. This must be 10s by // default, but tests may wish to set it lower for convenience. KeepaliveMinPingTime = 10 * time.Second + // KeepaliveMinServerPingTime is the minimum ping interval for servers. + // This must be 1s by default, but tests may wish to set it lower for + // convenience. + KeepaliveMinServerPingTime = time.Second // ParseServiceConfig parses a JSON representation of the service config. - ParseServiceConfig interface{} // func(string) *serviceconfig.ParseResult + ParseServiceConfig any // func(string) *serviceconfig.ParseResult // EqualServiceConfigForTesting is for testing service config generation and // parsing. Both a and b should be returned by ParseServiceConfig. // This function compares the config without rawJSON stripped, in case the @@ -49,33 +53,33 @@ var ( // given name. 
This is set by package certprovider for use from xDS // bootstrap code while parsing certificate provider configs in the // bootstrap file. - GetCertificateProviderBuilder interface{} // func(string) certprovider.Builder + GetCertificateProviderBuilder any // func(string) certprovider.Builder // GetXDSHandshakeInfoForTesting returns a pointer to the xds.HandshakeInfo // stored in the passed in attributes. This is set by // credentials/xds/xds.go. - GetXDSHandshakeInfoForTesting interface{} // func (*attributes.Attributes) *xds.HandshakeInfo + GetXDSHandshakeInfoForTesting any // func (*attributes.Attributes) *xds.HandshakeInfo // GetServerCredentials returns the transport credentials configured on a // gRPC server. An xDS-enabled server needs to know what type of credentials // is configured on the underlying gRPC server. This is set by server.go. - GetServerCredentials interface{} // func (*grpc.Server) credentials.TransportCredentials + GetServerCredentials any // func (*grpc.Server) credentials.TransportCredentials // CanonicalString returns the canonical string of the code defined here: // https://github.com/grpc/grpc/blob/master/doc/statuscodes.md. // // This is used in the 1.0 release of gcp/observability, and thus must not be // deleted or changed. - CanonicalString interface{} // func (codes.Code) string + CanonicalString any // func (codes.Code) string // DrainServerTransports initiates a graceful close of existing connections // on a gRPC server accepted on the provided listener address. An // xDS-enabled server invokes this method on a grpc.Server when a particular // listener moves to "not-serving" mode. - DrainServerTransports interface{} // func(*grpc.Server, string) + DrainServerTransports any // func(*grpc.Server, string) // AddGlobalServerOptions adds an array of ServerOption that will be // effective globally for newly created servers. The priority will be: 1. // user-provided; 2. this method; 3. default values. // // This is used in the 1.0 release of gcp/observability, and thus must not be // deleted or changed. - AddGlobalServerOptions interface{} // func(opt ...ServerOption) + AddGlobalServerOptions any // func(opt ...ServerOption) // ClearGlobalServerOptions clears the array of extra ServerOption. This // method is useful in testing and benchmarking. // @@ -88,14 +92,14 @@ var ( // // This is used in the 1.0 release of gcp/observability, and thus must not be // deleted or changed. - AddGlobalDialOptions interface{} // func(opt ...DialOption) + AddGlobalDialOptions any // func(opt ...DialOption) // DisableGlobalDialOptions returns a DialOption that prevents the // ClientConn from applying the global DialOptions (set via // AddGlobalDialOptions). // // This is used in the 1.0 release of gcp/observability, and thus must not be // deleted or changed. - DisableGlobalDialOptions interface{} // func() grpc.DialOption + DisableGlobalDialOptions any // func() grpc.DialOption // ClearGlobalDialOptions clears the array of extra DialOption. This // method is useful in testing and benchmarking. // @@ -104,23 +108,26 @@ var ( ClearGlobalDialOptions func() // JoinDialOptions combines the dial options passed as arguments into a // single dial option. - JoinDialOptions interface{} // func(...grpc.DialOption) grpc.DialOption + JoinDialOptions any // func(...grpc.DialOption) grpc.DialOption // JoinServerOptions combines the server options passed as arguments into a // single server option. 
- JoinServerOptions interface{} // func(...grpc.ServerOption) grpc.ServerOption + JoinServerOptions any // func(...grpc.ServerOption) grpc.ServerOption // WithBinaryLogger returns a DialOption that specifies the binary logger // for a ClientConn. // // This is used in the 1.0 release of gcp/observability, and thus must not be // deleted or changed. - WithBinaryLogger interface{} // func(binarylog.Logger) grpc.DialOption + WithBinaryLogger any // func(binarylog.Logger) grpc.DialOption // BinaryLogger returns a ServerOption that can set the binary logger for a // server. // // This is used in the 1.0 release of gcp/observability, and thus must not be // deleted or changed. - BinaryLogger interface{} // func(binarylog.Logger) grpc.ServerOption + BinaryLogger any // func(binarylog.Logger) grpc.ServerOption + + // SubscribeToConnectivityStateChanges adds a grpcsync.Subscriber to a provided grpc.ClientConn + SubscribeToConnectivityStateChanges any // func(*grpc.ClientConn, grpcsync.Subscriber) // NewXDSResolverWithConfigForTesting creates a new xds resolver builder using // the provided xds bootstrap config instead of the global configuration from @@ -131,7 +138,7 @@ var ( // // This function should ONLY be used for testing and may not work with some // other features, including the CSDS service. - NewXDSResolverWithConfigForTesting interface{} // func([]byte) (resolver.Builder, error) + NewXDSResolverWithConfigForTesting any // func([]byte) (resolver.Builder, error) // RegisterRLSClusterSpecifierPluginForTesting registers the RLS Cluster // Specifier Plugin for testing purposes, regardless of the XDSRLS environment @@ -163,7 +170,17 @@ var ( UnregisterRBACHTTPFilterForTesting func() // ORCAAllowAnyMinReportingInterval is for examples/orca use ONLY. - ORCAAllowAnyMinReportingInterval interface{} // func(so *orca.ServiceOptions) + ORCAAllowAnyMinReportingInterval any // func(so *orca.ServiceOptions) + + // GRPCResolverSchemeExtraMetadata determines when gRPC will add extra + // metadata to RPCs. + GRPCResolverSchemeExtraMetadata string = "xds" + + // EnterIdleModeForTesting gets the ClientConn to enter IDLE mode. + EnterIdleModeForTesting any // func(*grpc.ClientConn) error + + // ExitIdleModeForTesting gets the ClientConn to exit IDLE mode. + ExitIdleModeForTesting any // func(*grpc.ClientConn) error ) // HealthChecker defines the signature of the client-side LB channel health checking function. @@ -174,7 +191,7 @@ var ( // // The health checking protocol is defined at: // https://github.com/grpc/grpc/blob/master/doc/health-checking.md -type HealthChecker func(ctx context.Context, newStream func(string) (interface{}, error), setConnectivityState func(connectivity.State, error), serviceName string) error +type HealthChecker func(ctx context.Context, newStream func(string) (any, error), setConnectivityState func(connectivity.State, error), serviceName string) error const ( // CredsBundleModeFallback switches GoogleDefaultCreds to fallback mode. 
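The internal package above keeps extending one pattern: a hook variable declared as any, a trailing comment recording the intended function type, assignment from the implementing package, and a type assertion at the call site. The new EnterIdleModeForTesting and ExitIdleModeForTesting entries follow it as well. A self-contained sketch of the pattern, using stand-in types rather than grpc's real ones:

package main

import "fmt"

// enterIdleForTesting mimics an internal hook such as EnterIdleModeForTesting:
// declared as any, documented with its intended signature, and type-asserted
// by consumers. clientConn here is a stand-in, not grpc's *grpc.ClientConn.
var enterIdleForTesting any // func(*clientConn) error

type clientConn struct{ idle bool }

func init() {
	// The implementing package assigns the concrete function, typically at
	// init time, which is why the variable can stay untyped.
	enterIdleForTesting = func(cc *clientConn) error {
		cc.idle = true
		return nil
	}
}

func main() {
	cc := &clientConn{}
	enter := enterIdleForTesting.(func(*clientConn) error) // recover the typed hook
	if err := enter(cc); err != nil {
		panic(err)
	}
	fmt.Println("idle:", cc.idle)
}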
diff --git a/vendor/google.golang.org/grpc/internal/metadata/metadata.go b/vendor/google.golang.org/grpc/internal/metadata/metadata.go index c82e608e07..900bfb7160 100644 --- a/vendor/google.golang.org/grpc/internal/metadata/metadata.go +++ b/vendor/google.golang.org/grpc/internal/metadata/metadata.go @@ -35,7 +35,7 @@ const mdKey = mdKeyType("grpc.internal.address.metadata") type mdValue metadata.MD -func (m mdValue) Equal(o interface{}) bool { +func (m mdValue) Equal(o any) bool { om, ok := o.(mdValue) if !ok { return false diff --git a/vendor/google.golang.org/grpc/internal/pretty/pretty.go b/vendor/google.golang.org/grpc/internal/pretty/pretty.go index 0177af4b51..7033191375 100644 --- a/vendor/google.golang.org/grpc/internal/pretty/pretty.go +++ b/vendor/google.golang.org/grpc/internal/pretty/pretty.go @@ -35,7 +35,7 @@ const jsonIndent = " " // ToJSON marshals the input into a json string. // // If marshal fails, it falls back to fmt.Sprintf("%+v"). -func ToJSON(e interface{}) string { +func ToJSON(e any) string { switch ee := e.(type) { case protov1.Message: mm := jsonpb.Marshaler{Indent: jsonIndent} diff --git a/vendor/google.golang.org/grpc/internal/resolver/config_selector.go b/vendor/google.golang.org/grpc/internal/resolver/config_selector.go index c7a18a948a..f0603871c9 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/config_selector.go +++ b/vendor/google.golang.org/grpc/internal/resolver/config_selector.go @@ -92,7 +92,7 @@ type ClientStream interface { // calling RecvMsg on the same stream at the same time, but it is not safe // to call SendMsg on the same stream in different goroutines. It is also // not safe to call CloseSend concurrently with SendMsg. - SendMsg(m interface{}) error + SendMsg(m any) error // RecvMsg blocks until it receives a message into m or the stream is // done. It returns io.EOF when the stream completes successfully. On // any other error, the stream is aborted and the error contains the RPC @@ -101,7 +101,7 @@ type ClientStream interface { // It is safe to have a goroutine calling SendMsg and another goroutine // calling RecvMsg on the same stream at the same time, but it is not // safe to call RecvMsg on the same stream in different goroutines. - RecvMsg(m interface{}) error + RecvMsg(m any) error } // ClientInterceptor is an interceptor for gRPC client streams. diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go index 09a667f33c..99e1e5b36c 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go @@ -62,7 +62,8 @@ const ( defaultPort = "443" defaultDNSSvrPort = "53" golang = "GO" - // txtPrefix is the prefix string to be prepended to the host name for txt record lookup. + // txtPrefix is the prefix string to be prepended to the host name for txt + // record lookup. txtPrefix = "_grpc_config." // In DNS, service config is encoded in a TXT record via the mechanism // described in RFC-1464 using the attribute name grpc_config. 
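Most of the churn in metadata.go, pretty.go and config_selector.go above is the mechanical interface{} -> any rewrite. Since any has been a predeclared alias for interface{} from Go 1.18 onward, the new signatures are type-identical to the old ones and callers need no changes; a quick self-contained check:

package main

import "fmt"

// describe accepts any, but its type is identical to func(interface{}) string.
func describe(v any) string { return fmt.Sprintf("%T: %v", v, v) }

func main() {
	var a interface{} = "hello"
	var b any = a // freely assignable in both directions: same type, two spellings
	fmt.Println(describe(a), describe(b))

	if s, ok := b.(string); ok { // type assertions behave identically
		fmt.Println("asserted:", s)
	}
}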
@@ -86,14 +87,14 @@ var ( minDNSResRate = 30 * time.Second ) -var customAuthorityDialler = func(authority string) func(ctx context.Context, network, address string) (net.Conn, error) { - return func(ctx context.Context, network, address string) (net.Conn, error) { +var addressDialer = func(address string) func(context.Context, string, string) (net.Conn, error) { + return func(ctx context.Context, network, _ string) (net.Conn, error) { var dialer net.Dialer - return dialer.DialContext(ctx, network, authority) + return dialer.DialContext(ctx, network, address) } } -var customAuthorityResolver = func(authority string) (netResolver, error) { +var newNetResolver = func(authority string) (netResolver, error) { host, port, err := parseTarget(authority, defaultDNSSvrPort) if err != nil { return nil, err @@ -103,7 +104,7 @@ var customAuthorityResolver = func(authority string) (netResolver, error) { return &net.Resolver{ PreferGo: true, - Dial: customAuthorityDialler(authorityWithPort), + Dial: addressDialer(authorityWithPort), }, nil } @@ -114,7 +115,8 @@ func NewBuilder() resolver.Builder { type dnsBuilder struct{} -// Build creates and starts a DNS resolver that watches the name resolution of the target. +// Build creates and starts a DNS resolver that watches the name resolution of +// the target. func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { host, port, err := parseTarget(target.Endpoint(), defaultPort) if err != nil { @@ -143,7 +145,7 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts if target.URL.Host == "" { d.resolver = defaultResolver } else { - d.resolver, err = customAuthorityResolver(target.URL.Host) + d.resolver, err = newNetResolver(target.URL.Host) if err != nil { return nil, err } @@ -180,19 +182,22 @@ type dnsResolver struct { ctx context.Context cancel context.CancelFunc cc resolver.ClientConn - // rn channel is used by ResolveNow() to force an immediate resolution of the target. + // rn channel is used by ResolveNow() to force an immediate resolution of the + // target. rn chan struct{} - // wg is used to enforce Close() to return after the watcher() goroutine has finished. - // Otherwise, data race will be possible. [Race Example] in dns_resolver_test we - // replace the real lookup functions with mocked ones to facilitate testing. - // If Close() doesn't wait for watcher() goroutine finishes, race detector sometimes - // will warns lookup (READ the lookup function pointers) inside watcher() goroutine - // has data race with replaceNetFunc (WRITE the lookup function pointers). + // wg is used to enforce Close() to return after the watcher() goroutine has + // finished. Otherwise, data race will be possible. [Race Example] in + // dns_resolver_test we replace the real lookup functions with mocked ones to + // facilitate testing. If Close() doesn't wait for watcher() goroutine + // finishes, race detector sometimes will warns lookup (READ the lookup + // function pointers) inside watcher() goroutine has data race with + // replaceNetFunc (WRITE the lookup function pointers). wg sync.WaitGroup disableServiceConfig bool } -// ResolveNow invoke an immediate resolution of the target that this dnsResolver watches. +// ResolveNow invoke an immediate resolution of the target that this +// dnsResolver watches. 
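The customAuthorityDialler/customAuthorityResolver -> addressDialer/newNetResolver rename above keeps the same mechanism: a *net.Resolver with PreferGo set and a Dial function that ignores the address Go derives from the system DNS configuration and always dials the authority taken from the target URI. A standalone sketch of that mechanism (it skips the parseTarget/default-port handling the real helper performs, and the resolver address is just an example):

package main

import (
	"context"
	"fmt"
	"net"
)

// resolverFor returns a *net.Resolver that sends every lookup to the given
// DNS server address, mirroring newNetResolver above in simplified form.
func resolverFor(dnsServer string) *net.Resolver {
	return &net.Resolver{
		PreferGo: true,
		Dial: func(ctx context.Context, network, _ string) (net.Conn, error) {
			var d net.Dialer
			// The resolver-chosen address is ignored; we always dial the
			// configured authority instead.
			return d.DialContext(ctx, network, dnsServer)
		},
	}
}

func main() {
	r := resolverFor("8.8.8.8:53") // any reachable DNS server works
	addrs, err := r.LookupHost(context.Background(), "grpc.io")
	fmt.Println(addrs, err)
}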
func (d *dnsResolver) ResolveNow(resolver.ResolveNowOptions) { select { case d.rn <- struct{}{}: @@ -220,8 +225,8 @@ func (d *dnsResolver) watcher() { var timer *time.Timer if err == nil { - // Success resolving, wait for the next ResolveNow. However, also wait 30 seconds at the very least - // to prevent constantly re-resolving. + // Success resolving, wait for the next ResolveNow. However, also wait 30 + // seconds at the very least to prevent constantly re-resolving. backoffIndex = 1 timer = newTimerDNSResRate(minDNSResRate) select { @@ -231,7 +236,8 @@ func (d *dnsResolver) watcher() { case <-d.rn: } } else { - // Poll on an error found in DNS Resolver or an error received from ClientConn. + // Poll on an error found in DNS Resolver or an error received from + // ClientConn. timer = newTimer(backoff.DefaultExponential.Backoff(backoffIndex)) backoffIndex++ } @@ -278,7 +284,8 @@ func (d *dnsResolver) lookupSRV() ([]resolver.Address, error) { } func handleDNSError(err error, lookupType string) error { - if dnsErr, ok := err.(*net.DNSError); ok && !dnsErr.IsTimeout && !dnsErr.IsTemporary { + dnsErr, ok := err.(*net.DNSError) + if ok && !dnsErr.IsTimeout && !dnsErr.IsTemporary { // Timeouts and temporary errors should be communicated to gRPC to // attempt another DNS query (with backoff). Other errors should be // suppressed (they may represent the absence of a TXT record). @@ -307,10 +314,12 @@ func (d *dnsResolver) lookupTXT() *serviceconfig.ParseResult { res += s } - // TXT record must have "grpc_config=" attribute in order to be used as service config. + // TXT record must have "grpc_config=" attribute in order to be used as + // service config. if !strings.HasPrefix(res, txtAttribute) { logger.Warningf("dns: TXT record %v missing %v attribute", res, txtAttribute) - // This is not an error; it is the equivalent of not having a service config. + // This is not an error; it is the equivalent of not having a service + // config. return nil } sc := canaryingSC(strings.TrimPrefix(res, txtAttribute)) @@ -352,9 +361,10 @@ func (d *dnsResolver) lookup() (*resolver.State, error) { return &state, nil } -// formatIP returns ok = false if addr is not a valid textual representation of an IP address. -// If addr is an IPv4 address, return the addr and ok = true. -// If addr is an IPv6 address, return the addr enclosed in square brackets and ok = true. +// formatIP returns ok = false if addr is not a valid textual representation of +// an IP address. If addr is an IPv4 address, return the addr and ok = true. +// If addr is an IPv6 address, return the addr enclosed in square brackets and +// ok = true. func formatIP(addr string) (addrIP string, ok bool) { ip := net.ParseIP(addr) if ip == nil { @@ -366,10 +376,10 @@ func formatIP(addr string) (addrIP string, ok bool) { return "[" + addr + "]", true } -// parseTarget takes the user input target string and default port, returns formatted host and port info. -// If target doesn't specify a port, set the port to be the defaultPort. -// If target is in IPv6 format and host-name is enclosed in square brackets, brackets -// are stripped when setting the host. +// parseTarget takes the user input target string and default port, returns +// formatted host and port info. If target doesn't specify a port, set the port +// to be the defaultPort. If target is in IPv6 format and host-name is enclosed +// in square brackets, brackets are stripped when setting the host. 
// examples: // target: "www.google.com" defaultPort: "443" returns host: "www.google.com", port: "443" // target: "ipv4-host:80" defaultPort: "443" returns host: "ipv4-host", port: "80" @@ -385,12 +395,14 @@ func parseTarget(target, defaultPort string) (host, port string, err error) { } if host, port, err = net.SplitHostPort(target); err == nil { if port == "" { - // If the port field is empty (target ends with colon), e.g. "[::1]:", this is an error. + // If the port field is empty (target ends with colon), e.g. "[::1]:", + // this is an error. return "", "", errEndsWithColon } // target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port if host == "" { - // Keep consistent with net.Dial(): If the host is empty, as in ":80", the local system is assumed. + // Keep consistent with net.Dial(): If the host is empty, as in ":80", + // the local system is assumed. host = "localhost" } return host, port, nil diff --git a/vendor/google.golang.org/grpc/internal/status/status.go b/vendor/google.golang.org/grpc/internal/status/status.go index b0ead4f54f..03ef2fedd5 100644 --- a/vendor/google.golang.org/grpc/internal/status/status.go +++ b/vendor/google.golang.org/grpc/internal/status/status.go @@ -43,13 +43,41 @@ type Status struct { s *spb.Status } +// NewWithProto returns a new status including details from statusProto. This +// is meant to be used by the gRPC library only. +func NewWithProto(code codes.Code, message string, statusProto []string) *Status { + if len(statusProto) != 1 { + // No grpc-status-details bin header, or multiple; just ignore. + return &Status{s: &spb.Status{Code: int32(code), Message: message}} + } + st := &spb.Status{} + if err := proto.Unmarshal([]byte(statusProto[0]), st); err != nil { + // Probably not a google.rpc.Status proto; do not provide details. + return &Status{s: &spb.Status{Code: int32(code), Message: message}} + } + if st.Code == int32(code) { + // The codes match between the grpc-status header and the + // grpc-status-details-bin header; use the full details proto. + return &Status{s: st} + } + return &Status{ + s: &spb.Status{ + Code: int32(codes.Internal), + Message: fmt.Sprintf( + "grpc-status-details-bin mismatch: grpc-status=%v, grpc-message=%q, grpc-status-details-bin=%+v", + code, message, st, + ), + }, + } +} + // New returns a Status representing c and msg. func New(c codes.Code, msg string) *Status { return &Status{s: &spb.Status{Code: int32(c), Message: msg}} } // Newf returns New(c, fmt.Sprintf(format, a...)). -func Newf(c codes.Code, format string, a ...interface{}) *Status { +func Newf(c codes.Code, format string, a ...any) *Status { return New(c, fmt.Sprintf(format, a...)) } @@ -64,7 +92,7 @@ func Err(c codes.Code, msg string) error { } // Errorf returns Error(c, fmt.Sprintf(format, a...)). -func Errorf(c codes.Code, format string, a ...interface{}) error { +func Errorf(c codes.Code, format string, a ...any) error { return Err(c, fmt.Sprintf(format, a...)) } @@ -120,11 +148,11 @@ func (s *Status) WithDetails(details ...proto.Message) (*Status, error) { // Details returns a slice of details messages attached to the status. // If a detail cannot be decoded, the error is returned in place of the detail. 
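The new NewWithProto constructor above is what ultimately feeds rich error details from the grpc-status-details-bin trailer into the Status a caller sees, including the codes.Internal fallback when the header and the embedded proto disagree. On the public surface nothing changes except that Details() now returns []any; a short illustration using the public status and errdetails packages (neither is part of this diff):

package main

import (
	"fmt"

	"google.golang.org/genproto/googleapis/rpc/errdetails"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	// Attach a typed detail to a status; on the wire this is what the
	// grpc-status-details-bin trailer carries as a serialized google.rpc.Status.
	st, err := status.New(codes.ResourceExhausted, "quota exceeded").WithDetails(
		&errdetails.QuotaFailure{
			Violations: []*errdetails.QuotaFailure_Violation{
				{Subject: "project:1234", Description: "daily limit reached"},
			},
		},
	)
	if err != nil {
		panic(err)
	}

	// Details() now returns []any (previously []interface{}); a type switch
	// recovers the concrete detail messages.
	for _, d := range st.Details() {
		switch d := d.(type) {
		case *errdetails.QuotaFailure:
			fmt.Println("quota failure:", d.Violations[0].Description)
		default:
			fmt.Printf("unknown detail: %T\n", d)
		}
	}
}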
-func (s *Status) Details() []interface{} { +func (s *Status) Details() []any { if s == nil || s.s == nil { return nil } - details := make([]interface{}, 0, len(s.s.Details)) + details := make([]any, 0, len(s.s.Details)) for _, any := range s.s.Details { detail := &ptypes.DynamicAny{} if err := ptypes.UnmarshalAny(any, detail); err != nil { diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go index be5a9c81eb..b330ccedc8 100644 --- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go +++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go @@ -40,7 +40,7 @@ var updateHeaderTblSize = func(e *hpack.Encoder, v uint32) { } type itemNode struct { - it interface{} + it any next *itemNode } @@ -49,7 +49,7 @@ type itemList struct { tail *itemNode } -func (il *itemList) enqueue(i interface{}) { +func (il *itemList) enqueue(i any) { n := &itemNode{it: i} if il.tail == nil { il.head, il.tail = n, n @@ -61,11 +61,11 @@ func (il *itemList) enqueue(i interface{}) { // peek returns the first item in the list without removing it from the // list. -func (il *itemList) peek() interface{} { +func (il *itemList) peek() any { return il.head.it } -func (il *itemList) dequeue() interface{} { +func (il *itemList) dequeue() any { if il.head == nil { return nil } @@ -336,7 +336,7 @@ func (c *controlBuffer) put(it cbItem) error { return err } -func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it cbItem) (bool, error) { +func (c *controlBuffer) executeAndPut(f func(it any) bool, it cbItem) (bool, error) { var wakeUp bool c.mu.Lock() if c.err != nil { @@ -373,7 +373,7 @@ func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it cbItem) (b } // Note argument f should never be nil. -func (c *controlBuffer) execute(f func(it interface{}) bool, it interface{}) (bool, error) { +func (c *controlBuffer) execute(f func(it any) bool, it any) (bool, error) { c.mu.Lock() if c.err != nil { c.mu.Unlock() @@ -387,7 +387,7 @@ func (c *controlBuffer) execute(f func(it interface{}) bool, it interface{}) (bo return true, nil } -func (c *controlBuffer) get(block bool) (interface{}, error) { +func (c *controlBuffer) get(block bool) (any, error) { for { c.mu.Lock() if c.err != nil { @@ -830,7 +830,7 @@ func (l *loopyWriter) goAwayHandler(g *goAway) error { return nil } -func (l *loopyWriter) handle(i interface{}) error { +func (l *loopyWriter) handle(i any) error { switch i := i.(type) { case *incomingWindowUpdate: l.incomingWindowUpdateHandler(i) diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go index 98f80e3fa0..17f7a21b5a 100644 --- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -220,18 +220,20 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro h.Set("Grpc-Message", encodeGrpcMessage(m)) } + s.hdrMu.Lock() if p := st.Proto(); p != nil && len(p.Details) > 0 { + delete(s.trailer, grpcStatusDetailsBinHeader) stBytes, err := proto.Marshal(p) if err != nil { // TODO: return error instead, when callers are able to handle it. 
panic(err) } - h.Set("Grpc-Status-Details-Bin", encodeBinHeader(stBytes)) + h.Set(grpcStatusDetailsBinHeader, encodeBinHeader(stBytes)) } - if md := s.Trailer(); len(md) > 0 { - for k, vv := range md { + if len(s.trailer) > 0 { + for k, vv := range s.trailer { // Clients don't tolerate reading restricted headers after some non restricted ones were sent. if isReservedHeader(k) { continue @@ -243,6 +245,7 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro } } } + s.hdrMu.Unlock() }) if err == nil { // transport has not been closed @@ -287,7 +290,7 @@ func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) { } // writeCustomHeaders sets custom headers set on the stream via SetHeader -// on the first write call (Write, WriteHeader, or WriteStatus). +// on the first write call (Write, WriteHeader, or WriteStatus) func (ht *serverHandlerTransport) writeCustomHeaders(s *Stream) { h := ht.rw.Header() @@ -344,7 +347,7 @@ func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { return err } -func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), traceCtx func(context.Context, string) context.Context) { +func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream)) { // With this transport type there will be exactly 1 stream: this HTTP request. ctx := ht.req.Context() diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index 326bf08480..d6f5c49358 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -330,7 +330,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts readerDone: make(chan struct{}), writerDone: make(chan struct{}), goAway: make(chan struct{}), - framer: newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize), + framer: newFramer(conn, writeBufSize, readBufSize, opts.SharedWriteBuffer, maxHeaderListSize), fc: &trInFlow{limit: uint32(icwz)}, scheme: scheme, activeStreams: make(map[uint32]*Stream), @@ -762,7 +762,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, firstTry := true var ch chan struct{} transportDrainRequired := false - checkForStreamQuota := func(it interface{}) bool { + checkForStreamQuota := func(it any) bool { if t.streamQuota <= 0 { // Can go negative if server decreases it. if firstTry { t.waitingStreams++ @@ -800,7 +800,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, return true } var hdrListSizeErr error - checkForHeaderListSize := func(it interface{}) bool { + checkForHeaderListSize := func(it any) bool { if t.maxSendHeaderListSize == nil { return true } @@ -815,7 +815,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, return true } for { - success, err := t.controlBuf.executeAndPut(func(it interface{}) bool { + success, err := t.controlBuf.executeAndPut(func(it any) bool { return checkForHeaderListSize(it) && checkForStreamQuota(it) }, hdr) if err != nil { @@ -927,7 +927,7 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2. 
rst: rst, rstCode: rstCode, } - addBackStreamQuota := func(interface{}) bool { + addBackStreamQuota := func(any) bool { t.streamQuota++ if t.streamQuota > 0 && t.waitingStreams > 0 { select { @@ -1080,7 +1080,7 @@ func (t *http2Client) updateWindow(s *Stream, n uint32) { // for the transport and the stream based on the current bdp // estimation. func (t *http2Client) updateFlowControl(n uint32) { - updateIWS := func(interface{}) bool { + updateIWS := func(any) bool { t.initialWindowSize = int32(n) t.mu.Lock() for _, s := range t.activeStreams { @@ -1233,7 +1233,7 @@ func (t *http2Client) handleSettings(f *http2.SettingsFrame, isFirst bool) { } updateFuncs = append(updateFuncs, updateStreamQuota) } - t.controlBuf.executeAndPut(func(interface{}) bool { + t.controlBuf.executeAndPut(func(any) bool { for _, f := range updateFuncs { f() } @@ -1399,7 +1399,6 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { mdata = make(map[string][]string) contentTypeErr = "malformed header: missing HTTP content-type" grpcMessage string - statusGen *status.Status recvCompress string httpStatusCode *int httpStatusErr string @@ -1434,12 +1433,6 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { rawStatusCode = codes.Code(uint32(code)) case "grpc-message": grpcMessage = decodeGrpcMessage(hf.Value) - case "grpc-status-details-bin": - var err error - statusGen, err = decodeGRPCStatusDetails(hf.Value) - if err != nil { - headerError = fmt.Sprintf("transport: malformed grpc-status-details-bin: %v", err) - } case ":status": if hf.Value == "200" { httpStatusErr = "" @@ -1505,14 +1498,15 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { return } - isHeader := false - - // If headerChan hasn't been closed yet - if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) { - s.headerValid = true - if !endStream { - // HEADERS frame block carries a Response-Headers. - isHeader = true + // For headers, set them in s.header and close headerChan. For trailers or + // trailers-only, closeStream will set the trailers and close headerChan as + // needed. + if !endStream { + // If headerChan hasn't been closed yet (expected, given we checked it + // above, but something else could have potentially closed the whole + // stream). + if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) { + s.headerValid = true // These values can be set without any synchronization because // stream goroutine will read it only after seeing a closed // headerChan which we'll close after setting this. @@ -1520,15 +1514,12 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { if len(mdata) > 0 { s.header = mdata } - } else { - // HEADERS frame block carries a Trailers-Only. - s.noHeaders = true + close(s.headerChan) } - close(s.headerChan) } for _, sh := range t.statsHandlers { - if isHeader { + if !endStream { inHeader := &stats.InHeader{ Client: true, WireLength: int(frame.Header().Length), @@ -1550,13 +1541,12 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { return } - if statusGen == nil { - statusGen = status.New(rawStatusCode, grpcMessage) - } + status := istatus.NewWithProto(rawStatusCode, grpcMessage, mdata[grpcStatusDetailsBinHeader]) - // if client received END_STREAM from server while stream was still active, send RST_STREAM - rst := s.getState() == streamActive - t.closeStream(s, io.EOF, rst, http2.ErrCodeNo, statusGen, mdata, true) + // If client received END_STREAM from server while stream was still active, + // send RST_STREAM. 
+ rstStream := s.getState() == streamActive + t.closeStream(s, io.EOF, rstStream, http2.ErrCodeNo, status, mdata, true) } // readServerPreface reads and handles the initial settings frame from the diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index ec4eef2134..6fa1eb4199 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -165,7 +165,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, if config.MaxHeaderListSize != nil { maxHeaderListSize = *config.MaxHeaderListSize } - framer := newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize) + framer := newFramer(conn, writeBufSize, readBufSize, config.SharedWriteBuffer, maxHeaderListSize) // Send initial settings as connection preface to client. isettings := []http2.Setting{{ ID: http2.SettingMaxFrameSize, @@ -233,7 +233,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, kp.Timeout = defaultServerKeepaliveTimeout } if kp.Time != infinity { - if err = syscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil { + if err = syscall.SetTCPUserTimeout(rawConn, kp.Timeout); err != nil { return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err) } } @@ -342,7 +342,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, // operateHeaders takes action on the decoded headers. Returns an error if fatal // error encountered and transport needs to close, otherwise returns nil. -func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) error { +func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream)) error { // Acquire max stream ID lock for entire duration t.maxStreamMu.Lock() defer t.maxStreamMu.Unlock() @@ -561,7 +561,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( } if t.inTapHandle != nil { var err error - if s.ctx, err = t.inTapHandle(s.ctx, &tap.Info{FullMethodName: s.method}); err != nil { + if s.ctx, err = t.inTapHandle(s.ctx, &tap.Info{FullMethodName: s.method, Header: mdata}); err != nil { t.mu.Unlock() if t.logger.V(logLevel) { t.logger.Infof("Aborting the stream early due to InTapHandle failure: %v", err) @@ -592,7 +592,6 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( s.requestRead = func(n int) { t.adjustWindow(s, uint32(n)) } - s.ctx = traceCtx(s.ctx, s.method) for _, sh := range t.stats { s.ctx = sh.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) inHeader := &stats.InHeader{ @@ -630,7 +629,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( // HandleStreams receives incoming streams using the given handler. This is // typically run in a separate goroutine. // traceCtx attaches trace to ctx and returns the new context. -func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.Context, string) context.Context) { +func (t *http2Server) HandleStreams(handle func(*Stream)) { defer close(t.readerDone) for { t.controlBuf.throttle() @@ -665,7 +664,7 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context. 
} switch frame := frame.(type) { case *http2.MetaHeadersFrame: - if err := t.operateHeaders(frame, handle, traceCtx); err != nil { + if err := t.operateHeaders(frame, handle); err != nil { t.Close(err) break } @@ -850,7 +849,7 @@ func (t *http2Server) handleSettings(f *http2.SettingsFrame) { } return nil }) - t.controlBuf.executeAndPut(func(interface{}) bool { + t.controlBuf.executeAndPut(func(any) bool { for _, f := range updateFuncs { f() } @@ -934,7 +933,7 @@ func appendHeaderFieldsFromMD(headerFields []hpack.HeaderField, md metadata.MD) return headerFields } -func (t *http2Server) checkForHeaderListSize(it interface{}) bool { +func (t *http2Server) checkForHeaderListSize(it any) bool { if t.maxSendHeaderListSize == nil { return true } @@ -1053,12 +1052,15 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(st.Message())}) if p := st.Proto(); p != nil && len(p.Details) > 0 { + // Do not use the user's grpc-status-details-bin (if present) if we are + // even attempting to set our own. + delete(s.trailer, grpcStatusDetailsBinHeader) stBytes, err := proto.Marshal(p) if err != nil { // TODO: return error instead, when callers are able to handle it. t.logger.Errorf("Failed to marshal rpc status: %s, error: %v", pretty.ToJSON(p), err) } else { - headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)}) + headerFields = append(headerFields, hpack.HeaderField{Name: grpcStatusDetailsBinHeader, Value: encodeBinHeader(stBytes)}) } } diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go index 19cbb18f5a..dc29d590e9 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http_util.go +++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go @@ -30,15 +30,13 @@ import ( "net/url" "strconv" "strings" + "sync" "time" "unicode/utf8" - "github.com/golang/protobuf/proto" "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" - spb "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" ) const ( @@ -87,6 +85,8 @@ var ( } ) +var grpcStatusDetailsBinHeader = "grpc-status-details-bin" + // isReservedHeader checks whether hdr belongs to HTTP2 headers // reserved by gRPC protocol. Any other headers are classified as the // user-specified metadata. @@ -102,7 +102,6 @@ func isReservedHeader(hdr string) bool { "grpc-message", "grpc-status", "grpc-timeout", - "grpc-status-details-bin", // Intentionally exclude grpc-previous-rpc-attempts and // grpc-retry-pushback-ms, which are "reserved", but their API // intentionally works via metadata. 
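One behavioral addition buried in the http2_server.go hunk above: the in-tap handle now receives the request headers (tap.Info gains a Header field, populated from mdata in operateHeaders). Assuming the option and field names in this vendored version — treat the sketch as illustrative rather than authoritative, and the x-api-key header is invented — a server can reject RPCs based on header contents before any message is read:

package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
	"google.golang.org/grpc/tap"
)

// requireAPIKey uses the header metadata now carried on tap.Info to reject
// RPCs early, before the server allocates stream resources for them.
func requireAPIKey(ctx context.Context, info *tap.Info) (context.Context, error) {
	if len(info.Header.Get("x-api-key")) == 0 {
		return nil, status.Errorf(codes.Unauthenticated, "missing x-api-key for %s", info.FullMethodName)
	}
	return ctx, nil
}

func main() {
	s := grpc.NewServer(grpc.InTapHandle(requireAPIKey))
	fmt.Printf("server configured with tap handle: %T\n", s)
}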
@@ -153,18 +152,6 @@ func decodeMetadataHeader(k, v string) (string, error) { return v, nil } -func decodeGRPCStatusDetails(rawDetails string) (*status.Status, error) { - v, err := decodeBinHeader(rawDetails) - if err != nil { - return nil, err - } - st := &spb.Status{} - if err = proto.Unmarshal(v, st); err != nil { - return nil, err - } - return status.FromProto(st), nil -} - type timeoutUnit uint8 const ( @@ -309,6 +296,7 @@ func decodeGrpcMessageUnchecked(msg string) string { } type bufWriter struct { + pool *sync.Pool buf []byte offset int batchSize int @@ -316,12 +304,17 @@ type bufWriter struct { err error } -func newBufWriter(conn net.Conn, batchSize int) *bufWriter { - return &bufWriter{ - buf: make([]byte, batchSize*2), +func newBufWriter(conn net.Conn, batchSize int, pool *sync.Pool) *bufWriter { + w := &bufWriter{ batchSize: batchSize, conn: conn, + pool: pool, + } + // this indicates that we should use non shared buf + if pool == nil { + w.buf = make([]byte, batchSize) } + return w } func (w *bufWriter) Write(b []byte) (n int, err error) { @@ -332,19 +325,34 @@ func (w *bufWriter) Write(b []byte) (n int, err error) { n, err = w.conn.Write(b) return n, toIOError(err) } + if w.buf == nil { + b := w.pool.Get().(*[]byte) + w.buf = *b + } for len(b) > 0 { nn := copy(w.buf[w.offset:], b) b = b[nn:] w.offset += nn n += nn if w.offset >= w.batchSize { - err = w.Flush() + err = w.flushKeepBuffer() } } return n, err } func (w *bufWriter) Flush() error { + err := w.flushKeepBuffer() + // Only release the buffer if we are in a "shared" mode + if w.buf != nil && w.pool != nil { + b := w.buf + w.pool.Put(&b) + w.buf = nil + } + return err +} + +func (w *bufWriter) flushKeepBuffer() error { if w.err != nil { return w.err } @@ -381,7 +389,10 @@ type framer struct { fr *http2.Framer } -func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, maxHeaderListSize uint32) *framer { +var writeBufferPoolMap map[int]*sync.Pool = make(map[int]*sync.Pool) +var writeBufferMutex sync.Mutex + +func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, sharedWriteBuffer bool, maxHeaderListSize uint32) *framer { if writeBufferSize < 0 { writeBufferSize = 0 } @@ -389,7 +400,11 @@ func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, maxHeaderList if readBufferSize > 0 { r = bufio.NewReaderSize(r, readBufferSize) } - w := newBufWriter(conn, writeBufferSize) + var pool *sync.Pool + if sharedWriteBuffer { + pool = getWriteBufferPool(writeBufferSize) + } + w := newBufWriter(conn, writeBufferSize, pool) f := &framer{ writer: w, fr: http2.NewFramer(w, r), @@ -403,6 +418,24 @@ func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, maxHeaderList return f } +func getWriteBufferPool(writeBufferSize int) *sync.Pool { + writeBufferMutex.Lock() + defer writeBufferMutex.Unlock() + size := writeBufferSize * 2 + pool, ok := writeBufferPoolMap[size] + if ok { + return pool + } + pool = &sync.Pool{ + New: func() any { + b := make([]byte, size) + return &b + }, + } + writeBufferPoolMap[size] = pool + return pool +} + // parseDialTarget returns the network and address to pass to dialer. 
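getWriteBufferPool above keeps one process-wide sync.Pool per distinct buffer size (note it pools buffers of writeBufferSize*2, matching the size the old code always allocated per connection), so connections that opt into the shared-buffer mode only hold a large write buffer while actually flushing. The same pattern in isolation, with hypothetical names:

package main

import (
	"fmt"
	"sync"
)

var (
	poolsMu sync.Mutex
	pools   = make(map[int]*sync.Pool)
)

// getPool returns a shared pool of byte slices of the requested size,
// creating it on first use — the same shape as getWriteBufferPool above.
func getPool(size int) *sync.Pool {
	poolsMu.Lock()
	defer poolsMu.Unlock()
	if p, ok := pools[size]; ok {
		return p
	}
	p := &sync.Pool{New: func() any {
		b := make([]byte, size)
		return &b
	}}
	pools[size] = p
	return p
}

func main() {
	p := getPool(32 * 1024)
	buf := p.Get().(*[]byte) // borrow a buffer for a write burst
	fmt.Println("borrowed", len(*buf), "bytes")
	p.Put(buf) // return it once the flush completes so other connections can reuse it
}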
func parseDialTarget(target string) (string, string) { net := "tcp" diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go index aa1c896595..aac056e723 100644 --- a/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -43,10 +43,6 @@ import ( "google.golang.org/grpc/tap" ) -// ErrNoHeaders is used as a signal that a trailers only response was received, -// and is not a real error. -var ErrNoHeaders = errors.New("stream has no headers") - const logLevel = 2 type bufferPool struct { @@ -56,7 +52,7 @@ type bufferPool struct { func newBufferPool() *bufferPool { return &bufferPool{ pool: sync.Pool{ - New: func() interface{} { + New: func() any { return new(bytes.Buffer) }, }, @@ -390,14 +386,10 @@ func (s *Stream) Header() (metadata.MD, error) { } s.waitOnHeader() - if !s.headerValid { + if !s.headerValid || s.noHeaders { return nil, s.status.Err() } - if s.noHeaders { - return nil, ErrNoHeaders - } - return s.header.Copy(), nil } @@ -559,6 +551,7 @@ type ServerConfig struct { InitialConnWindowSize int32 WriteBufferSize int ReadBufferSize int + SharedWriteBuffer bool ChannelzParentID *channelz.Identifier MaxHeaderListSize *uint32 HeaderTableSize *uint32 @@ -592,6 +585,8 @@ type ConnectOptions struct { WriteBufferSize int // ReadBufferSize sets the size of read buffer, which in turn determines how much data can be read at most for one read syscall. ReadBufferSize int + // SharedWriteBuffer indicates whether connections should reuse write buffer + SharedWriteBuffer bool // ChannelzParentID sets the addrConn id which initiate the creation of this client transport. ChannelzParentID *channelz.Identifier // MaxHeaderListSize sets the max (uncompressed) size of header list that is prepared to be received. @@ -703,7 +698,7 @@ type ClientTransport interface { // Write methods for a given Stream will be called serially. type ServerTransport interface { // HandleStreams receives incoming streams using the given handler. - HandleStreams(func(*Stream), func(context.Context, string) context.Context) + HandleStreams(func(*Stream)) // WriteHeader sends the header metadata for the given stream. // WriteHeader may not be called on all streams. @@ -736,7 +731,7 @@ type ServerTransport interface { } // connectionErrorf creates an ConnectionError with the specified error description. -func connectionErrorf(temp bool, e error, format string, a ...interface{}) ConnectionError { +func connectionErrorf(temp bool, e error, format string, a ...any) ConnectionError { return ConnectionError{ Desc: fmt.Sprintf(format, a...), temp: temp, diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go index 02f9759512..236837f415 100644 --- a/vendor/google.golang.org/grpc/picker_wrapper.go +++ b/vendor/google.golang.org/grpc/picker_wrapper.go @@ -28,21 +28,26 @@ import ( "google.golang.org/grpc/internal/channelz" istatus "google.golang.org/grpc/internal/status" "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/stats" "google.golang.org/grpc/status" ) // pickerWrapper is a wrapper of balancer.Picker. It blocks on certain pick // actions and unblock when there's a picker update. 
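The picker_wrapper.go change that begins here threads the channel's stats handlers into pick(), so that an RPC whose pick had to wait for a picker update is reported as a stats.PickerUpdated event (see the HandleRPC call added further down in this hunk). Assuming the usual stats.Handler wiring through a dial option, a handler can count such blocked picks; everything below other than the PickerUpdated case is ordinary boilerplate, and the target address is a placeholder:

package main

import (
	"context"
	"fmt"
	"sync/atomic"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/stats"
)

// blockedPicks counts RPCs whose pick blocked until a new picker arrived.
type blockedPicks struct{ n int64 }

func (b *blockedPicks) TagRPC(ctx context.Context, _ *stats.RPCTagInfo) context.Context { return ctx }
func (b *blockedPicks) HandleRPC(_ context.Context, s stats.RPCStats) {
	if _, ok := s.(*stats.PickerUpdated); ok {
		atomic.AddInt64(&b.n, 1)
	}
}
func (b *blockedPicks) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context { return ctx }
func (b *blockedPicks) HandleConn(context.Context, stats.ConnStats)                       {}

func main() {
	h := &blockedPicks{}
	cc, err := grpc.Dial("dns:///example.test:443", // placeholder target; Dial is lazy
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithStatsHandler(h))
	if err != nil {
		panic(err)
	}
	defer cc.Close()
	fmt.Println("picks that had to block so far:", atomic.LoadInt64(&h.n))
}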
type pickerWrapper struct { - mu sync.Mutex - done bool - idle bool - blockingCh chan struct{} - picker balancer.Picker + mu sync.Mutex + done bool + idle bool + blockingCh chan struct{} + picker balancer.Picker + statsHandlers []stats.Handler // to record blocking picker calls } -func newPickerWrapper() *pickerWrapper { - return &pickerWrapper{blockingCh: make(chan struct{})} +func newPickerWrapper(statsHandlers []stats.Handler) *pickerWrapper { + return &pickerWrapper{ + blockingCh: make(chan struct{}), + statsHandlers: statsHandlers, + } } // updatePicker is called by UpdateBalancerState. It unblocks all blocked pick. @@ -95,6 +100,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. var ch chan struct{} var lastPickErr error + for { pw.mu.Lock() if pw.done { @@ -129,6 +135,20 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. continue } + // If the channel is set, it means that the pick call had to wait for a + // new picker at some point. Either it's the first iteration and this + // function received the first picker, or a picker errored with + // ErrNoSubConnAvailable or errored with failfast set to false, which + // will trigger a continue to the next iteration. In the first case this + // conditional will hit if this call had to block (the channel is set). + // In the second case, the only way it will get to this conditional is + // if there is a new picker. + if ch != nil { + for _, sh := range pw.statsHandlers { + sh.HandleRPC(ctx, &stats.PickerUpdated{}) + } + } + ch = pw.blockingCh p := pw.picker pw.mu.Unlock() diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go index abe266b021..2e9cf66b4a 100644 --- a/vendor/google.golang.org/grpc/pickfirst.go +++ b/vendor/google.golang.org/grpc/pickfirst.go @@ -26,12 +26,18 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/internal/envconfig" + internalgrpclog "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" ) -// PickFirstBalancerName is the name of the pick_first balancer. -const PickFirstBalancerName = "pick_first" +const ( + // PickFirstBalancerName is the name of the pick_first balancer. + PickFirstBalancerName = "pick_first" + logPrefix = "[pick-first-lb %p] " +) func newPickfirstBuilder() balancer.Builder { return &pickfirstBuilder{} @@ -40,7 +46,9 @@ func newPickfirstBuilder() balancer.Builder { type pickfirstBuilder struct{} func (*pickfirstBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { - return &pickfirstBalancer{cc: cc} + b := &pickfirstBalancer{cc: cc} + b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b)) + return b } func (*pickfirstBuilder) Name() string { @@ -57,23 +65,36 @@ type pfConfig struct { } func (*pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { - cfg := &pfConfig{} - if err := json.Unmarshal(js, cfg); err != nil { + if !envconfig.PickFirstLBConfig { + // Prior to supporting loadbalancing configuration, the pick_first LB + // policy did not implement the balancer.ConfigParser interface. This + // meant that if a non-empty configuration was passed to it, the service + // config unmarshaling code would throw a warning log, but would + // continue using the pick_first LB policy. 
The code below ensures the + // same behavior is retained if the env var is not set. + if string(js) != "{}" { + logger.Warningf("Ignoring non-empty balancer configuration %q for the pick_first LB policy", string(js)) + } + return nil, nil + } + + var cfg pfConfig + if err := json.Unmarshal(js, &cfg); err != nil { return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err) } return cfg, nil } type pickfirstBalancer struct { + logger *internalgrpclog.PrefixLogger state connectivity.State cc balancer.ClientConn subConn balancer.SubConn - cfg *pfConfig } func (b *pickfirstBalancer) ResolverError(err error) { - if logger.V(2) { - logger.Infof("pickfirstBalancer: ResolverError called with error: %v", err) + if b.logger.V(2) { + b.logger.Infof("Received error from the name resolver: %v", err) } if b.subConn == nil { b.state = connectivity.TransientFailure @@ -96,35 +117,44 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState // The resolver reported an empty address list. Treat it like an error by // calling b.ResolverError. if b.subConn != nil { - // Remove the old subConn. All addresses were removed, so it is no longer - // valid. - b.cc.RemoveSubConn(b.subConn) + // Shut down the old subConn. All addresses were removed, so it is + // no longer valid. + b.subConn.Shutdown() b.subConn = nil } b.ResolverError(errors.New("produced zero addresses")) return balancer.ErrBadResolverState } - if state.BalancerConfig != nil { - cfg, ok := state.BalancerConfig.(*pfConfig) - if !ok { - return fmt.Errorf("pickfirstBalancer: received nil or illegal BalancerConfig (type %T): %v", state.BalancerConfig, state.BalancerConfig) - } - b.cfg = cfg + // We don't have to guard this block with the env var because ParseConfig + // already does so. + cfg, ok := state.BalancerConfig.(pfConfig) + if state.BalancerConfig != nil && !ok { + return fmt.Errorf("pickfirst: received illegal BalancerConfig (type %T): %v", state.BalancerConfig, state.BalancerConfig) } - - if envconfig.PickFirstLBConfig && b.cfg != nil && b.cfg.ShuffleAddressList { + if cfg.ShuffleAddressList { + addrs = append([]resolver.Address{}, addrs...) grpcrand.Shuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] }) } + + if b.logger.V(2) { + b.logger.Infof("Received new config %s, resolver state %s", pretty.ToJSON(cfg), pretty.ToJSON(state.ResolverState)) + } + if b.subConn != nil { b.cc.UpdateAddresses(b.subConn, addrs) return nil } - subConn, err := b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{}) + var subConn balancer.SubConn + subConn, err := b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{ + StateListener: func(state balancer.SubConnState) { + b.updateSubConnState(subConn, state) + }, + }) if err != nil { - if logger.V(2) { - logger.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err) + if b.logger.V(2) { + b.logger.Infof("Failed to create new SubConn: %v", err) } b.state = connectivity.TransientFailure b.cc.UpdateState(balancer.State{ @@ -143,13 +173,19 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState return nil } +// UpdateSubConnState is unused as a StateListener is always registered when +// creating SubConns. 
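
The pick_first changes above move SubConn state delivery from the balancer-wide UpdateSubConnState method to a per-SubConn StateListener supplied in balancer.NewSubConnOptions at creation time. A minimal sketch of that pattern for a custom balancer is shown below; minimalBalancer and onSubConnState are illustrative names, and only the method relevant to the listener is shown, not the full balancer.Balancer interface.

package example

import (
	"google.golang.org/grpc/balancer"
)

// minimalBalancer demonstrates the StateListener pattern: connectivity
// updates arrive through the callback registered when the SubConn is
// created, so the balancer no longer relies on UpdateSubConnState.
type minimalBalancer struct {
	cc      balancer.ClientConn
	subConn balancer.SubConn
}

func (b *minimalBalancer) UpdateClientConnState(s balancer.ClientConnState) error {
	var sc balancer.SubConn
	sc, err := b.cc.NewSubConn(s.ResolverState.Addresses, balancer.NewSubConnOptions{
		// Register the listener up front; the closure captures sc so the
		// balancer knows which SubConn each update belongs to.
		StateListener: func(state balancer.SubConnState) { b.onSubConnState(sc, state) },
	})
	if err != nil {
		return err
	}
	b.subConn = sc
	sc.Connect()
	return nil
}

func (b *minimalBalancer) onSubConnState(sc balancer.SubConn, state balancer.SubConnState) {
	if sc != b.subConn {
		return // ignore updates for SubConns we no longer track
	}
	// React to state.ConnectivityState here (e.g. update the picker).
}
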
func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) { - if logger.V(2) { - logger.Infof("pickfirstBalancer: UpdateSubConnState: %p, %v", subConn, state) + b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", subConn, state) +} + +func (b *pickfirstBalancer) updateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) { + if b.logger.V(2) { + b.logger.Infof("Received SubConn state update: %p, %+v", subConn, state) } if b.subConn != subConn { - if logger.V(2) { - logger.Infof("pickfirstBalancer: ignored state change because subConn is not recognized") + if b.logger.V(2) { + b.logger.Infof("Ignored state change because subConn is not recognized") } return } diff --git a/vendor/google.golang.org/grpc/preloader.go b/vendor/google.golang.org/grpc/preloader.go index cd45547854..73bd633643 100644 --- a/vendor/google.golang.org/grpc/preloader.go +++ b/vendor/google.golang.org/grpc/preloader.go @@ -37,7 +37,7 @@ type PreparedMsg struct { } // Encode marshalls and compresses the message using the codec and compressor for the stream. -func (p *PreparedMsg) Encode(s Stream, msg interface{}) error { +func (p *PreparedMsg) Encode(s Stream, msg any) error { ctx := s.Context() rpcInfo, ok := rpcInfoFromContext(ctx) if !ok { diff --git a/vendor/google.golang.org/grpc/reflection/adapt.go b/vendor/google.golang.org/grpc/reflection/adapt.go new file mode 100644 index 0000000000..33b907a36d --- /dev/null +++ b/vendor/google.golang.org/grpc/reflection/adapt.go @@ -0,0 +1,187 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package reflection + +import ( + v1reflectiongrpc "google.golang.org/grpc/reflection/grpc_reflection_v1" + v1reflectionpb "google.golang.org/grpc/reflection/grpc_reflection_v1" + v1alphareflectiongrpc "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" + v1alphareflectionpb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" +) + +// asV1Alpha returns an implementation of the v1alpha version of the reflection +// interface that delegates all calls to the given v1 version. 
+func asV1Alpha(svr v1reflectiongrpc.ServerReflectionServer) v1alphareflectiongrpc.ServerReflectionServer { + return v1AlphaServerImpl{svr: svr} +} + +type v1AlphaServerImpl struct { + svr v1reflectiongrpc.ServerReflectionServer +} + +func (s v1AlphaServerImpl) ServerReflectionInfo(stream v1alphareflectiongrpc.ServerReflection_ServerReflectionInfoServer) error { + return s.svr.ServerReflectionInfo(v1AlphaServerStreamAdapter{stream}) +} + +type v1AlphaServerStreamAdapter struct { + v1alphareflectiongrpc.ServerReflection_ServerReflectionInfoServer +} + +func (s v1AlphaServerStreamAdapter) Send(response *v1reflectionpb.ServerReflectionResponse) error { + return s.ServerReflection_ServerReflectionInfoServer.Send(v1ToV1AlphaResponse(response)) +} + +func (s v1AlphaServerStreamAdapter) Recv() (*v1reflectionpb.ServerReflectionRequest, error) { + resp, err := s.ServerReflection_ServerReflectionInfoServer.Recv() + if err != nil { + return nil, err + } + return v1AlphaToV1Request(resp), nil +} + +func v1ToV1AlphaResponse(v1 *v1reflectionpb.ServerReflectionResponse) *v1alphareflectionpb.ServerReflectionResponse { + var v1alpha v1alphareflectionpb.ServerReflectionResponse + v1alpha.ValidHost = v1.ValidHost + if v1.OriginalRequest != nil { + v1alpha.OriginalRequest = v1ToV1AlphaRequest(v1.OriginalRequest) + } + switch mr := v1.MessageResponse.(type) { + case *v1reflectionpb.ServerReflectionResponse_FileDescriptorResponse: + if mr != nil { + v1alpha.MessageResponse = &v1alphareflectionpb.ServerReflectionResponse_FileDescriptorResponse{ + FileDescriptorResponse: &v1alphareflectionpb.FileDescriptorResponse{ + FileDescriptorProto: mr.FileDescriptorResponse.GetFileDescriptorProto(), + }, + } + } + case *v1reflectionpb.ServerReflectionResponse_AllExtensionNumbersResponse: + if mr != nil { + v1alpha.MessageResponse = &v1alphareflectionpb.ServerReflectionResponse_AllExtensionNumbersResponse{ + AllExtensionNumbersResponse: &v1alphareflectionpb.ExtensionNumberResponse{ + BaseTypeName: mr.AllExtensionNumbersResponse.GetBaseTypeName(), + ExtensionNumber: mr.AllExtensionNumbersResponse.GetExtensionNumber(), + }, + } + } + case *v1reflectionpb.ServerReflectionResponse_ListServicesResponse: + if mr != nil { + svcs := make([]*v1alphareflectionpb.ServiceResponse, len(mr.ListServicesResponse.GetService())) + for i, svc := range mr.ListServicesResponse.GetService() { + svcs[i] = &v1alphareflectionpb.ServiceResponse{ + Name: svc.GetName(), + } + } + v1alpha.MessageResponse = &v1alphareflectionpb.ServerReflectionResponse_ListServicesResponse{ + ListServicesResponse: &v1alphareflectionpb.ListServiceResponse{ + Service: svcs, + }, + } + } + case *v1reflectionpb.ServerReflectionResponse_ErrorResponse: + if mr != nil { + v1alpha.MessageResponse = &v1alphareflectionpb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &v1alphareflectionpb.ErrorResponse{ + ErrorCode: mr.ErrorResponse.GetErrorCode(), + ErrorMessage: mr.ErrorResponse.GetErrorMessage(), + }, + } + } + default: + // no value set + } + return &v1alpha +} + +func v1AlphaToV1Request(v1alpha *v1alphareflectionpb.ServerReflectionRequest) *v1reflectionpb.ServerReflectionRequest { + var v1 v1reflectionpb.ServerReflectionRequest + v1.Host = v1alpha.Host + switch mr := v1alpha.MessageRequest.(type) { + case *v1alphareflectionpb.ServerReflectionRequest_FileByFilename: + v1.MessageRequest = &v1reflectionpb.ServerReflectionRequest_FileByFilename{ + FileByFilename: mr.FileByFilename, + } + case *v1alphareflectionpb.ServerReflectionRequest_FileContainingSymbol: + 
v1.MessageRequest = &v1reflectionpb.ServerReflectionRequest_FileContainingSymbol{ + FileContainingSymbol: mr.FileContainingSymbol, + } + case *v1alphareflectionpb.ServerReflectionRequest_FileContainingExtension: + if mr.FileContainingExtension != nil { + v1.MessageRequest = &v1reflectionpb.ServerReflectionRequest_FileContainingExtension{ + FileContainingExtension: &v1reflectionpb.ExtensionRequest{ + ContainingType: mr.FileContainingExtension.GetContainingType(), + ExtensionNumber: mr.FileContainingExtension.GetExtensionNumber(), + }, + } + } + case *v1alphareflectionpb.ServerReflectionRequest_AllExtensionNumbersOfType: + v1.MessageRequest = &v1reflectionpb.ServerReflectionRequest_AllExtensionNumbersOfType{ + AllExtensionNumbersOfType: mr.AllExtensionNumbersOfType, + } + case *v1alphareflectionpb.ServerReflectionRequest_ListServices: + v1.MessageRequest = &v1reflectionpb.ServerReflectionRequest_ListServices{ + ListServices: mr.ListServices, + } + default: + // no value set + } + return &v1 +} + +func v1ToV1AlphaRequest(v1 *v1reflectionpb.ServerReflectionRequest) *v1alphareflectionpb.ServerReflectionRequest { + var v1alpha v1alphareflectionpb.ServerReflectionRequest + v1alpha.Host = v1.Host + switch mr := v1.MessageRequest.(type) { + case *v1reflectionpb.ServerReflectionRequest_FileByFilename: + if mr != nil { + v1alpha.MessageRequest = &v1alphareflectionpb.ServerReflectionRequest_FileByFilename{ + FileByFilename: mr.FileByFilename, + } + } + case *v1reflectionpb.ServerReflectionRequest_FileContainingSymbol: + if mr != nil { + v1alpha.MessageRequest = &v1alphareflectionpb.ServerReflectionRequest_FileContainingSymbol{ + FileContainingSymbol: mr.FileContainingSymbol, + } + } + case *v1reflectionpb.ServerReflectionRequest_FileContainingExtension: + if mr != nil { + v1alpha.MessageRequest = &v1alphareflectionpb.ServerReflectionRequest_FileContainingExtension{ + FileContainingExtension: &v1alphareflectionpb.ExtensionRequest{ + ContainingType: mr.FileContainingExtension.GetContainingType(), + ExtensionNumber: mr.FileContainingExtension.GetExtensionNumber(), + }, + } + } + case *v1reflectionpb.ServerReflectionRequest_AllExtensionNumbersOfType: + if mr != nil { + v1alpha.MessageRequest = &v1alphareflectionpb.ServerReflectionRequest_AllExtensionNumbersOfType{ + AllExtensionNumbersOfType: mr.AllExtensionNumbersOfType, + } + } + case *v1reflectionpb.ServerReflectionRequest_ListServices: + if mr != nil { + v1alpha.MessageRequest = &v1alphareflectionpb.ServerReflectionRequest_ListServices{ + ListServices: mr.ListServices, + } + } + default: + // no value set + } + return &v1alpha +} diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection.pb.go new file mode 100644 index 0000000000..6f5c786b21 --- /dev/null +++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection.pb.go @@ -0,0 +1,953 @@ +// Copyright 2016 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Service exported by server reflection. A more complete description of how +// server reflection works can be found at +// https://github.com/grpc/grpc/blob/master/doc/server-reflection.md +// +// The canonical version of this proto can be found at +// https://github.com/grpc/grpc-proto/blob/master/grpc/reflection/v1/reflection.proto + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.31.0 +// protoc v4.22.0 +// source: grpc/reflection/v1/reflection.proto + +package grpc_reflection_v1 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The message sent by the client when calling ServerReflectionInfo method. +type ServerReflectionRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Host string `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"` + // To use reflection service, the client should set one of the following + // fields in message_request. The server distinguishes requests by their + // defined field and then handles them using corresponding methods. + // + // Types that are assignable to MessageRequest: + // + // *ServerReflectionRequest_FileByFilename + // *ServerReflectionRequest_FileContainingSymbol + // *ServerReflectionRequest_FileContainingExtension + // *ServerReflectionRequest_AllExtensionNumbersOfType + // *ServerReflectionRequest_ListServices + MessageRequest isServerReflectionRequest_MessageRequest `protobuf_oneof:"message_request"` +} + +func (x *ServerReflectionRequest) Reset() { + *x = ServerReflectionRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServerReflectionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServerReflectionRequest) ProtoMessage() {} + +func (x *ServerReflectionRequest) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServerReflectionRequest.ProtoReflect.Descriptor instead. 
+func (*ServerReflectionRequest) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{0} +} + +func (x *ServerReflectionRequest) GetHost() string { + if x != nil { + return x.Host + } + return "" +} + +func (m *ServerReflectionRequest) GetMessageRequest() isServerReflectionRequest_MessageRequest { + if m != nil { + return m.MessageRequest + } + return nil +} + +func (x *ServerReflectionRequest) GetFileByFilename() string { + if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_FileByFilename); ok { + return x.FileByFilename + } + return "" +} + +func (x *ServerReflectionRequest) GetFileContainingSymbol() string { + if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_FileContainingSymbol); ok { + return x.FileContainingSymbol + } + return "" +} + +func (x *ServerReflectionRequest) GetFileContainingExtension() *ExtensionRequest { + if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_FileContainingExtension); ok { + return x.FileContainingExtension + } + return nil +} + +func (x *ServerReflectionRequest) GetAllExtensionNumbersOfType() string { + if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_AllExtensionNumbersOfType); ok { + return x.AllExtensionNumbersOfType + } + return "" +} + +func (x *ServerReflectionRequest) GetListServices() string { + if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_ListServices); ok { + return x.ListServices + } + return "" +} + +type isServerReflectionRequest_MessageRequest interface { + isServerReflectionRequest_MessageRequest() +} + +type ServerReflectionRequest_FileByFilename struct { + // Find a proto file by the file name. + FileByFilename string `protobuf:"bytes,3,opt,name=file_by_filename,json=fileByFilename,proto3,oneof"` +} + +type ServerReflectionRequest_FileContainingSymbol struct { + // Find the proto file that declares the given fully-qualified symbol name. + // This field should be a fully-qualified symbol name + // (e.g. .[.] or .). + FileContainingSymbol string `protobuf:"bytes,4,opt,name=file_containing_symbol,json=fileContainingSymbol,proto3,oneof"` +} + +type ServerReflectionRequest_FileContainingExtension struct { + // Find the proto file which defines an extension extending the given + // message type with the given field number. + FileContainingExtension *ExtensionRequest `protobuf:"bytes,5,opt,name=file_containing_extension,json=fileContainingExtension,proto3,oneof"` +} + +type ServerReflectionRequest_AllExtensionNumbersOfType struct { + // Finds the tag numbers used by all known extensions of the given message + // type, and appends them to ExtensionNumberResponse in an undefined order. + // Its corresponding method is best-effort: it's not guaranteed that the + // reflection service will implement this method, and it's not guaranteed + // that this method will provide all extensions. Returns + // StatusCode::UNIMPLEMENTED if it's not implemented. + // This field should be a fully-qualified type name. The format is + // . + AllExtensionNumbersOfType string `protobuf:"bytes,6,opt,name=all_extension_numbers_of_type,json=allExtensionNumbersOfType,proto3,oneof"` +} + +type ServerReflectionRequest_ListServices struct { + // List the full names of registered services. The content will not be + // checked. 
+ ListServices string `protobuf:"bytes,7,opt,name=list_services,json=listServices,proto3,oneof"` +} + +func (*ServerReflectionRequest_FileByFilename) isServerReflectionRequest_MessageRequest() {} + +func (*ServerReflectionRequest_FileContainingSymbol) isServerReflectionRequest_MessageRequest() {} + +func (*ServerReflectionRequest_FileContainingExtension) isServerReflectionRequest_MessageRequest() {} + +func (*ServerReflectionRequest_AllExtensionNumbersOfType) isServerReflectionRequest_MessageRequest() { +} + +func (*ServerReflectionRequest_ListServices) isServerReflectionRequest_MessageRequest() {} + +// The type name and extension number sent by the client when requesting +// file_containing_extension. +type ExtensionRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Fully-qualified type name. The format should be . + ContainingType string `protobuf:"bytes,1,opt,name=containing_type,json=containingType,proto3" json:"containing_type,omitempty"` + ExtensionNumber int32 `protobuf:"varint,2,opt,name=extension_number,json=extensionNumber,proto3" json:"extension_number,omitempty"` +} + +func (x *ExtensionRequest) Reset() { + *x = ExtensionRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExtensionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExtensionRequest) ProtoMessage() {} + +func (x *ExtensionRequest) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExtensionRequest.ProtoReflect.Descriptor instead. +func (*ExtensionRequest) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{1} +} + +func (x *ExtensionRequest) GetContainingType() string { + if x != nil { + return x.ContainingType + } + return "" +} + +func (x *ExtensionRequest) GetExtensionNumber() int32 { + if x != nil { + return x.ExtensionNumber + } + return 0 +} + +// The message sent by the server to answer ServerReflectionInfo method. +type ServerReflectionResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ValidHost string `protobuf:"bytes,1,opt,name=valid_host,json=validHost,proto3" json:"valid_host,omitempty"` + OriginalRequest *ServerReflectionRequest `protobuf:"bytes,2,opt,name=original_request,json=originalRequest,proto3" json:"original_request,omitempty"` + // The server sets one of the following fields according to the message_request + // in the request. 
+ // + // Types that are assignable to MessageResponse: + // + // *ServerReflectionResponse_FileDescriptorResponse + // *ServerReflectionResponse_AllExtensionNumbersResponse + // *ServerReflectionResponse_ListServicesResponse + // *ServerReflectionResponse_ErrorResponse + MessageResponse isServerReflectionResponse_MessageResponse `protobuf_oneof:"message_response"` +} + +func (x *ServerReflectionResponse) Reset() { + *x = ServerReflectionResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServerReflectionResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServerReflectionResponse) ProtoMessage() {} + +func (x *ServerReflectionResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServerReflectionResponse.ProtoReflect.Descriptor instead. +func (*ServerReflectionResponse) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{2} +} + +func (x *ServerReflectionResponse) GetValidHost() string { + if x != nil { + return x.ValidHost + } + return "" +} + +func (x *ServerReflectionResponse) GetOriginalRequest() *ServerReflectionRequest { + if x != nil { + return x.OriginalRequest + } + return nil +} + +func (m *ServerReflectionResponse) GetMessageResponse() isServerReflectionResponse_MessageResponse { + if m != nil { + return m.MessageResponse + } + return nil +} + +func (x *ServerReflectionResponse) GetFileDescriptorResponse() *FileDescriptorResponse { + if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_FileDescriptorResponse); ok { + return x.FileDescriptorResponse + } + return nil +} + +func (x *ServerReflectionResponse) GetAllExtensionNumbersResponse() *ExtensionNumberResponse { + if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_AllExtensionNumbersResponse); ok { + return x.AllExtensionNumbersResponse + } + return nil +} + +func (x *ServerReflectionResponse) GetListServicesResponse() *ListServiceResponse { + if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_ListServicesResponse); ok { + return x.ListServicesResponse + } + return nil +} + +func (x *ServerReflectionResponse) GetErrorResponse() *ErrorResponse { + if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_ErrorResponse); ok { + return x.ErrorResponse + } + return nil +} + +type isServerReflectionResponse_MessageResponse interface { + isServerReflectionResponse_MessageResponse() +} + +type ServerReflectionResponse_FileDescriptorResponse struct { + // This message is used to answer file_by_filename, file_containing_symbol, + // file_containing_extension requests with transitive dependencies. + // As the repeated label is not allowed in oneof fields, we use a + // FileDescriptorResponse message to encapsulate the repeated fields. + // The reflection service is allowed to avoid sending FileDescriptorProtos + // that were previously sent in response to earlier requests in the stream. 
+ FileDescriptorResponse *FileDescriptorResponse `protobuf:"bytes,4,opt,name=file_descriptor_response,json=fileDescriptorResponse,proto3,oneof"` +} + +type ServerReflectionResponse_AllExtensionNumbersResponse struct { + // This message is used to answer all_extension_numbers_of_type requests. + AllExtensionNumbersResponse *ExtensionNumberResponse `protobuf:"bytes,5,opt,name=all_extension_numbers_response,json=allExtensionNumbersResponse,proto3,oneof"` +} + +type ServerReflectionResponse_ListServicesResponse struct { + // This message is used to answer list_services requests. + ListServicesResponse *ListServiceResponse `protobuf:"bytes,6,opt,name=list_services_response,json=listServicesResponse,proto3,oneof"` +} + +type ServerReflectionResponse_ErrorResponse struct { + // This message is used when an error occurs. + ErrorResponse *ErrorResponse `protobuf:"bytes,7,opt,name=error_response,json=errorResponse,proto3,oneof"` +} + +func (*ServerReflectionResponse_FileDescriptorResponse) isServerReflectionResponse_MessageResponse() { +} + +func (*ServerReflectionResponse_AllExtensionNumbersResponse) isServerReflectionResponse_MessageResponse() { +} + +func (*ServerReflectionResponse_ListServicesResponse) isServerReflectionResponse_MessageResponse() {} + +func (*ServerReflectionResponse_ErrorResponse) isServerReflectionResponse_MessageResponse() {} + +// Serialized FileDescriptorProto messages sent by the server answering +// a file_by_filename, file_containing_symbol, or file_containing_extension +// request. +type FileDescriptorResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Serialized FileDescriptorProto messages. We avoid taking a dependency on + // descriptor.proto, which uses proto2 only features, by making them opaque + // bytes instead. + FileDescriptorProto [][]byte `protobuf:"bytes,1,rep,name=file_descriptor_proto,json=fileDescriptorProto,proto3" json:"file_descriptor_proto,omitempty"` +} + +func (x *FileDescriptorResponse) Reset() { + *x = FileDescriptorResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FileDescriptorResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FileDescriptorResponse) ProtoMessage() {} + +func (x *FileDescriptorResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FileDescriptorResponse.ProtoReflect.Descriptor instead. +func (*FileDescriptorResponse) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{3} +} + +func (x *FileDescriptorResponse) GetFileDescriptorProto() [][]byte { + if x != nil { + return x.FileDescriptorProto + } + return nil +} + +// A list of extension numbers sent by the server answering +// all_extension_numbers_of_type request. +type ExtensionNumberResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Full name of the base type, including the package name. The format + // is . 
+ BaseTypeName string `protobuf:"bytes,1,opt,name=base_type_name,json=baseTypeName,proto3" json:"base_type_name,omitempty"` + ExtensionNumber []int32 `protobuf:"varint,2,rep,packed,name=extension_number,json=extensionNumber,proto3" json:"extension_number,omitempty"` +} + +func (x *ExtensionNumberResponse) Reset() { + *x = ExtensionNumberResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExtensionNumberResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExtensionNumberResponse) ProtoMessage() {} + +func (x *ExtensionNumberResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExtensionNumberResponse.ProtoReflect.Descriptor instead. +func (*ExtensionNumberResponse) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{4} +} + +func (x *ExtensionNumberResponse) GetBaseTypeName() string { + if x != nil { + return x.BaseTypeName + } + return "" +} + +func (x *ExtensionNumberResponse) GetExtensionNumber() []int32 { + if x != nil { + return x.ExtensionNumber + } + return nil +} + +// A list of ServiceResponse sent by the server answering list_services request. +type ListServiceResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The information of each service may be expanded in the future, so we use + // ServiceResponse message to encapsulate it. + Service []*ServiceResponse `protobuf:"bytes,1,rep,name=service,proto3" json:"service,omitempty"` +} + +func (x *ListServiceResponse) Reset() { + *x = ListServiceResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListServiceResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListServiceResponse) ProtoMessage() {} + +func (x *ListServiceResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListServiceResponse.ProtoReflect.Descriptor instead. +func (*ListServiceResponse) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{5} +} + +func (x *ListServiceResponse) GetService() []*ServiceResponse { + if x != nil { + return x.Service + } + return nil +} + +// The information of a single service used by ListServiceResponse to answer +// list_services request. +type ServiceResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Full name of a registered service, including its package name. The format + // is . 
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *ServiceResponse) Reset() { + *x = ServiceResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServiceResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServiceResponse) ProtoMessage() {} + +func (x *ServiceResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServiceResponse.ProtoReflect.Descriptor instead. +func (*ServiceResponse) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{6} +} + +func (x *ServiceResponse) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// The error code and error message sent by the server when an error occurs. +type ErrorResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // This field uses the error codes defined in grpc::StatusCode. + ErrorCode int32 `protobuf:"varint,1,opt,name=error_code,json=errorCode,proto3" json:"error_code,omitempty"` + ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` +} + +func (x *ErrorResponse) Reset() { + *x = ErrorResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ErrorResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ErrorResponse) ProtoMessage() {} + +func (x *ErrorResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ErrorResponse.ProtoReflect.Descriptor instead. 
+func (*ErrorResponse) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{7} +} + +func (x *ErrorResponse) GetErrorCode() int32 { + if x != nil { + return x.ErrorCode + } + return 0 +} + +func (x *ErrorResponse) GetErrorMessage() string { + if x != nil { + return x.ErrorMessage + } + return "" +} + +var File_grpc_reflection_v1_reflection_proto protoreflect.FileDescriptor + +var file_grpc_reflection_v1_reflection_proto_rawDesc = []byte{ + 0x0a, 0x23, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x22, 0xf3, 0x02, 0x0a, 0x17, 0x53, 0x65, + 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x2a, 0x0a, 0x10, 0x66, 0x69, 0x6c, + 0x65, 0x5f, 0x62, 0x79, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0e, 0x66, 0x69, 0x6c, 0x65, 0x42, 0x79, 0x46, 0x69, 0x6c, + 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x36, 0x0a, 0x16, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x79, 0x6d, 0x62, 0x6f, 0x6c, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x14, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x53, 0x79, 0x6d, 0x62, 0x6f, 0x6c, 0x12, 0x62, 0x0a, + 0x19, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, + 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x24, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x17, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x12, 0x42, 0x0a, 0x1d, 0x61, 0x6c, 0x6c, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x5f, 0x6f, 0x66, 0x5f, 0x74, 0x79, + 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x19, 0x61, 0x6c, 0x6c, 0x45, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x4f, + 0x66, 0x54, 0x79, 0x70, 0x65, 0x12, 0x25, 0x0a, 0x0d, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0c, + 0x6c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x42, 0x11, 0x0a, 0x0f, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, + 0x66, 0x0a, 0x10, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, + 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x54, 0x79, 0x70, 0x65, 0x12, 0x29, 0x0a, 0x10, + 0x65, 0x78, 0x74, 0x65, 
0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, 0xae, 0x04, 0x0a, 0x18, 0x53, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x68, 0x6f, + 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x48, + 0x6f, 0x73, 0x74, 0x12, 0x56, 0x0a, 0x10, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x5f, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x0f, 0x6f, 0x72, 0x69, 0x67, + 0x69, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x66, 0x0a, 0x18, 0x66, + 0x69, 0x6c, 0x65, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x72, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x76, 0x31, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, + 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x16, 0x66, 0x69, 0x6c, + 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x72, 0x0a, 0x1e, 0x61, 0x6c, 0x6c, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x5f, 0x72, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, + 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x1b, 0x61, 0x6c, 0x6c, 0x45, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5f, 0x0a, 0x16, 0x6c, 0x69, 0x73, 0x74, 0x5f, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, + 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, + 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x48, 0x00, 0x52, 0x14, 0x6c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4a, 0x0a, 0x0e, 0x65, 0x72, 0x72, 0x6f, + 0x72, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x21, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x12, 0x0a, 0x10, 
0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, + 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x4c, 0x0a, 0x16, 0x46, 0x69, 0x6c, 0x65, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x32, 0x0a, 0x15, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x0c, 0x52, 0x13, 0x66, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, + 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x6a, 0x0a, 0x17, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x62, 0x61, 0x73, 0x65, 0x54, + 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x05, 0x52, 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, + 0x65, 0x72, 0x22, 0x54, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3d, 0x0a, 0x07, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x72, 0x70, + 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, + 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x22, 0x25, 0x0a, 0x0f, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, + 0x53, 0x0a, 0x0d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x1d, 0x0a, 0x0a, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x43, 0x6f, 0x64, 0x65, 0x12, + 0x23, 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x32, 0x89, 0x01, 0x0a, 0x10, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, + 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x75, 0x0a, 0x14, 0x53, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, + 0x6f, 0x12, 0x2b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, + 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, + 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x28, 0x01, 0x30, 0x01, + 0x42, 0x66, 0x0a, 0x15, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x42, 0x15, 0x53, 
0x65, 0x72, 0x76, 0x65, + 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x50, 0x01, 0x5a, 0x34, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, + 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x72, 0x65, 0x66, 0x6c, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_grpc_reflection_v1_reflection_proto_rawDescOnce sync.Once + file_grpc_reflection_v1_reflection_proto_rawDescData = file_grpc_reflection_v1_reflection_proto_rawDesc +) + +func file_grpc_reflection_v1_reflection_proto_rawDescGZIP() []byte { + file_grpc_reflection_v1_reflection_proto_rawDescOnce.Do(func() { + file_grpc_reflection_v1_reflection_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_reflection_v1_reflection_proto_rawDescData) + }) + return file_grpc_reflection_v1_reflection_proto_rawDescData +} + +var file_grpc_reflection_v1_reflection_proto_msgTypes = make([]protoimpl.MessageInfo, 8) +var file_grpc_reflection_v1_reflection_proto_goTypes = []interface{}{ + (*ServerReflectionRequest)(nil), // 0: grpc.reflection.v1.ServerReflectionRequest + (*ExtensionRequest)(nil), // 1: grpc.reflection.v1.ExtensionRequest + (*ServerReflectionResponse)(nil), // 2: grpc.reflection.v1.ServerReflectionResponse + (*FileDescriptorResponse)(nil), // 3: grpc.reflection.v1.FileDescriptorResponse + (*ExtensionNumberResponse)(nil), // 4: grpc.reflection.v1.ExtensionNumberResponse + (*ListServiceResponse)(nil), // 5: grpc.reflection.v1.ListServiceResponse + (*ServiceResponse)(nil), // 6: grpc.reflection.v1.ServiceResponse + (*ErrorResponse)(nil), // 7: grpc.reflection.v1.ErrorResponse +} +var file_grpc_reflection_v1_reflection_proto_depIdxs = []int32{ + 1, // 0: grpc.reflection.v1.ServerReflectionRequest.file_containing_extension:type_name -> grpc.reflection.v1.ExtensionRequest + 0, // 1: grpc.reflection.v1.ServerReflectionResponse.original_request:type_name -> grpc.reflection.v1.ServerReflectionRequest + 3, // 2: grpc.reflection.v1.ServerReflectionResponse.file_descriptor_response:type_name -> grpc.reflection.v1.FileDescriptorResponse + 4, // 3: grpc.reflection.v1.ServerReflectionResponse.all_extension_numbers_response:type_name -> grpc.reflection.v1.ExtensionNumberResponse + 5, // 4: grpc.reflection.v1.ServerReflectionResponse.list_services_response:type_name -> grpc.reflection.v1.ListServiceResponse + 7, // 5: grpc.reflection.v1.ServerReflectionResponse.error_response:type_name -> grpc.reflection.v1.ErrorResponse + 6, // 6: grpc.reflection.v1.ListServiceResponse.service:type_name -> grpc.reflection.v1.ServiceResponse + 0, // 7: grpc.reflection.v1.ServerReflection.ServerReflectionInfo:input_type -> grpc.reflection.v1.ServerReflectionRequest + 2, // 8: grpc.reflection.v1.ServerReflection.ServerReflectionInfo:output_type -> grpc.reflection.v1.ServerReflectionResponse + 8, // [8:9] is the sub-list for method output_type + 7, // [7:8] is the sub-list for method input_type + 7, // [7:7] is the sub-list for extension type_name + 7, // [7:7] is the sub-list for extension extendee + 0, // [0:7] is the sub-list for field type_name +} + +func init() { file_grpc_reflection_v1_reflection_proto_init() } +func file_grpc_reflection_v1_reflection_proto_init() { + if File_grpc_reflection_v1_reflection_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + 
file_grpc_reflection_v1_reflection_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServerReflectionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1_reflection_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExtensionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1_reflection_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServerReflectionResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1_reflection_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FileDescriptorResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1_reflection_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExtensionNumberResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1_reflection_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListServiceResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1_reflection_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServiceResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1_reflection_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ErrorResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_grpc_reflection_v1_reflection_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*ServerReflectionRequest_FileByFilename)(nil), + (*ServerReflectionRequest_FileContainingSymbol)(nil), + (*ServerReflectionRequest_FileContainingExtension)(nil), + (*ServerReflectionRequest_AllExtensionNumbersOfType)(nil), + (*ServerReflectionRequest_ListServices)(nil), + } + file_grpc_reflection_v1_reflection_proto_msgTypes[2].OneofWrappers = []interface{}{ + (*ServerReflectionResponse_FileDescriptorResponse)(nil), + (*ServerReflectionResponse_AllExtensionNumbersResponse)(nil), + (*ServerReflectionResponse_ListServicesResponse)(nil), + (*ServerReflectionResponse_ErrorResponse)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_grpc_reflection_v1_reflection_proto_rawDesc, + NumEnums: 0, + NumMessages: 8, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_grpc_reflection_v1_reflection_proto_goTypes, + DependencyIndexes: file_grpc_reflection_v1_reflection_proto_depIdxs, + MessageInfos: file_grpc_reflection_v1_reflection_proto_msgTypes, + }.Build() + File_grpc_reflection_v1_reflection_proto = out.File + file_grpc_reflection_v1_reflection_proto_rawDesc = nil + 
file_grpc_reflection_v1_reflection_proto_goTypes = nil + file_grpc_reflection_v1_reflection_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection_grpc.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection_grpc.pb.go new file mode 100644 index 0000000000..62b56a8be0 --- /dev/null +++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection_grpc.pb.go @@ -0,0 +1,164 @@ +// Copyright 2016 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Service exported by server reflection. A more complete description of how +// server reflection works can be found at +// https://github.com/grpc/grpc/blob/master/doc/server-reflection.md +// +// The canonical version of this proto can be found at +// https://github.com/grpc/grpc-proto/blob/master/grpc/reflection/v1/reflection.proto + +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc v4.22.0 +// source: grpc/reflection/v1/reflection.proto + +package grpc_reflection_v1 + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +const ( + ServerReflection_ServerReflectionInfo_FullMethodName = "/grpc.reflection.v1.ServerReflection/ServerReflectionInfo" +) + +// ServerReflectionClient is the client API for ServerReflection service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type ServerReflectionClient interface { + // The reflection service is structured as a bidirectional stream, ensuring + // all related requests go to a single server. + ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (ServerReflection_ServerReflectionInfoClient, error) +} + +type serverReflectionClient struct { + cc grpc.ClientConnInterface +} + +func NewServerReflectionClient(cc grpc.ClientConnInterface) ServerReflectionClient { + return &serverReflectionClient{cc} +} + +func (c *serverReflectionClient) ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (ServerReflection_ServerReflectionInfoClient, error) { + stream, err := c.cc.NewStream(ctx, &ServerReflection_ServiceDesc.Streams[0], ServerReflection_ServerReflectionInfo_FullMethodName, opts...) 
+ if err != nil { + return nil, err + } + x := &serverReflectionServerReflectionInfoClient{stream} + return x, nil +} + +type ServerReflection_ServerReflectionInfoClient interface { + Send(*ServerReflectionRequest) error + Recv() (*ServerReflectionResponse, error) + grpc.ClientStream +} + +type serverReflectionServerReflectionInfoClient struct { + grpc.ClientStream +} + +func (x *serverReflectionServerReflectionInfoClient) Send(m *ServerReflectionRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *serverReflectionServerReflectionInfoClient) Recv() (*ServerReflectionResponse, error) { + m := new(ServerReflectionResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// ServerReflectionServer is the server API for ServerReflection service. +// All implementations should embed UnimplementedServerReflectionServer +// for forward compatibility +type ServerReflectionServer interface { + // The reflection service is structured as a bidirectional stream, ensuring + // all related requests go to a single server. + ServerReflectionInfo(ServerReflection_ServerReflectionInfoServer) error +} + +// UnimplementedServerReflectionServer should be embedded to have forward compatible implementations. +type UnimplementedServerReflectionServer struct { +} + +func (UnimplementedServerReflectionServer) ServerReflectionInfo(ServerReflection_ServerReflectionInfoServer) error { + return status.Errorf(codes.Unimplemented, "method ServerReflectionInfo not implemented") +} + +// UnsafeServerReflectionServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to ServerReflectionServer will +// result in compilation errors. +type UnsafeServerReflectionServer interface { + mustEmbedUnimplementedServerReflectionServer() +} + +func RegisterServerReflectionServer(s grpc.ServiceRegistrar, srv ServerReflectionServer) { + s.RegisterService(&ServerReflection_ServiceDesc, srv) +} + +func _ServerReflection_ServerReflectionInfo_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(ServerReflectionServer).ServerReflectionInfo(&serverReflectionServerReflectionInfoServer{stream}) +} + +type ServerReflection_ServerReflectionInfoServer interface { + Send(*ServerReflectionResponse) error + Recv() (*ServerReflectionRequest, error) + grpc.ServerStream +} + +type serverReflectionServerReflectionInfoServer struct { + grpc.ServerStream +} + +func (x *serverReflectionServerReflectionInfoServer) Send(m *ServerReflectionResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *serverReflectionServerReflectionInfoServer) Recv() (*ServerReflectionRequest, error) { + m := new(ServerReflectionRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// ServerReflection_ServiceDesc is the grpc.ServiceDesc for ServerReflection service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var ServerReflection_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "grpc.reflection.v1.ServerReflection", + HandlerType: (*ServerReflectionServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "ServerReflectionInfo", + Handler: _ServerReflection_ServerReflectionInfo_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "grpc/reflection/v1/reflection.proto", +} diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go index d54c07676d..69fbfb621e 100644 --- a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go +++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go @@ -18,7 +18,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.31.0 // protoc v4.22.0 // grpc/reflection/v1alpha/reflection.proto is a deprecated file. diff --git a/vendor/google.golang.org/grpc/reflection/serverreflection.go b/vendor/google.golang.org/grpc/reflection/serverreflection.go index e2f9ebfbbc..76dae09d88 100644 --- a/vendor/google.golang.org/grpc/reflection/serverreflection.go +++ b/vendor/google.golang.org/grpc/reflection/serverreflection.go @@ -48,8 +48,9 @@ import ( "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/reflect/protoregistry" - v1alphagrpc "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" - v1alphapb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" + v1reflectiongrpc "google.golang.org/grpc/reflection/grpc_reflection_v1" + v1reflectionpb "google.golang.org/grpc/reflection/grpc_reflection_v1" + v1alphareflectiongrpc "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" ) // GRPCServer is the interface provided by a gRPC server. It is implemented by @@ -63,9 +64,19 @@ type GRPCServer interface { var _ GRPCServer = (*grpc.Server)(nil) // Register registers the server reflection service on the given gRPC server. +// Both the v1 and v1alpha versions are registered. func Register(s GRPCServer) { - svr := NewServer(ServerOptions{Services: s}) - v1alphagrpc.RegisterServerReflectionServer(s, svr) + svr := NewServerV1(ServerOptions{Services: s}) + v1alphareflectiongrpc.RegisterServerReflectionServer(s, asV1Alpha(svr)) + v1reflectiongrpc.RegisterServerReflectionServer(s, svr) +} + +// RegisterV1 registers only the v1 version of the server reflection service +// on the given gRPC server. Many clients may only support v1alpha so most +// users should use Register instead, at least until clients have upgraded. +func RegisterV1(s GRPCServer) { + svr := NewServerV1(ServerOptions{Services: s}) + v1reflectiongrpc.RegisterServerReflectionServer(s, svr) } // ServiceInfoProvider is an interface used to retrieve metadata about the @@ -120,13 +131,27 @@ type ServerOptions struct { // NewServer returns a reflection server implementation using the given options. // This can be used to customize behavior of the reflection service. Most usages +// should prefer to use Register instead. For backwards compatibility reasons, +// this returns the v1alpha version of the reflection server. For a v1 version +// of the reflection server, see NewServerV1. 
+// +// # Experimental +// +// Notice: This function is EXPERIMENTAL and may be changed or removed in a +// later release. +func NewServer(opts ServerOptions) v1alphareflectiongrpc.ServerReflectionServer { + return asV1Alpha(NewServerV1(opts)) +} + +// NewServerV1 returns a reflection server implementation using the given options. +// This can be used to customize behavior of the reflection service. Most usages // should prefer to use Register instead. // // # Experimental // // Notice: This function is EXPERIMENTAL and may be changed or removed in a // later release. -func NewServer(opts ServerOptions) v1alphagrpc.ServerReflectionServer { +func NewServerV1(opts ServerOptions) v1reflectiongrpc.ServerReflectionServer { if opts.DescriptorResolver == nil { opts.DescriptorResolver = protoregistry.GlobalFiles } @@ -141,7 +166,7 @@ func NewServer(opts ServerOptions) v1alphagrpc.ServerReflectionServer { } type serverReflectionServer struct { - v1alphagrpc.UnimplementedServerReflectionServer + v1alphareflectiongrpc.UnimplementedServerReflectionServer s ServiceInfoProvider descResolver protodesc.Resolver extResolver ExtensionResolver @@ -215,11 +240,11 @@ func (s *serverReflectionServer) allExtensionNumbersForTypeName(name string) ([] } // listServices returns the names of services this server exposes. -func (s *serverReflectionServer) listServices() []*v1alphapb.ServiceResponse { +func (s *serverReflectionServer) listServices() []*v1reflectionpb.ServiceResponse { serviceInfo := s.s.GetServiceInfo() - resp := make([]*v1alphapb.ServiceResponse, 0, len(serviceInfo)) + resp := make([]*v1reflectionpb.ServiceResponse, 0, len(serviceInfo)) for svc := range serviceInfo { - resp = append(resp, &v1alphapb.ServiceResponse{Name: svc}) + resp = append(resp, &v1reflectionpb.ServiceResponse{Name: svc}) } sort.Slice(resp, func(i, j int) bool { return resp[i].Name < resp[j].Name @@ -228,7 +253,7 @@ func (s *serverReflectionServer) listServices() []*v1alphapb.ServiceResponse { } // ServerReflectionInfo is the reflection service handler. 
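For orientation, a minimal sketch of how a server built against this vendored version might register reflection so that both protocol versions described above are served. This is not part of the patch; the listen address is illustrative, and everything else uses the public API shown in the hunks above (Register now wires up both v1 and v1alpha, while RegisterV1 exposes only v1):

package main

import (
	"log"
	"net"

	"google.golang.org/grpc"
	"google.golang.org/grpc/reflection"
)

func main() {
	lis, err := net.Listen("tcp", "localhost:50051") // illustrative address
	if err != nil {
		log.Fatalf("failed to listen: %v", err)
	}
	s := grpc.NewServer()
	// After this change, Register exposes the reflection service under both
	// grpc.reflection.v1 and grpc.reflection.v1alpha; RegisterV1 would expose
	// only the v1 service.
	reflection.Register(s)
	if err := s.Serve(lis); err != nil {
		log.Fatalf("failed to serve: %v", err)
	}
}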
-func (s *serverReflectionServer) ServerReflectionInfo(stream v1alphagrpc.ServerReflection_ServerReflectionInfoServer) error { +func (s *serverReflectionServer) ServerReflectionInfo(stream v1reflectiongrpc.ServerReflection_ServerReflectionInfoServer) error { sentFileDescriptors := make(map[string]bool) for { in, err := stream.Recv() @@ -239,79 +264,79 @@ func (s *serverReflectionServer) ServerReflectionInfo(stream v1alphagrpc.ServerR return err } - out := &v1alphapb.ServerReflectionResponse{ + out := &v1reflectionpb.ServerReflectionResponse{ ValidHost: in.Host, OriginalRequest: in, } switch req := in.MessageRequest.(type) { - case *v1alphapb.ServerReflectionRequest_FileByFilename: + case *v1reflectionpb.ServerReflectionRequest_FileByFilename: var b [][]byte fd, err := s.descResolver.FindFileByPath(req.FileByFilename) if err == nil { b, err = s.fileDescWithDependencies(fd, sentFileDescriptors) } if err != nil { - out.MessageResponse = &v1alphapb.ServerReflectionResponse_ErrorResponse{ - ErrorResponse: &v1alphapb.ErrorResponse{ + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &v1reflectionpb.ErrorResponse{ ErrorCode: int32(codes.NotFound), ErrorMessage: err.Error(), }, } } else { - out.MessageResponse = &v1alphapb.ServerReflectionResponse_FileDescriptorResponse{ - FileDescriptorResponse: &v1alphapb.FileDescriptorResponse{FileDescriptorProto: b}, + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_FileDescriptorResponse{ + FileDescriptorResponse: &v1reflectionpb.FileDescriptorResponse{FileDescriptorProto: b}, } } - case *v1alphapb.ServerReflectionRequest_FileContainingSymbol: + case *v1reflectionpb.ServerReflectionRequest_FileContainingSymbol: b, err := s.fileDescEncodingContainingSymbol(req.FileContainingSymbol, sentFileDescriptors) if err != nil { - out.MessageResponse = &v1alphapb.ServerReflectionResponse_ErrorResponse{ - ErrorResponse: &v1alphapb.ErrorResponse{ + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &v1reflectionpb.ErrorResponse{ ErrorCode: int32(codes.NotFound), ErrorMessage: err.Error(), }, } } else { - out.MessageResponse = &v1alphapb.ServerReflectionResponse_FileDescriptorResponse{ - FileDescriptorResponse: &v1alphapb.FileDescriptorResponse{FileDescriptorProto: b}, + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_FileDescriptorResponse{ + FileDescriptorResponse: &v1reflectionpb.FileDescriptorResponse{FileDescriptorProto: b}, } } - case *v1alphapb.ServerReflectionRequest_FileContainingExtension: + case *v1reflectionpb.ServerReflectionRequest_FileContainingExtension: typeName := req.FileContainingExtension.ContainingType extNum := req.FileContainingExtension.ExtensionNumber b, err := s.fileDescEncodingContainingExtension(typeName, extNum, sentFileDescriptors) if err != nil { - out.MessageResponse = &v1alphapb.ServerReflectionResponse_ErrorResponse{ - ErrorResponse: &v1alphapb.ErrorResponse{ + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &v1reflectionpb.ErrorResponse{ ErrorCode: int32(codes.NotFound), ErrorMessage: err.Error(), }, } } else { - out.MessageResponse = &v1alphapb.ServerReflectionResponse_FileDescriptorResponse{ - FileDescriptorResponse: &v1alphapb.FileDescriptorResponse{FileDescriptorProto: b}, + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_FileDescriptorResponse{ + FileDescriptorResponse: &v1reflectionpb.FileDescriptorResponse{FileDescriptorProto: b}, } } - case 
*v1alphapb.ServerReflectionRequest_AllExtensionNumbersOfType: + case *v1reflectionpb.ServerReflectionRequest_AllExtensionNumbersOfType: extNums, err := s.allExtensionNumbersForTypeName(req.AllExtensionNumbersOfType) if err != nil { - out.MessageResponse = &v1alphapb.ServerReflectionResponse_ErrorResponse{ - ErrorResponse: &v1alphapb.ErrorResponse{ + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &v1reflectionpb.ErrorResponse{ ErrorCode: int32(codes.NotFound), ErrorMessage: err.Error(), }, } } else { - out.MessageResponse = &v1alphapb.ServerReflectionResponse_AllExtensionNumbersResponse{ - AllExtensionNumbersResponse: &v1alphapb.ExtensionNumberResponse{ + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_AllExtensionNumbersResponse{ + AllExtensionNumbersResponse: &v1reflectionpb.ExtensionNumberResponse{ BaseTypeName: req.AllExtensionNumbersOfType, ExtensionNumber: extNums, }, } } - case *v1alphapb.ServerReflectionRequest_ListServices: - out.MessageResponse = &v1alphapb.ServerReflectionResponse_ListServicesResponse{ - ListServicesResponse: &v1alphapb.ListServiceResponse{ + case *v1reflectionpb.ServerReflectionRequest_ListServices: + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ListServicesResponse{ + ListServicesResponse: &v1reflectionpb.ListServiceResponse{ Service: s.listServices(), }, } diff --git a/vendor/google.golang.org/grpc/resolver/manual/manual.go b/vendor/google.golang.org/grpc/resolver/manual/manual.go new file mode 100644 index 0000000000..0a4262342f --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver/manual/manual.go @@ -0,0 +1,119 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package manual defines a resolver that can be used to manually send resolved +// addresses to ClientConn. +package manual + +import ( + "sync" + + "google.golang.org/grpc/resolver" +) + +// NewBuilderWithScheme creates a new manual resolver builder with the given +// scheme. Every instance of the manual resolver may only ever be used with a +// single grpc.ClientConn. Otherwise, bad things will happen. +func NewBuilderWithScheme(scheme string) *Resolver { + return &Resolver{ + BuildCallback: func(resolver.Target, resolver.ClientConn, resolver.BuildOptions) {}, + UpdateStateCallback: func(error) {}, + ResolveNowCallback: func(resolver.ResolveNowOptions) {}, + CloseCallback: func() {}, + scheme: scheme, + } +} + +// Resolver is also a resolver builder. +// It's build() function always returns itself. +type Resolver struct { + // BuildCallback is called when the Build method is called. Must not be + // nil. Must not be changed after the resolver may be built. + BuildCallback func(resolver.Target, resolver.ClientConn, resolver.BuildOptions) + // UpdateStateCallback is called when the UpdateState method is called on + // the resolver. The value passed as argument to this callback is the value + // returned by the resolver.ClientConn. Must not be nil. 
Must not be + // changed after the resolver may be built. + UpdateStateCallback func(err error) + // ResolveNowCallback is called when the ResolveNow method is called on the + // resolver. Must not be nil. Must not be changed after the resolver may + // be built. + ResolveNowCallback func(resolver.ResolveNowOptions) + // CloseCallback is called when the Close method is called. Must not be + // nil. Must not be changed after the resolver may be built. + CloseCallback func() + scheme string + + // Fields actually belong to the resolver. + // Guards access to below fields. + mu sync.Mutex + CC resolver.ClientConn + // Storing the most recent state update makes this resolver resilient to + // restarts, which is possible with channel idleness. + lastSeenState *resolver.State +} + +// InitialState adds initial state to the resolver so that UpdateState doesn't +// need to be explicitly called after Dial. +func (r *Resolver) InitialState(s resolver.State) { + r.lastSeenState = &s +} + +// Build returns itself for Resolver, because it's both a builder and a resolver. +func (r *Resolver) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { + r.BuildCallback(target, cc, opts) + r.mu.Lock() + r.CC = cc + if r.lastSeenState != nil { + err := r.CC.UpdateState(*r.lastSeenState) + go r.UpdateStateCallback(err) + } + r.mu.Unlock() + return r, nil +} + +// Scheme returns the manual resolver's scheme. +func (r *Resolver) Scheme() string { + return r.scheme +} + +// ResolveNow is a noop for Resolver. +func (r *Resolver) ResolveNow(o resolver.ResolveNowOptions) { + r.ResolveNowCallback(o) +} + +// Close is a noop for Resolver. +func (r *Resolver) Close() { + r.CloseCallback() +} + +// UpdateState calls CC.UpdateState. +func (r *Resolver) UpdateState(s resolver.State) { + r.mu.Lock() + err := r.CC.UpdateState(s) + r.lastSeenState = &s + r.mu.Unlock() + r.UpdateStateCallback(err) +} + +// ReportError calls CC.ReportError. +func (r *Resolver) ReportError(err error) { + r.mu.Lock() + r.CC.ReportError(err) + r.mu.Unlock() +} diff --git a/vendor/google.golang.org/grpc/resolver/map.go b/vendor/google.golang.org/grpc/resolver/map.go index efcb7f3efd..804be887de 100644 --- a/vendor/google.golang.org/grpc/resolver/map.go +++ b/vendor/google.golang.org/grpc/resolver/map.go @@ -20,7 +20,7 @@ package resolver type addressMapEntry struct { addr Address - value interface{} + value any } // AddressMap is a map of addresses to arbitrary values taking into account @@ -69,7 +69,7 @@ func (l addressMapEntryList) find(addr Address) int { } // Get returns the value for the address in the map, if present. -func (a *AddressMap) Get(addr Address) (value interface{}, ok bool) { +func (a *AddressMap) Get(addr Address) (value any, ok bool) { addrKey := toMapKey(&addr) entryList := a.m[addrKey] if entry := entryList.find(addr); entry != -1 { @@ -79,7 +79,7 @@ func (a *AddressMap) Get(addr Address) (value interface{}, ok bool) { } // Set updates or adds the value to the address in the map. -func (a *AddressMap) Set(addr Address, value interface{}) { +func (a *AddressMap) Set(addr Address, value any) { addrKey := toMapKey(&addr) entryList := a.m[addrKey] if entry := entryList.find(addr); entry != -1 { @@ -127,8 +127,8 @@ func (a *AddressMap) Keys() []Address { } // Values returns a slice of all current map values. 
-func (a *AddressMap) Values() []interface{} { - ret := make([]interface{}, 0, a.Len()) +func (a *AddressMap) Values() []any { + ret := make([]any, 0, a.Len()) for _, entryList := range a.m { for _, entry := range entryList { ret = append(ret, entry.value) diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go index 353c10b69a..11384e228e 100644 --- a/vendor/google.golang.org/grpc/resolver/resolver.go +++ b/vendor/google.golang.org/grpc/resolver/resolver.go @@ -77,25 +77,6 @@ func GetDefaultScheme() string { return defaultScheme } -// AddressType indicates the address type returned by name resolution. -// -// Deprecated: use Attributes in Address instead. -type AddressType uint8 - -const ( - // Backend indicates the address is for a backend server. - // - // Deprecated: use Attributes in Address instead. - Backend AddressType = iota - // GRPCLB indicates the address is for a grpclb load balancer. - // - // Deprecated: to select the GRPCLB load balancing policy, use a service - // config with a corresponding loadBalancingConfig. To supply balancer - // addresses to the GRPCLB load balancing policy, set State.Attributes - // using balancer/grpclb/state.Set. - GRPCLB -) - // Address represents a server the client connects to. // // # Experimental @@ -111,9 +92,6 @@ type Address struct { // the address, instead of the hostname from the Dial target string. In most cases, // this should not be set. // - // If Type is GRPCLB, ServerName should be the name of the remote load - // balancer, not the name of the backend. - // // WARNING: ServerName must only be populated with trusted values. It // is insecure to populate it with data from untrusted inputs since untrusted // values could be used to bypass the authority checks performed by TLS. @@ -126,27 +104,29 @@ type Address struct { // BalancerAttributes contains arbitrary data about this address intended // for consumption by the LB policy. These attributes do not affect SubConn // creation, connection establishment, handshaking, etc. - BalancerAttributes *attributes.Attributes - - // Type is the type of this address. // - // Deprecated: use Attributes instead. - Type AddressType + // Deprecated: when an Address is inside an Endpoint, this field should not + // be used, and it will eventually be removed entirely. + BalancerAttributes *attributes.Attributes // Metadata is the information associated with Addr, which may be used // to make load balancing decision. // // Deprecated: use Attributes instead. - Metadata interface{} + Metadata any } // Equal returns whether a and o are identical. Metadata is compared directly, // not with any recursive introspection. +// +// This method compares all fields of the address. When used to tell apart +// addresses during subchannel creation or connection establishment, it might be +// more appropriate for the caller to implement custom equality logic. func (a Address) Equal(o Address) bool { return a.Addr == o.Addr && a.ServerName == o.ServerName && a.Attributes.Equal(o.Attributes) && a.BalancerAttributes.Equal(o.BalancerAttributes) && - a.Type == o.Type && a.Metadata == o.Metadata + a.Metadata == o.Metadata } // String returns JSON formatted string representation of the address. @@ -190,11 +170,37 @@ type BuildOptions struct { Dialer func(context.Context, string) (net.Conn, error) } +// An Endpoint is one network endpoint, or server, which may have multiple +// addresses with which it can be accessed. 
+type Endpoint struct { + // Addresses contains a list of addresses used to access this endpoint. + Addresses []Address + + // Attributes contains arbitrary data about this endpoint intended for + // consumption by the LB policy. + Attributes *attributes.Attributes +} + // State contains the current Resolver state relevant to the ClientConn. type State struct { // Addresses is the latest set of resolved addresses for the target. + // + // If a resolver sets Addresses but does not set Endpoints, one Endpoint + // will be created for each Address before the State is passed to the LB + // policy. The BalancerAttributes of each entry in Addresses will be set + // in Endpoints.Attributes, and be cleared in the Endpoint's Address's + // BalancerAttributes. + // + // Soon, Addresses will be deprecated and replaced fully by Endpoints. Addresses []Address + // Endpoints is the latest set of resolved endpoints for the target. + // + // If a resolver produces a State containing Endpoints but not Addresses, + // it must take care to ensure the LB policies it selects will support + // Endpoints. + Endpoints []Endpoint + // ServiceConfig contains the result from parsing the latest service // config. If it is nil, it indicates no service config is present or the // resolver does not provide service configs. @@ -254,20 +260,7 @@ type ClientConn interface { // target does not contain a scheme or if the parsed scheme is not registered // (i.e. no corresponding resolver available to resolve the endpoint), we will // apply the default scheme, and will attempt to reparse it. -// -// Examples: -// -// - "dns://some_authority/foo.bar" -// Target{Scheme: "dns", Authority: "some_authority", Endpoint: "foo.bar"} -// - "foo.bar" -// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "foo.bar"} -// - "unknown_scheme://authority/endpoint" -// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "unknown_scheme://authority/endpoint"} type Target struct { - // Deprecated: use URL.Scheme instead. - Scheme string - // Deprecated: use URL.Host instead. - Authority string // URL contains the parsed dial target with an optional default scheme added // to it if the original dial target contained no scheme or contained an // unregistered scheme. Any query params specified in the original dial @@ -321,10 +314,3 @@ type Resolver interface { // Close closes the resolver. Close() } - -// UnregisterForTesting removes the resolver builder with the given scheme from the -// resolver map. -// This function is for testing only. -func UnregisterForTesting(scheme string) { - delete(m, scheme) -} diff --git a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go index b408b3688f..d683305608 100644 --- a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go +++ b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go @@ -133,7 +133,7 @@ func (ccr *ccResolverWrapper) close() { ccr.mu.Unlock() // Give enqueued callbacks a chance to finish. - <-ccr.serializer.Done + <-ccr.serializer.Done() // Spawn a goroutine to close the resolver (since it may block trying to // cleanup all allocated resources) and return early. @@ -152,6 +152,14 @@ func (ccr *ccResolverWrapper) serializerScheduleLocked(f func(context.Context)) // which includes addresses and service config. 
func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error { errCh := make(chan error, 1) + if s.Endpoints == nil { + s.Endpoints = make([]resolver.Endpoint, 0, len(s.Addresses)) + for _, a := range s.Addresses { + ep := resolver.Endpoint{Addresses: []resolver.Address{a}, Attributes: a.BalancerAttributes} + ep.Addresses[0].BalancerAttributes = nil + s.Endpoints = append(s.Endpoints, ep) + } + } ok := ccr.serializer.Schedule(func(context.Context) { ccr.addChannelzTraceEvent(s) ccr.curState = s diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index 2030736a30..b7723aa09c 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -75,7 +75,7 @@ func NewGZIPCompressorWithLevel(level int) (Compressor, error) { } return &gzipCompressor{ pool: sync.Pool{ - New: func() interface{} { + New: func() any { w, err := gzip.NewWriterLevel(io.Discard, level) if err != nil { panic(err) @@ -577,6 +577,9 @@ type parser struct { // The header of a gRPC message. Find more detail at // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md header [5]byte + + // recvBufferPool is the pool of shared receive buffers. + recvBufferPool SharedBufferPool } // recvMsg reads a complete gRPC message from the stream. @@ -610,9 +613,7 @@ func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byt if int(length) > maxReceiveMessageSize { return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", length, maxReceiveMessageSize) } - // TODO(bradfitz,zhaoq): garbage. reuse buffer after proto decoding instead - // of making it for each message: - msg = make([]byte, int(length)) + msg = p.recvBufferPool.Get(int(length)) if _, err := p.r.Read(msg); err != nil { if err == io.EOF { err = io.ErrUnexpectedEOF @@ -625,7 +626,7 @@ func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byt // encode serializes msg and returns a buffer containing the message, or an // error if it is too large to be transmitted by grpc. If msg is nil, it // generates an empty message. -func encode(c baseCodec, msg interface{}) ([]byte, error) { +func encode(c baseCodec, msg any) ([]byte, error) { if msg == nil { // NOTE: typed nils will not be caught by this check return nil, nil } @@ -692,7 +693,7 @@ func msgHeader(data, compData []byte) (hdr []byte, payload []byte) { return hdr, data } -func outPayload(client bool, msg interface{}, data, payload []byte, t time.Time) *stats.OutPayload { +func outPayload(client bool, msg any, data, payload []byte, t time.Time) *stats.OutPayload { return &stats.OutPayload{ Client: client, Payload: msg, @@ -726,12 +727,12 @@ type payloadInfo struct { } func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) ([]byte, error) { - pf, d, err := p.recvMsg(maxReceiveMessageSize) + pf, buf, err := p.recvMsg(maxReceiveMessageSize) if err != nil { return nil, err } if payInfo != nil { - payInfo.compressedLength = len(d) + payInfo.compressedLength = len(buf) } if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil); st != nil { @@ -743,10 +744,10 @@ func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxRecei // To match legacy behavior, if the decompressor is set by WithDecompressor or RPCDecompressor, // use this decompressor as the default. 
if dc != nil { - d, err = dc.Do(bytes.NewReader(d)) - size = len(d) + buf, err = dc.Do(bytes.NewReader(buf)) + size = len(buf) } else { - d, size, err = decompress(compressor, d, maxReceiveMessageSize) + buf, size, err = decompress(compressor, buf, maxReceiveMessageSize) } if err != nil { return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err) @@ -757,7 +758,7 @@ func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxRecei return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max (%d vs. %d)", size, maxReceiveMessageSize) } } - return d, nil + return buf, nil } // Using compressor, decompress d, returning data and size. @@ -791,16 +792,18 @@ func decompress(compressor encoding.Compressor, d []byte, maxReceiveMessageSize // For the two compressor parameters, both should not be set, but if they are, // dc takes precedence over compressor. // TODO(dfawley): wrap the old compressor/decompressor using the new API? -func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m interface{}, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) error { - d, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor) +func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m any, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) error { + buf, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor) if err != nil { return err } - if err := c.Unmarshal(d, m); err != nil { + if err := c.Unmarshal(buf, m); err != nil { return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message: %v", err) } if payInfo != nil { - payInfo.uncompressedBytes = d + payInfo.uncompressedBytes = buf + } else { + p.recvBufferPool.Put(&buf) } return nil } @@ -860,19 +863,22 @@ func ErrorDesc(err error) string { // Errorf returns nil if c is OK. // // Deprecated: use status.Errorf instead. -func Errorf(c codes.Code, format string, a ...interface{}) error { +func Errorf(c codes.Code, format string, a ...any) error { return status.Errorf(c, format, a...) } +var errContextCanceled = status.Error(codes.Canceled, context.Canceled.Error()) +var errContextDeadline = status.Error(codes.DeadlineExceeded, context.DeadlineExceeded.Error()) + // toRPCErr converts an error into an error from the status package. func toRPCErr(err error) error { switch err { case nil, io.EOF: return err case context.DeadlineExceeded: - return status.Error(codes.DeadlineExceeded, err.Error()) + return errContextDeadline case context.Canceled: - return status.Error(codes.Canceled, err.Error()) + return errContextCanceled case io.ErrUnexpectedEOF: return status.Error(codes.Internal, err.Error()) } diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index 8869cc906f..8f60d42143 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -86,7 +86,7 @@ func init() { var statusOK = status.New(codes.OK, "") var logger = grpclog.Component("core") -type methodHandler func(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor UnaryServerInterceptor) (interface{}, error) +type methodHandler func(srv any, ctx context.Context, dec func(any) error, interceptor UnaryServerInterceptor) (any, error) // MethodDesc represents an RPC service's method specification. 
type MethodDesc struct { @@ -99,20 +99,20 @@ type ServiceDesc struct { ServiceName string // The pointer to the service interface. Used to check whether the user // provided implementation satisfies the interface requirements. - HandlerType interface{} + HandlerType any Methods []MethodDesc Streams []StreamDesc - Metadata interface{} + Metadata any } // serviceInfo wraps information about a service. It is very similar to // ServiceDesc and is constructed from it for internal purposes. type serviceInfo struct { // Contains the implementation for the methods in this service. - serviceImpl interface{} + serviceImpl any methods map[string]*MethodDesc streams map[string]*StreamDesc - mdata interface{} + mdata any } // Server is a gRPC server to serve RPC requests. @@ -164,10 +164,12 @@ type serverOptions struct { initialConnWindowSize int32 writeBufferSize int readBufferSize int + sharedWriteBuffer bool connectionTimeout time.Duration maxHeaderListSize *uint32 headerTableSize *uint32 numServerWorkers uint32 + recvBufferPool SharedBufferPool } var defaultServerOptions = serverOptions{ @@ -177,6 +179,7 @@ var defaultServerOptions = serverOptions{ connectionTimeout: 120 * time.Second, writeBufferSize: defaultWriteBufSize, readBufferSize: defaultReadBufSize, + recvBufferPool: nopBufferPool{}, } var globalServerOptions []ServerOption @@ -228,6 +231,20 @@ func newJoinServerOption(opts ...ServerOption) ServerOption { return &joinServerOption{opts: opts} } +// SharedWriteBuffer allows reusing per-connection transport write buffer. +// If this option is set to true every connection will release the buffer after +// flushing the data on the wire. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func SharedWriteBuffer(val bool) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.sharedWriteBuffer = val + }) +} + // WriteBufferSize determines how much data can be batched before doing a write // on the wire. The corresponding memory allocation for this buffer will be // twice the size to keep syscalls low. The default value for this buffer is @@ -268,9 +285,9 @@ func InitialConnWindowSize(s int32) ServerOption { // KeepaliveParams returns a ServerOption that sets keepalive and max-age parameters for the server. func KeepaliveParams(kp keepalive.ServerParameters) ServerOption { - if kp.Time > 0 && kp.Time < time.Second { + if kp.Time > 0 && kp.Time < internal.KeepaliveMinServerPingTime { logger.Warning("Adjusting keepalive ping interval to minimum period of 1s") - kp.Time = time.Second + kp.Time = internal.KeepaliveMinServerPingTime } return newFuncServerOption(func(o *serverOptions) { @@ -550,6 +567,27 @@ func NumStreamWorkers(numServerWorkers uint32) ServerOption { }) } +// RecvBufferPool returns a ServerOption that configures the server +// to use the provided shared buffer pool for parsing incoming messages. Depending +// on the application's workload, this could result in reduced memory allocation. +// +// If you are unsure about how to implement a memory pool but want to utilize one, +// begin with grpc.NewSharedBufferPool. +// +// Note: The shared buffer pool feature will not be active if any of the following +// options are used: StatsHandler, EnableTracing, or binary logging. In such +// cases, the shared buffer pool will be ignored. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. 
+func RecvBufferPool(bufferPool SharedBufferPool) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.recvBufferPool = bufferPool + }) +} + // serverWorkerResetThreshold defines how often the stack must be reset. Every // N requests, by spawning a new goroutine in its place, a worker can reset its // stack so that large stacks don't live in memory forever. 2^16 should allow @@ -625,7 +663,7 @@ func NewServer(opt ...ServerOption) *Server { // printf records an event in s's event log, unless s has been stopped. // REQUIRES s.mu is held. -func (s *Server) printf(format string, a ...interface{}) { +func (s *Server) printf(format string, a ...any) { if s.events != nil { s.events.Printf(format, a...) } @@ -633,7 +671,7 @@ func (s *Server) printf(format string, a ...interface{}) { // errorf records an error in s's event log, unless s has been stopped. // REQUIRES s.mu is held. -func (s *Server) errorf(format string, a ...interface{}) { +func (s *Server) errorf(format string, a ...any) { if s.events != nil { s.events.Errorf(format, a...) } @@ -648,14 +686,14 @@ type ServiceRegistrar interface { // once the server has started serving. // desc describes the service and its methods and handlers. impl is the // service implementation which is passed to the method handlers. - RegisterService(desc *ServiceDesc, impl interface{}) + RegisterService(desc *ServiceDesc, impl any) } // RegisterService registers a service and its implementation to the gRPC // server. It is called from the IDL generated code. This must be called before // invoking Serve. If ss is non-nil (for legacy code), its type is checked to // ensure it implements sd.HandlerType. -func (s *Server) RegisterService(sd *ServiceDesc, ss interface{}) { +func (s *Server) RegisterService(sd *ServiceDesc, ss any) { if ss != nil { ht := reflect.TypeOf(sd.HandlerType).Elem() st := reflect.TypeOf(ss) @@ -666,7 +704,7 @@ func (s *Server) RegisterService(sd *ServiceDesc, ss interface{}) { s.register(sd, ss) } -func (s *Server) register(sd *ServiceDesc, ss interface{}) { +func (s *Server) register(sd *ServiceDesc, ss any) { s.mu.Lock() defer s.mu.Unlock() s.printf("RegisterService(%q)", sd.ServiceName) @@ -707,7 +745,7 @@ type MethodInfo struct { type ServiceInfo struct { Methods []MethodInfo // Metadata is the metadata specified in ServiceDesc when registering service. - Metadata interface{} + Metadata any } // GetServiceInfo returns a map from service names to ServiceInfo. 
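Taken together, the experimental server options added in these hunks can be combined roughly as follows. This is a sketch assuming an otherwise default server; note the caveat in the doc comment above that the receive buffer pool is ignored when stats handlers, tracing, or binary logging are enabled:

package serverdemo

import "google.golang.org/grpc"

// newServer opts into the shared receive buffer pool and the shared
// per-connection write buffer introduced by this upgrade.
func newServer() *grpc.Server {
	return grpc.NewServer(
		grpc.RecvBufferPool(grpc.NewSharedBufferPool()),
		grpc.SharedWriteBuffer(true),
	)
}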
@@ -908,6 +946,7 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { InitialConnWindowSize: s.opts.initialConnWindowSize, WriteBufferSize: s.opts.writeBufferSize, ReadBufferSize: s.opts.readBufferSize, + SharedWriteBuffer: s.opts.sharedWriteBuffer, ChannelzParentID: s.channelzID, MaxHeaderListSize: s.opts.maxHeaderListSize, HeaderTableSize: s.opts.headerTableSize, @@ -944,7 +983,7 @@ func (s *Server) serveStreams(st transport.ServerTransport) { f := func() { defer streamQuota.release() defer wg.Done() - s.handleStream(st, stream, s.traceInfo(st, stream)) + s.handleStream(st, stream) } if s.opts.numServerWorkers > 0 { @@ -956,12 +995,6 @@ func (s *Server) serveStreams(st transport.ServerTransport) { } } go f() - }, func(ctx context.Context, method string) context.Context { - if !EnableTracing { - return ctx - } - tr := trace.New("grpc.Recv."+methodFamily(method), method) - return trace.NewContext(ctx, tr) }) wg.Wait() } @@ -1010,30 +1043,6 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { s.serveStreams(st) } -// traceInfo returns a traceInfo and associates it with stream, if tracing is enabled. -// If tracing is not enabled, it returns nil. -func (s *Server) traceInfo(st transport.ServerTransport, stream *transport.Stream) (trInfo *traceInfo) { - if !EnableTracing { - return nil - } - tr, ok := trace.FromContext(stream.Context()) - if !ok { - return nil - } - - trInfo = &traceInfo{ - tr: tr, - firstLine: firstLine{ - client: false, - remoteAddr: st.RemoteAddr(), - }, - } - if dl, ok := stream.Context().Deadline(); ok { - trInfo.firstLine.deadline = time.Until(dl) - } - return trInfo -} - func (s *Server) addConn(addr string, st transport.ServerTransport) bool { s.mu.Lock() defer s.mu.Unlock() @@ -1094,7 +1103,7 @@ func (s *Server) incrCallsFailed() { atomic.AddInt64(&s.czData.callsFailed, 1) } -func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options, comp encoding.Compressor) error { +func (s *Server) sendResponse(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, msg any, cp Compressor, opts *transport.Options, comp encoding.Compressor) error { data, err := encode(s.getCodec(stream.ContentSubtype()), msg) if err != nil { channelz.Error(logger, s.channelzID, "grpc: server failed to encode response: ", err) @@ -1113,7 +1122,7 @@ func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Str err = t.Write(stream, hdr, payload, opts) if err == nil { for _, sh := range s.opts.statsHandlers { - sh.HandleRPC(stream.Context(), outPayload(false, msg, data, payload, time.Now())) + sh.HandleRPC(ctx, outPayload(false, msg, data, payload, time.Now())) } } return err @@ -1141,7 +1150,7 @@ func chainUnaryServerInterceptors(s *Server) { } func chainUnaryInterceptors(interceptors []UnaryServerInterceptor) UnaryServerInterceptor { - return func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (interface{}, error) { + return func(ctx context.Context, req any, info *UnaryServerInfo, handler UnaryHandler) (any, error) { return interceptors[0](ctx, req, info, getChainUnaryHandler(interceptors, 0, info, handler)) } } @@ -1150,12 +1159,12 @@ func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info if curr == len(interceptors)-1 { return finalHandler } - return func(ctx context.Context, req interface{}) (interface{}, error) { + return func(ctx context.Context, req any) (any, error) { 
return interceptors[curr+1](ctx, req, info, getChainUnaryHandler(interceptors, curr+1, info, finalHandler)) } } -func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) { +func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) { shs := s.opts.statsHandlers if len(shs) != 0 || trInfo != nil || channelz.IsOn() { if channelz.IsOn() { @@ -1169,7 +1178,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. IsClientStream: false, IsServerStream: false, } - sh.HandleRPC(stream.Context(), statsBegin) + sh.HandleRPC(ctx, statsBegin) } if trInfo != nil { trInfo.tr.LazyLog(&trInfo.firstLine, false) @@ -1187,7 +1196,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. defer func() { if trInfo != nil { if err != nil && err != io.EOF { - trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) trInfo.tr.SetError() } trInfo.tr.Finish() @@ -1201,7 +1210,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. if err != nil && err != io.EOF { end.Error = toRPCErr(err) } - sh.HandleRPC(stream.Context(), end) + sh.HandleRPC(ctx, end) } if channelz.IsOn() { @@ -1223,7 +1232,6 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. } } if len(binlogs) != 0 { - ctx := stream.Context() md, _ := metadata.FromIncomingContext(ctx) logEntry := &binarylog.ClientHeader{ Header: md, @@ -1294,7 +1302,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. if len(shs) != 0 || len(binlogs) != 0 { payInfo = &payloadInfo{} } - d, err := recvAndDecompress(&parser{r: stream}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp) + d, err := recvAndDecompress(&parser{r: stream, recvBufferPool: s.opts.recvBufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp) if err != nil { if e := t.WriteStatus(stream, status.Convert(err)); e != nil { channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e) @@ -1304,12 +1312,12 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. if channelz.IsOn() { t.IncrMsgRecv() } - df := func(v interface{}) error { + df := func(v any) error { if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil { return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) } for _, sh := range shs { - sh.HandleRPC(stream.Context(), &stats.InPayload{ + sh.HandleRPC(ctx, &stats.InPayload{ RecvTime: time.Now(), Payload: v, Length: len(d), @@ -1323,7 +1331,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. Message: d, } for _, binlog := range binlogs { - binlog.Log(stream.Context(), cm) + binlog.Log(ctx, cm) } } if trInfo != nil { @@ -1331,7 +1339,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. } return nil } - ctx := NewContextWithServerTransportStream(stream.Context(), stream) + ctx = NewContextWithServerTransportStream(ctx, stream) reply, appErr := md.Handler(info.serviceImpl, ctx, df, s.opts.unaryInt) if appErr != nil { appStatus, ok := status.FromError(appErr) @@ -1356,7 +1364,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. 
Header: h, } for _, binlog := range binlogs { - binlog.Log(stream.Context(), sh) + binlog.Log(ctx, sh) } } st := &binarylog.ServerTrailer{ @@ -1364,7 +1372,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. Err: appErr, } for _, binlog := range binlogs { - binlog.Log(stream.Context(), st) + binlog.Log(ctx, st) } } return appErr @@ -1379,7 +1387,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. if stream.SendCompress() != sendCompressorName { comp = encoding.GetCompressor(stream.SendCompress()) } - if err := s.sendResponse(t, stream, reply, cp, opts, comp); err != nil { + if err := s.sendResponse(ctx, t, stream, reply, cp, opts, comp); err != nil { if err == io.EOF { // The entire stream is done (for unary RPC only). return err @@ -1406,8 +1414,8 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. Err: appErr, } for _, binlog := range binlogs { - binlog.Log(stream.Context(), sh) - binlog.Log(stream.Context(), st) + binlog.Log(ctx, sh) + binlog.Log(ctx, st) } } return err @@ -1421,8 +1429,8 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. Message: reply, } for _, binlog := range binlogs { - binlog.Log(stream.Context(), sh) - binlog.Log(stream.Context(), sm) + binlog.Log(ctx, sh) + binlog.Log(ctx, sm) } } if channelz.IsOn() { @@ -1440,7 +1448,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. Err: appErr, } for _, binlog := range binlogs { - binlog.Log(stream.Context(), st) + binlog.Log(ctx, st) } } return t.WriteStatus(stream, statusOK) @@ -1468,7 +1476,7 @@ func chainStreamServerInterceptors(s *Server) { } func chainStreamInterceptors(interceptors []StreamServerInterceptor) StreamServerInterceptor { - return func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error { + return func(srv any, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error { return interceptors[0](srv, ss, info, getChainStreamHandler(interceptors, 0, info, handler)) } } @@ -1477,12 +1485,12 @@ func getChainStreamHandler(interceptors []StreamServerInterceptor, curr int, inf if curr == len(interceptors)-1 { return finalHandler } - return func(srv interface{}, stream ServerStream) error { + return func(srv any, stream ServerStream) error { return interceptors[curr+1](srv, stream, info, getChainStreamHandler(interceptors, curr+1, info, finalHandler)) } } -func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) { +func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) { if channelz.IsOn() { s.incrCallsStarted() } @@ -1496,15 +1504,15 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp IsServerStream: sd.ServerStreams, } for _, sh := range shs { - sh.HandleRPC(stream.Context(), statsBegin) + sh.HandleRPC(ctx, statsBegin) } } - ctx := NewContextWithServerTransportStream(stream.Context(), stream) + ctx = NewContextWithServerTransportStream(ctx, stream) ss := &serverStream{ ctx: ctx, t: t, s: stream, - p: &parser{r: stream}, + p: &parser{r: stream, recvBufferPool: s.opts.recvBufferPool}, codec: s.getCodec(stream.ContentSubtype()), maxReceiveMessageSize: s.opts.maxReceiveMessageSize, maxSendMessageSize: s.opts.maxSendMessageSize, @@ -1518,7 +1526,7 @@ func (s 
*Server) processStreamingRPC(t transport.ServerTransport, stream *transp if trInfo != nil { ss.mu.Lock() if err != nil && err != io.EOF { - ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) ss.trInfo.tr.SetError() } ss.trInfo.tr.Finish() @@ -1535,7 +1543,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp end.Error = toRPCErr(err) } for _, sh := range shs { - sh.HandleRPC(stream.Context(), end) + sh.HandleRPC(ctx, end) } } @@ -1577,7 +1585,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp logEntry.PeerAddr = peer.Addr } for _, binlog := range ss.binlogs { - binlog.Log(stream.Context(), logEntry) + binlog.Log(ctx, logEntry) } } @@ -1621,7 +1629,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp trInfo.tr.LazyLog(&trInfo.firstLine, false) } var appErr error - var server interface{} + var server any if info != nil { server = info.serviceImpl } @@ -1655,7 +1663,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp Err: appErr, } for _, binlog := range ss.binlogs { - binlog.Log(stream.Context(), st) + binlog.Log(ctx, st) } } t.WriteStatus(ss.s, appStatus) @@ -1673,33 +1681,50 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp Err: appErr, } for _, binlog := range ss.binlogs { - binlog.Log(stream.Context(), st) + binlog.Log(ctx, st) } } return t.WriteStatus(ss.s, statusOK) } -func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream, trInfo *traceInfo) { +func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream) { + ctx := stream.Context() + var ti *traceInfo + if EnableTracing { + tr := trace.New("grpc.Recv."+methodFamily(stream.Method()), stream.Method()) + ctx = trace.NewContext(ctx, tr) + ti = &traceInfo{ + tr: tr, + firstLine: firstLine{ + client: false, + remoteAddr: t.RemoteAddr(), + }, + } + if dl, ok := ctx.Deadline(); ok { + ti.firstLine.deadline = time.Until(dl) + } + } + sm := stream.Method() if sm != "" && sm[0] == '/' { sm = sm[1:] } pos := strings.LastIndex(sm, "/") if pos == -1 { - if trInfo != nil { - trInfo.tr.LazyLog(&fmtStringer{"Malformed method name %q", []interface{}{sm}}, true) - trInfo.tr.SetError() + if ti != nil { + ti.tr.LazyLog(&fmtStringer{"Malformed method name %q", []any{sm}}, true) + ti.tr.SetError() } errDesc := fmt.Sprintf("malformed method name: %q", stream.Method()) if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { - if trInfo != nil { - trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) - trInfo.tr.SetError() + if ti != nil { + ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) + ti.tr.SetError() } channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err) } - if trInfo != nil { - trInfo.tr.Finish() + if ti != nil { + ti.tr.Finish() } return } @@ -1709,17 +1734,17 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str srv, knownService := s.services[service] if knownService { if md, ok := srv.methods[method]; ok { - s.processUnaryRPC(t, stream, srv, md, trInfo) + s.processUnaryRPC(ctx, t, stream, srv, md, ti) return } if sd, ok := srv.streams[method]; ok { - s.processStreamingRPC(t, stream, srv, sd, trInfo) + s.processStreamingRPC(ctx, t, stream, srv, sd, ti) return } } // Unknown service, or known server unknown method. 
if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil { - s.processStreamingRPC(t, stream, nil, unknownDesc, trInfo) + s.processStreamingRPC(ctx, t, stream, nil, unknownDesc, ti) return } var errDesc string @@ -1728,19 +1753,19 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str } else { errDesc = fmt.Sprintf("unknown method %v for service %v", method, service) } - if trInfo != nil { - trInfo.tr.LazyPrintf("%s", errDesc) - trInfo.tr.SetError() + if ti != nil { + ti.tr.LazyPrintf("%s", errDesc) + ti.tr.SetError() } if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { - if trInfo != nil { - trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) - trInfo.tr.SetError() + if ti != nil { + ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) + ti.tr.SetError() } channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err) } - if trInfo != nil { - trInfo.tr.Finish() + if ti != nil { + ti.tr.Finish() } } @@ -2054,12 +2079,12 @@ func validateSendCompressor(name, clientCompressors string) error { // atomicSemaphore implements a blocking, counting semaphore. acquire should be // called synchronously; release may be called asynchronously. type atomicSemaphore struct { - n int64 + n atomic.Int64 wait chan struct{} } func (q *atomicSemaphore) acquire() { - if atomic.AddInt64(&q.n, -1) < 0 { + if q.n.Add(-1) < 0 { // We ran out of quota. Block until a release happens. <-q.wait } @@ -2070,12 +2095,14 @@ func (q *atomicSemaphore) release() { // concurrent calls to acquire, but also note that with synchronous calls to // acquire, as our system does, n will never be less than -1. There are // fairness issues (queuing) to consider if this was to be generalized. - if atomic.AddInt64(&q.n, 1) <= 0 { + if q.n.Add(1) <= 0 { // An acquire was waiting on us. Unblock it. q.wait <- struct{}{} } } func newHandlerQuota(n uint32) *atomicSemaphore { - return &atomicSemaphore{n: int64(n), wait: make(chan struct{}, 1)} + a := &atomicSemaphore{wait: make(chan struct{}, 1)} + a.n.Store(int64(n)) + return a } diff --git a/vendor/google.golang.org/grpc/shared_buffer_pool.go b/vendor/google.golang.org/grpc/shared_buffer_pool.go new file mode 100644 index 0000000000..48a64cfe8e --- /dev/null +++ b/vendor/google.golang.org/grpc/shared_buffer_pool.go @@ -0,0 +1,154 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import "sync" + +// SharedBufferPool is a pool of buffers that can be shared, resulting in +// decreased memory allocation. Currently, in gRPC-go, it is only utilized +// for parsing incoming messages. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +type SharedBufferPool interface { + // Get returns a buffer with specified length from the pool. + // + // The returned byte slice may be not zero initialized. 
+ Get(length int) []byte + + // Put returns a buffer to the pool. + Put(*[]byte) +} + +// NewSharedBufferPool creates a simple SharedBufferPool with buckets +// of different sizes to optimize memory usage. This prevents the pool from +// wasting large amounts of memory, even when handling messages of varying sizes. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func NewSharedBufferPool() SharedBufferPool { + return &simpleSharedBufferPool{ + pools: [poolArraySize]simpleSharedBufferChildPool{ + newBytesPool(level0PoolMaxSize), + newBytesPool(level1PoolMaxSize), + newBytesPool(level2PoolMaxSize), + newBytesPool(level3PoolMaxSize), + newBytesPool(level4PoolMaxSize), + newBytesPool(0), + }, + } +} + +// simpleSharedBufferPool is a simple implementation of SharedBufferPool. +type simpleSharedBufferPool struct { + pools [poolArraySize]simpleSharedBufferChildPool +} + +func (p *simpleSharedBufferPool) Get(size int) []byte { + return p.pools[p.poolIdx(size)].Get(size) +} + +func (p *simpleSharedBufferPool) Put(bs *[]byte) { + p.pools[p.poolIdx(cap(*bs))].Put(bs) +} + +func (p *simpleSharedBufferPool) poolIdx(size int) int { + switch { + case size <= level0PoolMaxSize: + return level0PoolIdx + case size <= level1PoolMaxSize: + return level1PoolIdx + case size <= level2PoolMaxSize: + return level2PoolIdx + case size <= level3PoolMaxSize: + return level3PoolIdx + case size <= level4PoolMaxSize: + return level4PoolIdx + default: + return levelMaxPoolIdx + } +} + +const ( + level0PoolMaxSize = 16 // 16 B + level1PoolMaxSize = level0PoolMaxSize * 16 // 256 B + level2PoolMaxSize = level1PoolMaxSize * 16 // 4 KB + level3PoolMaxSize = level2PoolMaxSize * 16 // 64 KB + level4PoolMaxSize = level3PoolMaxSize * 16 // 1 MB +) + +const ( + level0PoolIdx = iota + level1PoolIdx + level2PoolIdx + level3PoolIdx + level4PoolIdx + levelMaxPoolIdx + poolArraySize +) + +type simpleSharedBufferChildPool interface { + Get(size int) []byte + Put(any) +} + +type bufferPool struct { + sync.Pool + + defaultSize int +} + +func (p *bufferPool) Get(size int) []byte { + bs := p.Pool.Get().(*[]byte) + + if cap(*bs) < size { + p.Pool.Put(bs) + + return make([]byte, size) + } + + return (*bs)[:size] +} + +func newBytesPool(size int) simpleSharedBufferChildPool { + return &bufferPool{ + Pool: sync.Pool{ + New: func() any { + bs := make([]byte, size) + return &bs + }, + }, + defaultSize: size, + } +} + +// nopBufferPool is a buffer pool just makes new buffer without pooling. +type nopBufferPool struct { +} + +func (nopBufferPool) Get(length int) []byte { + return make([]byte, length) +} + +func (nopBufferPool) Put(*[]byte) { +} diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go index 7a552a9b78..4ab70e2d46 100644 --- a/vendor/google.golang.org/grpc/stats/stats.go +++ b/vendor/google.golang.org/grpc/stats/stats.go @@ -59,12 +59,22 @@ func (s *Begin) IsClient() bool { return s.Client } func (s *Begin) isRPCStats() {} +// PickerUpdated indicates that the LB policy provided a new picker while the +// RPC was waiting for one. +type PickerUpdated struct{} + +// IsClient indicates if the stats information is from client side. Only Client +// Side interfaces with a Picker, thus always returns true. +func (*PickerUpdated) IsClient() bool { return true } + +func (*PickerUpdated) isRPCStats() {} + // InPayload contains the information for an incoming payload. 
type InPayload struct { // Client is true if this InPayload is from client side. Client bool // Payload is the payload with original type. - Payload interface{} + Payload any // Data is the serialized message payload. Data []byte @@ -134,7 +144,7 @@ type OutPayload struct { // Client is true if this OutPayload is from client side. Client bool // Payload is the payload with original type. - Payload interface{} + Payload any // Data is the serialized message payload. Data []byte // Length is the size of the uncompressed payload data. Does not include any diff --git a/vendor/google.golang.org/grpc/status/status.go b/vendor/google.golang.org/grpc/status/status.go index bcf2e4d81b..a93360efb8 100644 --- a/vendor/google.golang.org/grpc/status/status.go +++ b/vendor/google.golang.org/grpc/status/status.go @@ -50,7 +50,7 @@ func New(c codes.Code, msg string) *Status { } // Newf returns New(c, fmt.Sprintf(format, a...)). -func Newf(c codes.Code, format string, a ...interface{}) *Status { +func Newf(c codes.Code, format string, a ...any) *Status { return New(c, fmt.Sprintf(format, a...)) } @@ -60,7 +60,7 @@ func Error(c codes.Code, msg string) error { } // Errorf returns Error(c, fmt.Sprintf(format, a...)). -func Errorf(c codes.Code, format string, a ...interface{}) error { +func Errorf(c codes.Code, format string, a ...any) error { return Error(c, fmt.Sprintf(format, a...)) } @@ -99,25 +99,27 @@ func FromError(err error) (s *Status, ok bool) { } type grpcstatus interface{ GRPCStatus() *Status } if gs, ok := err.(grpcstatus); ok { - if gs.GRPCStatus() == nil { + grpcStatus := gs.GRPCStatus() + if grpcStatus == nil { // Error has status nil, which maps to codes.OK. There // is no sensible behavior for this, so we turn it into // an error with codes.Unknown and discard the existing // status. return New(codes.Unknown, err.Error()), false } - return gs.GRPCStatus(), true + return grpcStatus, true } var gs grpcstatus if errors.As(err, &gs) { - if gs.GRPCStatus() == nil { + grpcStatus := gs.GRPCStatus() + if grpcStatus == nil { // Error wraps an error that has status nil, which maps // to codes.OK. There is no sensible behavior for this, // so we turn it into an error with codes.Unknown and // discard the existing status. return New(codes.Unknown, err.Error()), false } - p := gs.GRPCStatus().Proto() + p := grpcStatus.Proto() p.Message = err.Error() return status.FromProto(p), true } diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index 10092685b2..b14b2fbea2 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -31,6 +31,7 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/codes" "google.golang.org/grpc/encoding" + "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/balancerload" "google.golang.org/grpc/internal/binarylog" "google.golang.org/grpc/internal/channelz" @@ -54,7 +55,7 @@ import ( // status package, or be one of the context errors. Otherwise, gRPC will use // codes.Unknown as the status code and err.Error() as the status message of the // RPC. -type StreamHandler func(srv interface{}, stream ServerStream) error +type StreamHandler func(srv any, stream ServerStream) error // StreamDesc represents a streaming RPC service's method specification. Used // on the server when registering services and on the client when initiating @@ -79,9 +80,9 @@ type Stream interface { // Deprecated: See ClientStream and ServerStream documentation instead. 
Context() context.Context // Deprecated: See ClientStream and ServerStream documentation instead. - SendMsg(m interface{}) error + SendMsg(m any) error // Deprecated: See ClientStream and ServerStream documentation instead. - RecvMsg(m interface{}) error + RecvMsg(m any) error } // ClientStream defines the client-side behavior of a streaming RPC. @@ -90,7 +91,9 @@ type Stream interface { // status package. type ClientStream interface { // Header returns the header metadata received from the server if there - // is any. It blocks if the metadata is not ready to read. + // is any. It blocks if the metadata is not ready to read. If the metadata + // is nil and the error is also nil, then the stream was terminated without + // headers, and the status can be discovered by calling RecvMsg. Header() (metadata.MD, error) // Trailer returns the trailer metadata from the server, if there is any. // It must only be called after stream.CloseAndRecv has returned, or @@ -126,7 +129,7 @@ type ClientStream interface { // // It is not safe to modify the message after calling SendMsg. Tracing // libraries and stats handlers may use the message lazily. - SendMsg(m interface{}) error + SendMsg(m any) error // RecvMsg blocks until it receives a message into m or the stream is // done. It returns io.EOF when the stream completes successfully. On // any other error, the stream is aborted and the error contains the RPC @@ -135,7 +138,7 @@ type ClientStream interface { // It is safe to have a goroutine calling SendMsg and another goroutine // calling RecvMsg on the same stream at the same time, but it is not // safe to call RecvMsg on the same stream in different goroutines. - RecvMsg(m interface{}) error + RecvMsg(m any) error } // NewStream creates a new Stream for the client side. This is typically @@ -155,11 +158,6 @@ type ClientStream interface { // If none of the above happen, a goroutine and a context will be leaked, and grpc // will not call the optionally-configured stats handler with a stats.End message. func (cc *ClientConn) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) { - if err := cc.idlenessMgr.onCallBegin(); err != nil { - return nil, err - } - defer cc.idlenessMgr.onCallEnd() - // allow interceptor to see all applicable call options, which means those // configured as defaults from dial option as well as per-call options opts = combine(cc.dopts.callOptions, opts) @@ -176,6 +174,16 @@ func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth } func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) { + // Start tracking the RPC for idleness purposes. This is where a stream is + // created for both streaming and unary RPCs, and hence is a good place to + // track active RPC count. + if err := cc.idlenessMgr.OnCallBegin(); err != nil { + return nil, err + } + // Add a calloption, to decrement the active call count, that gets executed + // when the RPC completes. + opts = append([]CallOption{OnFinish(func(error) { cc.idlenessMgr.OnCallEnd() })}, opts...) 
+ if md, added, ok := metadata.FromOutgoingContextRaw(ctx); ok { // validate md if err := imetadata.Validate(md); err != nil { @@ -433,7 +441,7 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error) ctx = trace.NewContext(ctx, trInfo.tr) } - if cs.cc.parsedTarget.URL.Scheme == "xds" { + if cs.cc.parsedTarget.URL.Scheme == internal.GRPCResolverSchemeExtraMetadata { // Add extra metadata (metadata that will be added by transport) to context // so the balancer can see them. ctx = grpcutil.WithExtraMetadata(ctx, metadata.Pairs( @@ -507,7 +515,7 @@ func (a *csAttempt) newStream() error { return toRPCErr(nse.Err) } a.s = s - a.p = &parser{r: s} + a.p = &parser{r: s, recvBufferPool: a.cs.cc.dopts.recvBufferPool} return nil } @@ -788,23 +796,24 @@ func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) func (cs *clientStream) Header() (metadata.MD, error) { var m metadata.MD - noHeader := false err := cs.withRetry(func(a *csAttempt) error { var err error m, err = a.s.Header() - if err == transport.ErrNoHeaders { - noHeader = true - return nil - } return toRPCErr(err) }, cs.commitAttemptLocked) + if m == nil && err == nil { + // The stream ended with success. Finish the clientStream. + err = io.EOF + } + if err != nil { cs.finish(err) - return nil, err + // Do not return the error. The user should get it by calling Recv(). + return nil, nil } - if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged && !noHeader { + if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged && m != nil { // Only log if binary log is on and header has not been logged, and // there is actually headers to log. logEntry := &binarylog.ServerHeader{ @@ -820,6 +829,7 @@ func (cs *clientStream) Header() (metadata.MD, error) { binlog.Log(cs.ctx, logEntry) } } + return m, nil } @@ -860,7 +870,7 @@ func (cs *clientStream) bufferForRetryLocked(sz int, op func(a *csAttempt) error cs.buffer = append(cs.buffer, op) } -func (cs *clientStream) SendMsg(m interface{}) (err error) { +func (cs *clientStream) SendMsg(m any) (err error) { defer func() { if err != nil && err != io.EOF { // Call finish on the client stream for errors generated by this SendMsg @@ -904,7 +914,7 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) { return err } -func (cs *clientStream) RecvMsg(m interface{}) error { +func (cs *clientStream) RecvMsg(m any) error { if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged { // Call Header() to binary log header if it's not already logged. cs.Header() @@ -928,24 +938,6 @@ func (cs *clientStream) RecvMsg(m interface{}) error { if err != nil || !cs.desc.ServerStreams { // err != nil or non-server-streaming indicates end of stream. cs.finish(err) - - if len(cs.binlogs) != 0 { - // finish will not log Trailer. Log Trailer here. - logEntry := &binarylog.ServerTrailer{ - OnClientSide: true, - Trailer: cs.Trailer(), - Err: err, - } - if logEntry.Err == io.EOF { - logEntry.Err = nil - } - if peer, ok := peer.FromContext(cs.Context()); ok { - logEntry.PeerAddr = peer.Addr - } - for _, binlog := range cs.binlogs { - binlog.Log(cs.ctx, logEntry) - } - } } return err } @@ -1001,18 +993,30 @@ func (cs *clientStream) finish(err error) { } } } + cs.mu.Unlock() - // For binary logging. only log cancel in finish (could be caused by RPC ctx - // canceled or ClientConn closed). Trailer will be logged in RecvMsg. - // - // Only one of cancel or trailer needs to be logged. In the cases where - // users don't call RecvMsg, users must have already canceled the RPC. 
- if len(cs.binlogs) != 0 && status.Code(err) == codes.Canceled { - c := &binarylog.Cancel{ - OnClientSide: true, - } - for _, binlog := range cs.binlogs { - binlog.Log(cs.ctx, c) + // Only one of cancel or trailer needs to be logged. + if len(cs.binlogs) != 0 { + switch err { + case errContextCanceled, errContextDeadline, ErrClientConnClosing: + c := &binarylog.Cancel{ + OnClientSide: true, + } + for _, binlog := range cs.binlogs { + binlog.Log(cs.ctx, c) + } + default: + logEntry := &binarylog.ServerTrailer{ + OnClientSide: true, + Trailer: cs.Trailer(), + Err: err, + } + if peer, ok := peer.FromContext(cs.Context()); ok { + logEntry.PeerAddr = peer.Addr + } + for _, binlog := range cs.binlogs { + binlog.Log(cs.ctx, logEntry) + } } } if err == nil { @@ -1028,7 +1032,7 @@ func (cs *clientStream) finish(err error) { cs.cancel() } -func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error { +func (a *csAttempt) sendMsg(m any, hdr, payld, data []byte) error { cs := a.cs if a.trInfo != nil { a.mu.Lock() @@ -1055,7 +1059,7 @@ func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error { return nil } -func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) { +func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) { cs := a.cs if len(a.statsHandlers) != 0 && payInfo == nil { payInfo = &payloadInfo{} @@ -1270,7 +1274,7 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin return nil, err } as.s = s - as.p = &parser{r: s} + as.p = &parser{r: s, recvBufferPool: ac.dopts.recvBufferPool} ac.incrCallsStarted() if desc != unaryStreamDesc { // Listen on stream context to cleanup when the stream context is @@ -1348,7 +1352,7 @@ func (as *addrConnStream) Context() context.Context { return as.s.Context() } -func (as *addrConnStream) SendMsg(m interface{}) (err error) { +func (as *addrConnStream) SendMsg(m any) (err error) { defer func() { if err != nil && err != io.EOF { // Call finish on the client stream for errors generated by this SendMsg @@ -1393,7 +1397,7 @@ func (as *addrConnStream) SendMsg(m interface{}) (err error) { return nil } -func (as *addrConnStream) RecvMsg(m interface{}) (err error) { +func (as *addrConnStream) RecvMsg(m any) (err error) { defer func() { if err != nil || !as.desc.ServerStreams { // err != nil or non-server-streaming indicates end of stream. @@ -1512,7 +1516,7 @@ type ServerStream interface { // // It is not safe to modify the message after calling SendMsg. Tracing // libraries and stats handlers may use the message lazily. - SendMsg(m interface{}) error + SendMsg(m any) error // RecvMsg blocks until it receives a message into m or the stream is // done. It returns io.EOF when the client has performed a CloseSend. On // any non-EOF error, the stream is aborted and the error contains the @@ -1521,7 +1525,7 @@ type ServerStream interface { // It is safe to have a goroutine calling SendMsg and another goroutine // calling RecvMsg on the same stream at the same time, but it is not // safe to call RecvMsg on the same stream in different goroutines. - RecvMsg(m interface{}) error + RecvMsg(m any) error } // serverStream implements a server side Stream. 
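A minimal sketch of the SendMsg/RecvMsg concurrency contract documented above, written directly against grpc.ServerStream as a registered grpc.StreamHandler would be (the wrapperspb message is only a stand-in for a real request/response type):

import (
	"io"

	"google.golang.org/grpc"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

// One goroutine may call SendMsg while the handler goroutine calls RecvMsg,
// but RecvMsg must never be called from more than one goroutine at a time.
func handle(srv any, stream grpc.ServerStream) error {
	sendErr := make(chan error, 1)
	go func() {
		sendErr <- stream.SendMsg(wrapperspb.String("hello"))
	}()
	for {
		req := new(wrapperspb.StringValue)
		if err := stream.RecvMsg(req); err == io.EOF {
			break // client finished sending (CloseSend)
		} else if err != nil {
			return err
		}
	}
	return <-sendErr
}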
@@ -1602,7 +1606,7 @@ func (ss *serverStream) SetTrailer(md metadata.MD) { ss.s.SetTrailer(md) } -func (ss *serverStream) SendMsg(m interface{}) (err error) { +func (ss *serverStream) SendMsg(m any) (err error) { defer func() { if ss.trInfo != nil { ss.mu.Lock() @@ -1610,7 +1614,7 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) { if err == nil { ss.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true) } else { - ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) ss.trInfo.tr.SetError() } } @@ -1677,7 +1681,7 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) { return nil } -func (ss *serverStream) RecvMsg(m interface{}) (err error) { +func (ss *serverStream) RecvMsg(m any) (err error) { defer func() { if ss.trInfo != nil { ss.mu.Lock() @@ -1685,7 +1689,7 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) { if err == nil { ss.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true) } else if err != io.EOF { - ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) ss.trInfo.tr.SetError() } } @@ -1757,7 +1761,7 @@ func MethodFromServerStream(stream ServerStream) (string, bool) { // prepareMsg returns the hdr, payload and data // using the compressors passed or using the // passed preparedmsg -func prepareMsg(m interface{}, codec baseCodec, cp Compressor, comp encoding.Compressor) (hdr, payload, data []byte, err error) { +func prepareMsg(m any, codec baseCodec, cp Compressor, comp encoding.Compressor) (hdr, payload, data []byte, err error) { if preparedMsg, ok := m.(*PreparedMsg); ok { return preparedMsg.hdr, preparedMsg.payload, preparedMsg.encodedData, nil } diff --git a/vendor/google.golang.org/grpc/tap/tap.go b/vendor/google.golang.org/grpc/tap/tap.go index bfa5dfa40e..07f0125768 100644 --- a/vendor/google.golang.org/grpc/tap/tap.go +++ b/vendor/google.golang.org/grpc/tap/tap.go @@ -27,6 +27,8 @@ package tap import ( "context" + + "google.golang.org/grpc/metadata" ) // Info defines the relevant information needed by the handles. @@ -34,6 +36,10 @@ type Info struct { // FullMethodName is the string of grpc method (in the format of // /package.service/method). FullMethodName string + + // Header contains the header metadata received. + Header metadata.MD + // TODO: More to be added. } diff --git a/vendor/google.golang.org/grpc/trace.go b/vendor/google.golang.org/grpc/trace.go index 07a2d26b3e..9ded79321b 100644 --- a/vendor/google.golang.org/grpc/trace.go +++ b/vendor/google.golang.org/grpc/trace.go @@ -97,8 +97,8 @@ func truncate(x string, l int) string { // payload represents an RPC request or response payload. type payload struct { - sent bool // whether this is an outgoing payload - msg interface{} // e.g. a proto.Message + sent bool // whether this is an outgoing payload + msg any // e.g. a proto.Message // TODO(dsymonds): add stringifying info to codec, and limit how much we hold here? } @@ -111,7 +111,7 @@ func (p payload) String() string { type fmtStringer struct { format string - a []interface{} + a []any } func (f *fmtStringer) String() string { diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index 3cc7540621..6d2cadd79a 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. 
-const Version = "1.56.3" +const Version = "1.59.0" diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh index a8e4732b3d..bb480f1f9c 100644 --- a/vendor/google.golang.org/grpc/vet.sh +++ b/vendor/google.golang.org/grpc/vet.sh @@ -84,12 +84,18 @@ not git grep -l 'x/net/context' -- "*.go" # thread safety. git grep -l '"math/rand"' -- "*.go" 2>&1 | not grep -v '^examples\|^stress\|grpcrand\|^benchmark\|wrr_test' +# - Do not use "interface{}"; use "any" instead. +git grep -l 'interface{}' -- "*.go" 2>&1 | not grep -v '\.pb\.go\|protoc-gen-go-grpc' + # - Do not call grpclog directly. Use grpclog.Component instead. git grep -l -e 'grpclog.I' --or -e 'grpclog.W' --or -e 'grpclog.E' --or -e 'grpclog.F' --or -e 'grpclog.V' -- "*.go" | not grep -v '^grpclog/component.go\|^internal/grpctest/tlogger_test.go' # - Ensure all ptypes proto packages are renamed when importing. not git grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/" -- "*.go" +# - Ensure all usages of grpc_testing package are renamed when importing. +not git grep "\(import \|^\s*\)\"google.golang.org/grpc/interop/grpc_testing" -- "*.go" + # - Ensure all xds proto imports are renamed to *pb or *grpc. git grep '"github.com/envoyproxy/go-control-plane/envoy' -- '*.go' ':(exclude)*.pb.go' | not grep -v 'pb "\|grpc "' @@ -106,7 +112,7 @@ for MOD_FILE in $(find . -name 'go.mod'); do goimports -l . 2>&1 | not grep -vE "\.pb\.go" golint ./... 2>&1 | not grep -vE "/grpc_testing_not_regenerate/.*\.pb\.go:" - go mod tidy -compat=1.17 + go mod tidy -compat=1.19 git status --porcelain 2>&1 | fail_on_output || \ (git status; git --no-pager diff; exit 1) popd @@ -168,8 +174,6 @@ proto.RegisteredExtension is deprecated proto.RegisteredExtensions is deprecated proto.RegisterMapType is deprecated proto.Unmarshaler is deprecated -resolver.Backend -resolver.GRPCLB Target is deprecated: Use the Target field in the BuildOptions instead. xxx_messageInfo_ ' "${SC_OUT}" diff --git a/vendor/honnef.co/go/tools/analysis/code/code.go b/vendor/honnef.co/go/tools/analysis/code/code.go index db7debc22a..f200363d9d 100644 --- a/vendor/honnef.co/go/tools/analysis/code/code.go +++ b/vendor/honnef.co/go/tools/analysis/code/code.go @@ -17,7 +17,6 @@ import ( "honnef.co/go/tools/go/types/typeutil" "honnef.co/go/tools/pattern" - "golang.org/x/exp/typeparams" "golang.org/x/tools/go/analysis" ) @@ -146,7 +145,7 @@ func CallName(pass *analysis.Pass, call *ast.CallExpr) string { switch idx := fun.(type) { case *ast.IndexExpr: fun = idx.X - case *typeparams.IndexListExpr: + case *ast.IndexListExpr: fun = idx.X } @@ -277,7 +276,7 @@ func MayHaveSideEffects(pass *analysis.Pass, expr ast.Expr, purity purity.Result return false case *ast.IndexExpr: return MayHaveSideEffects(pass, expr.X, purity) || MayHaveSideEffects(pass, expr.Index, purity) - case *typeparams.IndexListExpr: + case *ast.IndexListExpr: // In theory, none of the checks are necessary, as IndexListExpr only involves types. But there is no harm in // being safe. if MayHaveSideEffects(pass, expr.X, purity) { diff --git a/vendor/honnef.co/go/tools/analysis/code/visit.go b/vendor/honnef.co/go/tools/analysis/code/visit.go index f8bf2d1698..0f0d644a1e 100644 --- a/vendor/honnef.co/go/tools/analysis/code/visit.go +++ b/vendor/honnef.co/go/tools/analysis/code/visit.go @@ -30,7 +30,7 @@ func Match(pass *analysis.Pass, q pattern.Pattern, node ast.Node) (*pattern.Matc // AST inspectors that already filter on nodes we're interested // in. 
m := &pattern.Matcher{TypesInfo: pass.TypesInfo} - ok := m.Match(q.Root, node) + ok := m.Match(q, node) return m, ok } diff --git a/vendor/honnef.co/go/tools/analysis/facts/deprecated/deprecated.go b/vendor/honnef.co/go/tools/analysis/facts/deprecated/deprecated.go index c558fabb00..dd6d655c32 100644 --- a/vendor/honnef.co/go/tools/analysis/facts/deprecated/deprecated.go +++ b/vendor/honnef.co/go/tools/analysis/facts/deprecated/deprecated.go @@ -48,6 +48,7 @@ func deprecated(pass *analysis.Pass) (interface{}, error) { } return "" } + doDocs := func(names []*ast.Ident, docs []*ast.CommentGroup) { alt := extractDeprecatedMessage(docs) if alt == "" { @@ -86,7 +87,15 @@ func deprecated(pass *analysis.Pass) (interface{}, error) { switch node.Tok { case token.TYPE, token.CONST, token.VAR: docs = append(docs, node.Doc) - return true + for i := range node.Specs { + switch n := node.Specs[i].(type) { + case *ast.ValueSpec: + names = append(names, n.Names...) + case *ast.TypeSpec: + names = append(names, n.Name) + } + } + ret = true default: return false } diff --git a/vendor/honnef.co/go/tools/analysis/facts/nilness/nilness.go b/vendor/honnef.co/go/tools/analysis/facts/nilness/nilness.go index eb4c1bab49..d296b5b0f7 100644 --- a/vendor/honnef.co/go/tools/analysis/facts/nilness/nilness.go +++ b/vendor/honnef.co/go/tools/analysis/facts/nilness/nilness.go @@ -230,6 +230,11 @@ func impl(pass *analysis.Pass, fn *ir.Function, seenFns map[*ir.Function]struct{ return neverNil case *ir.TypeAssert, *ir.ChangeInterface, *ir.Field, *ir.Const, *ir.GenericConst, *ir.Index, *ir.MapLookup, *ir.Parameter, *ir.Recv, *ir.TypeSwitch: return nilly + case *ir.CompositeValue: + // We can get here via composite literals of type parameters, for which typeutil.IsPointerLike doesn't + // currently return false (see https://staticcheck.io/issues/1364). However, we only emit ir.CompositeValue + // for value types, so we know it can't be nil. + return neverNil default: panic(fmt.Sprintf("internal error: unhandled type %T", v)) } diff --git a/vendor/honnef.co/go/tools/analysis/facts/purity/purity.go b/vendor/honnef.co/go/tools/analysis/facts/purity/purity.go index 4afc7a6a58..0f6895a8c3 100644 --- a/vendor/honnef.co/go/tools/analysis/facts/purity/purity.go +++ b/vendor/honnef.co/go/tools/analysis/facts/purity/purity.go @@ -1,5 +1,8 @@ package purity +// TODO(dh): we should split this into two facts, one tracking actual purity, and one tracking side-effects. A function +// that returns a heap allocation isn't pure, but it may be free of side effects. 
+ import ( "go/types" "reflect" @@ -53,6 +56,52 @@ var pureStdlib = map[string]struct{}{ "strings.TrimSpace": {}, "strings.TrimSuffix": {}, "(*net/http.Request).WithContext": {}, + "time.Now": {}, + "time.Parse": {}, + "time.ParseInLocation": {}, + "time.Unix": {}, + "time.UnixMicro": {}, + "time.UnixMilli": {}, + "(time.Time).Add": {}, + "(time.Time).AddDate": {}, + "(time.Time).After": {}, + "(time.Time).Before": {}, + "(time.Time).Clock": {}, + "(time.Time).Compare": {}, + "(time.Time).Date": {}, + "(time.Time).Day": {}, + "(time.Time).Equal": {}, + "(time.Time).Format": {}, + "(time.Time).GoString": {}, + "(time.Time).GobEncode": {}, + "(time.Time).Hour": {}, + "(time.Time).ISOWeek": {}, + "(time.Time).In": {}, + "(time.Time).IsDST": {}, + "(time.Time).IsZero": {}, + "(time.Time).Local": {}, + "(time.Time).Location": {}, + "(time.Time).MarshalBinary": {}, + "(time.Time).MarshalJSON": {}, + "(time.Time).MarshalText": {}, + "(time.Time).Minute": {}, + "(time.Time).Month": {}, + "(time.Time).Nanosecond": {}, + "(time.Time).Round": {}, + "(time.Time).Second": {}, + "(time.Time).String": {}, + "(time.Time).Sub": {}, + "(time.Time).Truncate": {}, + "(time.Time).UTC": {}, + "(time.Time).Unix": {}, + "(time.Time).UnixMicro": {}, + "(time.Time).UnixMilli": {}, + "(time.Time).UnixNano": {}, + "(time.Time).Weekday": {}, + "(time.Time).Year": {}, + "(time.Time).YearDay": {}, + "(time.Time).Zone": {}, + "(time.Time).ZoneBounds": {}, } func purity(pass *analysis.Pass) (interface{}, error) { @@ -99,10 +148,26 @@ func purity(pass *analysis.Pass) (interface{}, error) { return false } + var isBasic func(typ types.Type) bool + isBasic = func(typ types.Type) bool { + switch u := typ.Underlying().(type) { + case *types.Basic: + return true + case *types.Struct: + for i := 0; i < u.NumFields(); i++ { + if !isBasic(u.Field(i).Type()) { + return false + } + } + return true + default: + return false + } + } + for _, param := range fn.Params { - // TODO(dh): this may not be strictly correct. pure code - // can, to an extent, operate on non-basic types. - if _, ok := param.Type().Underlying().(*types.Basic); !ok { + // TODO(dh): this may not be strictly correct. pure code can, to an extent, operate on non-basic types. 
+ if !isBasic(param.Type()) { return false } } @@ -134,6 +199,18 @@ func purity(pass *analysis.Pass) (interface{}, error) { } return true } + + var isStackAddr func(ir.Value) bool + isStackAddr = func(v ir.Value) bool { + switch v := v.(type) { + case *ir.Alloc: + return !v.Heap + case *ir.FieldAddr: + return isStackAddr(v.X) + default: + return false + } + } for _, b := range fn.Blocks { for _, ins := range b.Instrs { switch ins := ins.(type) { @@ -154,13 +231,22 @@ func purity(pass *analysis.Pass) (interface{}, error) { case *ir.Panic: return false case *ir.Store: - return false + if !isStackAddr(ins.Addr) { + return false + } case *ir.FieldAddr: - return false + if !isStackAddr(ins.X) { + return false + } case *ir.Alloc: - return false + // TODO(dh): make use of proper escape analysis + if ins.Heap { + return false + } case *ir.Load: - return false + if !isStackAddr(ins.X) { + return false + } } } } diff --git a/vendor/honnef.co/go/tools/analysis/lint/lint.go b/vendor/honnef.co/go/tools/analysis/lint/lint.go index b4e37d6f40..7d116a23d0 100644 --- a/vendor/honnef.co/go/tools/analysis/lint/lint.go +++ b/vendor/honnef.co/go/tools/analysis/lint/lint.go @@ -8,6 +8,7 @@ import ( "go/ast" "go/build" "go/token" + "regexp" "strconv" "strings" @@ -206,21 +207,28 @@ func (v *VersionFlag) String() string { return fmt.Sprintf("1.%d", *v) } -func (v *VersionFlag) Set(s string) error { - if len(s) < 3 { - return fmt.Errorf("invalid Go version: %q", s) - } - if s[0] != '1' { - return fmt.Errorf("invalid Go version: %q", s) - } - if s[1] != '.' { - return fmt.Errorf("invalid Go version: %q", s) +var goVersionRE = regexp.MustCompile(`^(?:go)?1.(\d+).*$`) + +// ParseGoVersion parses Go versions of the form 1.M, 1.M.N, or 1.M.NrcR, with an optional "go" prefix. It assumes that +// versions have already been verified and are valid. 
+func ParseGoVersion(s string) (int, bool) { + m := goVersionRE.FindStringSubmatch(s) + if m == nil { + return 0, false } - i, err := strconv.Atoi(s[2:]) + n, err := strconv.Atoi(m[1]) if err != nil { + return 0, false + } + return n, true +} + +func (v *VersionFlag) Set(s string) error { + n, ok := ParseGoVersion(s) + if !ok { return fmt.Errorf("invalid Go version: %q", s) } - *v = VersionFlag(i) + *v = VersionFlag(n) return nil } diff --git a/vendor/honnef.co/go/tools/config/config.go b/vendor/honnef.co/go/tools/config/config.go index 8d9f084cff..a815a8a843 100644 --- a/vendor/honnef.co/go/tools/config/config.go +++ b/vendor/honnef.co/go/tools/config/config.go @@ -206,7 +206,7 @@ func parseConfigs(dir string) ([]Config, error) { return nil, err } var cfg Config - _, err = toml.DecodeReader(f, &cfg) + _, err = toml.NewDecoder(f).Decode(&cfg) f.Close() if err != nil { if err, ok := err.(toml.ParseError); ok { diff --git a/vendor/honnef.co/go/tools/go/ast/astutil/util.go b/vendor/honnef.co/go/tools/go/ast/astutil/util.go index 176bcde1d0..e04e1fb0c6 100644 --- a/vendor/honnef.co/go/tools/go/ast/astutil/util.go +++ b/vendor/honnef.co/go/tools/go/ast/astutil/util.go @@ -6,8 +6,6 @@ import ( "go/token" "reflect" "strings" - - "golang.org/x/exp/typeparams" ) func IsIdent(expr ast.Expr, ident string) bool { @@ -132,7 +130,7 @@ func CopyExpr(node ast.Expr) (ast.Expr, bool) { cp.X, ok1 = CopyExpr(cp.X) cp.Index, ok2 = CopyExpr(cp.Index) return &cp, ok1 && ok2 - case *typeparams.IndexListExpr: + case *ast.IndexListExpr: var ok bool cp := *node cp.X, ok = CopyExpr(cp.X) @@ -280,8 +278,8 @@ func Equal(a, b ast.Node) bool { case *ast.IndexExpr: b := b.(*ast.IndexExpr) return Equal(a.X, b.X) && Equal(a.Index, b.Index) - case *typeparams.IndexListExpr: - b := b.(*typeparams.IndexListExpr) + case *ast.IndexListExpr: + b := b.(*ast.IndexListExpr) if len(a.Indices) != len(b.Indices) { return false } diff --git a/vendor/honnef.co/go/tools/go/ir/blockopt.go b/vendor/honnef.co/go/tools/go/ir/blockopt.go index d7a0e35676..5378861179 100644 --- a/vendor/honnef.co/go/tools/go/ir/blockopt.go +++ b/vendor/honnef.co/go/tools/go/ir/blockopt.go @@ -31,7 +31,6 @@ func markReachable(b *BasicBlock) { // deleteUnreachableBlocks marks all reachable blocks of f and // eliminates (nils) all others, including possibly cyclic subgraphs. -// func deleteUnreachableBlocks(f *Function) { const white, black = 0, -1 // We borrow b.gaps temporarily as the mark bit. @@ -64,7 +63,6 @@ func deleteUnreachableBlocks(f *Function) { // jumpThreading attempts to apply simple jump-threading to block b, // in which a->b->c become a->c if b is just a Jump. // The result is true if the optimization was applied. -// func jumpThreading(f *Function, b *BasicBlock) bool { if b.Index == 0 { return false // don't apply to entry block @@ -118,7 +116,6 @@ func jumpThreading(f *Function, b *BasicBlock) bool { // fuseBlocks attempts to apply the block fusion optimization to block // a, in which a->b becomes ab if len(a.Succs)==len(b.Preds)==1. // The result is true if the optimization was applied. -// func fuseBlocks(f *Function, a *BasicBlock) bool { if len(a.Succs) != 1 { return false @@ -167,7 +164,6 @@ func fuseBlocks(f *Function, a *BasicBlock) bool { // optimizeBlocks() performs some simple block optimizations on a // completed function: dead block elimination, block fusion, jump // threading. 
-// func optimizeBlocks(f *Function) { if debugBlockOpt { f.WriteTo(os.Stderr) diff --git a/vendor/honnef.co/go/tools/go/ir/builder.go b/vendor/honnef.co/go/tools/go/ir/builder.go index e5566ade54..1a77ed0429 100644 --- a/vendor/honnef.co/go/tools/go/ir/builder.go +++ b/vendor/honnef.co/go/tools/go/ir/builder.go @@ -63,7 +63,6 @@ type builder struct { // to t or f depending on its value, performing various simplifications. // // Postcondition: fn.currentBlock is nil. -// func (b *builder) cond(fn *Function, e ast.Expr, t, f *BasicBlock) *If { switch e := e.(type) { case *ast.ParenExpr: @@ -102,7 +101,6 @@ func (b *builder) cond(fn *Function, e ast.Expr, t, f *BasicBlock) *If { // logicalBinop emits code to fn to evaluate e, a &&- or // ||-expression whose reified boolean value is wanted. // The value is returned. -// func (b *builder) logicalBinop(fn *Function, e *ast.BinaryExpr) Value { rhs := fn.newBasicBlock("binop.rhs") done := fn.newBasicBlock("binop.done") @@ -161,7 +159,6 @@ func (b *builder) logicalBinop(fn *Function, e *ast.BinaryExpr) Value { // Multi-result expressions include CallExprs in a multi-value // assignment or return statement, and "value,ok" uses of // TypeAssertExpr, IndexExpr (when X is a map), and Recv. -// func (b *builder) exprN(fn *Function, e ast.Expr) Value { typ := fn.Pkg.typeOf(e).(*types.Tuple) switch e := e.(type) { @@ -203,7 +200,6 @@ func (b *builder) exprN(fn *Function, e ast.Expr) Value { // The result is nil if no special handling was required; in this case // the caller should treat this like an ordinary library function // call. -// func (b *builder) builtin(fn *Function, obj *types.Builtin, args []ast.Expr, typ types.Type, source ast.Node) Value { switch obj.Name() { case "make": @@ -303,10 +299,10 @@ func (b *builder) builtin(fn *Function, obj *types.Builtin, args []ast.Expr, typ // addressable expression e as being a potentially escaping pointer // value. For example, in this code: // -// a := A{ -// b: [1]B{B{c: 1}} -// } -// return &a.b[0].c +// a := A{ +// b: [1]B{B{c: 1}} +// } +// return &a.b[0].c // // the application of & causes a.b[0].c to have its address taken, // which means that ultimately the local variable a must be @@ -317,7 +313,6 @@ func (b *builder) builtin(fn *Function, obj *types.Builtin, args []ast.Expr, typ // - &x, including when implicit in method call or composite literals. // - a[:] iff a is an array (not *array) // - references to variables in lexically enclosing functions. -// func (b *builder) addr(fn *Function, e ast.Expr, escaping bool) (RET lvalue) { switch e := e.(type) { case *ast.Ident: @@ -472,7 +467,6 @@ func (sb *storebuf) emit(fn *Function) { // storebuf sb so that they can be executed later. This allows correct // in-place update of existing variables when the RHS is a composite // literal that may reference parts of the LHS. -// func (b *builder) assign(fn *Function, loc lvalue, e ast.Expr, isZero bool, sb *storebuf, source ast.Node) { // Can we initialize it in place? 
if e, ok := unparen(e).(*ast.CompositeLit); ok { @@ -520,7 +514,7 @@ func (b *builder) assign(fn *Function, loc lvalue, e ast.Expr, isZero bool, sb * switch typeutil.CoreType(loc.typ()).Underlying().(type) { case *types.Struct, *types.Array: if sb != nil { - // Make sure we don't emit DebugRefs before the store has actually occured + // Make sure we don't emit DebugRefs before the store has actually occurred if ref := makeDebugRef(fn, e, addr, true); ref != nil { sb.storeDebugRef(ref) } @@ -545,7 +539,6 @@ func (b *builder) assign(fn *Function, loc lvalue, e ast.Expr, isZero bool, sb * // expr lowers a single-result expression e to IR form, emitting code // to fn and returning the Value defined by the expression. -// func (b *builder) expr(fn *Function, e ast.Expr) Value { e = unparen(e) @@ -719,8 +712,7 @@ func (b *builder) expr0(fn *Function, e ast.Expr, tv types.TypeAndValue) Value { if _, ok := obj.(*types.Var); ok { return emitLoad(fn, v, e) // var (address) } - instances := typeparams.GetInstances(fn.Pkg.info) - if instance, ok := instances[e]; ok { + if instance, ok := fn.Pkg.info.Instances[e]; ok { // Instantiated generic function return makeInstance(fn.Prog, v.(*Function), instance.Type.(*types.Signature), instance.TypeArgs) } @@ -837,7 +829,7 @@ func (b *builder) expr0(fn *Function, e ast.Expr, tv types.TypeAndValue) Value { panic("unexpected container type in IndexExpr: " + t.String()) } - case *typeparams.IndexListExpr: + case *ast.IndexListExpr: // Instantiating a generic function return b.expr(fn, e.X) @@ -866,7 +858,6 @@ func (b *builder) stmtList(fn *Function, list []ast.Stmt) { // must thus be addressable. // // escaping is defined as per builder.addr(). -// func (b *builder) receiver(fn *Function, e ast.Expr, wantAddr, escaping bool, sel *types.Selection, source ast.Node) Value { var v Value if wantAddr && !sel.Indirect() && !isPointer(fn.Pkg.typeOf(e)) { @@ -886,7 +877,6 @@ func (b *builder) receiver(fn *Function, e ast.Expr, wantAddr, escaping bool, se // setCallFunc populates the function parts of a CallCommon structure // (Func, Method, Recv, Args[0]) based on the kind of invocation // occurring in e. -// func (b *builder) setCallFunc(fn *Function, e *ast.CallExpr, c *CallCommon) { // Is this a method call? if selector, ok := unparen(e.Fun).(*ast.SelectorExpr); ok { @@ -953,7 +943,6 @@ func (b *builder) setCallFunc(fn *Function, e *ast.CallExpr, c *CallCommon) { // emitCallArgs emits to f code for the actual parameters of call e to // a (possibly built-in) function of effective type sig. // The argument values are appended to args, which is then returned. -// func (b *builder) emitCallArgs(fn *Function, sig *types.Signature, e *ast.CallExpr, args []Value) []Value { // f(x, y, z...): pass slice z straight through. if e.Ellipsis != 0 { @@ -1024,7 +1013,6 @@ func (b *builder) emitCallArgs(fn *Function, sig *types.Signature, e *ast.CallEx // setCall emits to fn code to evaluate all the parameters of a function // call e, and populates *c with those values. -// func (b *builder) setCall(fn *Function, e *ast.CallExpr, c *CallCommon) { // First deal with the f(...) part and optional receiver. b.setCallFunc(fn, e, c) @@ -1045,7 +1033,6 @@ func (b *builder) assignOp(fn *Function, loc lvalue, val Value, op token.Token, // localValueSpec emits to fn code to define all of the vars in the // function-local ValueSpec, spec. 
-// func (b *builder) localValueSpec(fn *Function, spec *ast.ValueSpec) { switch { case len(spec.Values) == len(spec.Names): @@ -1088,7 +1075,6 @@ func (b *builder) localValueSpec(fn *Function, spec *ast.ValueSpec) { // isDef is true if this is a short variable declaration (:=). // // Note the similarity with localValueSpec. -// func (b *builder) assignStmt(fn *Function, lhss, rhss []ast.Expr, isDef bool, source ast.Node) { // Side effects of all LHSs and RHSs must occur in left-to-right order. lvals := make([]lvalue, len(lhss)) @@ -1154,46 +1140,61 @@ func (b *builder) arrayLen(fn *Function, elts []ast.Expr) int64 { // // Because the elements of a composite literal may refer to the // variables being updated, as in the second line below, +// // x := T{a: 1} // x = T{a: x.a} -// all the reads must occur before all the writes. Thus all stores to -// loc are emitted to the storebuf sb for later execution. +// +// all the reads must occur before all the writes. This is implicitly handled by the write buffering effected by +// compositeElement and explicitly by the storebuf for when we don't use CompositeValue. // // A CompositeLit may have pointer type only in the recursive (nested) // case when the type name is implicit. e.g. in []*T{{}}, the inner // literal has type *T behaves like &T{}. // In that case, addr must hold a T, not a *T. -// func (b *builder) compLit(fn *Function, addr Value, e *ast.CompositeLit, isZero bool, sb *storebuf) { typ := deref(fn.Pkg.typeOf(e)) switch t := typeutil.CoreType(typ).(type) { case *types.Struct: - if !isZero && len(e.Elts) != t.NumFields() { - // memclear - sb.store(&address{addr, nil}, zeroValue(fn, deref(addr.Type()), e), e) - isZero = true - } - for i, e := range e.Elts { - fieldIndex := i - if kv, ok := e.(*ast.KeyValueExpr); ok { - fname := kv.Key.(*ast.Ident).Name - for i, n := 0, t.NumFields(); i < n; i++ { - sf := t.Field(i) - if sf.Name() == fname { - fieldIndex = i - e = kv.Value - break + lvalue := &address{addr: addr, expr: e} + if len(e.Elts) == 0 { + if !isZero { + sb.store(lvalue, zeroValue(fn, deref(addr.Type()), e), e) + } + } else { + v := &CompositeValue{ + Values: make([]Value, t.NumFields()), + } + for i := 0; i < t.NumFields(); i++ { + v.Values[i] = emitConst(fn, zeroConst(t.Field(i).Type())) + } + v.setType(typ) + + for i, e := range e.Elts { + fieldIndex := i + if kv, ok := e.(*ast.KeyValueExpr); ok { + fname := kv.Key.(*ast.Ident).Name + for i, n := 0, t.NumFields(); i < n; i++ { + sf := t.Field(i) + if sf.Name() == fname { + fieldIndex = i + e = kv.Value + break + } } } + + ce := &compositeElement{ + cv: v, + idx: fieldIndex, + t: t.Field(fieldIndex).Type(), + expr: e, + } + b.assign(fn, ce, e, isZero, sb, e) + v.Bitmap.SetBit(&v.Bitmap, fieldIndex, 1) + v.NumSet++ } - sf := t.Field(fieldIndex) - faddr := &FieldAddr{ - X: addr, - Field: fieldIndex, - } - faddr.setType(types.NewPointer(sf.Type())) - fn.emit(faddr, e) - b.assign(fn, &address{addr: faddr, expr: e}, e, isZero, sb, e) + fn.emit(v, e) + sb.store(lvalue, v, e) } case *types.Array, *types.Slice: @@ -1207,43 +1208,96 @@ func (b *builder) compLit(fn *Function, addr Value, e *ast.CompositeLit, isZero case *types.Array: at = t array = addr + } - if !isZero && int64(len(e.Elts)) != at.Len() { - // memclear - sb.store(&address{array, nil}, zeroValue(fn, deref(array.Type()), e), e) + var final Value + if len(e.Elts) == 0 { + if !isZero { + zc := emitConst(fn, zeroConst(at)) + final = zc } - } + } else { + if at.Len() == int64(len(e.Elts)) { + // The literal specifies all 
elements, so we can use a composite value + v := &CompositeValue{ + Values: make([]Value, at.Len()), + } + zc := emitConst(fn, zeroConst(at.Elem())) + for i := range v.Values { + v.Values[i] = zc + } + v.setType(at) - var idx *Const - for _, e := range e.Elts { - if kv, ok := e.(*ast.KeyValueExpr); ok { - idx = b.expr(fn, kv.Key).(*Const) - e = kv.Value - } else { - var idxval int64 - if idx != nil { - idxval = idx.Int64() + 1 + var idx *Const + for _, e := range e.Elts { + if kv, ok := e.(*ast.KeyValueExpr); ok { + idx = b.expr(fn, kv.Key).(*Const) + e = kv.Value + } else { + var idxval int64 + if idx != nil { + idxval = idx.Int64() + 1 + } + idx = emitConst(fn, intConst(idxval)).(*Const) + } + + iaddr := &compositeElement{ + cv: v, + idx: int(idx.Int64()), + t: at.Elem(), + expr: e, + } + + b.assign(fn, iaddr, e, true, sb, e) + v.Bitmap.SetBit(&v.Bitmap, int(idx.Int64()), 1) + v.NumSet++ } - idx = emitConst(fn, intConst(idxval)).(*Const) - } - iaddr := &IndexAddr{ - X: array, - Index: idx, - } - iaddr.setType(types.NewPointer(at.Elem())) - fn.emit(iaddr, e) - if t != at { // slice - // backing array is unaliased => storebuf not needed. - b.assign(fn, &address{addr: iaddr, expr: e}, e, true, nil, e) + final = v + fn.emit(v, e) } else { - b.assign(fn, &address{addr: iaddr, expr: e}, e, true, sb, e) + // Not all elements are specified. Populate the array with a series of stores, to guard against literals + // like []int{1<<62: 1}. + if !isZero { + // memclear + sb.store(&address{array, nil}, zeroValue(fn, deref(array.Type()), e), e) + } + + var idx *Const + for _, e := range e.Elts { + if kv, ok := e.(*ast.KeyValueExpr); ok { + idx = b.expr(fn, kv.Key).(*Const) + e = kv.Value + } else { + var idxval int64 + if idx != nil { + idxval = idx.Int64() + 1 + } + idx = emitConst(fn, intConst(idxval)).(*Const) + } + iaddr := &IndexAddr{ + X: array, + Index: idx, + } + iaddr.setType(types.NewPointer(at.Elem())) + fn.emit(iaddr, e) + if t != at { // slice + // backing array is unaliased => storebuf not needed. + b.assign(fn, &address{addr: iaddr, expr: e}, e, true, nil, e) + } else { + b.assign(fn, &address{addr: iaddr, expr: e}, e, true, sb, e) + } + } } } - if t != at { // slice + if final != nil { + sb.store(&address{addr: array}, final, e) + } s := &Slice{X: array} s.setType(typ) sb.store(&address{addr: addr, expr: e}, fn.emit(s, e), e) + } else if final != nil { + sb.store(&address{addr: array, expr: e}, final, e) } case *types.Map: @@ -1395,7 +1449,6 @@ func (b *builder) switchStmt(fn *Function, s *ast.SwitchStmt, label *lblock) { // switchStmt emits to fn code for the switch statement s, optionally // labelled by label. -// func (b *builder) switchStmtDynamic(fn *Function, s *ast.SwitchStmt, label *lblock) { // We treat SwitchStmt like a sequential if-else chain. // Multiway dispatch can be recovered later by irutil.Switches() @@ -1656,7 +1709,6 @@ func (b *builder) typeSwitchStmt(fn *Function, s *ast.TypeSwitchStmt, label *lbl // selectStmt emits to fn code for the select statement s, optionally // labelled by label. -// func (b *builder) selectStmt(fn *Function, s *ast.SelectStmt, label *lblock) (noreturn bool) { if len(s.Body.List) == 0 { instr := &Select{Blocking: true} @@ -1843,7 +1895,6 @@ func (b *builder) selectStmt(fn *Function, s *ast.SelectStmt, label *lblock) (no // forStmt emits to fn code for the for statement s, optionally // labelled by label. -// func (b *builder) forStmt(fn *Function, s *ast.ForStmt, label *lblock) { // ...init... 
// jump loop @@ -1900,7 +1951,6 @@ func (b *builder) forStmt(fn *Function, s *ast.ForStmt, label *lblock) { // over array, *array or slice value x. // The v result is defined only if tv is non-nil. // forPos is the position of the "for" token. -// func (b *builder) rangeIndexed(fn *Function, x Value, tv types.Type, source ast.Node) (k, v Value, loop, done *BasicBlock) { // // length = len(x) @@ -1998,7 +2048,6 @@ func (b *builder) rangeIndexed(fn *Function, x Value, tv types.Type, source ast. // Range/Next/Extract to iterate over map or string value x. // tk and tv are the types of the key/value results k and v, or nil // if the respective component is not wanted. -// func (b *builder) rangeIter(fn *Function, x Value, tk, tv types.Type, source ast.Node) (k, v Value, loop, done *BasicBlock) { // // it = range x @@ -2065,7 +2114,6 @@ func (b *builder) rangeIter(fn *Function, x Value, tk, tv types.Type, source ast // tk is the channel's element type, or nil if the k result is // not wanted // pos is the position of the '=' or ':=' token. -// func (b *builder) rangeChan(fn *Function, x Value, tk types.Type, source ast.Node) (k Value, loop, done *BasicBlock) { // // loop: (target of continue) @@ -2124,7 +2172,6 @@ func (v *variable) load() Value { // rangeStmt emits to fn code for the range statement s, optionally // labelled by label. -// func (b *builder) rangeStmt(fn *Function, s *ast.RangeStmt, label *lblock, source ast.Node) { var tk, tv types.Type if s.Key != nil && !isBlankIdent(s.Key) { @@ -2331,7 +2378,7 @@ start: block = fn.labelledBlock(s.Label)._goto } j := emitJump(fn, block, s) - j.Comment = s.Tok.String() + j.comment = s.Tok.String() fn.currentBlock = fn.newBasicBlock("unreachable") case *ast.BlockStmt: @@ -2471,7 +2518,6 @@ func (b *builder) buildFunction(fn *Function) { // buildFuncDecl builds IR code for the function or method declared // by decl in package pkg. -// func (b *builder) buildFuncDecl(pkg *Package, decl *ast.FuncDecl) { id := decl.Name if isBlankIdent(id) { @@ -2494,7 +2540,6 @@ func (b *builder) buildFuncDecl(pkg *Package, decl *ast.FuncDecl) { // need only build a single package. // // Build is idempotent and thread-safe. -// func (prog *Program) Build() { for _, p := range prog.packages { p.Build() @@ -2508,7 +2553,6 @@ func (prog *Program) Build() { // error-free). // // Build is idempotent and thread-safe. -// func (p *Package) Build() { p.buildOnce.Do(p.build) } func (p *Package) build() { diff --git a/vendor/honnef.co/go/tools/go/ir/const.go b/vendor/honnef.co/go/tools/go/ir/const.go index 9dd7e83b9d..0faf3852a6 100644 --- a/vendor/honnef.co/go/tools/go/ir/const.go +++ b/vendor/honnef.co/go/tools/go/ir/const.go @@ -19,7 +19,6 @@ import ( // NewConst returns a new constant of the specified value and type. // val must be valid according to the specification of Const.Value. -// func NewConst(val constant.Value, typ types.Type) *Const { return &Const{ register: register{ @@ -37,7 +36,6 @@ func intConst(i int64) *Const { // nilConst returns a nil constant of the specified type, which may // be any reference type, including interfaces. 
-// func nilConst(typ types.Type) *Const { return NewConst(nil, typ) } @@ -58,7 +56,7 @@ func zeroConst(t types.Type) Constant { switch typ := tset.CoreType().(type) { case *types.Struct: - values := make([]Constant, typ.NumFields()) + values := make([]Value, typ.NumFields()) for i := 0; i < typ.NumFields(); i++ { values[i] = zeroConst(typ.Field(i).Type()) } @@ -67,7 +65,7 @@ func zeroConst(t types.Type) Constant { Values: values, } case *types.Tuple: - values := make([]Constant, typ.Len()) + values := make([]Value, typ.Len()) for i := 0; i < typ.Len(); i++ { values[i] = zeroConst(typ.At(i).Type()) } @@ -77,7 +75,7 @@ func zeroConst(t types.Type) Constant { } } - isNillable := func(term *typeparams.Term) bool { + isNillable := func(term *types.Term) bool { switch typ := term.Type().Underlying().(type) { case *types.Pointer, *types.Slice, *types.Interface, *types.Chan, *types.Map, *types.Signature, *typeutil.Iterator: return true @@ -93,8 +91,8 @@ func zeroConst(t types.Type) Constant { } } - isInfo := func(info types.BasicInfo) func(*typeparams.Term) bool { - return func(term *typeparams.Term) bool { + isInfo := func(info types.BasicInfo) func(*types.Term) bool { + return func(term *types.Term) bool { basic, ok := term.Type().Underlying().(*types.Basic) if !ok { return false @@ -103,7 +101,7 @@ func zeroConst(t types.Type) Constant { } } - isArray := func(term *typeparams.Term) bool { + isArray := func(term *types.Term) bool { _, ok := term.Type().Underlying().(*types.Array) return ok } @@ -147,6 +145,11 @@ func (c *Const) RelString(from *types.Package) string { } func (c *Const) String() string { + if c.block == nil { + // Constants don't have a block till late in the compilation process. But we want to print consts during + // debugging. + return c.RelString(nil) + } return c.RelString(c.Parent().pkg()) } @@ -162,7 +165,7 @@ func (v *AggregateConst) RelString(pkg *types.Package) string { values := make([]string, len(v.Values)) for i, v := range v.Values { if v != nil { - values[i] = v.RelString(pkg) + values[i] = v.Name() } else { values[i] = "nil" } @@ -170,6 +173,13 @@ func (v *AggregateConst) RelString(pkg *types.Package) string { return fmt.Sprintf("AggregateConst <%s> (%s)", relType(v.Type(), pkg), strings.Join(values, ", ")) } +func (v *AggregateConst) String() string { + if v.block == nil { + return v.RelString(nil) + } + return v.RelString(v.Parent().pkg()) +} + func (v *GenericConst) RelString(pkg *types.Package) string { return fmt.Sprintf("GenericConst <%s>", relType(v.Type(), pkg)) } @@ -178,10 +188,6 @@ func (v *GenericConst) String() string { return v.RelString(v.Parent().pkg()) } -func (v *AggregateConst) String() string { - return v.RelString(v.Parent().pkg()) -} - // IsNil returns true if this constant represents a typed or untyped nil value. func (c *Const) IsNil() bool { return c.Value == nil @@ -189,7 +195,6 @@ func (c *Const) IsNil() bool { // Int64 returns the numeric value of this constant truncated to fit // a signed 64-bit integer. -// func (c *Const) Int64() int64 { switch x := constant.ToInt(c.Value); x.Kind() { case constant.Int: @@ -206,7 +211,6 @@ func (c *Const) Int64() int64 { // Uint64 returns the numeric value of this constant truncated to fit // an unsigned 64-bit integer. -// func (c *Const) Uint64() uint64 { switch x := constant.ToInt(c.Value); x.Kind() { case constant.Int: @@ -223,7 +227,6 @@ func (c *Const) Uint64() uint64 { // Float64 returns the numeric value of this constant truncated to fit // a float64. 
-// func (c *Const) Float64() float64 { f, _ := constant.Float64Val(c.Value) return f @@ -231,7 +234,6 @@ func (c *Const) Float64() float64 { // Complex128 returns the complex value of this constant truncated to // fit a complex128. -// func (c *Const) Complex128() complex128 { re, _ := constant.Float64Val(constant.Real(c.Value)) im, _ := constant.Float64Val(constant.Imag(c.Value)) @@ -253,7 +255,15 @@ func (c *AggregateConst) equal(o Constant) bool { return false } // TODO(dh): don't use == for types, this will miss identical pointer types, among others - return c.typ == oc.typ + if c.typ != oc.typ { + return false + } + for i, v := range c.Values { + if !v.(Constant).equal(oc.Values[i].(Constant)) { + return false + } + } + return true } func (c *ArrayConst) equal(o Constant) bool { diff --git a/vendor/honnef.co/go/tools/go/ir/create.go b/vendor/honnef.co/go/tools/go/ir/create.go index 5e7f6ed94b..28e7da7e94 100644 --- a/vendor/honnef.co/go/tools/go/ir/create.go +++ b/vendor/honnef.co/go/tools/go/ir/create.go @@ -27,7 +27,6 @@ const avgInstructionsPerBlock = 16 // NewProgram returns a new IR Program. // // mode controls diagnostics and checking during IR construction. -// func NewProgram(fset *token.FileSet, mode BuilderMode) *Program { prog := &Program{ Fset: fset, @@ -51,7 +50,6 @@ func NewProgram(fset *token.FileSet, mode BuilderMode) *Program { // For objects from Go source code, syntax is the associated syntax // tree (for funcs and vars only); it will be used during the build // phase. -// func memberFromObject(pkg *Package, obj types.Object, syntax ast.Node) { name := obj.Name() switch obj := obj.(type) { @@ -128,7 +126,6 @@ func memberFromObject(pkg *Package, obj types.Object, syntax ast.Node) { // membersFromDecl populates package pkg with members for each // typechecker object (var, func, const or type) associated with the // specified decl. -// func membersFromDecl(pkg *Package, decl ast.Decl) { switch decl := decl.(type) { case *ast.GenDecl: // import, const, type or var @@ -177,7 +174,6 @@ func membersFromDecl(pkg *Package, decl ast.Decl) { // // The real work of building IR form for each function is not done // until a subsequent call to Package.Build(). -// func (prog *Program) CreatePackage(pkg *types.Package, files []*ast.File, info *types.Info, importable bool) *Package { p := &Package{ Prog: prog, @@ -260,7 +256,6 @@ var printMu sync.Mutex // AllPackages returns a new slice containing all packages in the // program prog in unspecified order. -// func (prog *Program) AllPackages() []*Package { pkgs := make([]*Package, 0, len(prog.packages)) for _, pkg := range prog.packages { @@ -282,7 +277,6 @@ func (prog *Program) AllPackages() []*Package { // false---yet this function remains very convenient. // Clients should use (*Program).Package instead where possible. // IR doesn't really need a string-keyed map of packages. -// func (prog *Program) ImportedPackage(path string) *Package { return prog.imported[path] } diff --git a/vendor/honnef.co/go/tools/go/ir/doc.go b/vendor/honnef.co/go/tools/go/ir/doc.go index 7158a0aec2..5ee6637db4 100644 --- a/vendor/honnef.co/go/tools/go/ir/doc.go +++ b/vendor/honnef.co/go/tools/go/ir/doc.go @@ -39,66 +39,67 @@ // // The primary interfaces of this package are: // -// - Member: a named member of a Go package. -// - Value: an expression that yields a value. -// - Instruction: a statement that consumes values and performs computation. 
-// - Node: a Value or Instruction (emphasizing its membership in the IR value graph) +// - Member: a named member of a Go package. +// - Value: an expression that yields a value. +// - Instruction: a statement that consumes values and performs computation. +// - Node: a Value or Instruction (emphasizing its membership in the IR value graph) // // A computation that yields a result implements both the Value and // Instruction interfaces. The following table shows for each // concrete type which of these interfaces it implements. // -// Value? Instruction? Member? -// *Alloc ✔ ✔ -// *BinOp ✔ ✔ -// *BlankStore ✔ -// *Builtin ✔ -// *Call ✔ ✔ -// *ChangeInterface ✔ ✔ -// *ChangeType ✔ ✔ -// *Const ✔ ✔ -// *Convert ✔ ✔ -// *DebugRef ✔ -// *Defer ✔ ✔ -// *Extract ✔ ✔ -// *Field ✔ ✔ -// *FieldAddr ✔ ✔ -// *FreeVar ✔ -// *Function ✔ ✔ (func) -// *Global ✔ ✔ (var) -// *Go ✔ ✔ -// *If ✔ -// *Index ✔ ✔ -// *IndexAddr ✔ ✔ -// *Jump ✔ -// *Load ✔ ✔ -// *MakeChan ✔ ✔ -// *MakeClosure ✔ ✔ -// *MakeInterface ✔ ✔ -// *MakeMap ✔ ✔ -// *MakeSlice ✔ ✔ -// *MapLookup ✔ ✔ -// *MapUpdate ✔ ✔ -// *NamedConst ✔ (const) -// *Next ✔ ✔ -// *Panic ✔ -// *Parameter ✔ ✔ -// *Phi ✔ ✔ -// *Range ✔ ✔ -// *Recv ✔ ✔ -// *Return ✔ -// *RunDefers ✔ -// *Select ✔ ✔ -// *Send ✔ ✔ -// *Sigma ✔ ✔ -// *Slice ✔ ✔ -// *SliceToArrayPointer ✔ ✔ -// *Store ✔ ✔ -// *StringLookup ✔ ✔ -// *Type ✔ (type) -// *TypeAssert ✔ ✔ -// *UnOp ✔ ✔ -// *Unreachable ✔ +// Value? Instruction? Member? +// *Alloc ✔ ✔ +// *BinOp ✔ ✔ +// *BlankStore ✔ +// *Builtin ✔ +// *Call ✔ ✔ +// *ChangeInterface ✔ ✔ +// *ChangeType ✔ ✔ +// *Const ✔ ✔ +// *Convert ✔ ✔ +// *DebugRef ✔ +// *Defer ✔ ✔ +// *Extract ✔ ✔ +// *Field ✔ ✔ +// *FieldAddr ✔ ✔ +// *FreeVar ✔ +// *Function ✔ ✔ (func) +// *Global ✔ ✔ (var) +// *Go ✔ ✔ +// *If ✔ +// *Index ✔ ✔ +// *IndexAddr ✔ ✔ +// *Jump ✔ +// *Load ✔ ✔ +// *MakeChan ✔ ✔ +// *MakeClosure ✔ ✔ +// *MakeInterface ✔ ✔ +// *MakeMap ✔ ✔ +// *MakeSlice ✔ ✔ +// *MapLookup ✔ ✔ +// *MapUpdate ✔ ✔ +// *NamedConst ✔ (const) +// *Next ✔ ✔ +// *Panic ✔ +// *Parameter ✔ ✔ +// *Phi ✔ ✔ +// *Range ✔ ✔ +// *Recv ✔ ✔ +// *Return ✔ +// *RunDefers ✔ +// *Select ✔ ✔ +// *Send ✔ ✔ +// *Sigma ✔ ✔ +// *Slice ✔ ✔ +// *SliceToArrayPointer ✔ ✔ +// *SliceToArray ✔ ✔ +// *Store ✔ ✔ +// *StringLookup ✔ ✔ +// *Type ✔ (type) +// *TypeAssert ✔ ✔ +// *UnOp ✔ ✔ +// *Unreachable ✔ // // Other key types in this package include: Program, Package, Function // and BasicBlock. @@ -126,5 +127,4 @@ // of trying to determine corresponding elements across the four // domains of source locations, ast.Nodes, types.Objects, // ir.Values/Instructions. -// package ir diff --git a/vendor/honnef.co/go/tools/go/ir/dom.go b/vendor/honnef.co/go/tools/go/ir/dom.go index 13ecd47cb9..4febd284ba 100644 --- a/vendor/honnef.co/go/tools/go/ir/dom.go +++ b/vendor/honnef.co/go/tools/go/ir/dom.go @@ -29,12 +29,10 @@ import ( // Idom returns the block that immediately dominates b: // its parent in the dominator tree, if any. // The entry node (b.Index==0) does not have a parent. -// func (b *BasicBlock) Idom() *BasicBlock { return b.dom.idom } // Dominees returns the list of blocks that b immediately dominates: // its children in the dominator tree. -// func (b *BasicBlock) Dominees() []*BasicBlock { return b.dom.children } // Dominates reports whether b dominates c. @@ -50,7 +48,6 @@ func (a byDomPreorder) Less(i, j int) bool { return a[i].dom.pre < a[j].dom.pre // DomPreorder returns a new slice containing the blocks of f in // dominator tree preorder. 
-// func (f *Function) DomPreorder() []*BasicBlock { n := len(f.Blocks) order := make(byDomPreorder, n) @@ -68,7 +65,6 @@ type domInfo struct { // buildDomTree computes the dominator tree of f using the LT algorithm. // Precondition: all blocks are reachable (e.g. optimizeBlocks has been run). -// func buildDomTree(fn *Function) { // The step numbers refer to the original LT paper; the // reordering is due to Georgiadis. @@ -277,7 +273,6 @@ func buildPostDomTree(fn *Function) { // numberDomTree sets the pre- and post-order numbers of a depth-first // traversal of the dominator tree rooted at v. These are used to // answer dominance queries in constant time. -// func numberDomTree(v *BasicBlock, pre, post int32) (int32, int32) { v.dom.pre = pre pre++ @@ -292,7 +287,6 @@ func numberDomTree(v *BasicBlock, pre, post int32) (int32, int32) { // numberPostDomTree sets the pre- and post-order numbers of a depth-first // traversal of the post-dominator tree rooted at v. These are used to // answer post-dominance queries in constant time. -// func numberPostDomTree(v *BasicBlock, pre, post int32) (int32, int32) { v.pdom.pre = pre pre++ @@ -310,7 +304,6 @@ func numberPostDomTree(v *BasicBlock, pre, post int32) (int32, int32) { // computed by the LT algorithm by comparing against the dominance // relation computed by a naive Kildall-style forward dataflow // analysis (Algorithm 10.16 from the "Dragon" book). -// func sanityCheckDomTree(f *Function) { n := len(f.Blocks) @@ -395,6 +388,7 @@ func sanityCheckDomTree(f *Function) { // Printing functions ---------------------------------------- // printDomTree prints the dominator tree as text, using indentation. +// //lint:ignore U1000 used during debugging func printDomTreeText(buf *bytes.Buffer, v *BasicBlock, indent int) { fmt.Fprintf(buf, "%*s%s\n", 4*indent, "", v) @@ -405,6 +399,7 @@ func printDomTreeText(buf *bytes.Buffer, v *BasicBlock, indent int) { // printDomTreeDot prints the dominator tree of f in AT&T GraphViz // (.dot) format. +// //lint:ignore U1000 used during debugging func printDomTreeDot(buf io.Writer, f *Function) { fmt.Fprintln(buf, "//", f) @@ -432,6 +427,7 @@ func printDomTreeDot(buf io.Writer, f *Function) { } // printDomTree prints the dominator tree as text, using indentation. +// //lint:ignore U1000 used during debugging func printPostDomTreeText(buf io.Writer, v *BasicBlock, indent int) { fmt.Fprintf(buf, "%*s%s\n", 4*indent, "", v) @@ -442,6 +438,7 @@ func printPostDomTreeText(buf io.Writer, v *BasicBlock, indent int) { // printDomTreeDot prints the dominator tree of f in AT&T GraphViz // (.dot) format. +// //lint:ignore U1000 used during debugging func printPostDomTreeDot(buf io.Writer, f *Function) { fmt.Fprintln(buf, "//", f) diff --git a/vendor/honnef.co/go/tools/go/ir/emit.go b/vendor/honnef.co/go/tools/go/ir/emit.go index 7b23041f36..f6a1ef373b 100644 --- a/vendor/honnef.co/go/tools/go/ir/emit.go +++ b/vendor/honnef.co/go/tools/go/ir/emit.go @@ -20,7 +20,6 @@ import ( // emitNew emits to f a new (heap Alloc) instruction allocating an // object of type typ. pos is the optional source location. -// func emitNew(f *Function, typ types.Type, source ast.Node) *Alloc { v := &Alloc{Heap: true} v.setType(types.NewPointer(typ)) @@ -30,7 +29,6 @@ func emitNew(f *Function, typ types.Type, source ast.Node) *Alloc { // emitLoad emits to f an instruction to load the address addr into a // new temporary, and returns the value so defined. 
-// func emitLoad(f *Function, addr Value, source ast.Node) *Load { v := &Load{X: addr} v.setType(deref(addr.Type())) @@ -49,7 +47,6 @@ func emitRecv(f *Function, ch Value, commaOk bool, typ types.Type, source ast.No // emitDebugRef emits to f a DebugRef pseudo-instruction associating // expression e with value v. -// func emitDebugRef(f *Function, e ast.Expr, v Value, isAddr bool) { ref := makeDebugRef(f, e, v, isAddr) if ref == nil { @@ -89,7 +86,6 @@ func makeDebugRef(f *Function, e ast.Expr, v Value, isAddr bool) *DebugRef { // where op is an eager shift, logical or arithmetic operation. // (Use emitCompare() for comparisons and Builder.logicalBinop() for // non-eager operations.) -// func emitArith(f *Function, op token.Token, x, y Value, t types.Type, source ast.Node) Value { switch op { case token.SHL, token.SHR: @@ -124,7 +120,6 @@ func emitArith(f *Function, op token.Token, x, y Value, t types.Type, source ast // emitCompare emits to f code compute the boolean result of // comparison comparison 'x op y'. -// func emitCompare(f *Function, op token.Token, x, y Value, source ast.Node) Value { xt := x.Type().Underlying() yt := y.Type().Underlying() @@ -169,7 +164,6 @@ func emitCompare(f *Function, op token.Token, x, y Value, source ast.Node) Value // isValuePreserving returns true if a conversion from ut_src to // ut_dst is value-preserving, i.e. just a change of type. // Precondition: neither argument is a named type. -// func isValuePreserving(ut_src, ut_dst types.Type) bool { // Identical underlying types? if types.IdenticalIgnoreTags(ut_dst, ut_src) { @@ -194,7 +188,6 @@ func isValuePreserving(ut_src, ut_dst types.Type) bool { // and returns the converted value. Implicit conversions are required // by language assignability rules in assignments, parameter passing, // etc. -// func emitConv(f *Function, val Value, t_dst types.Type, source ast.Node) Value { t_src := val.Type() @@ -210,8 +203,8 @@ func emitConv(f *Function, val Value, t_dst types.Type, source ast.Node) Value { tset_src := typeutil.NewTypeSet(ut_src) // Just a change of type, but not value or representation? - if tset_src.All(func(termSrc *typeparams.Term) bool { - return tset_dst.All(func(termDst *typeparams.Term) bool { + if tset_src.All(func(termSrc *types.Term) bool { + return tset_dst.All(func(termDst *types.Term) bool { return isValuePreserving(termSrc.Type().Underlying(), termDst.Type().Underlying()) }) }) { @@ -262,8 +255,8 @@ func emitConv(f *Function, val Value, t_dst types.Type, source ast.Node) Value { } // Conversion from slice to array pointer? - if tset_src.All(func(termSrc *typeparams.Term) bool { - return tset_dst.All(func(termDst *typeparams.Term) bool { + if tset_src.All(func(termSrc *types.Term) bool { + return tset_dst.All(func(termDst *types.Term) bool { if slice, ok := termSrc.Type().Underlying().(*types.Slice); ok { if ptr, ok := termDst.Type().Underlying().(*types.Pointer); ok { if arr, ok := ptr.Elem().Underlying().(*types.Array); ok && types.Identical(slice.Elem(), arr.Elem()) { @@ -279,11 +272,30 @@ func emitConv(f *Function, val Value, t_dst types.Type, source ast.Node) Value { return f.emit(c, source) } + // Conversion from slice to array. This is almost the same as converting from slice to array pointer, then + // dereferencing the pointer. Except that a nil slice can be converted to [0]T, whereas converting a nil slice to + // (*[0]T) results in a nil pointer, dereferencing which would panic. To hide the extra branching we use a dedicated + // instruction, SliceToArray. 
+ if tset_src.All(func(termSrc *types.Term) bool { + return tset_dst.All(func(termDst *types.Term) bool { + if slice, ok := termSrc.Type().Underlying().(*types.Slice); ok { + if arr, ok := termDst.Type().Underlying().(*types.Array); ok && types.Identical(slice.Elem(), arr.Elem()) { + return true + } + } + return false + }) + }) { + c := &SliceToArray{X: val} + c.setType(t_dst) + return f.emit(c, source) + } + // A representation-changing conversion? // At least one of {ut_src,ut_dst} must be *Basic. // (The other may be []byte or []rune.) - ok1 := tset_src.Any(func(term *typeparams.Term) bool { _, ok := term.Type().Underlying().(*types.Basic); return ok }) - ok2 := tset_dst.Any(func(term *typeparams.Term) bool { _, ok := term.Type().Underlying().(*types.Basic); return ok }) + ok1 := tset_src.Any(func(term *types.Term) bool { _, ok := term.Type().Underlying().(*types.Basic); return ok }) + ok2 := tset_dst.Any(func(term *types.Term) bool { _, ok := term.Type().Underlying().(*types.Basic); return ok }) if ok1 || ok2 { c := &Convert{X: val} c.setType(t_dst) @@ -295,21 +307,17 @@ func emitConv(f *Function, val Value, t_dst types.Type, source ast.Node) Value { // emitStore emits to f an instruction to store value val at location // addr, applying implicit conversions as required by assignability rules. -// func emitStore(f *Function, addr, val Value, source ast.Node) *Store { s := &Store{ Addr: addr, Val: emitConv(f, val, deref(addr.Type()), source), } - // make sure we call getMem after the call to emitConv, which may - // itself update the memory state f.emit(s, source) return s } // emitJump emits to f a jump to target, and updates the control-flow graph. // Postcondition: f.currentBlock is nil. -// func emitJump(f *Function, target *BasicBlock, source ast.Node) *Jump { b := f.currentBlock j := new(Jump) @@ -322,7 +330,6 @@ func emitJump(f *Function, target *BasicBlock, source ast.Node) *Jump { // emitIf emits to f a conditional jump to tblock or fblock based on // cond, and updates the control-flow graph. // Postcondition: f.currentBlock is nil. -// func emitIf(f *Function, cond Value, tblock, fblock *BasicBlock, source ast.Node) *If { b := f.currentBlock stmt := &If{Cond: cond} @@ -335,7 +342,6 @@ func emitIf(f *Function, cond Value, tblock, fblock *BasicBlock, source ast.Node // emitExtract emits to f an instruction to extract the index'th // component of tuple. It returns the extracted value. -// func emitExtract(f *Function, tuple Value, index int, source ast.Node) Value { e := &Extract{Tuple: tuple, Index: index} e.setType(tuple.Type().(*types.Tuple).At(index).Type()) @@ -344,7 +350,6 @@ func emitExtract(f *Function, tuple Value, index int, source ast.Node) Value { // emitTypeAssert emits to f a type assertion value := x.(t) and // returns the value. x.Type() must be an interface. -// func emitTypeAssert(f *Function, x Value, t types.Type, source ast.Node) Value { a := &TypeAssert{X: x, AssertedType: t} a.setType(t) @@ -353,7 +358,6 @@ func emitTypeAssert(f *Function, x Value, t types.Type, source ast.Node) Value { // emitTypeTest emits to f a type test value,ok := x.(t) and returns // a (value, ok) tuple. x.Type() must be an interface. -// func emitTypeTest(f *Function, x Value, t types.Type, source ast.Node) Value { a := &TypeAssert{ X: x, @@ -372,7 +376,6 @@ func emitTypeTest(f *Function, x Value, t types.Type, source ast.Node) Value { // Intended for wrapper methods. // Precondition: f does/will not use deferred procedure calls. // Postcondition: f.currentBlock is nil. 
-// func emitTailCall(f *Function, call *Call, source ast.Node) { tresults := f.Signature.Results() nr := tresults.Len() @@ -413,7 +416,6 @@ func emitTailCall(f *Function, call *Call, source ast.Node) { // If v is the address of a struct, the result will be the address of // a field; if it is the value of a struct, the result will be the // value of a field. -// func emitImplicitSelections(f *Function, v Value, indices []int, source ast.Node) Value { for _, index := range indices { // We may have a generic type containing a pointer, or a pointer to a generic type containing a struct. A @@ -450,7 +452,6 @@ func emitImplicitSelections(f *Function, v Value, indices []int, source ast.Node // will be the field's address; otherwise the result will be the // field's value. // Ident id is used for position and debug info. -// func emitFieldSelection(f *Function, v Value, index int, wantAddr bool, id *ast.Ident) Value { // We may have a generic type containing a pointer, or a pointer to a generic type containing a struct. A // pointer to a generic containing a pointer to a struct shouldn't be possible because the outer pointer gets @@ -484,12 +485,64 @@ func emitFieldSelection(f *Function, v Value, index int, wantAddr bool, id *ast. // zeroValue emits to f code to produce a zero value of type t, // and returns it. -// func zeroValue(f *Function, t types.Type, source ast.Node) Value { return emitConst(f, zeroConst(t)) } +type constKey struct { + typ types.Type + value constant.Value +} + func emitConst(f *Function, c Constant) Constant { - f.consts = append(f.consts, c) - return c + if f.consts == nil { + f.consts = map[constKey]constValue{} + } + + typ := c.Type() + var val constant.Value + switch c := c.(type) { + case *Const: + val = c.Value + case *ArrayConst, *GenericConst: + // These can only represent zero values, so all we need is the type + case *AggregateConst: + candidates, _ := f.aggregateConsts.At(c.typ) + for _, candidate := range candidates { + if c.equal(candidate) { + return candidate + } + } + + for i := range c.Values { + c.Values[i] = emitConst(f, c.Values[i].(Constant)) + } + + c.setBlock(f.Blocks[0]) + rands := c.Operands(nil) + updateOperandsReferrers(c, rands) + candidates = append(candidates, c) + f.aggregateConsts.Set(c.typ, candidates) + return c + + default: + panic(fmt.Sprintf("unexpected type %T", c)) + } + k := constKey{ + typ: typ, + value: val, + } + dup, ok := f.consts[k] + if ok { + return dup.c + } else { + c.setBlock(f.Blocks[0]) + f.consts[k] = constValue{ + c: c, + idx: len(f.consts), + } + rands := c.Operands(nil) + updateOperandsReferrers(c, rands) + return c + } } diff --git a/vendor/honnef.co/go/tools/go/ir/exits.go b/vendor/honnef.co/go/tools/go/ir/exits.go index 851b62c4f8..03aa2866ce 100644 --- a/vendor/honnef.co/go/tools/go/ir/exits.go +++ b/vendor/honnef.co/go/tools/go/ir/exits.go @@ -110,6 +110,19 @@ func (b *builder) buildExits(fn *Function) { // all of these call os.Exit after logging fn.NoReturn = AlwaysExits } + case "k8s.io/klog/v2": + switch obj.(*types.Func).FullName() { + case "k8s.io/klog/v2.Exit", + "k8s.io/klog/v2.ExitDepth", + "k8s.io/klog/v2.Exitf", + "k8s.io/klog/v2.Exitln", + "k8s.io/klog/v2.Fatal", + "k8s.io/klog/v2.FatalDepth", + "k8s.io/klog/v2.Fatalf", + "k8s.io/klog/v2.Fatalln": + // all of these call os.Exit after logging + fn.NoReturn = AlwaysExits + } } } @@ -330,7 +343,7 @@ func (b *builder) addUnreachables(fn *Function) { var c Call c.Call.Value = &Builtin{ name: "ir:noreturnWasPanic", - sig: types.NewSignature(nil, + sig: 
types.NewSignatureType(nil, nil, nil, types.NewTuple(), types.NewTuple(anonVar(types.Typ[types.Bool])), false, diff --git a/vendor/honnef.co/go/tools/go/ir/func.go b/vendor/honnef.co/go/tools/go/ir/func.go index ca42b5c0ea..4449b405d2 100644 --- a/vendor/honnef.co/go/tools/go/ir/func.go +++ b/vendor/honnef.co/go/tools/go/ir/func.go @@ -10,13 +10,15 @@ import ( "bytes" "fmt" "go/ast" - "go/constant" "go/format" "go/token" "go/types" "io" "os" + "sort" "strings" + + "honnef.co/go/tools/go/types/typeutil" ) // addEdge adds a control-flow graph edge from from to to. @@ -53,14 +55,12 @@ func (b *BasicBlock) Parent() *Function { return b.parent } // String returns a human-readable label of this block. // It is not guaranteed unique within the function. -// func (b *BasicBlock) String() string { return fmt.Sprintf("%d", b.Index) } // emit appends an instruction to the current basic block. // If the instruction defines a Value, it is returned. -// func (b *BasicBlock) emit(i Instruction, source ast.Node) Value { i.setSource(source) i.setBlock(b) @@ -112,7 +112,6 @@ func (b *BasicBlock) phis() []Instruction { // replacePred replaces all occurrences of p in b's predecessor list with q. // Ordinarily there should be at most one. -// func (b *BasicBlock) replacePred(p, q *BasicBlock) { for i, pred := range b.Preds { if pred == p { @@ -123,7 +122,6 @@ func (b *BasicBlock) replacePred(p, q *BasicBlock) { // replaceSucc replaces all occurrences of p in b's successor list with q. // Ordinarily there should be at most one. -// func (b *BasicBlock) replaceSucc(p, q *BasicBlock) { for i, succ := range b.Succs { if succ == p { @@ -135,7 +133,6 @@ func (b *BasicBlock) replaceSucc(p, q *BasicBlock) { // removePred removes all occurrences of p in b's // predecessor list and φ-nodes. // Ordinarily there should be at most one. -// func (b *BasicBlock) removePred(p *BasicBlock) { phis := b.phis() @@ -169,7 +166,6 @@ func (b *BasicBlock) removePred(p *BasicBlock) { // Destinations associated with unlabelled for/switch/select stmts. // We push/pop one of these as we enter/leave each construct and for // each BranchStmt we scan for the innermost target of the right type. -// type targets struct { tail *targets // rest of stack _break *BasicBlock @@ -180,7 +176,6 @@ type targets struct { // Destinations associated with a labelled block. // We populate these as labels are encountered in forward gotos or // labelled statements. -// type lblock struct { _goto *BasicBlock _break *BasicBlock @@ -189,9 +184,14 @@ type lblock struct { // labelledBlock returns the branch target associated with the // specified label, creating it if needed. -// func (f *Function) labelledBlock(label *ast.Ident) *lblock { - obj := f.Pkg.objectOf(label) + obj := f.Pkg.info.ObjectOf(label) + if obj == nil { + // Blank label, as in '_:' - don't store to f.lblocks, this label can never be referred to; just return a fresh + // lbock. + return &lblock{_goto: f.newBasicBlock(label.Name)} + } + lb := f.lblocks[obj] if lb == nil { lb = &lblock{_goto: f.newBasicBlock(label.Name)} @@ -205,7 +205,6 @@ func (f *Function) labelledBlock(label *ast.Ident) *lblock { // addParam adds a (non-escaping) parameter to f.Params of the // specified name, type and source position. 
-// func (f *Function) addParam(name string, typ types.Type, source ast.Node) *Parameter { var b *BasicBlock if len(f.Blocks) > 0 { @@ -240,7 +239,6 @@ func (f *Function) addParamObj(obj types.Object, source ast.Node) *Parameter { // addSpilledParam declares a parameter that is pre-spilled to the // stack; the function body will load/store the spilled location. // Subsequent lifting will eliminate spills where possible. -// func (f *Function) addSpilledParam(obj types.Object, source ast.Node) { param := f.addParamObj(obj, source) spill := &Alloc{} @@ -255,7 +253,6 @@ func (f *Function) addSpilledParam(obj types.Object, source ast.Node) { // startBody initializes the function prior to generating IR code for its body. // Precondition: f.Type() already set. -// func (f *Function) startBody() { entry := f.newBasicBlock("entry") f.currentBlock = entry @@ -304,7 +301,6 @@ func (f *Function) exitBlock() { // f.startBody() was called. // Postcondition: // len(f.Params) == len(f.Signature.Params) + (f.Signature.Recv() ? 1 : 0) -// func (f *Function) createSyntacticParams(recv *ast.FieldList, functype *ast.FuncType) { // Receiver (at most one inner iteration). if recv != nil { @@ -366,141 +362,85 @@ func numberNodes(f *Function) { } } +func updateOperandsReferrers(instr Instruction, ops []*Value) { + for _, op := range ops { + if r := *op; r != nil { + if refs := (*op).Referrers(); refs != nil { + if len(*refs) == 0 { + // per median, each value has two referrers, so we can avoid one call into growslice + // + // Note: we experimented with allocating + // sequential scratch space, but we + // couldn't find a value that gave better + // performance than making many individual + // allocations + *refs = make([]Instruction, 1, 2) + (*refs)[0] = instr + } else { + *refs = append(*refs, instr) + } + } + } + } +} + // buildReferrers populates the def/use information in all non-nil // Value.Referrers slice. // Precondition: all such slices are initially empty. func buildReferrers(f *Function) { var rands []*Value + for _, b := range f.Blocks { for _, instr := range b.Instrs { rands = instr.Operands(rands[:0]) // recycle storage - for _, rand := range rands { - if r := *rand; r != nil { - if ref := r.Referrers(); ref != nil { - if len(*ref) == 0 { - // per median, each value has two referrers, so we can avoid one call into growslice - // - // Note: we experimented with allocating - // sequential scratch space, but we - // couldn't find a value that gave better - // performance than making many individual - // allocations - *ref = make([]Instruction, 1, 2) - (*ref)[0] = instr - } else { - *ref = append(*ref, instr) - } - } - } - } + updateOperandsReferrers(instr, rands) } } + + for _, c := range f.consts { + rands = c.c.Operands(rands[:0]) + updateOperandsReferrers(c.c, rands) + } } func (f *Function) emitConsts() { - if len(f.Blocks) == 0 { + defer func() { f.consts = nil + f.aggregateConsts = typeutil.Map[[]*AggregateConst]{} + }() + + if len(f.Blocks) == 0 { return } // TODO(dh): our deduplication only works on booleans and // integers. other constants are represented as pointers to // things. 
- if len(f.consts) == 0 { - return - } else if len(f.consts) <= 32 { - f.emitConstsFew() - } else { - f.emitConstsMany() - } -} - -func (f *Function) emitConstsFew() { - dedup := make([]Constant, 0, 32) + head := make([]constValue, 0, len(f.consts)) for _, c := range f.consts { - if len(*c.Referrers()) == 0 { - continue - } - found := false - for _, d := range dedup { - if c.equal(d) { - replaceAll(c, d) - found = true - break - } - } - if !found { - dedup = append(dedup, c) + if len(*c.c.Referrers()) == 0 { + // TODO(dh): killing a const may make other consts dead, too + killInstruction(c.c) + } else { + head = append(head, c) } } - - instrs := make([]Instruction, len(f.Blocks[0].Instrs)+len(dedup)) - for i, c := range dedup { - instrs[i] = c - c.setBlock(f.Blocks[0]) - } - copy(instrs[len(dedup):], f.Blocks[0].Instrs) - f.Blocks[0].Instrs = instrs - f.consts = nil -} - -func (f *Function) emitConstsMany() { - type constKey struct { - typ types.Type - value constant.Value + sort.Slice(head, func(i, j int) bool { + return head[i].idx < head[j].idx + }) + entry := f.Blocks[0] + instrs := make([]Instruction, 0, len(entry.Instrs)+len(head)) + for _, c := range head { + instrs = append(instrs, c.c) } - - m := make(map[constKey]Value, len(f.consts)) - areNil := 0 - for i, c := range f.consts { - if len(*c.Referrers()) == 0 { - f.consts[i] = nil - areNil++ - continue + f.aggregateConsts.Iterate(func(key types.Type, value []*AggregateConst) { + for _, c := range value { + instrs = append(instrs, c) } + }) - var typ types.Type - var val constant.Value - switch c := c.(type) { - case *Const: - typ = c.typ - val = c.Value - case *ArrayConst: - // ArrayConst can only encode zero constants, so all we need is the type - typ = c.typ - case *AggregateConst: - // ArrayConst can only encode zero constants, so all we need is the type - typ = c.typ - case *GenericConst: - typ = c.typ - default: - panic(fmt.Sprintf("unexpected type %T", c)) - } - k := constKey{ - typ: typ, - value: val, - } - if dup, ok := m[k]; !ok { - m[k] = c - } else { - f.consts[i] = nil - areNil++ - replaceAll(c, dup) - } - } - - instrs := make([]Instruction, len(f.Blocks[0].Instrs)+len(f.consts)-areNil) - i := 0 - for _, c := range f.consts { - if c != nil { - instrs[i] = c - c.setBlock(f.Blocks[0]) - i++ - } - } - copy(instrs[i:], f.Blocks[0].Instrs) - f.Blocks[0].Instrs = instrs - f.consts = nil + instrs = append(instrs, entry.Instrs...) + entry.Instrs = instrs } // buildFakeExits ensures that every block in the function is @@ -593,7 +533,12 @@ func (f *Function) finishBody() { buildPostDomTree(f) if f.Prog.mode&NaiveForm == 0 { - lift(f) + for lift(f) { + } + if doSimplifyConstantCompositeValues { + for simplifyConstantCompositeValues(f) { + } + } } // emit constants after lifting, because lifting may produce new constants, but before other variable splitting, @@ -652,7 +597,6 @@ func (f *Function) RemoveNilBlocks() { // removeNilBlocks eliminates nils from f.Blocks and updates each // BasicBlock.Index. Use this after any pass that may delete blocks. -// func (f *Function) removeNilBlocks() { j := 0 for _, b := range f.Blocks { @@ -673,7 +617,6 @@ func (f *Function) removeNilBlocks() { // functions will include full debug info. This greatly increases the // size of the instruction stream, and causes Functions to depend upon // the ASTs, potentially keeping them live in memory for longer. -// func (pkg *Package) SetDebugMode(debug bool) { // TODO(adonovan): do we want ast.File granularity? 
pkg.debug = debug @@ -687,7 +630,6 @@ func (f *Function) debugInfo() bool { // addNamedLocal creates a local variable, adds it to function f and // returns it. Its name and type are taken from obj. Subsequent // calls to f.lookup(obj) will return the same local. -// func (f *Function) addNamedLocal(obj types.Object, source ast.Node) *Alloc { l := f.addLocal(obj.Type(), source) f.objects[obj] = l @@ -700,7 +642,6 @@ func (f *Function) addLocalForIdent(id *ast.Ident) *Alloc { // addLocal creates an anonymous local variable of type typ, adds it // to function f and returns it. pos is the optional source location. -// func (f *Function) addLocal(typ types.Type, source ast.Node) *Alloc { v := &Alloc{} v.setType(types.NewPointer(typ)) @@ -713,7 +654,6 @@ func (f *Function) addLocal(typ types.Type, source ast.Node) *Alloc { // that is local to function f or one of its enclosing functions. // If escaping, the reference comes from a potentially escaping pointer // expression and the referent must be heap-allocated. -// func (f *Function) lookup(obj types.Object, escaping bool) Value { if v, ok := f.objects[obj]; ok { if alloc, ok := v.(*Alloc); ok && escaping { @@ -750,13 +690,14 @@ func (f *Function) emit(instr Instruction, source ast.Node) Value { // The specific formatting rules are not guaranteed and may change. // // Examples: -// "math.IsNaN" // a package-level function -// "(*bytes.Buffer).Bytes" // a declared method or a wrapper -// "(*bytes.Buffer).Bytes$thunk" // thunk (func wrapping method; receiver is param 0) -// "(*bytes.Buffer).Bytes$bound" // bound (func wrapping method; receiver supplied by closure) -// "main.main$1" // an anonymous function in main -// "main.init#1" // a declared init function -// "main.init" // the synthesized package initializer +// +// "math.IsNaN" // a package-level function +// "(*bytes.Buffer).Bytes" // a declared method or a wrapper +// "(*bytes.Buffer).Bytes$thunk" // thunk (func wrapping method; receiver is param 0) +// "(*bytes.Buffer).Bytes$bound" // bound (func wrapping method; receiver supplied by closure) +// "main.main$1" // an anonymous function in main +// "main.init#1" // a declared init function +// "main.init" // the synthesized package initializer // // When these functions are referred to from within the same package // (i.e. from == f.Pkg.Object), they are rendered without the package path. @@ -766,7 +707,6 @@ func (f *Function) emit(instr Instruction, source ast.Node) Value { // (But two methods may have the same name "(T).f" if one is a synthetic // wrapper promoting a non-exported method "f" from another package; in // that case, the strings are equal but the identifiers "f" are distinct.) -// func (f *Function) RelString(from *types.Package) string { // Anonymous? if f.parent != nil { @@ -920,6 +860,10 @@ func WriteFunction(buf *bytes.Buffer, f *Function) { default: buf.WriteString(instr.String()) } + if instr != nil && instr.Comment() != "" { + buf.WriteString(" # ") + buf.WriteString(instr.Comment()) + } buf.WriteString("\n") if f.Prog.mode&PrintSource != 0 { @@ -950,7 +894,6 @@ func WriteFunction(buf *bytes.Buffer, f *Function) { // newBasicBlock adds to f a new basic block and returns it. It does // not automatically become the current block for subsequent calls to emit. // comment is an optional string for more readable debugging output. 
-// func (f *Function) newBasicBlock(comment string) *BasicBlock { var instrs []Instruction if len(f.functionBody.scratchInstructions) > 0 { @@ -985,7 +928,6 @@ func (f *Function) newBasicBlock(comment string) *BasicBlock { // "reflect" package, etc. // // TODO(adonovan): think harder about the API here. -// func (prog *Program) NewFunction(name string, sig *types.Signature, provenance Synthetic) *Function { return &Function{Prog: prog, name: name, Signature: sig, Synthetic: provenance} } @@ -1004,3 +946,12 @@ func (f *Function) initHTML(name string) { f.wr = NewHTMLWriter("ir.html", rel, "") } } + +func killInstruction(instr Instruction) { + ops := instr.Operands(nil) + for _, op := range ops { + if refs := (*op).Referrers(); refs != nil { + *refs = removeInstr(*refs, instr) + } + } +} diff --git a/vendor/honnef.co/go/tools/go/ir/irutil/load.go b/vendor/honnef.co/go/tools/go/ir/irutil/load.go index 1e83effa1a..883447b7af 100644 --- a/vendor/honnef.co/go/tools/go/ir/irutil/load.go +++ b/vendor/honnef.co/go/tools/go/ir/irutil/load.go @@ -13,6 +13,7 @@ import ( "honnef.co/go/tools/go/ir" + //lint:ignore SA1019 go/loader is deprecated, but works fine for our tests "golang.org/x/tools/go/loader" "golang.org/x/tools/go/packages" ) @@ -39,7 +40,6 @@ type Options struct { // packages with well-typed syntax trees. // // The mode parameter controls diagnostics and checking during IR construction. -// func Packages(initial []*packages.Package, mode ir.BuilderMode, opts *Options) (*ir.Program, []*ir.Package) { return doPackages(initial, mode, false, opts) } @@ -61,7 +61,6 @@ func Packages(initial []*packages.Package, mode ir.BuilderMode, opts *Options) ( // well-typed syntax trees. // // The mode parameter controls diagnostics and checking during IR construction. -// func AllPackages(initial []*packages.Package, mode ir.BuilderMode, opts *Options) (*ir.Program, []*ir.Package) { return doPackages(initial, mode, true, opts) } @@ -112,7 +111,6 @@ func doPackages(initial []*packages.Package, mode ir.BuilderMode, deps bool, opt // // Deprecated: use golang.org/x/tools/go/packages and the Packages // function instead; see ir.ExampleLoadPackages. -// func CreateProgram(lprog *loader.Program, mode ir.BuilderMode) *ir.Program { prog := ir.NewProgram(lprog.Fset, mode) @@ -139,7 +137,6 @@ func CreateProgram(lprog *loader.Program, mode ir.BuilderMode) *ir.Program { // The operation fails if there were any type-checking or import errors. // // See ../ir/example_test.go for an example. -// func BuildPackage(tc *types.Config, fset *token.FileSet, pkg *types.Package, files []*ast.File, mode ir.BuilderMode) (*ir.Package, *types.Info, error) { if fset == nil { panic("no token.FileSet") diff --git a/vendor/honnef.co/go/tools/go/ir/irutil/switch.go b/vendor/honnef.co/go/tools/go/ir/irutil/switch.go index e7654e0081..afe899d86e 100644 --- a/vendor/honnef.co/go/tools/go/ir/irutil/switch.go +++ b/vendor/honnef.co/go/tools/go/ir/irutil/switch.go @@ -55,7 +55,6 @@ type TypeCase struct { // A type switch may contain duplicate types, or types assignable // to an interface type also in the list. // TODO(adonovan): eliminate such duplicates. -// type Switch struct { Start *ir.BasicBlock // block containing start of if/else chain X ir.Value // the switch operand @@ -103,7 +102,6 @@ func (sw *Switch) String() string { // Switches may even be inferred from if/else- or goto-based control flow. // (In general, the control flow constructs of the source program // cannot be faithfully reproduced from the IR.) 
-// func Switches(fn *ir.Function) []Switch { // Traverse the CFG in dominance order, so we don't // enter an if/else-chain in the middle. @@ -227,7 +225,6 @@ func typeSwitch(sw *Switch, y ir.Value, T types.Type, seen map[*ir.BasicBlock]bo // isComparisonBlock returns the operands (v, k) if a block ends with // a comparison v==k, where k is a compile-time constant. -// func isComparisonBlock(b *ir.BasicBlock) (v ir.Value, k *ir.Const) { if n := len(b.Instrs); n >= 2 { if i, ok := b.Instrs[n-1].(*ir.If); ok { @@ -246,7 +243,6 @@ func isComparisonBlock(b *ir.BasicBlock) (v ir.Value, k *ir.Const) { // isTypeAssertBlock returns the operands (y, x, T) if a block ends with // a type assertion "if y, ok := x.(T); ok {". -// func isTypeAssertBlock(b *ir.BasicBlock) (y, x ir.Value, T types.Type) { if n := len(b.Instrs); n >= 4 { if i, ok := b.Instrs[n-1].(*ir.If); ok { diff --git a/vendor/honnef.co/go/tools/go/ir/irutil/visit.go b/vendor/honnef.co/go/tools/go/ir/irutil/visit.go index f6d0503ddd..f2135dca4b 100644 --- a/vendor/honnef.co/go/tools/go/ir/irutil/visit.go +++ b/vendor/honnef.co/go/tools/go/ir/irutil/visit.go @@ -18,7 +18,6 @@ import "honnef.co/go/tools/go/ir" // synthetic wrappers. // // Precondition: all packages are built. -// func AllFunctions(prog *ir.Program) map[*ir.Function]bool { visit := visitor{ prog: prog, diff --git a/vendor/honnef.co/go/tools/go/ir/lift.go b/vendor/honnef.co/go/tools/go/ir/lift.go index 8ab67eb8ee..8ac8330dc2 100644 --- a/vendor/honnef.co/go/tools/go/ir/lift.go +++ b/vendor/honnef.co/go/tools/go/ir/lift.go @@ -63,8 +63,7 @@ const debugLifting = false // // domFrontier's methods mutate the slice's elements but not its // length, so their receivers needn't be pointers. -// -type domFrontier [][]*BasicBlock +type domFrontier BlockMap[[]*BasicBlock] func (df domFrontier) add(u, v *BasicBlock) { df[u.Index] = append(df[u.Index], v) @@ -105,7 +104,7 @@ func buildDomFrontier(fn *Function) domFrontier { return df } -type postDomFrontier [][]*BasicBlock +type postDomFrontier BlockMap[[]*BasicBlock] func (rdf postDomFrontier) add(u, v *BasicBlock) { rdf[u.Index] = append(rdf[u.Index], v) @@ -156,6 +155,19 @@ func clearInstrs(instrs []Instruction) { } } +func numberNodesPerBlock(f *Function) { + for _, b := range f.Blocks { + var base ID + for _, instr := range b.Instrs { + if instr == nil { + continue + } + instr.setID(base) + base++ + } + } +} + // lift replaces local and new Allocs accessed only with // load/store by IR registers, inserting φ- and σ-nodes where necessary. // The result is a program in pruned SSI form. @@ -164,8 +176,7 @@ func clearInstrs(instrs []Instruction) { // - fn has no dead blocks (blockopt has run). // - Def/use info (Operands and Referrers) is up-to-date. // - The dominator tree is up-to-date. -// -func lift(fn *Function) { +func lift(fn *Function) bool { // TODO(adonovan): opt: lots of little optimizations may be // worthwhile here, especially if they cause us to avoid // buildDomFrontier. For example: @@ -187,8 +198,8 @@ func lift(fn *Function) { var df domFrontier var rdf postDomFrontier var closure *closure - var newPhis newPhiMap - var newSigmas newSigmaMap + var newPhis BlockMap[[]newPhi] + var newSigmas BlockMap[[]newSigma] // During this pass we will replace some BasicBlock.Instrs // (allocs, loads and stores) with nil, keeping a count in @@ -204,24 +215,35 @@ func lift(fn *Function) { // Determine which allocs we can lift and number them densely. // The renaming phase uses this numbering for compact maps. 
numAllocs := 0 + + instructions := make(BlockMap[liftInstructions], len(fn.Blocks)) + for i := range instructions { + instructions[i].insertInstructions = map[Instruction][]Instruction{} + } + + // Number nodes, for liftable + numberNodesPerBlock(fn) + for _, b := range fn.Blocks { b.gaps = 0 b.rundefers = 0 + for _, instr := range b.Instrs { switch instr := instr.(type) { case *Alloc: - if !liftable(instr) { + if !liftable(instr, instructions) { instr.index = -1 continue } + if numAllocs == 0 { df = buildDomFrontier(fn) rdf = buildPostDomFrontier(fn) if len(fn.Blocks) > 2 { closure = transitiveClosure(fn) } - newPhis = make(newPhiMap, len(fn.Blocks)) - newSigmas = make(newSigmaMap, len(fn.Blocks)) + newPhis = make(BlockMap[[]newPhi], len(fn.Blocks)) + newSigmas = make(BlockMap[[]newSigma], len(fn.Blocks)) if debugLifting { title := false @@ -236,7 +258,6 @@ func lift(fn *Function) { } } } - liftAlloc(closure, df, rdf, instr, newPhis, newSigmas) instr.index = numAllocs numAllocs++ case *Defer: @@ -248,6 +269,39 @@ func lift(fn *Function) { } if numAllocs > 0 { + for _, b := range fn.Blocks { + work := instructions[b.Index] + for _, rename := range work.renameAllocs { + for _, instr_ := range b.Instrs[rename.startingAt:] { + replace(instr_, rename.from, rename.to) + } + } + } + + for _, b := range fn.Blocks { + work := instructions[b.Index] + if len(work.insertInstructions) != 0 { + newInstrs := make([]Instruction, 0, len(fn.Blocks)+len(work.insertInstructions)*3) + for _, instr := range b.Instrs { + if add, ok := work.insertInstructions[instr]; ok { + newInstrs = append(newInstrs, add...) + } + newInstrs = append(newInstrs, instr) + } + b.Instrs = newInstrs + } + } + + // TODO(dh): remove inserted allocs that end up unused after lifting. + + for _, b := range fn.Blocks { + for _, instr := range b.Instrs { + if instr, ok := instr.(*Alloc); ok && instr.index >= 0 { + liftAlloc(closure, df, rdf, instr, newPhis, newSigmas) + } + } + } + // renaming maps an alloc (keyed by index) to its replacement // value. Initially the renaming contains nil, signifying the // zero constant of the appropriate type; we construct the @@ -380,6 +434,8 @@ func lift(fn *Function) { fn.Locals[i] = nil } fn.Locals = fn.Locals[:j] + + return numAllocs > 0 } func hasDirectReferrer(instr Instruction) bool { @@ -394,7 +450,7 @@ func hasDirectReferrer(instr Instruction) bool { return false } -func markLiveNodes(blocks []*BasicBlock, newPhis newPhiMap, newSigmas newSigmaMap) { +func markLiveNodes(blocks []*BasicBlock, newPhis BlockMap[[]newPhi], newSigmas BlockMap[[]newSigma]) { // Phis and sigmas may become dead due to optimization passes. We may also insert more nodes than strictly // necessary, e.g. sigma nodes for constants, which will never be used. @@ -461,7 +517,7 @@ func markLiveSigma(sigma *Sigma) { // and replaces trivial phis with non-phi alternatives. Phi // nodes where all edges are identical, or consist of only the phi // itself and one other value, may be replaced with the value. 
-func simplifyPhisAndSigmas(newPhis newPhiMap, newSigmas newSigmaMap) { +func simplifyPhisAndSigmas(newPhis BlockMap[[]newPhi], newSigmas BlockMap[[]newSigma]) { // temporary numbering of values used in phis so that we can build map keys var id ID for _, npList := range newPhis { @@ -652,7 +708,7 @@ func (s *BlockSet) Take() int { type closure struct { span []uint32 - reachables []interval + reachables BlockMap[interval] } type interval uint32 @@ -696,6 +752,8 @@ func (c closure) reachable(id int) []interval { } func (c closure) walk(current *BasicBlock, b *BasicBlock, visited []bool) { + // TODO(dh): the 'current' argument seems to be unused + // TODO(dh): there's no reason for this to be a method visited[b.Index] = true for _, succ := range b.Succs { if visited[succ.Index] { @@ -707,7 +765,7 @@ func (c closure) walk(current *BasicBlock, b *BasicBlock, visited []bool) { } func transitiveClosure(fn *Function) *closure { - reachable := make([]bool, len(fn.Blocks)) + reachable := make(BlockMap[bool], len(fn.Blocks)) c := &closure{} c.span = make([]uint32, len(fn.Blocks)+1) @@ -763,13 +821,65 @@ type newSigma struct { sigmas []*Sigma } -// newPhiMap records for each basic block, the set of newPhis that -// must be prepended to the block. -type newPhiMap [][]newPhi -type newSigmaMap [][]newSigma +type liftInstructions struct { + insertInstructions map[Instruction][]Instruction + renameAllocs []struct { + from *Alloc + to *Alloc + startingAt int + } +} + +// liftable determines if alloc can be lifted, and records instructions to split partially liftable allocs. +// +// In the trivial case, all uses of the alloc can be lifted. This is the case when it is only used for storing into and +// loading from. In that case, no instructions are recorded. +// +// In the more complex case, the alloc is used for storing into and loading from, but it is also used as a value, for +// example because it gets passed to a function, e.g. fn(&x). In this case, uses of the alloc fall into one of two +// categories: those that can be lifted and those that can't. A boundary forms between these two categories in the +// function's control flow: Once an unliftable use is encountered, the alloc is no longer liftable for the remainder of +// the basic block the use is in, nor in any blocks reachable from it. +// +// We record instructions that split the alloc into two allocs: one that is used in liftable uses, and one that is used +// in unliftable uses. Whenever we encounter a boundary between liftable and unliftable uses or blocks, we emit a pair +// of Load and Store that copy the value from the liftable alloc into the unliftable alloc. Taking these instructions +// into account, the normal lifting machinery will completely lift the liftable alloc, store the correct lifted values +// into the unliftable alloc, and will not at all lift the unliftable alloc. 
+// +// In Go syntax, the transformation looks somewhat like this: +// +// func foo() { +// x := 32 +// if cond { +// println(x) +// escape(&x) +// println(x) +// } else { +// println(x) +// } +// println(x) +// } +// +// transforms into +// +// func fooSplitAlloc() { +// x := 32 +// var x_ int +// if cond { +// println(x) +// x_ = x +// escape(&x_) +// println(x_) +// } else { +// println(x) +// x_ = x +// } +// println(x_) +// } +func liftable(alloc *Alloc, instructions BlockMap[liftInstructions]) bool { + fn := alloc.block.parent -func liftable(alloc *Alloc) bool { - fn := alloc.Parent() // Don't lift named return values in functions that defer // calls that may recover from panic. if fn.hasDefer { @@ -780,24 +890,247 @@ func liftable(alloc *Alloc) bool { } } - for _, instr := range *alloc.Referrers() { + type blockDesc struct { + // is the block (partially) unliftable, because it contains unliftable instructions or is reachable by an unliftable block + isUnliftable bool + hasLiftableLoad bool + hasLiftableOther bool + // we need to emit stores in predecessors because the unliftable use is in a phi + storeInPreds bool + + lastLiftable int + firstUnliftable int + } + blocks := make(BlockMap[blockDesc], len(fn.Blocks)) + for _, b := range fn.Blocks { + blocks[b.Index].lastLiftable = -1 + blocks[b.Index].firstUnliftable = len(b.Instrs) + 1 + } + + // Look at all uses of the alloc and deduce which blocks have liftable or unliftable instructions. + for _, instr := range alloc.referrers { + // Find the first unliftable use + + desc := &blocks[instr.Block().Index] + hasUnliftable := false + inHead := false switch instr := instr.(type) { case *Store: if instr.Val == alloc { - return false // address used as value - } - if instr.Addr != alloc { - panic("Alloc.Referrers is inconsistent") + hasUnliftable = true } case *Load: - if instr.X != alloc { - panic("Alloc.Referrers is inconsistent") + case *DebugRef: + case *Phi, *Sigma: + inHead = true + hasUnliftable = true + default: + hasUnliftable = true + } + + if hasUnliftable { + desc.isUnliftable = true + if int(instr.ID()) < desc.firstUnliftable { + desc.firstUnliftable = int(instr.ID()) + } + if inHead { + desc.storeInPreds = true + desc.firstUnliftable = 0 } + } + } + for _, instr := range alloc.referrers { + // Find the last liftable use, taking the previously calculated firstUnliftable into consideration + + desc := &blocks[instr.Block().Index] + if int(instr.ID()) >= desc.firstUnliftable { + continue + } + hasLiftable := false + switch instr := instr.(type) { + case *Store: + if instr.Val != alloc { + desc.hasLiftableOther = true + hasLiftable = true + } + case *Load: + desc.hasLiftableLoad = true + hasLiftable = true case *DebugRef: - // ok - default: - return false + desc.hasLiftableOther = true + } + if hasLiftable { + if int(instr.ID()) > desc.lastLiftable { + desc.lastLiftable = int(instr.ID()) + } + } + } + + for i := range blocks { + // Update firstUnliftable to be one after lastLiftable. We do this to include the unliftable's preceding + // DebugRefs in the renaming. + blocks[i].firstUnliftable = blocks[i].lastLiftable + 1 + } + + // If a block is reachable by a (partially) unliftable block, then the entirety of the block is unliftable. In that + // case, stores have to be inserted in the predecessors. + // + // TODO(dh): this isn't always necessary. If the block is reachable by itself, i.e. 
part of a loop, then if the + // Alloc instruction is itself part of that loop, then there is a subset of instructions in the loop that can be + // lifted. For example: + // + // for { + // x := 42 + // println(x) + // escape(&x) + // } + // + // The x that escapes in one iteration of the loop isn't the same x that we read from on the next iteration. + seen := make(BlockMap[bool], len(fn.Blocks)) + var dfs func(b *BasicBlock) + dfs = func(b *BasicBlock) { + if seen[b.Index] { + return + } + seen[b.Index] = true + desc := &blocks[b.Index] + desc.hasLiftableLoad = false + desc.hasLiftableOther = false + desc.isUnliftable = true + desc.firstUnliftable = 0 + desc.storeInPreds = true + for _, succ := range b.Succs { + dfs(succ) + } + } + for _, b := range fn.Blocks { + if blocks[b.Index].isUnliftable { + for _, succ := range b.Succs { + dfs(succ) + } + } + } + + hasLiftableLoad := false + hasLiftableOther := false + hasUnliftable := false + for _, b := range fn.Blocks { + desc := blocks[b.Index] + hasLiftableLoad = hasLiftableLoad || desc.hasLiftableLoad + hasLiftableOther = hasLiftableOther || desc.hasLiftableOther + if desc.isUnliftable { + hasUnliftable = true + } + } + if !hasLiftableLoad && !hasLiftableOther { + // There are no liftable uses + return false + } else if !hasUnliftable { + // The alloc is entirely liftable without splitting + return true + } else if !hasLiftableLoad { + // The alloc is not entirely liftable, and the only liftable uses are stores. While some of those stores could + // get lifted away, it would also lead to an infinite loop when lifting to a fixpoint, because the newly created + // allocs also get stored into repeatable and that's their only liftable uses. + return false + } + + // We need to insert stores for the new alloc. If a (partially) unliftable block has no unliftable + // predecessors and the use isn't in a phi node, then the store can be inserted right before the unliftable use. + // Otherwise, stores have to be inserted at the end of all liftable predecessors. + + newAlloc := &Alloc{Heap: true} + newAlloc.setBlock(alloc.block) + newAlloc.setType(alloc.typ) + newAlloc.setSource(alloc.source) + newAlloc.index = -1 + newAlloc.comment = "split alloc" + + { + work := instructions[alloc.block.Index] + work.insertInstructions[alloc] = append(work.insertInstructions[alloc], newAlloc) + } + + predHasStore := make(BlockMap[bool], len(fn.Blocks)) + for _, b := range fn.Blocks { + desc := &blocks[b.Index] + bWork := &instructions[b.Index] + + if desc.isUnliftable { + bWork.renameAllocs = append(bWork.renameAllocs, struct { + from *Alloc + to *Alloc + startingAt int + }{ + alloc, newAlloc, int(desc.firstUnliftable), + }) + } + + if !desc.isUnliftable { + continue + } + + propagate := func(in *BasicBlock, before Instruction) { + load := &Load{ + X: alloc, + } + store := &Store{ + Addr: newAlloc, + Val: load, + } + load.setType(deref(alloc.typ)) + load.setBlock(in) + load.comment = "split alloc" + store.setBlock(in) + updateOperandReferrers(load) + updateOperandReferrers(store) + store.comment = "split alloc" + + entry := &instructions[in.Index] + entry.insertInstructions[before] = append(entry.insertInstructions[before], load, store) + } + + if desc.storeInPreds { + // emit stores at the end of liftable preds + for _, pred := range b.Preds { + if blocks[pred.Index].isUnliftable { + continue + } + + if !alloc.block.Dominates(pred) { + // Consider this cfg: + // + // 1 + // /| + // / | + // ↙ ↓ + // 2--→3 + // + // with an Alloc in block 2. 
It doesn't make sense to insert a store in block 1 for the jump to + // block 3, because 1 can never see the Alloc in the first place. + // + // Ignoring phi nodes, an Alloc always dominates all of its uses, and phi nodes don't matter here, + // because for the incoming edges that do matter, we do emit the stores. + + continue + } + + if predHasStore[pred.Index] { + // Don't generate redundant propagations. Not only is it unnecessary, it can lead to infinite loops + // when trying to lift to a fix point, because redundant stores are liftable. + continue + } + + predHasStore[pred.Index] = true + + before := pred.Instrs[len(pred.Instrs)-1] + propagate(pred, before) + } + } else { + // emit store before the first unliftable use + before := b.Instrs[desc.firstUnliftable] + propagate(b, before) } } @@ -805,7 +1138,7 @@ func liftable(alloc *Alloc) bool { } // liftAlloc lifts alloc into registers and populates newPhis and newSigmas with all the φ- and σ-nodes it may require. -func liftAlloc(closure *closure, df domFrontier, rdf postDomFrontier, alloc *Alloc, newPhis newPhiMap, newSigmas newSigmaMap) { +func liftAlloc(closure *closure, df domFrontier, rdf postDomFrontier, alloc *Alloc, newPhis BlockMap[[]newPhi], newSigmas BlockMap[[]newSigma]) { fn := alloc.Parent() defblocks := fn.blockset(0) @@ -950,17 +1283,28 @@ func liftAlloc(closure *closure, df domFrontier, rdf postDomFrontier, alloc *All // replaceAll replaces all intraprocedural uses of x with y, // updating x.Referrers and y.Referrers. // Precondition: x.Referrers() != nil, i.e. x must be local to some function. -// func replaceAll(x, y Value) { var rands []*Value pxrefs := x.Referrers() pyrefs := y.Referrers() for _, instr := range *pxrefs { - rands = instr.Operands(rands[:0]) // recycle storage - for _, rand := range rands { - if *rand != nil { - if *rand == x { - *rand = y + switch instr := instr.(type) { + case *CompositeValue: + // Special case CompositeValue because it might have very large lists of operands + // + // OPT(dh): this loop is still expensive for large composite values + for i, rand := range instr.Values { + if rand == x { + instr.Values[i] = y + } + } + default: + rands = instr.Operands(rands[:0]) // recycle storage + for _, rand := range rands { + if *rand != nil { + if *rand == x { + *rand = y + } } } } @@ -995,7 +1339,6 @@ func replace(instr Instruction, x, y Value) { // renamed returns the value to which alloc is being renamed, // constructing it lazily if it's the implicit zero initialization. -// func renamed(fn *Function, renaming []Value, alloc *Alloc) Value { v := renaming[alloc.index] if v == nil { @@ -1141,6 +1484,10 @@ func splitOnNewInformation(u *BasicBlock, renaming *StackMap) { // A slice to array pointer conversion tells us the minimum length of the slice rename(instr.X, instr, CopyInfoUnspecified, i) i++ + case *SliceToArray: + // A slice to array conversion tells us the minimum length of the slice + rename(instr.X, instr, CopyInfoUnspecified, i) + i++ case *Slice: // Slicing tells us about some of the bounds off := 0 @@ -1203,8 +1550,7 @@ func splitOnNewInformation(u *BasicBlock, renaming *StackMap) { // renaming is a map from *Alloc (keyed by index number) to its // dominating stored value; newPhis[x] is the set of new φ-nodes to be // prepended to block x. 
-// -func rename(u *BasicBlock, renaming []Value, newPhis newPhiMap, newSigmas newSigmaMap) { +func rename(u *BasicBlock, renaming []Value, newPhis BlockMap[[]newPhi], newSigmas BlockMap[[]newSigma]) { // Each φ-node becomes the new name for its associated Alloc. for _, np := range newPhis[u.Index] { phi := np.phi @@ -1340,7 +1686,6 @@ func rename(u *BasicBlock, renaming []Value, newPhis newPhiMap, newSigmas newSig // fresh copy of the renaming map for each subtree. r := make([]Value, len(renaming)) for _, v := range u.dom.children { - // XXX add debugging copy(r, renaming) // on entry to a block, the incoming sigma nodes become the new values for their alloc @@ -1355,3 +1700,54 @@ func rename(u *BasicBlock, renaming []Value, newPhis newPhiMap, newSigmas newSig } } + +func simplifyConstantCompositeValues(fn *Function) bool { + changed := false + + for _, b := range fn.Blocks { + n := 0 + for _, instr := range b.Instrs { + replaced := false + + if cv, ok := instr.(*CompositeValue); ok { + ac := &AggregateConst{} + ac.typ = cv.typ + replaced = true + for _, v := range cv.Values { + if c, ok := v.(Constant); ok { + ac.Values = append(ac.Values, c) + } else { + replaced = false + break + } + } + if replaced { + replaceAll(cv, emitConst(fn, ac)) + killInstruction(cv) + } + + } + + if replaced { + changed = true + } else { + b.Instrs[n] = instr + n++ + } + } + + clearInstrs(b.Instrs[n:]) + b.Instrs = b.Instrs[:n] + } + + return changed +} + +func updateOperandReferrers(instr Instruction) { + for _, op := range instr.Operands(nil) { + refs := (*op).Referrers() + if refs != nil { + *refs = append(*refs, instr) + } + } +} diff --git a/vendor/honnef.co/go/tools/go/ir/lvalue.go b/vendor/honnef.co/go/tools/go/ir/lvalue.go index f676a1f7ab..119eed6c3b 100644 --- a/vendor/honnef.co/go/tools/go/ir/lvalue.go +++ b/vendor/honnef.co/go/tools/go/ir/lvalue.go @@ -15,7 +15,6 @@ import ( // An lvalue represents an assignable location that may appear on the // left-hand side of an assignment. This is a generalization of a // pointer to permit updates to elements of maps. -// type lvalue interface { store(fn *Function, v Value, source ast.Node) // stores v into the location load(fn *Function, source ast.Node) Value // loads the contents of the location @@ -52,11 +51,38 @@ func (a *address) typ() types.Type { return deref(a.addr.Type()) } +type compositeElement struct { + cv *CompositeValue + idx int + t types.Type + expr ast.Expr +} + +func (ce *compositeElement) load(fn *Function, source ast.Node) Value { + panic("not implemented") +} + +func (ce *compositeElement) store(fn *Function, v Value, source ast.Node) { + v = emitConv(fn, v, ce.t, source) + ce.cv.Values[ce.idx] = v + if ce.expr != nil { + // store.Val is v, converted for assignability. + emitDebugRef(fn, ce.expr, v, false) + } +} + +func (ce *compositeElement) address(fn *Function) Value { + panic("not implemented") +} + +func (ce *compositeElement) typ() types.Type { + return ce.t +} + // An element is an lvalue represented by m[k], the location of an // element of a map. These locations are not addressable // since pointers cannot be formed from them, but they do support // load() and store(). -// type element struct { m, k Value // map t types.Type // map element type @@ -90,7 +116,6 @@ func (e *element) typ() types.Type { // A blank is a dummy variable whose name is "_". // It is not reified: loads are illegal and stores are ignored. 
-// type blank struct{} func (bl blank) load(fn *Function, source ast.Node) Value { diff --git a/vendor/honnef.co/go/tools/go/ir/methods.go b/vendor/honnef.co/go/tools/go/ir/methods.go index fa45d1b9e9..b7903c8472 100644 --- a/vendor/honnef.co/go/tools/go/ir/methods.go +++ b/vendor/honnef.co/go/tools/go/ir/methods.go @@ -11,8 +11,6 @@ import ( "go/types" "honnef.co/go/tools/analysis/lint" - - "golang.org/x/exp/typeparams" ) // MethodValue returns the Function implementing method sel, building @@ -24,7 +22,6 @@ import ( // Thread-safe. // // EXCLUSIVE_LOCKS_ACQUIRED(prog.methodsMu) -// func (prog *Program) MethodValue(sel *types.Selection) *Function { if sel.Kind() != types.MethodVal { panic(fmt.Sprintf("MethodValue(%s) kind != MethodVal", sel)) @@ -46,7 +43,6 @@ func (prog *Program) MethodValue(sel *types.Selection) *Function { // LookupMethod returns the implementation of the method of type T // identified by (pkg, name). It returns nil if the method exists but // is abstract, and panics if T has no such method. -// func (prog *Program) LookupMethod(T types.Type, pkg *types.Package, name string) *Function { sel := prog.MethodSets.MethodSet(T).Lookup(pkg, name) if sel == nil { @@ -64,7 +60,7 @@ type methodSet struct { // Precondition: !isInterface(T). // EXCLUSIVE_LOCKS_REQUIRED(prog.methodsMu) func (prog *Program) createMethodSet(T types.Type) *methodSet { - mset, ok := prog.methodSets.At(T).(*methodSet) + mset, ok := prog.methodSets.At(T) if !ok { mset = &methodSet{mapping: make(map[string]*Function)} prog.methodSets.Set(T, mset) @@ -104,14 +100,13 @@ func (prog *Program) addMethod(mset *methodSet, sel *types.Selection) *Function // Thread-safe. // // EXCLUSIVE_LOCKS_ACQUIRED(prog.methodsMu) -// func (prog *Program) RuntimeTypes() []types.Type { prog.methodsMu.Lock() defer prog.methodsMu.Unlock() var res []types.Type - prog.methodSets.Iterate(func(T types.Type, v interface{}) { - if v.(*methodSet).complete { + prog.methodSets.Iterate(func(T types.Type, v *methodSet) { + if v.complete { res = append(res, T) } }) @@ -121,7 +116,7 @@ func (prog *Program) RuntimeTypes() []types.Type { // declaredFunc returns the concrete function/method denoted by obj. // Panic ensues if there is none. func (prog *Program) declaredFunc(obj *types.Func) *Function { - if origin := typeparams.OriginMethod(obj); origin != obj { + if origin := obj.Origin(); origin != obj { // Calling method on instantiated type, create a wrapper that calls the generic type's method base := prog.packageLevelValue(origin) return makeInstance(prog, base.(*Function), obj.Type().(*types.Signature), nil) @@ -148,7 +143,6 @@ func (prog *Program) declaredFunc(obj *types.Func) *Function { // TODO(adonovan): make this faster. It accounts for 20% of SSA build time. // // EXCLUSIVE_LOCKS_ACQUIRED(prog.methodsMu) -// func (prog *Program) needMethodsOf(T types.Type) { prog.methodsMu.Lock() prog.needMethods(T, false) @@ -159,10 +153,9 @@ func (prog *Program) needMethodsOf(T types.Type) { // Recursive case: skip => don't create methods for T. // // EXCLUSIVE_LOCKS_REQUIRED(prog.methodsMu) -// func (prog *Program) needMethods(T types.Type, skip bool) { // Each package maintains its own set of types it has visited. 
- if prevSkip, ok := prog.runtimeTypes.At(T).(bool); ok { + if prevSkip, ok := prog.runtimeTypes.At(T); ok { // needMethods(T) was previously called if !prevSkip || skip { return // already seen, with same or false 'skip' value @@ -195,7 +188,7 @@ func (prog *Program) needMethods(T types.Type, skip bool) { case *types.Basic: // nop - case *types.Interface, *typeparams.TypeParam: + case *types.Interface, *types.TypeParam: // nop---handled by recursion over method set. case *types.Pointer: diff --git a/vendor/honnef.co/go/tools/go/ir/mode.go b/vendor/honnef.co/go/tools/go/ir/mode.go index b0b2c92919..15b5a33f77 100644 --- a/vendor/honnef.co/go/tools/go/ir/mode.go +++ b/vendor/honnef.co/go/tools/go/ir/mode.go @@ -15,9 +15,8 @@ import ( // // *BuilderMode satisfies the flag.Value interface. Example: // -// var mode = ir.BuilderMode(0) -// func init() { flag.Var(&mode, "build", ir.BuilderModeDoc) } -// +// var mode = ir.BuilderMode(0) +// func init() { flag.Var(&mode, "build", ir.BuilderModeDoc) } type BuilderMode uint const ( diff --git a/vendor/honnef.co/go/tools/go/ir/print.go b/vendor/honnef.co/go/tools/go/ir/print.go index c5b51ca8a8..ad23d16dcd 100644 --- a/vendor/honnef.co/go/tools/go/ir/print.go +++ b/vendor/honnef.co/go/tools/go/ir/print.go @@ -23,7 +23,6 @@ import ( // Functions (including methods) and Globals use RelString and // all types are displayed with relType, so that only cross-package // references are package-qualified. -// func relName(v Value, i Instruction) string { if v == nil { return "" @@ -174,6 +173,7 @@ func (v *ChangeType) String() string { return printConv("ChangeType", v func (v *Convert) String() string { return printConv("Convert", v, v.X) } func (v *ChangeInterface) String() string { return printConv("ChangeInterface", v, v.X) } func (v *SliceToArrayPointer) String() string { return printConv("SliceToArrayPointer", v, v.X) } +func (v *SliceToArray) String() string { return printConv("SliceToArray", v, v.X) } func (v *MakeInterface) String() string { return printConv("MakeInterface", v, v.X) } func (v *MakeClosure) String() string { @@ -288,8 +288,8 @@ func (s *Jump) String() string { block = s.block.Succs[0].Index } str := fmt.Sprintf("Jump → b%d", block) - if s.Comment != "" { - str = fmt.Sprintf("%s # %s", str, s.Comment) + if s.Comment() != "" { + str = fmt.Sprintf("%s # %s", str, s.Comment()) } return str } @@ -326,6 +326,31 @@ func (s *ConstantSwitch) String() string { return b.String() } +func (v *CompositeValue) String() string { + var b bytes.Buffer + from := v.Parent().pkg() + fmt.Fprintf(&b, "CompositeValue <%s>", relType(v.Type(), from)) + if v.NumSet >= len(v.Values) { + // All values provided + fmt.Fprint(&b, " [all]") + } else if v.Bitmap.BitLen() == 0 { + // No values provided + fmt.Fprint(&b, " [none]") + } else { + // Some values provided + bits := []byte(fmt.Sprintf("%0*b", len(v.Values), &v.Bitmap)) + for i := 0; i < len(bits)/2; i++ { + o := len(bits) - 1 - i + bits[i], bits[o] = bits[o], bits[i] + } + fmt.Fprintf(&b, " [%s]", bits) + } + for _, vv := range v.Values { + fmt.Fprintf(&b, " %s", relName(vv, v)) + } + return b.String() +} + func (s *TypeSwitch) String() string { from := s.Parent().pkg() var b bytes.Buffer diff --git a/vendor/honnef.co/go/tools/go/ir/sanity.go b/vendor/honnef.co/go/tools/go/ir/sanity.go index 1788d0f137..b6c59c95fc 100644 --- a/vendor/honnef.co/go/tools/go/ir/sanity.go +++ b/vendor/honnef.co/go/tools/go/ir/sanity.go @@ -13,6 +13,8 @@ import ( "io" "os" "strings" + + "honnef.co/go/tools/go/types/typeutil" ) type 
sanity struct { @@ -30,7 +32,6 @@ type sanity struct { // // Sanity-checking is intended to facilitate the debugging of code // transformation passes. -// func sanityCheck(fn *Function, reporter io.Writer) bool { if reporter == nil { reporter = os.Stderr @@ -40,7 +41,6 @@ func sanityCheck(fn *Function, reporter io.Writer) bool { // mustSanityCheck is like sanityCheck but panics instead of returning // a negative result. -// func mustSanityCheck(fn *Function, reporter io.Writer) { if !sanityCheck(fn, reporter) { fn.WriteTo(os.Stderr) @@ -142,11 +142,14 @@ func (s *sanity) checkInstr(idx int, instr Instruction) { case *ChangeInterface: case *ChangeType: case *SliceToArrayPointer: + case *SliceToArray: case *Convert: - if _, ok := instr.X.Type().Underlying().(*types.Basic); !ok { - if _, ok := instr.Type().Underlying().(*types.Basic); !ok { - s.errorf("convert %s -> %s: at least one type must be basic", instr.X.Type(), instr.Type()) - } + tsetInstrX := typeutil.NewTypeSet(instr.X.Type().Underlying()) + tsetInstr := typeutil.NewTypeSet(instr.Type().Underlying()) + ok1 := tsetInstr.Any(func(term *types.Term) bool { _, ok := term.Type().Underlying().(*types.Basic); return ok }) + ok2 := tsetInstrX.Any(func(term *types.Term) bool { _, ok := term.Type().Underlying().(*types.Basic); return ok }) + if !ok1 && !ok2 { + s.errorf("convert %s -> %s: at least one type set must contain basic type", instr.X.Type(), instr.Type()) } case *Defer: @@ -194,6 +197,7 @@ func (s *sanity) checkInstr(idx int, instr Instruction) { case *GenericConst: case *Recv: case *TypeSwitch: + case *CompositeValue: default: panic(fmt.Sprintf("Unknown instruction type: %T", instr)) } diff --git a/vendor/honnef.co/go/tools/go/ir/source.go b/vendor/honnef.co/go/tools/go/ir/source.go index 677eefbd7f..155c5f7336 100644 --- a/vendor/honnef.co/go/tools/go/ir/source.go +++ b/vendor/honnef.co/go/tools/go/ir/source.go @@ -14,8 +14,6 @@ import ( "go/ast" "go/token" "go/types" - - "golang.org/x/exp/typeparams" ) // EnclosingFunction returns the function that contains the syntax @@ -25,11 +23,10 @@ import ( // enclosed by the package's init() function. // // Returns nil if not found; reasons might include: -// - the node is not enclosed by any function. -// - the node is within an anonymous function (FuncLit) and -// its IR function has not been created yet -// (pkg.Build() has not yet been called). -// +// - the node is not enclosed by any function. +// - the node is within an anonymous function (FuncLit) and +// its IR function has not been created yet +// (pkg.Build() has not yet been called). func EnclosingFunction(pkg *Package, path []ast.Node) *Function { // Start with package-level function... fn := findEnclosingPackageLevelFunction(pkg, path) @@ -67,14 +64,12 @@ outer: // depend on whether IR code for pkg has been built, so it can be // used to quickly reject check inputs that will cause // EnclosingFunction to fail, prior to IR building. -// func HasEnclosingFunction(pkg *Package, path []ast.Node) bool { return findEnclosingPackageLevelFunction(pkg, path) != nil } // findEnclosingPackageLevelFunction returns the Function // corresponding to the package-level function enclosing path. -// func findEnclosingPackageLevelFunction(pkg *Package, path []ast.Node) *Function { if n := len(path); n >= 2 { // [... 
{Gen,Func}Decl File] switch decl := path[n-2].(type) { @@ -100,7 +95,6 @@ func findEnclosingPackageLevelFunction(pkg *Package, path []ast.Node) *Function // findNamedFunc returns the named function whose FuncDecl.Ident is at // position pos. -// func findNamedFunc(pkg *Package, pos token.Pos) *Function { for _, fn := range pkg.Functions { if fn.Pos() == pos { @@ -114,13 +108,13 @@ func findNamedFunc(pkg *Package, pos token.Pos) *Function { // expression e. // // It returns nil if no value was found, e.g. -// - the expression is not lexically contained within f; -// - f was not built with debug information; or -// - e is a constant expression. (For efficiency, no debug -// information is stored for constants. Use -// go/types.Info.Types[e].Value instead.) -// - e is a reference to nil or a built-in function. -// - the value was optimised away. +// - the expression is not lexically contained within f; +// - f was not built with debug information; or +// - e is a constant expression. (For efficiency, no debug +// information is stored for constants. Use +// go/types.Info.Types[e].Value instead.) +// - e is a reference to nil or a built-in function. +// - the value was optimised away. // // If e is an addressable expression used in an lvalue context, // value is the address denoted by e, and isAddr is true. @@ -132,7 +126,6 @@ func findNamedFunc(pkg *Package, pos token.Pos) *Function { // astutil.PathEnclosingInterval to locate the ast.Node, then // EnclosingFunction to locate the Function, then ValueForExpr to find // the ir.Value.) -// func (f *Function) ValueForExpr(e ast.Expr) (value Value, isAddr bool) { if f.debugInfo() { // (opt) e = unparen(e) @@ -154,7 +147,6 @@ func (f *Function) ValueForExpr(e ast.Expr) (value Value, isAddr bool) { // Package returns the IR Package corresponding to the specified // type-checker package object. // It returns nil if no such IR package has been created. -// func (prog *Program) Package(obj *types.Package) *Package { return prog.packages[obj] } @@ -163,7 +155,6 @@ func (prog *Program) Package(obj *types.Package) *Package { // the specified named object, which may be a package-level const // (*Const), var (*Global) or func (*Function) of some package in // prog. It returns nil if the object is not found. -// func (prog *Program) packageLevelValue(obj types.Object) Value { if pkg, ok := prog.packages[obj.Pkg()]; ok { return pkg.values[obj] @@ -176,16 +167,14 @@ func (prog *Program) packageLevelValue(obj types.Object) Value { // // TODO(adonovan): check the invariant that obj.Type() matches the // result's Signature, both in the params/results and in the receiver. -// func (prog *Program) FuncValue(obj *types.Func) *Function { - obj = typeparams.OriginMethod(obj) + obj = obj.Origin() fn, _ := prog.packageLevelValue(obj).(*Function) return fn } // ConstValue returns the IR Value denoted by the source-level named // constant obj. -// func (prog *Program) ConstValue(obj *types.Const) *Const { // TODO(adonovan): opt: share (don't reallocate) // Consts for const objects and constant ast.Exprs. @@ -217,8 +206,9 @@ func (prog *Program) ConstValue(obj *types.Const) *Const { // If the identifier is a field selector and its base expression is // non-addressable, then VarValue returns the value of that field. 
// For example: -// func f() struct {x int} -// f().x // VarValue(x) returns a *Field instruction of type int +// +// func f() struct {x int} +// f().x // VarValue(x) returns a *Field instruction of type int // // All other identifiers denote addressable locations (variables). // For them, VarValue may return either the variable's address or its @@ -227,14 +217,14 @@ func (prog *Program) ConstValue(obj *types.Const) *Const { // // If !isAddr, the returned value is the one associated with the // specific identifier. For example, -// var x int // VarValue(x) returns Const 0 here -// x = 1 // VarValue(x) returns Const 1 here +// +// var x int // VarValue(x) returns Const 0 here +// x = 1 // VarValue(x) returns Const 1 here // // It is not specified whether the value or the address is returned in // any particular case, as it may depend upon optimizations performed // during IR code generation, such as registerization, constant // folding, avoidance of materialization of subexpressions, etc. -// func (prog *Program) VarValue(obj *types.Var, pkg *Package, ref []ast.Node) (value Value, isAddr bool) { // All references to a var are local to some function, possibly init. fn := EnclosingFunction(pkg, ref) diff --git a/vendor/honnef.co/go/tools/go/ir/ssa.go b/vendor/honnef.co/go/tools/go/ir/ssa.go index c51dd67d33..1ef87f9e88 100644 --- a/vendor/honnef.co/go/tools/go/ir/ssa.go +++ b/vendor/honnef.co/go/tools/go/ir/ssa.go @@ -13,12 +13,18 @@ import ( "go/constant" "go/token" "go/types" + "math/big" "sync" - "golang.org/x/exp/typeparams" "honnef.co/go/tools/go/types/typeutil" ) +const ( + // Replace CompositeValue with only constant values with AggregateConst. Currently disabled because it breaks field + // tracking in U1000. + doSimplifyConstantCompositeValues = false +) + type ID int // A Program is a partial or complete Go program converted to IR form. @@ -31,9 +37,9 @@ type Program struct { MethodSets typeutil.MethodSetCache // cache of type-checker's method-sets methodsMu sync.Mutex // guards the following maps: - methodSets typeutil.Map // maps type to its concrete methodSet - runtimeTypes typeutil.Map // types for which rtypes are needed - canon typeutil.Map // type canonicalization map + methodSets typeutil.Map[*methodSet] // maps type to its concrete methodSet + runtimeTypes typeutil.Map[bool] // types for which rtypes are needed + canon typeutil.Map[types.Type] // type canonicalization map bounds map[*types.Func]*Function // bounds for curried x.Method closures thunks map[selectionKey]*Function // thunks for T.Method expressions } @@ -46,7 +52,6 @@ type Program struct { // Members also contains entries for "init" (the synthetic package // initializer) and "init#%d", the nth declared init function, // and unspecified other things too. -// type Package struct { Prog *Program // the owning program Pkg *types.Package // the corresponding go/types.Package @@ -68,7 +73,6 @@ type Package struct { // A Member is a member of a Go package, implemented by *NamedConst, // *Global, *Function, or *Type; they are created by package-level // const, var, func and type declarations respectively. -// type Member interface { Name() string // declared name of the package member String() string // package-qualified name of the package member @@ -93,7 +97,6 @@ type Type struct { // // NB: a NamedConst is not a Value; it contains a constant Value, which // it augments with the name and position of its 'const' declaration. 
-// type NamedConst struct { object *types.Const Value *Const @@ -180,11 +183,12 @@ type Value interface { // An Instruction that defines a value (e.g. BinOp) also implements // the Value interface; an Instruction that only has an effect (e.g. Store) // does not. -// type Instruction interface { setSource(ast.Node) setID(ID) + Comment() string + // String returns the disassembled form of this value. // // Examples of Instructions that are Values: @@ -264,7 +268,6 @@ type Instruction interface { // Node is provided to simplify IR graph algorithms. Clients should // use the more specific and informative Value or Instruction // interfaces where appropriate. -// type Node interface { setID(ID) @@ -344,7 +347,6 @@ func (syn Synthetic) String() string { // Syntax.Pos() always returns the position of the declaring "func" token. // // Type() returns the function's Signature. -// type Function struct { node @@ -373,13 +375,13 @@ type Function struct { type instanceWrapperMap struct { h typeutil.Hasher entries map[uint32][]struct { - key *typeparams.TypeList + key *types.TypeList val *Function } len int } -func typeListIdentical(l1, l2 *typeparams.TypeList) bool { +func typeListIdentical(l1, l2 *types.TypeList) bool { if l1.Len() != l2.Len() { return false } @@ -393,10 +395,10 @@ func typeListIdentical(l1, l2 *typeparams.TypeList) bool { return true } -func (m *instanceWrapperMap) At(key *typeparams.TypeList) *Function { +func (m *instanceWrapperMap) At(key *types.TypeList) *Function { if m.entries == nil { m.entries = make(map[uint32][]struct { - key *typeparams.TypeList + key *types.TypeList val *Function }) m.h = typeutil.MakeHasher() @@ -416,10 +418,10 @@ func (m *instanceWrapperMap) At(key *typeparams.TypeList) *Function { return nil } -func (m *instanceWrapperMap) Set(key *typeparams.TypeList, val *Function) { +func (m *instanceWrapperMap) Set(key *types.TypeList, val *Function) { if m.entries == nil { m.entries = make(map[uint32][]struct { - key *typeparams.TypeList + key *types.TypeList val *Function }) m.h = typeutil.MakeHasher() @@ -437,7 +439,7 @@ func (m *instanceWrapperMap) Set(key *typeparams.TypeList, val *Function) { } } m.entries[hash] = append(m.entries[hash], struct { - key *typeparams.TypeList + key *types.TypeList val *Function }{key, val}) m.len++ @@ -456,6 +458,11 @@ const ( NeverReturns ) +type constValue struct { + c Constant + idx int +} + type functionBody struct { // The following fields are set transiently during building, // then cleared. @@ -465,11 +472,14 @@ type functionBody struct { implicitResults []*Alloc // tuple of results targets *targets // linked stack of branch targets lblocks map[types.Object]*lblock // labelled blocks - consts []Constant - wr *HTMLWriter - fakeExits BlockSet - blocksets [5]BlockSet - hasDefer bool + + consts map[constKey]constValue + aggregateConsts typeutil.Map[[]*AggregateConst] + + wr *HTMLWriter + fakeExits BlockSet + blocksets [5]BlockSet + hasDefer bool // a contiguous block of instructions that will be used by blocks, // to avoid making multiple allocations. @@ -502,7 +512,6 @@ func (fn *Function) results() []*Alloc { // // The order of Preds and Succs is significant (to Phi and If // instructions, respectively). -// type BasicBlock struct { Index int // index of this block within Parent().Blocks Comment string // optional label; no semantic significance @@ -534,7 +543,6 @@ type BasicBlock struct { // // Pos() returns the position of the value that was captured, which // belongs to an enclosing function. 
-// type FreeVar struct { node @@ -548,7 +556,6 @@ type FreeVar struct { } // A Parameter represents an input parameter of a function. -// type Parameter struct { register @@ -573,10 +580,10 @@ type Parameter struct { // Pos() returns token.NoPos. // // Example printed form: -// Const {42} -// Const {"test"} -// Const {(3 + 4i)} // +// Const {42} +// Const {"test"} +// Const {(3 + 4i)} type Const struct { register @@ -586,7 +593,18 @@ type Const struct { type AggregateConst struct { register - Values []Constant + Values []Value +} + +type CompositeValue struct { + register + + // Bitmap records which elements were explicitly provided. For example, [4]byte{2: x} would have a bitmap of 0010. + Bitmap big.Int + // The number of bits set in Bitmap + NumSet int + // Dense list of values in the composite literal. Omitted elements are filled in with zero values. + Values []Value } // TODO add the element's zero constant to ArrayConst @@ -617,7 +635,6 @@ func (*GenericConst) aConstant() {} // // Pos() returns the position of the ast.ValueSpec.Names[*] // identifier. -// type Global struct { node @@ -637,20 +654,19 @@ type Global struct { // Go spec (excluding "make" and "new") or one of these ir-defined // intrinsics: // -// // wrapnilchk returns ptr if non-nil, panics otherwise. -// // (For use in indirection wrappers.) -// func ir:wrapnilchk(ptr *T, recvType, methodName string) *T +// // wrapnilchk returns ptr if non-nil, panics otherwise. +// // (For use in indirection wrappers.) +// func ir:wrapnilchk(ptr *T, recvType, methodName string) *T // -// // noreturnWasPanic returns true if the previously called -// // function panicked, false if it exited the process. -// func ir:noreturnWasPanic() bool +// // noreturnWasPanic returns true if the previously called +// // function panicked, false if it exited the process. +// func ir:noreturnWasPanic() bool // // Object() returns a *types.Builtin for built-ins defined by the spec, // nil for others. // // Type() returns a *types.Signature representing the effective // signature of the built-in for this call. -// type Builtin struct { node @@ -687,9 +703,9 @@ type Builtin struct { // allocates a varargs slice. // // Example printed form: -// t1 = StackAlloc <*int> -// t2 = HeapAlloc <*int> (new) // +// t1 = StackAlloc <*int> +// t2 = HeapAlloc <*int> (new) type Alloc struct { register Heap bool @@ -711,8 +727,8 @@ var _ Value = (*Sigma)(nil) // Within a block, all σ-nodes must appear before all non-σ nodes. // // Example printed form: -// t2 = Sigma [#0] t1 (x) // +// t2 = Sigma [#0] t1 (x) type Sigma struct { register From *BasicBlock @@ -749,8 +765,8 @@ type Copy struct { // during SSA renaming. // // Example printed form: -// t3 = Phi 2:t1 4:t2 (x) // +// t3 = Phi 2:t1 4:t2 (x) type Phi struct { register Edges []Value // Edges[i] is value for Block().Preds[i] @@ -769,10 +785,10 @@ type Phi struct { // Pos() returns the ast.CallExpr.Lparen, if explicit in the source. // // Example printed form: -// t3 = Call <()> println t1 t2 -// t4 = Call <()> foo$1 -// t6 = Invoke t5.String // +// t3 = Call <()> println t1 t2 +// t4 = Call <()> foo$1 +// t6 = Invoke t5.String type Call struct { register Call CallCommon @@ -783,8 +799,8 @@ type Call struct { // Pos() returns the ast.BinaryExpr.OpPos, if explicit in the source. // // Example printed form: -// t3 = BinOp {+} t2 t1 // +// t3 = BinOp {+} t2 t1 type BinOp struct { register // One of: @@ -800,10 +816,9 @@ type BinOp struct { // SUB is negation. // NOT is logical negation. 
// -// // Example printed form: -// t2 = UnOp {^} t1 // +// t2 = UnOp {^} t1 type UnOp struct { register Op token.Token // One of: NOT SUB XOR ! - ^ @@ -817,8 +832,8 @@ type UnOp struct { // specified. // // Example printed form: -// t2 = Load t1 // +// t2 = Load t1 type Load struct { register X Value @@ -828,11 +843,11 @@ type Load struct { // change to Type(). // // Type changes are permitted: -// - between a named type and its underlying type. -// - between two named types of the same underlying type. -// - between (possibly named) pointers to identical base types. -// - from a bidirectional channel to a read- or write-channel, -// optionally adding/removing a name. +// - between a named type and its underlying type. +// - between two named types of the same underlying type. +// - between (possibly named) pointers to identical base types. +// - from a bidirectional channel to a read- or write-channel, +// optionally adding/removing a name. // // This operation cannot fail dynamically. // @@ -840,8 +855,8 @@ type Load struct { // from an explicit conversion in the source. // // Example printed form: -// t2 = ChangeType <*T> t1 // +// t2 = ChangeType <*T> t1 type ChangeType struct { register X Value @@ -852,12 +867,13 @@ type ChangeType struct { // // A conversion may change the value and representation of its operand. // Conversions are permitted: -// - between real numeric types. -// - between complex numeric types. -// - between string and []byte or []rune. -// - between pointers and unsafe.Pointer. -// - between unsafe.Pointer and uintptr. -// - from (Unicode) integer to (UTF-8) string. +// - between real numeric types. +// - between complex numeric types. +// - between string and []byte or []rune. +// - between pointers and unsafe.Pointer. +// - between unsafe.Pointer and uintptr. +// - from (Unicode) integer to (UTF-8) string. +// // A conversion may imply a type name change also. // // This operation cannot fail dynamically. @@ -869,8 +885,8 @@ type ChangeType struct { // from an explicit conversion in the source. // // Example printed form: -// t2 = Convert <[]byte> t1 // +// t2 = Convert <[]byte> t1 type Convert struct { register X Value @@ -886,8 +902,8 @@ type Convert struct { // otherwise. // // Example printed form: -// t2 = ChangeInterface t1 // +// t2 = ChangeInterface t1 type ChangeInterface struct { register X Value @@ -900,13 +916,27 @@ type ChangeInterface struct { // from an explicit conversion in the source. // // Example printed form: -// t1 = SliceToArrayPointer <*[4]byte> t1 // +// t2 = SliceToArrayPointer <*[4]byte> t1 type SliceToArrayPointer struct { register X Value } +// The SliceToArray instruction yields the conversion of slice X to +// array. +// +// Pos() returns the ast.CallExpr.Lparen, if the instruction arose +// from an explicit conversion in the source. +// +// Example printed form: +// +// t2 = SliceToArray <[4]byte> t1 +type SliceToArray struct { + register + X Value +} + // MakeInterface constructs an instance of an interface type from a // value of a concrete type. // @@ -914,14 +944,15 @@ type SliceToArrayPointer struct { // of X, and Program.MethodValue(m) to find the implementation of a method. // // To construct the zero value of an interface type T, use: -// NewConst(constant.MakeNil(), T, pos) +// +// NewConst(constant.MakeNil(), T, pos) // // Pos() returns the ast.CallExpr.Lparen, if the instruction arose // from an explicit conversion in the source. 
// // Example printed form: -// t2 = MakeInterface t1 // +// t2 = MakeInterface t1 type MakeInterface struct { register X Value @@ -936,9 +967,9 @@ type MakeInterface struct { // closure or the ast.SelectorExpr.Sel for a bound method closure. // // Example printed form: -// t1 = MakeClosure foo$1 t1 t2 -// t5 = MakeClosure (T).foo$bound t4 // +// t1 = MakeClosure foo$1 t1 t2 +// t5 = MakeClosure (T).foo$bound t4 type MakeClosure struct { register Fn Value // always a *Function @@ -954,9 +985,9 @@ type MakeClosure struct { // the ast.CompositeLit.Lbrack if created by a literal. // // Example printed form: -// t1 = MakeMap -// t2 = MakeMap t1 // +// t1 = MakeMap +// t2 = MakeMap t1 type MakeMap struct { register Reserve Value // initial space reservation; nil => default @@ -971,9 +1002,9 @@ type MakeMap struct { // created it. // // Example printed form: -// t3 = MakeChan t1 -// t4 = MakeChan t2 // +// t3 = MakeChan t1 +// t4 = MakeChan t2 type MakeChan struct { register Size Value // int; size of buffer; zero => synchronous. @@ -993,9 +1024,9 @@ type MakeChan struct { // created it. // // Example printed form: -// t3 = MakeSlice <[]string> t1 t2 -// t4 = MakeSlice t1 t2 // +// t3 = MakeSlice <[]string> t1 t2 +// t4 = MakeSlice t1 t2 type MakeSlice struct { register Len Value @@ -1016,8 +1047,8 @@ type MakeSlice struct { // NoPos if not explicit in the source (e.g. a variadic argument slice). // // Example printed form: -// t4 = Slice <[]int> t3 t2 t1 // +// t4 = Slice <[]int> t3 t2 t1 type Slice struct { register X Value // slice, string, or *array @@ -1038,8 +1069,8 @@ type Slice struct { // field, if explicit in the source. // // Example printed form: -// t2 = FieldAddr <*int> [0] (X) t1 // +// t2 = FieldAddr <*int> [0] (X) t1 type FieldAddr struct { register X Value // *struct @@ -1056,8 +1087,8 @@ type FieldAddr struct { // field, if explicit in the source. // // Example printed form: -// t2 = FieldAddr [0] (X) t1 // +// t2 = FieldAddr [0] (X) t1 type Field struct { register X Value // struct @@ -1079,8 +1110,8 @@ type Field struct { // explicit in the source. // // Example printed form: -// t3 = IndexAddr <*int> t2 t1 // +// t3 = IndexAddr <*int> t2 t1 type IndexAddr struct { register X Value // slice or *array, @@ -1093,8 +1124,8 @@ type IndexAddr struct { // explicit in the source. // // Example printed form: -// t3 = Index t2 t1 // +// t3 = Index t2 t1 type Index struct { register X Value // array @@ -1110,9 +1141,9 @@ type Index struct { // Pos() returns the ast.IndexExpr.Lbrack, if explicit in the source. // // Example printed form: -// t4 = MapLookup t3 t1 -// t6 = MapLookup <(string, bool)> t3 t2 // +// t4 = MapLookup t3 t1 +// t6 = MapLookup <(string, bool)> t3 t2 type MapLookup struct { register X Value // map @@ -1126,8 +1157,8 @@ type MapLookup struct { // Pos() returns the ast.IndexExpr.Lbrack, if explicit in the source. // // Example printed form: -// t3 = StringLookup t2 t1 // +// t3 = StringLookup t2 t1 type StringLookup struct { register X Value // string @@ -1136,7 +1167,6 @@ type StringLookup struct { // SelectState is a helper for Select. // It represents one goal state and its corresponding communication. -// type SelectState struct { Dir types.ChanDir // direction of case (SendOnly or RecvOnly) Chan Value // channel to use (for send or receive) @@ -1151,7 +1181,9 @@ type SelectState struct { // Let n be the number of States for which Dir==RECV and Tᵢ (0 ≤ i < n) // be the element type of each such state's Chan. 
// Select returns an n+2-tuple -// (index int, recvOk bool, r₀ T₀, ... rₙ-1 Tₙ-1) +// +// (index int, recvOk bool, r₀ T₀, ... rₙ-1 Tₙ-1) +// // The tuple's components, described below, must be accessed via the // Extract instruction. // @@ -1177,9 +1209,9 @@ type SelectState struct { // Pos() returns the ast.SelectStmt.Select. // // Example printed form: -// t6 = SelectNonBlocking <(index int, ok bool, int)> [<-t4, t5<-t1] -// t11 = SelectBlocking <(index int, ok bool)> [] // +// t6 = SelectNonBlocking <(index int, ok bool, int)> [<-t4, t5<-t1] +// t11 = SelectBlocking <(index int, ok bool)> [] type Select struct { register States []*SelectState @@ -1196,8 +1228,8 @@ type Select struct { // Pos() returns the ast.RangeStmt.For. // // Example printed form: -// t2 = Range t1 // +// t2 = Range t1 type Range struct { register X Value // string or map @@ -1219,9 +1251,9 @@ type Range struct { // The types of k and/or v may be types.Invalid. // // Example printed form: -// t5 = Next <(ok bool, k int, v rune)> t2 -// t5 = Next <(ok bool, k invalid type, v invalid type)> t2 // +// t5 = Next <(ok bool, k int, v rune)> t2 +// t5 = Next <(ok bool, k invalid type, v invalid type)> t2 type Next struct { register Iter Value @@ -1260,9 +1292,9 @@ type Next struct { // type-switch statement. // // Example printed form: -// t2 = TypeAssert t1 -// t4 = TypeAssert <(value fmt.Stringer, ok bool)> t1 // +// t2 = TypeAssert t1 +// t4 = TypeAssert <(value fmt.Stringer, ok bool)> t1 type TypeAssert struct { register X Value @@ -1277,8 +1309,8 @@ type TypeAssert struct { // MapLookup and others. // // Example printed form: -// t7 = Extract [1] (ok) t4 // +// t7 = Extract [1] (ok) t4 type Extract struct { register Tuple Value @@ -1295,11 +1327,10 @@ type Extract struct { // Pos() returns NoPos. // // Example printed form: -// Jump → b1 // +// Jump → b1 type Jump struct { anInstruction - Comment string } // The Unreachable pseudo-instruction signals that execution cannot @@ -1313,8 +1344,8 @@ type Jump struct { // containing BasicBlock. // // Example printed form: -// Unreachable → b1 // +// Unreachable → b1 type Unreachable struct { anInstruction } @@ -1329,8 +1360,8 @@ type Unreachable struct { // Pos() returns the *ast.IfStmt, if explicit in the source. // // Example printed form: -// If t2 → b1 b2 // +// If t2 → b1 b2 type If struct { anInstruction Cond Value @@ -1369,9 +1400,9 @@ type TypeSwitch struct { // Pos() returns the ast.ReturnStmt.Return, if explicit in the source. // // Example printed form: -// Return -// Return t1 t2 // +// Return +// Return t1 t2 type Return struct { anInstruction Results []Value @@ -1387,8 +1418,8 @@ type Return struct { // Pos() returns NoPos. // // Example printed form: -// RunDefers // +// RunDefers type RunDefers struct { anInstruction } @@ -1405,8 +1436,8 @@ type RunDefers struct { // in the source. // // Example printed form: -// Panic t1 // +// Panic t1 type Panic struct { anInstruction X Value // an interface{} @@ -1420,10 +1451,10 @@ type Panic struct { // Pos() returns the ast.GoStmt.Go. // // Example printed form: -// Go println t1 -// Go t3 -// GoInvoke t4.Bar t2 // +// Go println t1 +// Go t3 +// GoInvoke t4.Bar t2 type Go struct { anInstruction Call CallCommon @@ -1437,10 +1468,10 @@ type Go struct { // Pos() returns the ast.DeferStmt.Defer. 
// // Example printed form: -// Defer println t1 -// Defer t3 -// DeferInvoke t4.Bar t2 // +// Defer println t1 +// Defer t3 +// DeferInvoke t4.Bar t2 type Defer struct { anInstruction Call CallCommon @@ -1451,8 +1482,8 @@ type Defer struct { // Pos() returns the ast.SendStmt.Arrow, if explicit in the source. // // Example printed form: -// Send t2 t1 // +// Send t2 t1 type Send struct { anInstruction Chan, X Value @@ -1469,8 +1500,9 @@ type Send struct { // Pos() returns the ast.RangeStmt.For. // // Example printed form: -// t2 = Recv t1 -// t3 = Recv <(int, bool)> t1 +// +// t2 = Recv t1 +// t3 = Recv <(int, bool)> t1 type Recv struct { register Chan Value @@ -1486,8 +1518,8 @@ type Recv struct { // implementation choices, the details are not specified. // // Example printed form: -// Store {int} t2 t1 // +// Store {int} t2 t1 type Store struct { anInstruction Addr Value @@ -1502,8 +1534,8 @@ type Store struct { // Pos() returns NoPos. // // Example printed form: -// BlankStore t1 // +// BlankStore t1 type BlankStore struct { anInstruction Val Value @@ -1516,8 +1548,8 @@ type BlankStore struct { // if explicit in the source. // // Example printed form: -// MapUpdate t3 t1 t2 // +// MapUpdate t3 t1 t2 type MapUpdate struct { anInstruction Map Value @@ -1549,10 +1581,10 @@ type MapUpdate struct { // ordinary SSA renaming machinery.) // // Example printed form: -// ; *ast.CallExpr @ 102:9 is t5 -// ; var x float64 @ 109:72 is x -// ; address of *ast.CompositeLit @ 216:10 is t0 // +// ; *ast.CallExpr @ 102:9 is t5 +// ; var x float64 @ 109:72 is x +// ; address of *ast.CompositeLit @ 216:10 is t0 type DebugRef struct { anInstruction Expr ast.Expr // the referring expression (never *ast.ParenExpr) @@ -1570,7 +1602,6 @@ type DebugRef struct { // // Temporary names are automatically assigned to each register on // completion of building a function in IR form. -// type register struct { anInstruction typ types.Type // type of virtual register @@ -1599,7 +1630,12 @@ func (n *node) Pos() token.Pos { // It provides the implementations of the Block and setBlock methods. type anInstruction struct { node - block *BasicBlock // the basic block of this instruction + block *BasicBlock // the basic block of this instruction + comment string +} + +func (instr anInstruction) Comment() string { + return instr.comment } // CallCommon is contained by Go, Defer and Call to hold the @@ -1614,15 +1650,17 @@ type anInstruction struct { // 'func'. // // Value may be one of: -// (a) a *Function, indicating a statically dispatched call -// to a package-level function, an anonymous function, or -// a method of a named type. -// (b) a *MakeClosure, indicating an immediately applied -// function literal with free variables. -// (c) a *Builtin, indicating a statically dispatched call -// to a built-in function. -// (d) any other value, indicating a dynamically dispatched -// function call. +// +// (a) a *Function, indicating a statically dispatched call +// to a package-level function, an anonymous function, or +// a method of a named type. +// (b) a *MakeClosure, indicating an immediately applied +// function literal with free variables. +// (c) a *Builtin, indicating a statically dispatched call +// to a built-in function. +// (d) any other value, indicating a dynamically dispatched +// function call. +// // StaticCallee returns the identity of the callee in cases // (a) and (b), nil otherwise. // @@ -1630,9 +1668,10 @@ type anInstruction struct { // Args[0] contains the receiver parameter. 
// // Example printed form: -// t3 = Call <()> println t1 t2 -// Go t3 -// Defer t3 +// +// t3 = Call <()> println t1 t2 +// Go t3 +// Defer t3 // // 2. "invoke" mode: when Method is non-nil (IsInvoke), a CallCommon // represents a dynamically dispatched call to an interface method. @@ -1646,13 +1685,13 @@ type anInstruction struct { // receiver but the first true argument. // // Example printed form: -// t6 = Invoke t5.String -// GoInvoke t4.Bar t2 -// DeferInvoke t4.Bar t2 +// +// t6 = Invoke t5.String +// GoInvoke t4.Bar t2 +// DeferInvoke t4.Bar t2 // // For all calls to variadic functions (Signature().Variadic()), // the last element of Args is a slice. -// type CallCommon struct { Value Value // receiver (invoke mode) or func value (call mode) Method *types.Func // abstract method (invoke mode) @@ -1673,7 +1712,6 @@ func (c *CallCommon) IsInvoke() bool { // // In either "call" or "invoke" mode, if the callee is a method, its // receiver is represented by sig.Recv, not sig.Params().At(0). -// func (c *CallCommon) Signature() *types.Signature { if c.Method != nil { return c.Method.Type().(*types.Signature) @@ -1716,7 +1754,6 @@ func (c *CallCommon) Description() string { // The CallInstruction interface, implemented by *Go, *Defer and *Call, // exposes the common parts of function-calling instructions, // yet provides a way back to the Value defined by *Call alone. -// type CallInstruction interface { Instruction Common() *CallCommon // returns the common parts of the call @@ -1802,7 +1839,6 @@ func (c *NamedConst) RelString(from *types.Package) string { return relString(c, // Func returns the package-level function of the specified name, // or nil if not found. -// func (p *Package) Func(name string) (f *Function) { f, _ = p.Members[name].(*Function) return @@ -1810,7 +1846,6 @@ func (p *Package) Func(name string) (f *Function) { // Var returns the package-level variable of the specified name, // or nil if not found. -// func (p *Package) Var(name string) (g *Global) { g, _ = p.Members[name].(*Global) return @@ -1818,7 +1853,6 @@ func (p *Package) Var(name string) (g *Global) { // Const returns the package-level constant of the specified name, // or nil if not found. -// func (p *Package) Const(name string) (c *NamedConst) { c, _ = p.Members[name].(*NamedConst) return @@ -1826,7 +1860,6 @@ func (p *Package) Const(name string) (c *NamedConst) { // Type returns the package-level type of the specified name, // or nil if not found. 
-// func (p *Package) Type(name string) (t *Type) { t, _ = p.Members[name].(*Type) return @@ -1880,6 +1913,10 @@ func (v *SliceToArrayPointer) Operands(rands []*Value) []*Value { return append(rands, &v.X) } +func (v *SliceToArray) Operands(rands []*Value) []*Value { + return append(rands, &v.X) +} + func (s *DebugRef) Operands(rands []*Value) []*Value { return append(rands, &s.X) } @@ -2042,13 +2079,26 @@ func (v *Load) Operands(rands []*Value) []*Value { return append(rands, &v.X) } +func (v *AggregateConst) Operands(rands []*Value) []*Value { + for i := range v.Values { + rands = append(rands, &v.Values[i]) + } + return rands +} + +func (v *CompositeValue) Operands(rands []*Value) []*Value { + for i := range v.Values { + rands = append(rands, &v.Values[i]) + } + return rands +} + // Non-Instruction Values: -func (v *Builtin) Operands(rands []*Value) []*Value { return rands } -func (v *FreeVar) Operands(rands []*Value) []*Value { return rands } -func (v *Const) Operands(rands []*Value) []*Value { return rands } -func (v *ArrayConst) Operands(rands []*Value) []*Value { return rands } -func (v *AggregateConst) Operands(rands []*Value) []*Value { return rands } -func (v *GenericConst) Operands(rands []*Value) []*Value { return rands } -func (v *Function) Operands(rands []*Value) []*Value { return rands } -func (v *Global) Operands(rands []*Value) []*Value { return rands } -func (v *Parameter) Operands(rands []*Value) []*Value { return rands } +func (v *Builtin) Operands(rands []*Value) []*Value { return rands } +func (v *FreeVar) Operands(rands []*Value) []*Value { return rands } +func (v *Const) Operands(rands []*Value) []*Value { return rands } +func (v *ArrayConst) Operands(rands []*Value) []*Value { return rands } +func (v *GenericConst) Operands(rands []*Value) []*Value { return rands } +func (v *Function) Operands(rands []*Value) []*Value { return rands } +func (v *Global) Operands(rands []*Value) []*Value { return rands } +func (v *Parameter) Operands(rands []*Value) []*Value { return rands } diff --git a/vendor/honnef.co/go/tools/go/ir/staticcheck.conf b/vendor/honnef.co/go/tools/go/ir/staticcheck.conf deleted file mode 100644 index d7b38bc356..0000000000 --- a/vendor/honnef.co/go/tools/go/ir/staticcheck.conf +++ /dev/null @@ -1,3 +0,0 @@ -# ssa/... is mostly imported from upstream and we don't want to -# deviate from it too much, hence disabling SA1019 -checks = ["inherit", "-SA1019"] diff --git a/vendor/honnef.co/go/tools/go/ir/util.go b/vendor/honnef.co/go/tools/go/ir/util.go index 550f6c9d88..0a733b654d 100644 --- a/vendor/honnef.co/go/tools/go/ir/util.go +++ b/vendor/honnef.co/go/tools/go/ir/util.go @@ -16,8 +16,6 @@ import ( "honnef.co/go/tools/go/ast/astutil" "honnef.co/go/tools/go/types/typeutil" - - "golang.org/x/exp/typeparams" ) //// AST utilities @@ -26,7 +24,6 @@ func unparen(e ast.Expr) ast.Expr { return astutil.Unparen(e) } // isBlankIdent returns true iff e is an Ident with name "_". // They have no associated types.Object, and thus no type. -// func isBlankIdent(e ast.Expr) bool { id, ok := e.(*ast.Ident) return ok && id.Name == "_" @@ -51,7 +48,7 @@ func isInterface(T types.Type) bool { return types.IsInterface(T) } func deref(typ types.Type) types.Type { orig := typ - if t, ok := typ.(*typeparams.TypeParam); ok { + if t, ok := typ.(*types.TypeParam); ok { if ctyp := typeutil.CoreType(t); ctyp != nil { typ = ctyp } @@ -71,7 +68,6 @@ func recvType(obj *types.Func) types.Type { // returns a closure that prints the corresponding "end" message. 
// Call using 'defer logStack(...)()' to show builder stack on panic. // Don't forget trailing parens! -// func logStack(format string, args ...interface{}) func() { msg := fmt.Sprintf(format, args...) io.WriteString(os.Stderr, msg) @@ -99,7 +95,7 @@ func makeLen(T types.Type) *Builtin { lenParams := types.NewTuple(anonVar(T)) return &Builtin{ name: "len", - sig: types.NewSignature(nil, lenParams, lenResults, false), + sig: types.NewSignatureType(nil, nil, nil, lenParams, lenResults, false), } } @@ -147,3 +143,6 @@ func assert(x bool) { panic("failed assertion") } } + +// BlockMap is a mapping from basic blocks (identified by their indices) to values. +type BlockMap[T any] []T diff --git a/vendor/honnef.co/go/tools/go/ir/wrappers.go b/vendor/honnef.co/go/tools/go/ir/wrappers.go index 6082d07e1e..69537fb777 100644 --- a/vendor/honnef.co/go/tools/go/ir/wrappers.go +++ b/vendor/honnef.co/go/tools/go/ir/wrappers.go @@ -22,8 +22,6 @@ package ir import ( "fmt" "go/types" - - "golang.org/x/exp/typeparams" ) // -- wrappers ----------------------------------------------------------- @@ -42,7 +40,6 @@ import ( // - the result may be a thunk or a wrapper. // // EXCLUSIVE_LOCKS_REQUIRED(prog.methodsMu) -// func makeWrapper(prog *Program, sel *types.Selection) *Function { obj := sel.Obj().(*types.Func) // the declared function sig := sel.Type().(*types.Signature) // type of this wrapper @@ -90,7 +87,7 @@ func makeWrapper(prog *Program, sel *types.Selection) *Function { var c Call c.Call.Value = &Builtin{ name: "ir:wrapnilchk", - sig: types.NewSignature(nil, + sig: types.NewSignatureType(nil, nil, nil, types.NewTuple(anonVar(sel.Recv()), anonVar(tString), anonVar(tString)), types.NewTuple(anonVar(sel.Recv())), false), } @@ -140,7 +137,6 @@ func makeWrapper(prog *Program, sel *types.Selection) *Function { // createParams creates parameters for wrapper method fn based on its // Signature.Params, which do not include the receiver. // start is the index of the first regular parameter to use. -// func createParams(fn *Function, start int) { tparams := fn.Signature.Params() for i, n := start, tparams.Len(); i < n; i++ { @@ -159,22 +155,21 @@ func createParams(fn *Function, start int) { // Use MakeClosure with such a wrapper to construct a bound method // closure. e.g.: // -// type T int or: type T interface { meth() } -// func (t T) meth() -// var t T -// f := t.meth -// f() // calls t.meth() +// type T int or: type T interface { meth() } +// func (t T) meth() +// var t T +// f := t.meth +// f() // calls t.meth() // // f is a closure of a synthetic wrapper defined as if by: // -// f := func() { return t.meth() } +// f := func() { return t.meth() } // // Unlike makeWrapper, makeBound need perform no indirection or field // selections because that can be done before the closure is // constructed. // // EXCLUSIVE_LOCKS_ACQUIRED(meth.Prog.methodsMu) -// func makeBound(prog *Program, obj *types.Func) *Function { prog.methodsMu.Lock() defer prog.methodsMu.Unlock() @@ -226,22 +221,21 @@ func makeBound(prog *Program, obj *types.Func) *Function { // // Precondition: sel.Kind() == types.MethodExpr. 
// -// type T int or: type T interface { meth() } -// func (t T) meth() -// f := T.meth -// var t T -// f(t) // calls t.meth() +// type T int or: type T interface { meth() } +// func (t T) meth() +// f := T.meth +// var t T +// f(t) // calls t.meth() // // f is a synthetic wrapper defined as if by: // -// f := func(t T) { return t.meth() } +// f := func(t T) { return t.meth() } // // TODO(adonovan): opt: currently the stub is created even when used // directly in a function call: C.f(i, 0). This is less efficient // than inlining the stub. // // EXCLUSIVE_LOCKS_ACQUIRED(meth.Prog.methodsMu) -// func makeThunk(prog *Program, sel *types.Selection) *Function { if sel.Kind() != types.MethodExpr { panic(sel) @@ -259,7 +253,7 @@ func makeThunk(prog *Program, sel *types.Selection) *Function { defer prog.methodsMu.Unlock() // Canonicalize key.recv to avoid constructing duplicate thunks. - canonRecv, ok := prog.canon.At(key.recv).(types.Type) + canonRecv, ok := prog.canon.At(key.recv) if !ok { canonRecv = key.recv prog.canon.Set(key.recv, canonRecv) @@ -278,7 +272,7 @@ func makeThunk(prog *Program, sel *types.Selection) *Function { } func changeRecv(s *types.Signature, recv *types.Var) *types.Signature { - return types.NewSignature(recv, s.Params(), s.Results(), s.Variadic()) + return types.NewSignatureType(recv, nil, nil, s.Params(), s.Results(), s.Variadic()) } // selectionKey is like types.Selection but a usable map key. @@ -293,11 +287,11 @@ type selectionKey struct { // makeInstance creates a wrapper function with signature sig that calls the generic function fn. // If targs is not nil, fn is a function and targs describes the concrete type arguments. // If targs is nil, fn is a method and the type arguments are derived from the receiver. -func makeInstance(prog *Program, fn *Function, sig *types.Signature, targs *typeparams.TypeList) *Function { +func makeInstance(prog *Program, fn *Function, sig *types.Signature, targs *types.TypeList) *Function { if sig.Recv() != nil { assert(targs == nil) // Methods don't have their own type parameters, but the receiver does - targs = typeparams.NamedTypeArgs(deref(sig.Recv().Type()).(*types.Named)) + targs = deref(sig.Recv().Type()).(*types.Named).TypeArgs() } else { assert(targs != nil) } diff --git a/vendor/honnef.co/go/tools/go/ir/write.go b/vendor/honnef.co/go/tools/go/ir/write.go index b936bc9852..139c8cf32b 100644 --- a/vendor/honnef.co/go/tools/go/ir/write.go +++ b/vendor/honnef.co/go/tools/go/ir/write.go @@ -1,5 +1,5 @@ package ir func NewJump(parent *BasicBlock) *Jump { - return &Jump{anInstruction{block: parent}, ""} + return &Jump{anInstruction{block: parent}} } diff --git a/vendor/honnef.co/go/tools/go/types/typeutil/typeparams.go b/vendor/honnef.co/go/tools/go/types/typeutil/typeparams.go index 9bfe8a38c2..2bf6ec6094 100644 --- a/vendor/honnef.co/go/tools/go/types/typeutil/typeparams.go +++ b/vendor/honnef.co/go/tools/go/types/typeutil/typeparams.go @@ -8,7 +8,7 @@ import ( ) type TypeSet struct { - Terms []*typeparams.Term + Terms []*types.Term empty bool } @@ -71,7 +71,7 @@ func CoreType(typ types.Type) types.Type { // All calls fn for each term in the type set and reports whether all invocations returned true. // If the type set is empty or unconstrained, All immediately returns false. 
-func (ts TypeSet) All(fn func(*typeparams.Term) bool) bool { +func (ts TypeSet) All(fn func(*types.Term) bool) bool { if len(ts.Terms) == 0 { return false } @@ -85,7 +85,7 @@ func (ts TypeSet) All(fn func(*typeparams.Term) bool) bool { // Any calls fn for each term in the type set and reports whether any invocation returned true. // It stops after the first call that returned true. -func (ts TypeSet) Any(fn func(*typeparams.Term) bool) bool { +func (ts TypeSet) Any(fn func(*types.Term) bool) bool { for _, term := range ts.Terms { if fn(term) { return true @@ -95,16 +95,16 @@ func (ts TypeSet) Any(fn func(*typeparams.Term) bool) bool { } // All is a wrapper for NewTypeSet(typ).All(fn). -func All(typ types.Type, fn func(*typeparams.Term) bool) bool { +func All(typ types.Type, fn func(*types.Term) bool) bool { return NewTypeSet(typ).All(fn) } // Any is a wrapper for NewTypeSet(typ).Any(fn). -func Any(typ types.Type, fn func(*typeparams.Term) bool) bool { +func Any(typ types.Type, fn func(*types.Term) bool) bool { return NewTypeSet(typ).Any(fn) } -func IsSlice(term *typeparams.Term) bool { +func IsSlice(term *types.Term) bool { _, ok := term.Type().Underlying().(*types.Slice) return ok } diff --git a/vendor/honnef.co/go/tools/go/types/typeutil/upstream.go b/vendor/honnef.co/go/tools/go/types/typeutil/upstream.go index d35d08e00c..04d8c21ba6 100644 --- a/vendor/honnef.co/go/tools/go/types/typeutil/upstream.go +++ b/vendor/honnef.co/go/tools/go/types/typeutil/upstream.go @@ -9,7 +9,6 @@ import ( ) type MethodSetCache = typeutil.MethodSetCache -type Map = typeutil.Map type Hasher = typeutil.Hasher func Callee(info *types.Info, call *ast.CallExpr) types.Object { @@ -23,3 +22,31 @@ func IntuitiveMethodSet(T types.Type, msets *MethodSetCache) []*types.Selection func MakeHasher() Hasher { return typeutil.MakeHasher() } + +type Map[V any] struct { + m typeutil.Map +} + +func (m *Map[V]) Delete(key types.Type) bool { return m.m.Delete(key) } +func (m *Map[V]) At(key types.Type) (V, bool) { + v := m.m.At(key) + if v == nil { + var zero V + return zero, false + } else { + return v.(V), true + } +} +func (m *Map[V]) Set(key types.Type, value V) { m.m.Set(key, value) } +func (m *Map[V]) Len() int { return m.m.Len() } +func (m *Map[V]) Iterate(f func(key types.Type, value V)) { + ff := func(key types.Type, value interface{}) { + f(key, value.(V)) + } + m.m.Iterate(ff) + +} +func (m *Map[V]) Keys() []types.Type { return m.m.Keys() } +func (m *Map[V]) String() string { return m.m.String() } +func (m *Map[V]) KeysString() string { return m.m.KeysString() } +func (m *Map[V]) SetHasher(h typeutil.Hasher) { m.m.SetHasher(h) } diff --git a/vendor/honnef.co/go/tools/go/types/typeutil/util.go b/vendor/honnef.co/go/tools/go/types/typeutil/util.go index b0aca16bdb..3a2ad973bb 100644 --- a/vendor/honnef.co/go/tools/go/types/typeutil/util.go +++ b/vendor/honnef.co/go/tools/go/types/typeutil/util.go @@ -122,6 +122,8 @@ func flattenFields(T *types.Struct, path []int, seen map[types.Type]bool) []Fiel if field.Anonymous() { if s, ok := Dereference(field.Type()).Underlying().(*types.Struct); ok { out = append(out, flattenFields(s, np, seen)...) 
+ } else { + out = append(out, Field{field, tag, np}) } } else { out = append(out, Field{field, tag, np}) diff --git a/vendor/honnef.co/go/tools/knowledge/deprecated.go b/vendor/honnef.co/go/tools/knowledge/deprecated.go index 7412a86e94..caeb49975c 100644 --- a/vendor/honnef.co/go/tools/knowledge/deprecated.go +++ b/vendor/honnef.co/go/tools/knowledge/deprecated.go @@ -19,6 +19,9 @@ type Deprecation struct { // go/importer.ForCompiler contains "Deprecated:", but it refers to a single argument, not the whole function. // Luckily, the notice starts in the middle of a paragraph, and as such isn't detected by us. +// TODO(dh): StdlibDeprecations doesn't contain entries for internal packages and unexported API. That's fine for normal +// users, but makes the Deprecated check less useful for people working on Go itself. + // StdlibDeprecations contains a mapping of Go API (such as variables, methods, or fields, among others) // to information about when it has been deprecated. var StdlibDeprecations = map[string]Deprecation{ @@ -85,8 +88,19 @@ var StdlibDeprecations = map[string]Deprecation{ "net/http.ErrUnexpectedTrailer": {12, DeprecatedUseNoLonger}, "net/http.CloseNotifier": {11, 7}, // This is hairy. The notice says "Not all errors in the http package related to protocol errors are of type ProtocolError", but doesn't that imply that some errors do? - "net/http.ProtocolError": {8, DeprecatedUseNoLonger}, - "(crypto/x509.CertificateRequest).Attributes": {5, 3}, + "net/http.ProtocolError": {8, DeprecatedUseNoLonger}, + "(crypto/x509.CertificateRequest).Attributes": {5, 3}, + "(*crypto/x509.Certificate).CheckCRLSignature": {19, 19}, + "crypto/x509.ParseCRL": {19, 19}, + "crypto/x509.ParseDERCRL": {19, 19}, + "(*crypto/x509.Certificate).CreateCRL": {19, 19}, + "crypto/x509/pkix.TBSCertificateList": {19, 19}, + "crypto/x509/pkix.RevokedCertificate": {19, 19}, + "go/doc.ToHTML": {20, 20}, + "go/doc.ToText": {20, 20}, + "go/doc.Synopsis": {20, 20}, + "math/rand.Seed": {20, 0}, + "math/rand.Read": {20, DeprecatedNeverUse}, // These functions have no direct alternative, but they are insecure and should no longer be used. "crypto/x509.IsEncryptedPEMBlock": {16, DeprecatedNeverUse}, @@ -150,8 +164,9 @@ var StdlibDeprecations = map[string]Deprecation{ "syscall.GetQueuedCompletionStatus": {17, 0}, "syscall.CreateIoCompletionPort": {17, 0}, - // Not marked as deprecated with a recognizable header, but deprecated nonetheless. - "io/ioutil": {16, 16}, + // We choose to only track the package itself, even though all functions are deprecated individually, too. Anyone + // using ioutil directly will have to import it, and this keeps the noise down.
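For illustration only, a minimal sketch of how one of these table entries might be consulted, assuming the two int fields of Deprecation are named DeprecatedSince and AlternativeAvailableSince (both minor Go versions, so 19 means Go 1.19) and that negative sentinels such as DeprecatedNeverUse mean the API has no replacement:

package main

import (
	"fmt"

	"honnef.co/go/tools/knowledge"
)

func main() {
	// Keys use plain import paths for packages, "pkg.Name" for functions and
	// variables, and "(pkg.Type).Method" for methods, as in the map above.
	if dep, ok := knowledge.StdlibDeprecations["io/ioutil"]; ok {
		// The field names here are assumptions, not taken from this diff.
		fmt.Printf("deprecated since Go 1.%d, alternative available since Go 1.%d\n",
			dep.DeprecatedSince, dep.AlternativeAvailableSince)
	}
}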
+ "io/ioutil": {19, 19}, "bytes.Title": {18, 0}, "strings.Title": {18, 0}, @@ -172,53 +187,58 @@ var StdlibDeprecations = map[string]Deprecation{ "syscall.Syscall18": {18, 18}, "syscall.Syscall6": {18, 18}, "syscall.Syscall9": {18, 18}, + + "reflect.SliceHeader": {21, 17}, + "reflect.StringHeader": {21, 20}, + "crypto/elliptic.GenerateKey": {21, 21}, + "crypto/elliptic.Marshal": {21, 21}, + "crypto/elliptic.Unmarshal": {21, 21}, + "(*crypto/elliptic.CurveParams).Add": {21, 21}, + "(*crypto/elliptic.CurveParams).Double": {21, 21}, + "(*crypto/elliptic.CurveParams).IsOnCurve": {21, 21}, + "(*crypto/elliptic.CurveParams).ScalarBaseMult": {21, 21}, + "(*crypto/elliptic.CurveParams).ScalarMult": {21, 21}, + "crypto/rsa.GenerateMultiPrimeKey": {21, DeprecatedNeverUse}, + "(crypto/rsa.PrecomputedValues).CRTValues": {21, DeprecatedNeverUse}, + "(crypto/x509.RevocationList).RevokedCertificates": {21, 21}, } -// Last imported from Go at 4aa1efed4853ea067d665a952eee77c52faac774 with the following numbers of deprecations: +// Last imported from Go at c19c4c566c63818dfd059b352e52c4710eecf14d with the following numbers of deprecations: // // archive/tar/common.go:2 // archive/zip/struct.go:6 // bytes/bytes.go:1 -// cmd/compile/internal/ir/expr.go:1 -// cmd/compile/internal/ir/type.go:1 -// cmd/compile/internal/syntax/walk.go:1 -// cmd/compile/internal/types/sym.go:2 -// cmd/go/internal/modcmd/edit.go:1 -// cmd/go/testdata/mod/example.com_deprecated_a_v1.9.0.txt:2 -// cmd/go/testdata/mod/example.com_deprecated_b_v1.9.0.txt:2 -// cmd/go/testdata/mod/example.com_undeprecated_v1.0.0.txt:2 -// cmd/go/testdata/script/mod_deprecate_message.txt:4 -// cmd/go/testdata/script/mod_edit.txt:1 -// cmd/go/testdata/script/mod_list_deprecated.txt:2 -// cmd/go/testdata/script/mod_list_deprecated_replace.txt:1 -// cmd/internal/obj/link.go:5 -// cmd/internal/obj/textflag.go:1 -// cmd/vendor/golang.org/x/mod/modfile/rule.go:2 -// cmd/vendor/golang.org/x/mod/semver/semver.go:1 -// cmd/vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go:1 -// cmd/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go:1 -// cmd/vendor/golang.org/x/sys/windows/security_windows.go:1 -// cmd/vendor/golang.org/x/sys/windows/syscall_windows.go:2 // compress/flate/inflate.go:2 // crypto/dsa/dsa.go:1 +// crypto/elliptic/elliptic.go:8 +// crypto/elliptic/params.go:5 // crypto/rc4/rc4.go:1 -// crypto/tls/common.go:7 +// crypto/rsa/rsa.go:2 +// crypto/tls/common.go:6 // crypto/x509/cert_pool.go:1 // crypto/x509/pem_decrypt.go:3 -// crypto/x509/x509.go:1 +// crypto/x509/pkix/pkix.go:2 +// crypto/x509/x509.go:6 // database/sql/driver/driver.go:6 // debug/gosym/pclntab.go:2 // encoding/csv/reader.go:2 // encoding/json/decode.go:1 // encoding/json/encode.go:1 +// go/build/build.go:1 +// go/doc/comment.go:2 // go/doc/doc.go:1 +// go/doc/synopsis.go:1 // go/importer/importer.go:2 -// go/types/errorcodes.go:1 // go/types/interface.go:2 // go/types/signature.go:1 // image/geom.go:2 // image/jpeg/reader.go:1 +// internal/types/errors/codes.go:1 +// io/ioutil/ioutil.go:7 +// io/ioutil/tempfile.go:2 +// math/rand/rand.go:2 // net/dial.go:2 +// net/http/h2_bundle.go:1 // net/http/httptest/recorder.go:1 // net/http/httputil/persist.go:8 // net/http/request.go:6 @@ -230,7 +250,7 @@ var StdlibDeprecations = map[string]Deprecation{ // path/filepath/path_plan9.go:1 // path/filepath/path_unix.go:1 // path/filepath/path_windows.go:1 -// reflect/value.go:1 +// reflect/value.go:3 // regexp/regexp.go:1 // runtime/cpuprof.go:1 // strings/strings.go:1 @@ -249,5 +269,4 @@ var 
StdlibDeprecations = map[string]Deprecation{ // syscall/syscall.go:3 // syscall/syscall_windows.go:6 // text/template/parse/node.go:5 -// vendor/golang.org/x/crypto/curve25519/curve25519.go:1 // vendor/golang.org/x/text/transform/transform.go:1 diff --git a/vendor/honnef.co/go/tools/knowledge/signatures.go b/vendor/honnef.co/go/tools/knowledge/signatures.go index d072e61a46..03f4d53e86 100644 --- a/vendor/honnef.co/go/tools/knowledge/signatures.go +++ b/vendor/honnef.co/go/tools/knowledge/signatures.go @@ -6,7 +6,7 @@ import ( ) var Signatures = map[string]*types.Signature{ - "(io.Seeker).Seek": types.NewSignature(nil, + "(io.Seeker).Seek": types.NewSignatureType(nil, nil, nil, types.NewTuple( types.NewParam(token.NoPos, nil, "", types.Typ[types.Int64]), types.NewParam(token.NoPos, nil, "", types.Typ[types.Int]), @@ -18,7 +18,7 @@ var Signatures = map[string]*types.Signature{ false, ), - "(io.Writer).Write": types.NewSignature(nil, + "(io.Writer).Write": types.NewSignatureType(nil, nil, nil, types.NewTuple( types.NewParam(token.NoPos, nil, "", types.NewSlice(types.Typ[types.Byte])), ), @@ -29,7 +29,7 @@ var Signatures = map[string]*types.Signature{ false, ), - "(io.StringWriter).WriteString": types.NewSignature(nil, + "(io.StringWriter).WriteString": types.NewSignatureType(nil, nil, nil, types.NewTuple( types.NewParam(token.NoPos, nil, "", types.Typ[types.String]), ), @@ -40,7 +40,7 @@ var Signatures = map[string]*types.Signature{ false, ), - "(encoding.TextMarshaler).MarshalText": types.NewSignature(nil, + "(encoding.TextMarshaler).MarshalText": types.NewSignatureType(nil, nil, nil, types.NewTuple(), types.NewTuple( types.NewParam(token.NoPos, nil, "", types.NewSlice(types.Typ[types.Byte])), @@ -49,7 +49,7 @@ var Signatures = map[string]*types.Signature{ false, ), - "(encoding/json.Marshaler).MarshalJSON": types.NewSignature(nil, + "(encoding/json.Marshaler).MarshalJSON": types.NewSignatureType(nil, nil, nil, types.NewTuple(), types.NewTuple( types.NewParam(token.NoPos, nil, "", types.NewSlice(types.Typ[types.Byte])), @@ -58,7 +58,7 @@ var Signatures = map[string]*types.Signature{ false, ), - "(fmt.Stringer).String": types.NewSignature(nil, + "(fmt.Stringer).String": types.NewSignatureType(nil, nil, nil, types.NewTuple(), types.NewTuple( types.NewParam(token.NoPos, nil, "", types.Typ[types.String]), diff --git a/vendor/honnef.co/go/tools/pattern/convert.go b/vendor/honnef.co/go/tools/pattern/convert.go index 34e2cd4566..aed3617cd2 100644 --- a/vendor/honnef.co/go/tools/pattern/convert.go +++ b/vendor/honnef.co/go/tools/pattern/convert.go @@ -6,8 +6,6 @@ import ( "go/token" "go/types" "reflect" - - "golang.org/x/exp/typeparams" ) var astTypes = map[string]reflect.Type{ @@ -15,7 +13,7 @@ var astTypes = map[string]reflect.Type{ "RangeStmt": reflect.TypeOf(ast.RangeStmt{}), "AssignStmt": reflect.TypeOf(ast.AssignStmt{}), "IndexExpr": reflect.TypeOf(ast.IndexExpr{}), - "IndexListExpr": reflect.TypeOf(typeparams.IndexListExpr{}), + "IndexListExpr": reflect.TypeOf(ast.IndexListExpr{}), "Ident": reflect.TypeOf(ast.Ident{}), "ValueSpec": reflect.TypeOf(ast.ValueSpec{}), "GenDecl": reflect.TypeOf(ast.GenDecl{}), diff --git a/vendor/honnef.co/go/tools/pattern/doc.go b/vendor/honnef.co/go/tools/pattern/doc.go index c963bf716b..22fe2cf30a 100644 --- a/vendor/honnef.co/go/tools/pattern/doc.go +++ b/vendor/honnef.co/go/tools/pattern/doc.go @@ -1,7 +1,7 @@ /* Package pattern implements a simple language for pattern matching Go ASTs. 
-Design decisions and trade-offs +# Design decisions and trade-offs The language is designed specifically for the task of filtering ASTs to simplify the implementation of analyses in staticcheck. @@ -14,7 +14,7 @@ Furthermore, it is fully expected that the majority of analyses will still requi to further process the filtered AST, to make use of type information and to enforce complex invariants. It is not our goal to design a scripting language for writing entire checks in. -The language +# The language At its core, patterns are a representation of Go ASTs, allowing for the use of placeholders to enable pattern matching. Their syntax is inspired by LISP and Haskell, but unlike LISP, the core unit of patterns isn't the list, but the node. @@ -60,13 +60,13 @@ Thus, the two following forms have identical matching behavior: This section serves as an overview of the language's syntax. More in-depth explanations of the matching behavior as well as an exhaustive list of node types follows in the coming sections. -Pattern matching +# Pattern matching -TODO write about pattern matching +# TODO write about pattern matching - inspired by haskell syntax, but much, much simpler and naive -Node types +# Node types The language contains two kinds of nodes: those that map to nodes in the AST, and those that implement additional logic. @@ -246,7 +246,7 @@ The Not node negates a match. For example, (Not (Ident _)) will match all nodes ChanDir(0) -Automatic unnesting of AST nodes +# Automatic unnesting of AST nodes The Go AST has several types of nodes that wrap other nodes. To simplify matching, we automatically unwrap some of these nodes. @@ -268,6 +268,5 @@ On the flip-side, there is no way to specifically match these wrapper nodes. For example, there is no way of searching for unnecessary parentheses, like in the following piece of Go code: ((x)) += 2 - */ package pattern diff --git a/vendor/honnef.co/go/tools/pattern/fuzz.go b/vendor/honnef.co/go/tools/pattern/fuzz.go deleted file mode 100644 index 2afbb5242c..0000000000 --- a/vendor/honnef.co/go/tools/pattern/fuzz.go +++ /dev/null @@ -1,51 +0,0 @@ -//go:build gofuzz -// +build gofuzz - -package pattern - -import ( - "go/ast" - goparser "go/parser" - "go/token" - "os" - "path/filepath" - "strings" -) - -var files []*ast.File - -func init() { - fset := token.NewFileSet() - filepath.Walk("/usr/lib/go/src", func(path string, info os.FileInfo, err error) error { - if err != nil { - // XXX error handling - panic(err) - } - if !strings.HasSuffix(path, ".go") { - return nil - } - f, err := goparser.ParseFile(fset, path, nil, 0) - if err != nil { - return nil - } - files = append(files, f) - return nil - }) -} - -func Fuzz(data []byte) int { - p := &Parser{} - pat, err := p.Parse(string(data)) - if err != nil { - if strings.Contains(err.Error(), "internal error") { - panic(err) - } - return 0 - } - _ = pat.Root.String() - - for _, f := range files { - Match(pat.Root, f) - } - return 1 -} diff --git a/vendor/honnef.co/go/tools/pattern/match.go b/vendor/honnef.co/go/tools/pattern/match.go index b4edac9489..3eae3bd631 100644 --- a/vendor/honnef.co/go/tools/pattern/match.go +++ b/vendor/honnef.co/go/tools/pattern/match.go @@ -7,7 +7,7 @@ import ( "go/types" "reflect" - "golang.org/x/exp/typeparams" + "golang.org/x/tools/go/ast/astutil" ) var tokensByString = map[string]Token{ @@ -87,35 +87,56 @@ type matcher interface { Match(*Matcher, interface{}) (interface{}, bool) } -type State = map[string]interface{} +type State = map[string]any type Matcher struct { 
TypesInfo *types.Info State State + + bindingsMapping []string + + setBindings []uint64 } -func (m *Matcher) fork() *Matcher { - state := make(State, len(m.State)) - for k, v := range m.State { - state[k] = v - } - return &Matcher{ - TypesInfo: m.TypesInfo, - State: state, +func (m *Matcher) set(b Binding, value interface{}) { + m.State[b.Name] = value + m.setBindings[len(m.setBindings)-1] |= 1 << b.idx +} + +func (m *Matcher) push() { + m.setBindings = append(m.setBindings, 0) +} + +func (m *Matcher) pop() { + set := m.setBindings[len(m.setBindings)-1] + if set != 0 { + for i := 0; i < len(m.bindingsMapping); i++ { + if (set & (1 << i)) != 0 { + key := m.bindingsMapping[i] + delete(m.State, key) + } + } } + m.setBindings = m.setBindings[:len(m.setBindings)-1] } -func (m *Matcher) merge(mc *Matcher) { - m.State = mc.State +func (m *Matcher) merge() { + m.setBindings = m.setBindings[:len(m.setBindings)-1] } -func (m *Matcher) Match(a Node, b ast.Node) bool { +func (m *Matcher) Match(a Pattern, b ast.Node) bool { + m.bindingsMapping = a.Bindings m.State = State{} - _, ok := match(m, a, b) + m.push() + _, ok := match(m, a.Root, b) + m.merge() + if len(m.setBindings) != 0 { + panic(fmt.Sprintf("%d entries left on the stack, expected none", len(m.setBindings))) + } return ok } -func Match(a Node, b ast.Node) (*Matcher, bool) { +func Match(a Pattern, b ast.Node) (*Matcher, bool) { m := &Matcher{} ret := m.Match(a, b) return m, ret @@ -139,7 +160,11 @@ func match(m *Matcher, l, r interface{}) (interface{}, bool) { case *ast.BlockStmt: return match(m, l.List, r) case *ast.FieldList: - return match(m, l.List, r) + if l == nil { + return match(m, nil, r) + } else { + return match(m, l.List, r) + } } switch r := r.(type) { @@ -202,14 +227,24 @@ func match(m *Matcher, l, r interface{}) (interface{}, bool) { } } + // TODO(dh): the three blocks handling slices can be combined into a single block if we use reflection + { ln, ok1 := l.([]ast.Expr) rn, ok2 := r.([]ast.Expr) if ok1 || ok2 { if ok1 && !ok2 { - rn = []ast.Expr{r.(ast.Expr)} + cast, ok := r.(ast.Expr) + if !ok { + return nil, false + } + rn = []ast.Expr{cast} } else if !ok1 && ok2 { - ln = []ast.Expr{l.(ast.Expr)} + cast, ok := l.(ast.Expr) + if !ok { + return nil, false + } + ln = []ast.Expr{cast} } if len(ln) != len(rn) { @@ -229,9 +264,17 @@ func match(m *Matcher, l, r interface{}) (interface{}, bool) { rn, ok2 := r.([]ast.Stmt) if ok1 || ok2 { if ok1 && !ok2 { - rn = []ast.Stmt{r.(ast.Stmt)} + cast, ok := r.(ast.Stmt) + if !ok { + return nil, false + } + rn = []ast.Stmt{cast} } else if !ok1 && ok2 { - ln = []ast.Stmt{l.(ast.Stmt)} + cast, ok := l.(ast.Stmt) + if !ok { + return nil, false + } + ln = []ast.Stmt{cast} } if len(ln) != len(rn) { @@ -251,9 +294,17 @@ func match(m *Matcher, l, r interface{}) (interface{}, bool) { rn, ok2 := r.([]*ast.Field) if ok1 || ok2 { if ok1 && !ok2 { - rn = []*ast.Field{r.(*ast.Field)} + cast, ok := r.(*ast.Field) + if !ok { + return nil, false + } + rn = []*ast.Field{cast} } else if !ok1 && ok2 { - ln = []*ast.Field{l.(*ast.Field)} + cast, ok := l.(*ast.Field) + if !ok { + return nil, false + } + ln = []*ast.Field{cast} } if len(ln) != len(rn) { @@ -268,7 +319,7 @@ func match(m *Matcher, l, r interface{}) (interface{}, bool) { } } - panic(fmt.Sprintf("unsupported comparison: %T and %T", l, r)) + return nil, false } // Match a Node with an AST node @@ -286,6 +337,13 @@ func matchNodeAST(m *Matcher, a Node, b interface{}) (interface{}, bool) { // 'a' is not a List or we'd be using its Match // 
implementation. + if len(b) != 1 { + return nil, false + } + return match(m, a, b[0]) + case []*ast.Field: + // 'a' is not a List or we'd be using its Match + // implementation if len(b) != 1 { return nil, false } @@ -317,6 +375,9 @@ func matchNodeAST(m *Matcher, a Node, b interface{}) (interface{}, bool) { return b, true case nil: return nil, a == Nil{} + case string, token.Token: + // 'a' can't be a String, Token, or Binding or we'd be using their Match implementations. + return nil, false default: panic(fmt.Sprintf("unhandled type %T", b)) } @@ -393,7 +454,7 @@ func (b Binding) Match(m *Matcher, node interface{}) (interface{}, bool) { } new, ret := match(m, b.Node, node) if ret { - m.State[b.Name] = new + m.set(b, new) } return new, ret } @@ -446,7 +507,16 @@ func (tok Token) Match(m *Matcher, node interface{}) (interface{}, bool) { } func (Nil) Match(m *Matcher, node interface{}) (interface{}, bool) { - return nil, isNil(node) || reflect.ValueOf(node).IsNil() + if isNil(node) { + return nil, true + } + v := reflect.ValueOf(node) + switch v.Kind() { + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Pointer, reflect.Slice: + return nil, v.IsNil() + default: + return nil, false + } } func (builtin Builtin) Match(m *Matcher, node interface{}) (interface{}, bool) { @@ -492,13 +562,14 @@ func (fn Symbol) Match(m *Matcher, node interface{}) (interface{}, bool) { return nil, false } - fun := r + fun := r.(ast.Expr) switch idx := fun.(type) { case *ast.IndexExpr: fun = idx.X - case *typeparams.IndexListExpr: + case *ast.IndexListExpr: fun = idx.X } + fun = astutil.Unparen(fun) switch fun := fun.(type) { case *ast.Ident: @@ -540,10 +611,12 @@ func (fn Symbol) Match(m *Matcher, node interface{}) (interface{}, bool) { func (or Or) Match(m *Matcher, node interface{}) (interface{}, bool) { for _, opt := range or.Nodes { - mc := m.fork() - if ret, ok := match(mc, opt, node); ok { - m.merge(mc) + m.push() + if ret, ok := match(m, opt, node); ok { + m.merge() return ret, true + } else { + m.pop() } } return nil, false diff --git a/vendor/honnef.co/go/tools/pattern/parser.go b/vendor/honnef.co/go/tools/pattern/parser.go index 2529051df0..ba08975240 100644 --- a/vendor/honnef.co/go/tools/pattern/parser.go +++ b/vendor/honnef.co/go/tools/pattern/parser.go @@ -1,6 +1,7 @@ package pattern import ( + "errors" "fmt" "go/ast" "go/token" @@ -11,7 +12,10 @@ type Pattern struct { Root Node // Relevant contains instances of ast.Node that could potentially // initiate a successful match of the pattern. - Relevant []reflect.Type + Relevant map[reflect.Type]struct{} + + // Mapping from binding index to binding name + Bindings []string } func MustParse(s string) Pattern { @@ -23,27 +27,29 @@ func MustParse(s string) Pattern { return pat } -func roots(node Node) []reflect.Type { +func roots(node Node, m map[reflect.Type]struct{}) { switch node := node.(type) { case Or: - var out []reflect.Type for _, el := range node.Nodes { - out = append(out, roots(el)...) 
+ roots(el, m) } - return out case Not: - return roots(node.Node) + roots(node.Node, m) case Binding: - return roots(node.Node) + roots(node.Node, m) case Nil, nil: // this branch is reached via bindings - return allTypes + for _, T := range allTypes { + m[T] = struct{}{} + } default: Ts, ok := nodeToASTTypes[reflect.TypeOf(node)] if !ok { panic(fmt.Sprintf("internal error: unhandled type %T", node)) } - return Ts + for _, T := range Ts { + m[T] = struct{}{} + } } } @@ -160,6 +166,20 @@ type Parser struct { cur item last *item items chan item + + bindings map[string]int +} + +func (p *Parser) bindingIndex(name string) int { + if p.bindings == nil { + p.bindings = map[string]int{} + } + if idx, ok := p.bindings[name]; ok { + return idx + } + idx := len(p.bindings) + p.bindings[name] = idx + return idx } func (p *Parser) Parse(s string) (Pattern, error) { @@ -185,9 +205,22 @@ func (p *Parser) Parse(s string) (Pattern, error) { if item := <-p.lex.items; item.typ != itemEOF { return Pattern{}, fmt.Errorf("unexpected token %s after end of pattern", item.typ) } + + if len(p.bindings) > 64 { + return Pattern{}, errors.New("encountered more than 64 bindings") + } + + bindings := make([]string, len(p.bindings)) + for name, idx := range p.bindings { + bindings[idx] = name + } + + relevant := map[reflect.Type]struct{}{} + roots(root, relevant) return Pattern{ Root: root, - Relevant: roots(root), + Relevant: relevant, + Bindings: bindings, }, nil } @@ -263,7 +296,14 @@ func (p *Parser) node() (Node, error) { } } - return p.populateNode(typ.val, objs) + node, err := p.populateNode(typ.val, objs) + if err != nil { + return nil, err + } + if node, ok := node.(Binding); ok { + node.idx = p.bindingIndex(node.Name) + } + return node, nil } func populateNode(typ string, objs []Node, allowTypeInfo bool) (Node, error) { @@ -287,10 +327,23 @@ func populateNode(typ string, objs []Node, allowTypeInfo bool) (Node, error) { return v.Interface().(Node), nil } } - if len(objs) != v.NumField() { - return nil, fmt.Errorf("tried to initialize node %s with %d values, expected %d", typ, len(objs), v.NumField()) + + n := -1 + for i := 0; i < T.NumField(); i++ { + if !T.Field(i).IsExported() { + break + } + n = i + } + + if len(objs) != n+1 { + return nil, fmt.Errorf("tried to initialize node %s with %d values, expected %d", typ, len(objs), n+1) } + for i := 0; i < v.NumField(); i++ { + if !T.Field(i).IsExported() { + break + } f := v.Field(i) if f.Kind() == reflect.String { if obj, ok := objs[i].(String); ok { @@ -399,10 +452,14 @@ func (p *Parser) object() (Node, error) { b = Binding{ Name: v.val, Node: o, + idx: p.bindingIndex(v.val), } } else { p.rewind() - b = Binding{Name: v.val} + b = Binding{ + Name: v.val, + idx: p.bindingIndex(v.val), + } } if p.peek().typ == itemColon { p.next() diff --git a/vendor/honnef.co/go/tools/pattern/pattern.go b/vendor/honnef.co/go/tools/pattern/pattern.go index fbbafdfac5..15886b1f3d 100644 --- a/vendor/honnef.co/go/tools/pattern/pattern.go +++ b/vendor/honnef.co/go/tools/pattern/pattern.go @@ -245,6 +245,8 @@ type SendStmt struct { type Binding struct { Name string Node Node + + idx int } type RangeStmt struct { diff --git a/vendor/honnef.co/go/tools/printf/printf.go b/vendor/honnef.co/go/tools/printf/printf.go index 754db9b16d..3ce4dc0186 100644 --- a/vendor/honnef.co/go/tools/printf/printf.go +++ b/vendor/honnef.co/go/tools/printf/printf.go @@ -2,25 +2,26 @@ // strings. 
// // It parses verbs according to the following syntax: -// Numeric -> '0'-'9' -// Letter -> 'a'-'z' | 'A'-'Z' -// Index -> '[' Numeric+ ']' -// Star -> '*' -// Star -> Index '*' // -// Precision -> Numeric+ | Star -// Width -> Numeric+ | Star +// Numeric -> '0'-'9' +// Letter -> 'a'-'z' | 'A'-'Z' +// Index -> '[' Numeric+ ']' +// Star -> '*' +// Star -> Index '*' // -// WidthAndPrecision -> Width '.' Precision -// WidthAndPrecision -> Width '.' -// WidthAndPrecision -> Width -// WidthAndPrecision -> '.' Precision -// WidthAndPrecision -> '.' +// Precision -> Numeric+ | Star +// Width -> Numeric+ | Star // -// Flag -> '+' | '-' | '#' | ' ' | '0' -// Verb -> Letter | '%' +// WidthAndPrecision -> Width '.' Precision +// WidthAndPrecision -> Width '.' +// WidthAndPrecision -> Width +// WidthAndPrecision -> '.' Precision +// WidthAndPrecision -> '.' // -// Input -> '%' [ Flag+ ] [ WidthAndPrecision ] [ Index ] Verb +// Flag -> '+' | '-' | '#' | ' ' | '0' +// Verb -> Letter | '%' +// +// Input -> '%' [ Flag+ ] [ WidthAndPrecision ] [ Index ] Verb package printf import ( diff --git a/vendor/honnef.co/go/tools/simple/analysis.go b/vendor/honnef.co/go/tools/simple/analysis.go index a67a3209fb..04ac2f5e06 100644 --- a/vendor/honnef.co/go/tools/simple/analysis.go +++ b/vendor/honnef.co/go/tools/simple/analysis.go @@ -4,6 +4,7 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" "honnef.co/go/tools/analysis/facts/generated" + "honnef.co/go/tools/analysis/facts/purity" "honnef.co/go/tools/analysis/lint" "honnef.co/go/tools/internal/passes/buildir" ) @@ -55,7 +56,7 @@ var Analyzers = lint.InitializeAnalyzers(Docs, map[string]*analysis.Analyzer{ }, "S1011": { Run: CheckLoopAppend, - Requires: []*analysis.Analyzer{inspect.Analyzer, generated.Analyzer}, + Requires: []*analysis.Analyzer{inspect.Analyzer, generated.Analyzer, purity.Analyzer}, }, "S1012": { Run: CheckTimeSince, diff --git a/vendor/honnef.co/go/tools/simple/doc.go b/vendor/honnef.co/go/tools/simple/doc.go index 11cc70c3a1..421f851344 100644 --- a/vendor/honnef.co/go/tools/simple/doc.go +++ b/vendor/honnef.co/go/tools/simple/doc.go @@ -137,8 +137,21 @@ making \'s[n:len(s)]\' and \'s[n:]\' equivalent.`, Before: ` for _, e := range y { x = append(x, e) +} + +for i := range y { + x = append(x, y[i]) +} + +for i := range y { + v := y[i] + x = append(x, v) }`, - After: `x = append(x, y...)`, + + After: ` +x = append(x, y...) +x = append(x, y...) +x = append(x, y...)`, Since: "2017.1", // MergeIfAll because y might not be a slice under all build tags. 
MergeIf: lint.MergeIfAll, @@ -267,9 +280,9 @@ Given the following shared definitions type T1 string type T2 int - + func (T2) String() string { return "Hello, world" } - + var x string var y T1 var z T2 diff --git a/vendor/honnef.co/go/tools/simple/lint.go b/vendor/honnef.co/go/tools/simple/lint.go index 0abc5aef91..8458f7be69 100644 --- a/vendor/honnef.co/go/tools/simple/lint.go +++ b/vendor/honnef.co/go/tools/simple/lint.go @@ -13,6 +13,7 @@ import ( "honnef.co/go/tools/analysis/code" "honnef.co/go/tools/analysis/edit" + "honnef.co/go/tools/analysis/facts/purity" "honnef.co/go/tools/analysis/lint" "honnef.co/go/tools/analysis/report" "honnef.co/go/tools/go/ast/astutil" @@ -175,20 +176,20 @@ func CheckLoopCopy(pass *analysis.Pass) (interface{}, error) { report.ShortRange(), report.Fixes(edit.Fix("replace loop with assignment", edit.ReplaceWithNode(pass.Fset, node, r)))) } else { - opts := []report.Option{ - report.ShortRange(), - report.FilterGenerated(), - } tv, err := types.Eval(pass.Fset, pass.Pkg, node.Pos(), "copy") if err == nil && tv.IsBuiltin() { + to := "to" + from := "from" src := m.State["src"].(ast.Expr) if TsrcArray { + from = "from[:]" src = &ast.SliceExpr{ X: src, } } dst := m.State["dst"].(ast.Expr) if TdstArray { + to = "to[:]" dst = &ast.SliceExpr{ X: dst, } @@ -198,9 +199,13 @@ func CheckLoopCopy(pass *analysis.Pass) (interface{}, error) { Fun: &ast.Ident{Name: "copy"}, Args: []ast.Expr{dst, src}, } - opts = append(opts, report.Fixes(edit.Fix("replace loop with call to copy()", edit.ReplaceWithNode(pass.Fset, node, r)))) + opts := []report.Option{ + report.ShortRange(), + report.FilterGenerated(), + report.Fixes(edit.Fix("replace loop with call to copy()", edit.ReplaceWithNode(pass.Fset, node, r))), + } + report.Report(pass, node, fmt.Sprintf("should use copy(%s, %s) instead of a loop", to, from), opts...) } - report.Report(pass, node, "should use copy() instead of a loop", opts...) 
} } code.Preorder(pass, fn, (*ast.ForStmt)(nil), (*ast.RangeStmt)(nil)) @@ -232,7 +237,7 @@ func CheckIfBoolCmp(pass *analysis.Pass) (interface{}, error) { other = expr.X } - ok := typeutil.All(pass.TypesInfo.TypeOf(other), func(term *typeparams.Term) bool { + ok := typeutil.All(pass.TypesInfo.TypeOf(other), func(term *types.Term) bool { basic, ok := term.Type().Underlying().(*types.Basic) return ok && basic.Kind() == types.Bool }) @@ -597,12 +602,11 @@ func negate(expr ast.Expr) ast.Expr { // CheckRedundantNilCheckWithLen checks for the following redundant nil-checks: // -// if x == nil || len(x) == 0 {} -// if x != nil && len(x) != 0 {} -// if x != nil && len(x) == N {} (where N != 0) -// if x != nil && len(x) > N {} -// if x != nil && len(x) >= N {} (where N != 0) -// +// if x == nil || len(x) == 0 {} +// if x != nil && len(x) != 0 {} +// if x != nil && len(x) == N {} (where N != 0) +// if x != nil && len(x) > N {} +// if x != nil && len(x) >= N {} (where N != 0) func CheckRedundantNilCheckWithLen(pass *analysis.Pass) (interface{}, error) { isConstZero := func(expr ast.Expr) (isConst bool, isZero bool) { _, ok := expr.(*ast.BasicLit) @@ -705,7 +709,7 @@ func CheckRedundantNilCheckWithLen(pass *analysis.Pass) (interface{}, error) { // finally check that xx type is one of array, slice, map or chan // this is to prevent false positive in case if xx is a pointer to an array typ := pass.TypesInfo.TypeOf(xx) - ok = typeutil.All(typ, func(term *typeparams.Term) bool { + ok = typeutil.All(typ, func(term *types.Term) bool { switch term.Type().Underlying().(type) { case *types.Slice: return true @@ -715,7 +719,7 @@ func CheckRedundantNilCheckWithLen(pass *analysis.Pass) (interface{}, error) { return true case *types.Pointer: return false - case *typeparams.TypeParam: + case *types.TypeParam: return false default: lint.ExhaustiveTypeSwitch(term.Type().Underlying()) @@ -766,22 +770,63 @@ func refersTo(pass *analysis.Pass, expr ast.Expr, ident types.Object) bool { } var checkLoopAppendQ = pattern.MustParse(` +(Or (RangeStmt (Ident "_") val@(Object _) _ x - [(AssignStmt [lhs] "=" [(CallExpr (Builtin "append") [lhs val])])]) `) + [(AssignStmt [lhs] "=" [(CallExpr (Builtin "append") [lhs val])])]) + (RangeStmt + idx@(Object _) + nil + _ + x + [(AssignStmt [lhs] "=" [(CallExpr (Builtin "append") [lhs (IndexExpr x idx)])])]) + (RangeStmt + idx@(Object _) + nil + _ + x + [(AssignStmt val@(Object _) ":=" (IndexExpr x idx)) + (AssignStmt [lhs] "=" [(CallExpr (Builtin "append") [lhs val])])]))`) func CheckLoopAppend(pass *analysis.Pass) (interface{}, error) { + pure := pass.ResultOf[purity.Analyzer].(purity.Result) + fn := func(node ast.Node) { m, ok := code.Match(pass, checkLoopAppendQ, node) if !ok { return } - val := m.State["val"].(types.Object) - if refersTo(pass, m.State["lhs"].(ast.Expr), val) { + if val, ok := m.State["val"].(types.Object); ok && refersTo(pass, m.State["lhs"].(ast.Expr), val) { + return + } + + if m.State["idx"] != nil && code.MayHaveSideEffects(pass, m.State["x"].(ast.Expr), pure) { + // When using an index-based loop, x gets evaluated repeatedly and thus should be pure. + // This doesn't matter for value-based loops, because x only gets evaluated once. + return + } + + if idx, ok := m.State["idx"].(types.Object); ok && refersTo(pass, m.State["lhs"].(ast.Expr), idx) { + // The lhs mustn't refer to the index loop variable. + return + } + + if code.MayHaveSideEffects(pass, m.State["lhs"].(ast.Expr), pure) { + // The lhs may be dynamic and return different values on each iteration. 
For example: + // + // func bar() map[int][]int { /* return one of several maps */ } + // + // func foo(x []int, y [][]int) { + // for i := range x { + // bar()[0] = append(bar()[0], x[i]) + // } + // } + // + // The dynamic nature of the lhs might also affect the value of the index. return } @@ -985,7 +1030,7 @@ func CheckSimplerStructConversion(pass *analysis.Pass) (interface{}, error) { return } // All fields must be initialized from the same object - if ident != nil && ident.Obj != id.Obj { + if ident != nil && pass.TypesInfo.ObjectOf(ident) != pass.TypesInfo.ObjectOf(id) { return } typ2, _ = t.(*types.Named) @@ -1046,7 +1091,7 @@ func CheckTrim(pass *analysis.Pass) (interface{}, error) { switch node1 := node1.(type) { case *ast.Ident: - return node1.Obj == node2.(*ast.Ident).Obj + return pass.TypesInfo.ObjectOf(node1) == pass.TypesInfo.ObjectOf(node2.(*ast.Ident)) case *ast.SelectorExpr, *ast.IndexExpr: return astutil.Equal(node1, node2) case *ast.BasicLit: @@ -1372,7 +1417,7 @@ func CheckDeclareAssign(pass *analysis.Pass) (interface{}, error) { } for _, lhs := range assign.Lhs { if oident, ok := lhs.(*ast.Ident); ok { - if oident.Obj == ident.Obj { + if pass.TypesInfo.ObjectOf(oident) == pass.TypesInfo.ObjectOf(ident) { num++ } } @@ -1413,7 +1458,7 @@ func CheckDeclareAssign(pass *analysis.Pass) (interface{}, error) { if !ok { continue } - if vspec.Names[0].Obj != ident.Obj { + if pass.TypesInfo.ObjectOf(vspec.Names[0]) != pass.TypesInfo.ObjectOf(ident) { continue } @@ -1618,11 +1663,11 @@ func CheckNilCheckAroundRange(pass *analysis.Pass) (interface{}, error) { if !ok { return } - ok = typeutil.All(m.State["x"].(types.Object).Type(), func(term *typeparams.Term) bool { + ok = typeutil.All(m.State["x"].(types.Object).Type(), func(term *types.Term) bool { switch term.Type().Underlying().(type) { case *types.Slice, *types.Map: return true - case *typeparams.TypeParam, *types.Chan, *types.Pointer: + case *types.TypeParam, *types.Chan, *types.Pointer: return false default: lint.ExhaustiveTypeSwitch(term.Type().Underlying()) diff --git a/vendor/honnef.co/go/tools/staticcheck/analysis.go b/vendor/honnef.co/go/tools/staticcheck/analysis.go index 171467a8f9..210c348c9c 100644 --- a/vendor/honnef.co/go/tools/staticcheck/analysis.go +++ b/vendor/honnef.co/go/tools/staticcheck/analysis.go @@ -174,7 +174,7 @@ var Analyzers = lint.InitializeAnalyzers(Docs, map[string]*analysis.Analyzer{ Requires: []*analysis.Analyzer{inspect.Analyzer, tokenfile.Analyzer}, }, "SA4017": { - Run: CheckPureFunctions, + Run: CheckSideEffectFreeCalls, Requires: []*analysis.Analyzer{buildir.Analyzer, purity.Analyzer}, }, "SA4018": { diff --git a/vendor/honnef.co/go/tools/staticcheck/doc.go b/vendor/honnef.co/go/tools/staticcheck/doc.go index 3e991b6c4a..c28fdbf04b 100644 --- a/vendor/honnef.co/go/tools/staticcheck/doc.go +++ b/vendor/honnef.co/go/tools/staticcheck/doc.go @@ -506,7 +506,7 @@ falsify results.`, }, "SA4017": { - Title: `A pure function's return value is discarded, making the call pointless`, + Title: `Discarding the return values of a function without side effects, making the call pointless`, Since: "2017.1", Severity: lint.SeverityWarning, MergeIf: lint.MergeIfAll, @@ -1286,7 +1286,7 @@ the \'else\' branch. This means that in the following example if x, ok := x.(int); ok { // ... 
} else { - fmt.Println("unexpected type %T", x) + fmt.Printf("unexpected type %T", x) } \'x\' in the \'else\' branch will refer to the \'x\' from \'x, ok diff --git a/vendor/honnef.co/go/tools/staticcheck/fakejson/encode.go b/vendor/honnef.co/go/tools/staticcheck/fakejson/encode.go index 15e302de4f..f65f2ddf9f 100644 --- a/vendor/honnef.co/go/tools/staticcheck/fakejson/encode.go +++ b/vendor/honnef.co/go/tools/staticcheck/fakejson/encode.go @@ -46,18 +46,18 @@ type UnsupportedTypeError struct { type encoder struct { // TODO we track addressable and non-addressable instances separately out of an abundance of caution. We don't know // if this is actually required for correctness. - seenCanAddr typeutil.Map - seenCantAddr typeutil.Map + seenCanAddr typeutil.Map[struct{}] + seenCantAddr typeutil.Map[struct{}] } func (enc *encoder) newTypeEncoder(t fakereflect.TypeAndCanAddr, stack string) *UnsupportedTypeError { - var m *typeutil.Map + var m *typeutil.Map[struct{}] if t.CanAddr() { m = &enc.seenCanAddr } else { m = &enc.seenCantAddr } - if ok := m.At(t.Type); ok != nil { + if _, ok := m.At(t.Type); ok { return nil } m.Set(t.Type, struct{}{}) diff --git a/vendor/honnef.co/go/tools/staticcheck/fakexml/marshal.go b/vendor/honnef.co/go/tools/staticcheck/fakexml/marshal.go index e592bfca57..64fce5f5b2 100644 --- a/vendor/honnef.co/go/tools/staticcheck/fakexml/marshal.go +++ b/vendor/honnef.co/go/tools/staticcheck/fakexml/marshal.go @@ -28,8 +28,8 @@ func Marshal(v types.Type) error { type Encoder struct { // TODO we track addressable and non-addressable instances separately out of an abundance of caution. We don't know // if this is actually required for correctness. - seenCanAddr typeutil.Map - seenCantAddr typeutil.Map + seenCanAddr typeutil.Map[struct{}] + seenCantAddr typeutil.Map[struct{}] } func NewEncoder() *Encoder { @@ -114,13 +114,13 @@ func (err *CyclicTypeError) Error() string { // marshalValue writes one or more XML elements representing val. // If val was obtained from a struct field, finfo must have its details. 
func (e *Encoder) marshalValue(val fakereflect.TypeAndCanAddr, finfo *fieldInfo, startTemplate *StartElement, stack string) error { - var m *typeutil.Map + var m *typeutil.Map[struct{}] if val.CanAddr() { m = &e.seenCanAddr } else { m = &e.seenCantAddr } - if ok := m.At(val.Type); ok != nil { + if _, ok := m.At(val.Type); ok { return nil } m.Set(val.Type, struct{}{}) diff --git a/vendor/honnef.co/go/tools/staticcheck/lint.go b/vendor/honnef.co/go/tools/staticcheck/lint.go index 3dd0fece07..b7c0f91375 100644 --- a/vendor/honnef.co/go/tools/staticcheck/lint.go +++ b/vendor/honnef.co/go/tools/staticcheck/lint.go @@ -524,13 +524,13 @@ func checkPrintfCallImpl(carg *Argument, f ir.Value, args []ir.Value) { return true } - var seen typeutil.Map + var seen typeutil.Map[struct{}] var checkType func(verb rune, T types.Type, top bool) bool checkType = func(verb rune, T types.Type, top bool) bool { if top { - seen = typeutil.Map{} + seen = typeutil.Map[struct{}]{} } - if ok := seen.At(T); ok != nil { + if _, ok := seen.At(T); ok { return true } seen.Set(T, struct{}{}) @@ -1144,17 +1144,31 @@ func CheckDubiousDeferInChannelRangeLoop(pass *analysis.Pass) (interface{}, erro if !ok { return } + + stmts := []*ast.DeferStmt{} + exits := false fn2 := func(node ast.Node) bool { switch stmt := node.(type) { case *ast.DeferStmt: - report.Report(pass, stmt, "defers in this range loop won't run unless the channel gets closed") + stmts = append(stmts, stmt) case *ast.FuncLit: // Don't look into function bodies return false + case *ast.ReturnStmt: + exits = true + case *ast.BranchStmt: + exits = node.(*ast.BranchStmt).Tok == token.BREAK } return true } ast.Inspect(loop.Body, fn2) + + if exits { + return + } + for _, stmt := range stmts { + report.Report(pass, stmt, "defers in this range loop won't run unless the channel gets closed") + } } code.Preorder(pass, fn, (*ast.RangeStmt)(nil)) return nil, nil @@ -1296,7 +1310,7 @@ func CheckLhsRhsIdentical(pass *analysis.Pass) (interface{}, error) { // no terms, so floats are a possibility return true } - return tset.Any(func(term *typeparams.Term) bool { + return tset.Any(func(term *types.Term) bool { switch typ := term.Type().Underlying().(type) { case *types.Basic: kind := typ.Kind() @@ -1554,7 +1568,7 @@ func CheckEarlyDefer(pass *analysis.Pass) (interface{}, error) { if !ok { continue } - if ident.Obj != lhs.Obj { + if pass.TypesInfo.ObjectOf(ident) != pass.TypesInfo.ObjectOf(lhs) { continue } if sel.Sel.Name != "Close" { @@ -1598,7 +1612,7 @@ func CheckEmptyCriticalSection(pass *analysis.Pass) (interface{}, error) { if !ok { return nil, "", false } - call, ok := expr.X.(*ast.CallExpr) + call, ok := astutil.Unparen(expr.X).(*ast.CallExpr) if !ok { return nil, "", false } @@ -2085,7 +2099,7 @@ func CheckLoopCondition(pass *analysis.Pass) (interface{}, error) { if !ok { return true } - if x.Obj != lhs.Obj { + if pass.TypesInfo.ObjectOf(x) != pass.TypesInfo.ObjectOf(lhs) { return true } if _, ok := loop.Post.(*ast.IncDecStmt); !ok { @@ -2225,13 +2239,13 @@ func CheckIneffectiveLoop(pass *analysis.Pass) (interface{}, error) { if body == nil { return } - labels := map[*ast.Object]ast.Stmt{} + labels := map[types.Object]ast.Stmt{} ast.Inspect(body, func(node ast.Node) bool { label, ok := node.(*ast.LabeledStmt) if !ok { return true } - labels[label.Label.Obj] = label.Stmt + labels[pass.TypesInfo.ObjectOf(label.Label)] = label.Stmt return true }) @@ -2243,7 +2257,7 @@ func CheckIneffectiveLoop(pass *analysis.Pass) (interface{}, error) { body = node.Body loop = node case 
*ast.RangeStmt: - ok := typeutil.All(pass.TypesInfo.TypeOf(node.X), func(term *typeparams.Term) bool { + ok := typeutil.All(pass.TypesInfo.TypeOf(node.X), func(term *types.Term) bool { switch term.Type().Underlying().(type) { case *types.Slice, *types.Chan, *types.Basic, *types.Pointer, *types.Array: return true @@ -2283,11 +2297,11 @@ func CheckIneffectiveLoop(pass *analysis.Pass) (interface{}, error) { case *ast.BranchStmt: switch stmt.Tok { case token.BREAK: - if stmt.Label == nil || labels[stmt.Label.Obj] == loop { + if stmt.Label == nil || labels[pass.TypesInfo.ObjectOf(stmt.Label)] == loop { unconditionalExit = stmt } case token.CONTINUE: - if stmt.Label == nil || labels[stmt.Label.Obj] == loop { + if stmt.Label == nil || labels[pass.TypesInfo.ObjectOf(stmt.Label)] == loop { unconditionalExit = nil return false } @@ -2309,7 +2323,7 @@ func CheckIneffectiveLoop(pass *analysis.Pass) (interface{}, error) { unconditionalExit = nil return false case token.CONTINUE: - if branch.Label != nil && labels[branch.Label.Obj] != loop { + if branch.Label != nil && labels[pass.TypesInfo.ObjectOf(branch.Label)] != loop { return true } unconditionalExit = nil @@ -2899,7 +2913,7 @@ func CheckRepeatedIfElse(pass *analysis.Pass) (interface{}, error) { func CheckSillyBitwiseOps(pass *analysis.Pass) (interface{}, error) { fn := func(node ast.Node) { binop := node.(*ast.BinaryExpr) - if !typeutil.All(pass.TypesInfo.TypeOf(binop), func(term *typeparams.Term) bool { + if !typeutil.All(pass.TypesInfo.TypeOf(binop), func(term *types.Term) bool { b, ok := term.Type().Underlying().(*types.Basic) if !ok { return false @@ -3009,7 +3023,7 @@ func CheckNonOctalFileMode(pass *analysis.Pass) (interface{}, error) { return nil, nil } -func CheckPureFunctions(pass *analysis.Pass) (interface{}, error) { +func CheckSideEffectFreeCalls(pass *analysis.Pass) (interface{}, error) { pure := pass.ResultOf[purity.Analyzer].(purity.Result) fnLoop: @@ -3055,7 +3069,7 @@ fnLoop: // special case for benchmarks in the fmt package continue } - report.Report(pass, ins, fmt.Sprintf("%s is a pure function but its return value is ignored", callee.Object().Name())) + report.Report(pass, ins, fmt.Sprintf("%s doesn't have side effects and its return value is ignored", callee.Object().Name())) } } } @@ -3095,32 +3109,25 @@ func CheckDeprecated(pass *analysis.Pass) (interface{}, error) { return } if ok { - switch std.AlternativeAvailableSince { - case knowledge.DeprecatedNeverUse: - // This should never be used, regardless of the - // targeted Go version. Examples include insecure - // cryptography or inherently broken APIs. - // - // We always want to flag these. - case knowledge.DeprecatedUseNoLonger: - // This should no longer be used. Using it with - // older Go versions might still make sense. - if !code.IsGoVersion(pass, std.DeprecatedSince) { - return - } - default: - if std.AlternativeAvailableSince < 0 { - panic(fmt.Sprintf("unhandled case %d", std.AlternativeAvailableSince)) - } - // Look for the first available alternative, not the first - // version something was deprecated in. If a function was - // deprecated in Go 1.6, an alternative has been available - // already in 1.0, and we're targeting 1.2, it still - // makes sense to use the alternative from 1.0, to be - // future-proof. - if !code.IsGoVersion(pass, std.AlternativeAvailableSince) { - return - } + // In the past, we made use of the AlternativeAvailableSince field. 
If a function was deprecated in Go + // 1.6 and an alternative had been available in Go 1.0, then we'd recommend using the alternative even + // if targeting Go 1.2. The idea was to suggest writing future-proof code by using already-existing + // alternatives. This had a major flaw, however: the user would need to use at least Go 1.6 for + // Staticcheck to know that the function had been deprecated. Thus, targeting Go 1.2 and using Go 1.2 + // would behave differently from targeting Go 1.2 and using Go 1.6. This is especially a problem if the + // user tries to ignore the warning. Depending on the Go version in use, the ignore directive may or may + // not match, causing a warning of its own. + // + // To avoid this issue, we no longer try to be smart. We now only compare the targeted version against + // the version that deprecated an object. + // + // Unfortunately, this issue also applies to AlternativeAvailableSince == DeprecatedNeverUse. Even though it + // is only applied to seriously flawed API, such as broken cryptography, users may wish to ignore those + // warnings. + // + // See also https://staticcheck.io/issues/1318. + if !code.IsGoVersion(pass, std.DeprecatedSince) { + return } } @@ -3166,6 +3173,8 @@ func CheckDeprecated(pass *analysis.Pass) (interface{}, error) { if fn, ok := node.(*ast.FuncDecl); ok { tfn = pass.TypesInfo.ObjectOf(fn.Name) } + + // FIXME(dh): this misses dot-imported objects sel, ok := node.(*ast.SelectorExpr) if !ok { return true @@ -3173,13 +3182,31 @@ func CheckDeprecated(pass *analysis.Pass) (interface{}, error) { obj := pass.TypesInfo.ObjectOf(sel.Sel) if obj_, ok := obj.(*types.Func); ok { - obj = typeparams.OriginMethod(obj_) + obj = obj_.Origin() } if obj.Pkg() == nil { return true } - if pass.Pkg == obj.Pkg() || obj.Pkg().Path()+"_test" == pass.Pkg.Path() { - // Don't flag stuff in our own package + + if obj.Pkg() == pass.Pkg { + // A package is allowed to use its own deprecated objects + return true + } + + // A package "foo" has two related packages "foo_test" and "foo.test", for external tests and the package main + // generated by 'go test' respectively. "foo_test" can import and use "foo", "foo.test" imports and uses "foo" + // and "foo_test". + + if strings.TrimSuffix(pass.Pkg.Path(), "_test") == obj.Pkg().Path() { + // foo_test (the external tests of foo) can use objects from foo. + return true + } + if strings.TrimSuffix(pass.Pkg.Path(), ".test") == obj.Pkg().Path() { + // foo.test (the main package of foo's tests) can use objects from foo. + return true + } + if strings.TrimSuffix(pass.Pkg.Path(), ".test") == strings.TrimSuffix(obj.Pkg().Path(), "_test") { + // foo.test (the main package of foo's tests) can use objects from foo's external tests. 
return true } @@ -3208,6 +3235,19 @@ func CheckDeprecated(pass *analysis.Pass) (interface{}, error) { } } + if strings.TrimSuffix(pass.Pkg.Path(), "_test") == path { + // foo_test can import foo + return + } + if strings.TrimSuffix(pass.Pkg.Path(), ".test") == path { + // foo.test can import foo + return + } + if strings.TrimSuffix(pass.Pkg.Path(), ".test") == strings.TrimSuffix(path, "_test") { + // foo.test can import foo_test + return + } + handleDeprecation(depr, spec.Path, path, path, nil) } } @@ -3421,7 +3461,7 @@ func CheckMapBytesKey(pass *analysis.Pass) (interface{}, error) { } tset := typeutil.NewTypeSet(conv.X.Type()) // If at least one of the types is []byte, then it's more efficient to inline the conversion - if !tset.Any(func(term *typeparams.Term) bool { + if !tset.Any(func(term *types.Term) bool { s, ok := term.Type().Underlying().(*types.Slice) return ok && s.Elem().Underlying() == types.Universe.Lookup("byte").Type() }) { @@ -3996,12 +4036,12 @@ func CheckImpossibleTypeAssertion(pass *analysis.Pass) (interface{}, error) { ms := msc.MethodSet(left) for i := 0; i < righti.NumMethods(); i++ { - mr := righti.Method(i) + mr := righti.Method(i).Origin() sel := ms.Lookup(mr.Pkg(), mr.Name()) if sel == nil { continue } - ml := sel.Obj().(*types.Func) + ml := sel.Obj().(*types.Func).Origin() if types.AssignableTo(ml.Type(), mr.Type()) { continue } @@ -4135,7 +4175,7 @@ func CheckMaybeNil(pass *analysis.Pass) (interface{}, error) { ptr = instr.Addr case *ir.IndexAddr: ptr = instr.X - if typeutil.All(ptr.Type(), func(term *typeparams.Term) bool { + if typeutil.All(ptr.Type(), func(term *types.Term) bool { if _, ok := term.Type().Underlying().(*types.Slice); ok { return true } diff --git a/vendor/honnef.co/go/tools/stylecheck/lint.go b/vendor/honnef.co/go/tools/stylecheck/lint.go index 4203c610fa..a15e6d97ac 100644 --- a/vendor/honnef.co/go/tools/stylecheck/lint.go +++ b/vendor/honnef.co/go/tools/stylecheck/lint.go @@ -24,7 +24,6 @@ import ( "honnef.co/go/tools/internal/passes/buildir" "honnef.co/go/tools/pattern" - "golang.org/x/exp/typeparams" "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/ast/inspector" @@ -417,9 +416,9 @@ func CheckErrorStrings(pass *analysis.Pass) (interface{}, error) { continue } for _, c := range word[n:] { - if unicode.IsUpper(c) { - // Word is probably an initialism or - // multi-word function name + if unicode.IsUpper(c) || unicode.IsDigit(c) { + // Word is probably an initialism or multi-word function name. Digits cover elliptic curves like + // P384. 
continue instrLoop } } @@ -846,7 +845,7 @@ func CheckExportedFunctionDocs(pass *analysis.Pass) (interface{}, error) { switch T := T.(type) { case *ast.IndexExpr: ident = T.X.(*ast.Ident) - case *typeparams.IndexListExpr: + case *ast.IndexListExpr: ident = T.X.(*ast.Ident) case *ast.Ident: ident = T diff --git a/vendor/honnef.co/go/tools/stylecheck/names.go b/vendor/honnef.co/go/tools/stylecheck/names.go index f038d0632f..d37bf6baf6 100644 --- a/vendor/honnef.co/go/tools/stylecheck/names.go +++ b/vendor/honnef.co/go/tools/stylecheck/names.go @@ -114,7 +114,11 @@ func CheckNames(pass *analysis.Pass) (interface{}, error) { return } - if code.IsInTest(pass, v) && (strings.HasPrefix(v.Name.Name, "Example") || strings.HasPrefix(v.Name.Name, "Test") || strings.HasPrefix(v.Name.Name, "Benchmark")) { + if code.IsInTest(pass, v) && + (strings.HasPrefix(v.Name.Name, "Example") || + strings.HasPrefix(v.Name.Name, "Test") || + strings.HasPrefix(v.Name.Name, "Benchmark") || + strings.HasPrefix(v.Name.Name, "Fuzz")) { return } diff --git a/vendor/honnef.co/go/tools/unused/edge.go b/vendor/honnef.co/go/tools/unused/edge.go deleted file mode 100644 index 6d32946d29..0000000000 --- a/vendor/honnef.co/go/tools/unused/edge.go +++ /dev/null @@ -1,59 +0,0 @@ -package unused - -//go:generate go run golang.org/x/tools/cmd/stringer@master -type edgeKind -type edgeKind uint64 - -func (e edgeKind) is(o edgeKind) bool { - return e&o != 0 -} - -const ( - edgeAlias edgeKind = 1 << iota - edgeBlankField - edgeAnonymousStruct - edgeCgoExported - edgeConstGroup - edgeElementType - edgeEmbeddedInterface - edgeExportedConstant - edgeExportedField - edgeExportedFunction - edgeExportedMethod - edgeExportedType - edgeExportedVariable - edgeExtendsExportedFields - edgeExtendsExportedMethodSet - edgeFieldAccess - edgeFunctionArgument - edgeFunctionResult - edgeFunctionSignature - edgeImplements - edgeInstructionOperand - edgeInterfaceCall - edgeInterfaceMethod - edgeKeyType - edgeLinkname - edgeMainFunction - edgeNamedType - edgeNetRPCRegister - edgeNoCopySentinel - edgeProvidesMethod - edgeReceiver - edgeRuntimeFunction - edgeSignature - edgeStructConversion - edgeTestSink - edgeTupleElement - edgeType - edgeTypeName - edgeUnderlyingType - edgePointerType - edgeUnsafeConversion - edgeUsedConstant - edgeVarDecl - edgeIgnored - edgeSamePointer - edgeTypeParam - edgeTypeArg - edgeUnionTerm -) diff --git a/vendor/honnef.co/go/tools/unused/edgekind_string.go b/vendor/honnef.co/go/tools/unused/edgekind_string.go deleted file mode 100644 index ae27b2507d..0000000000 --- a/vendor/honnef.co/go/tools/unused/edgekind_string.go +++ /dev/null @@ -1,119 +0,0 @@ -// Code generated by "stringer -type edgeKind"; DO NOT EDIT. - -package unused - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. 
- var x [1]struct{} - _ = x[edgeAlias-1] - _ = x[edgeBlankField-2] - _ = x[edgeAnonymousStruct-4] - _ = x[edgeCgoExported-8] - _ = x[edgeConstGroup-16] - _ = x[edgeElementType-32] - _ = x[edgeEmbeddedInterface-64] - _ = x[edgeExportedConstant-128] - _ = x[edgeExportedField-256] - _ = x[edgeExportedFunction-512] - _ = x[edgeExportedMethod-1024] - _ = x[edgeExportedType-2048] - _ = x[edgeExportedVariable-4096] - _ = x[edgeExtendsExportedFields-8192] - _ = x[edgeExtendsExportedMethodSet-16384] - _ = x[edgeFieldAccess-32768] - _ = x[edgeFunctionArgument-65536] - _ = x[edgeFunctionResult-131072] - _ = x[edgeFunctionSignature-262144] - _ = x[edgeImplements-524288] - _ = x[edgeInstructionOperand-1048576] - _ = x[edgeInterfaceCall-2097152] - _ = x[edgeInterfaceMethod-4194304] - _ = x[edgeKeyType-8388608] - _ = x[edgeLinkname-16777216] - _ = x[edgeMainFunction-33554432] - _ = x[edgeNamedType-67108864] - _ = x[edgeNetRPCRegister-134217728] - _ = x[edgeNoCopySentinel-268435456] - _ = x[edgeProvidesMethod-536870912] - _ = x[edgeReceiver-1073741824] - _ = x[edgeRuntimeFunction-2147483648] - _ = x[edgeSignature-4294967296] - _ = x[edgeStructConversion-8589934592] - _ = x[edgeTestSink-17179869184] - _ = x[edgeTupleElement-34359738368] - _ = x[edgeType-68719476736] - _ = x[edgeTypeName-137438953472] - _ = x[edgeUnderlyingType-274877906944] - _ = x[edgePointerType-549755813888] - _ = x[edgeUnsafeConversion-1099511627776] - _ = x[edgeUsedConstant-2199023255552] - _ = x[edgeVarDecl-4398046511104] - _ = x[edgeIgnored-8796093022208] - _ = x[edgeSamePointer-17592186044416] - _ = x[edgeTypeParam-35184372088832] - _ = x[edgeTypeArg-70368744177664] - _ = x[edgeUnionTerm-140737488355328] -} - -const _edgeKind_name = "edgeAliasedgeBlankFieldedgeAnonymousStructedgeCgoExportededgeConstGroupedgeElementTypeedgeEmbeddedInterfaceedgeExportedConstantedgeExportedFieldedgeExportedFunctionedgeExportedMethodedgeExportedTypeedgeExportedVariableedgeExtendsExportedFieldsedgeExtendsExportedMethodSetedgeFieldAccessedgeFunctionArgumentedgeFunctionResultedgeFunctionSignatureedgeImplementsedgeInstructionOperandedgeInterfaceCalledgeInterfaceMethodedgeKeyTypeedgeLinknameedgeMainFunctionedgeNamedTypeedgeNetRPCRegisteredgeNoCopySentineledgeProvidesMethodedgeReceiveredgeRuntimeFunctionedgeSignatureedgeStructConversionedgeTestSinkedgeTupleElementedgeTypeedgeTypeNameedgeUnderlyingTypeedgePointerTypeedgeUnsafeConversionedgeUsedConstantedgeVarDecledgeIgnorededgeSamePointeredgeTypeParamedgeTypeArgedgeUnionTerm" - -var _edgeKind_map = map[edgeKind]string{ - 1: _edgeKind_name[0:9], - 2: _edgeKind_name[9:23], - 4: _edgeKind_name[23:42], - 8: _edgeKind_name[42:57], - 16: _edgeKind_name[57:71], - 32: _edgeKind_name[71:86], - 64: _edgeKind_name[86:107], - 128: _edgeKind_name[107:127], - 256: _edgeKind_name[127:144], - 512: _edgeKind_name[144:164], - 1024: _edgeKind_name[164:182], - 2048: _edgeKind_name[182:198], - 4096: _edgeKind_name[198:218], - 8192: _edgeKind_name[218:243], - 16384: _edgeKind_name[243:271], - 32768: _edgeKind_name[271:286], - 65536: _edgeKind_name[286:306], - 131072: _edgeKind_name[306:324], - 262144: _edgeKind_name[324:345], - 524288: _edgeKind_name[345:359], - 1048576: _edgeKind_name[359:381], - 2097152: _edgeKind_name[381:398], - 4194304: _edgeKind_name[398:417], - 8388608: _edgeKind_name[417:428], - 16777216: _edgeKind_name[428:440], - 33554432: _edgeKind_name[440:456], - 67108864: _edgeKind_name[456:469], - 134217728: _edgeKind_name[469:487], - 268435456: _edgeKind_name[487:505], - 536870912: _edgeKind_name[505:523], - 
1073741824: _edgeKind_name[523:535], - 2147483648: _edgeKind_name[535:554], - 4294967296: _edgeKind_name[554:567], - 8589934592: _edgeKind_name[567:587], - 17179869184: _edgeKind_name[587:599], - 34359738368: _edgeKind_name[599:615], - 68719476736: _edgeKind_name[615:623], - 137438953472: _edgeKind_name[623:635], - 274877906944: _edgeKind_name[635:653], - 549755813888: _edgeKind_name[653:668], - 1099511627776: _edgeKind_name[668:688], - 2199023255552: _edgeKind_name[688:704], - 4398046511104: _edgeKind_name[704:715], - 8796093022208: _edgeKind_name[715:726], - 17592186044416: _edgeKind_name[726:741], - 35184372088832: _edgeKind_name[741:754], - 70368744177664: _edgeKind_name[754:765], - 140737488355328: _edgeKind_name[765:778], -} - -func (i edgeKind) String() string { - if str, ok := _edgeKind_map[i]; ok { - return str - } - return "edgeKind(" + strconv.FormatInt(int64(i), 10) + ")" -} diff --git a/vendor/honnef.co/go/tools/unused/implements.go b/vendor/honnef.co/go/tools/unused/implements.go index f62018572b..2a202c6d73 100644 --- a/vendor/honnef.co/go/tools/unused/implements.go +++ b/vendor/honnef.co/go/tools/unused/implements.go @@ -37,7 +37,7 @@ func sameId(obj types.Object, pkg *types.Package, name string) bool { return pkg.Path() == obj.Pkg().Path() } -func (g *graph) implements(V types.Type, T *types.Interface, msV *types.MethodSet) ([]*types.Selection, bool) { +func implements(V types.Type, T *types.Interface, msV *types.MethodSet) ([]*types.Selection, bool) { // fast path for common case if T.Empty() { return nil, true diff --git a/vendor/honnef.co/go/tools/unused/runtime.go b/vendor/honnef.co/go/tools/unused/runtime.go new file mode 100644 index 0000000000..11be4a34af --- /dev/null +++ b/vendor/honnef.co/go/tools/unused/runtime.go @@ -0,0 +1,331 @@ +package unused + +// Functions defined in the Go runtime that may be called through +// compiler magic or via assembly. 
+var runtimeFuncs = map[string]bool{ + // Copied from cmd/compile/internal/typecheck/builtin.go, var runtimeDecls + "newobject": true, + "panicindex": true, + "panicslice": true, + "panicdivide": true, + "panicmakeslicelen": true, + "throwinit": true, + "panicwrap": true, + "gopanic": true, + "gorecover": true, + "goschedguarded": true, + "printbool": true, + "printfloat": true, + "printint": true, + "printhex": true, + "printuint": true, + "printcomplex": true, + "printstring": true, + "printpointer": true, + "printiface": true, + "printeface": true, + "printslice": true, + "printnl": true, + "printsp": true, + "printlock": true, + "printunlock": true, + "concatstring2": true, + "concatstring3": true, + "concatstring4": true, + "concatstring5": true, + "concatstrings": true, + "cmpstring": true, + "intstring": true, + "slicebytetostring": true, + "slicebytetostringtmp": true, + "slicerunetostring": true, + "stringtoslicebyte": true, + "stringtoslicerune": true, + "slicecopy": true, + "slicestringcopy": true, + "decoderune": true, + "countrunes": true, + "convI2I": true, + "convT16": true, + "convT32": true, + "convT64": true, + "convTstring": true, + "convTslice": true, + "convT2E": true, + "convT2Enoptr": true, + "convT2I": true, + "convT2Inoptr": true, + "assertE2I": true, + "assertE2I2": true, + "assertI2I": true, + "assertI2I2": true, + "panicdottypeE": true, + "panicdottypeI": true, + "panicnildottype": true, + "ifaceeq": true, + "efaceeq": true, + "fastrand": true, + "makemap64": true, + "makemap": true, + "makemap_small": true, + "mapaccess1": true, + "mapaccess1_fast32": true, + "mapaccess1_fast64": true, + "mapaccess1_faststr": true, + "mapaccess1_fat": true, + "mapaccess2": true, + "mapaccess2_fast32": true, + "mapaccess2_fast64": true, + "mapaccess2_faststr": true, + "mapaccess2_fat": true, + "mapassign": true, + "mapassign_fast32": true, + "mapassign_fast32ptr": true, + "mapassign_fast64": true, + "mapassign_fast64ptr": true, + "mapassign_faststr": true, + "mapiterinit": true, + "mapdelete": true, + "mapdelete_fast32": true, + "mapdelete_fast64": true, + "mapdelete_faststr": true, + "mapiternext": true, + "mapclear": true, + "makechan64": true, + "makechan": true, + "chanrecv1": true, + "chanrecv2": true, + "chansend1": true, + "closechan": true, + "writeBarrier": true, + "typedmemmove": true, + "typedmemclr": true, + "typedslicecopy": true, + "selectnbsend": true, + "selectnbrecv": true, + "selectnbrecv2": true, + "selectsetpc": true, + "selectgo": true, + "block": true, + "makeslice": true, + "makeslice64": true, + "growslice": true, + "memmove": true, + "memclrNoHeapPointers": true, + "memclrHasPointers": true, + "memequal": true, + "memequal8": true, + "memequal16": true, + "memequal32": true, + "memequal64": true, + "memequal128": true, + "int64div": true, + "uint64div": true, + "int64mod": true, + "uint64mod": true, + "float64toint64": true, + "float64touint64": true, + "float64touint32": true, + "int64tofloat64": true, + "uint64tofloat64": true, + "uint32tofloat64": true, + "complex128div": true, + "racefuncenter": true, + "racefuncenterfp": true, + "racefuncexit": true, + "raceread": true, + "racewrite": true, + "racereadrange": true, + "racewriterange": true, + "msanread": true, + "msanwrite": true, + "x86HasPOPCNT": true, + "x86HasSSE41": true, + "arm64HasATOMICS": true, + "mallocgc": true, + "panicshift": true, + "panicmakeslicecap": true, + "goPanicIndex": true, + "goPanicIndexU": true, + "goPanicSliceAlen": true, + "goPanicSliceAlenU": true, + "goPanicSliceAcap": 
true, + "goPanicSliceAcapU": true, + "goPanicSliceB": true, + "goPanicSliceBU": true, + "goPanicSlice3Alen": true, + "goPanicSlice3AlenU": true, + "goPanicSlice3Acap": true, + "goPanicSlice3AcapU": true, + "goPanicSlice3B": true, + "goPanicSlice3BU": true, + "goPanicSlice3C": true, + "goPanicSlice3CU": true, + "goPanicSliceConvert": true, + "printuintptr": true, + "convT": true, + "convTnoptr": true, + "makeslicecopy": true, + "unsafeslicecheckptr": true, + "panicunsafeslicelen": true, + "panicunsafeslicenilptr": true, + "unsafestringcheckptr": true, + "panicunsafestringlen": true, + "panicunsafestringnilptr": true, + "mulUintptr": true, + "memequal0": true, + "f32equal": true, + "f64equal": true, + "c64equal": true, + "c128equal": true, + "strequal": true, + "interequal": true, + "nilinterequal": true, + "memhash": true, + "memhash0": true, + "memhash8": true, + "memhash16": true, + "memhash32": true, + "memhash64": true, + "memhash128": true, + "f32hash": true, + "f64hash": true, + "c64hash": true, + "c128hash": true, + "strhash": true, + "interhash": true, + "nilinterhash": true, + "int64tofloat32": true, + "uint64tofloat32": true, + "getcallerpc": true, + "getcallersp": true, + "msanmove": true, + "asanread": true, + "asanwrite": true, + "checkptrAlignment": true, + "checkptrArithmetic": true, + "libfuzzerTraceCmp1": true, + "libfuzzerTraceCmp2": true, + "libfuzzerTraceCmp4": true, + "libfuzzerTraceCmp8": true, + "libfuzzerTraceConstCmp1": true, + "libfuzzerTraceConstCmp2": true, + "libfuzzerTraceConstCmp4": true, + "libfuzzerTraceConstCmp8": true, + "libfuzzerHookStrCmp": true, + "libfuzzerHookEqualFold": true, + "addCovMeta": true, + "x86HasFMA": true, + "armHasVFPv4": true, + + // Extracted from assembly code in the standard library, with the exception of the runtime package itself + "abort": true, + "aeshashbody": true, + "args": true, + "asminit": true, + "badctxt": true, + "badmcall2": true, + "badmcall": true, + "badmorestackg0": true, + "badmorestackgsignal": true, + "badsignal2": true, + "callbackasm1": true, + "callCfunction": true, + "cgocallback_gofunc": true, + "cgocallbackg": true, + "checkgoarm": true, + "check": true, + "debugCallCheck": true, + "debugCallWrap": true, + "emptyfunc": true, + "entersyscall": true, + "exit": true, + "exits": true, + "exitsyscall": true, + "externalthreadhandler": true, + "findnull": true, + "goexit1": true, + "gostring": true, + "i386_set_ldt": true, + "_initcgo": true, + "init_thread_tls": true, + "ldt0setup": true, + "libpreinit": true, + "load_g": true, + "morestack": true, + "mstart": true, + "nacl_sysinfo": true, + "nanotimeQPC": true, + "nanotime": true, + "newosproc0": true, + "newproc": true, + "newstack": true, + "noted": true, + "nowQPC": true, + "osinit": true, + "printf": true, + "racecallback": true, + "reflectcallmove": true, + "reginit": true, + "rt0_go": true, + "save_g": true, + "schedinit": true, + "setldt": true, + "settls": true, + "sighandler": true, + "sigprofNonGo": true, + "sigtrampgo": true, + "_sigtramp": true, + "sigtramp": true, + "stackcheck": true, + "syscall_chdir": true, + "syscall_chroot": true, + "syscall_close": true, + "syscall_dup2": true, + "syscall_execve": true, + "syscall_exit": true, + "syscall_fcntl": true, + "syscall_forkx": true, + "syscall_gethostname": true, + "syscall_getpid": true, + "syscall_ioctl": true, + "syscall_pipe": true, + "syscall_rawsyscall6": true, + "syscall_rawSyscall6": true, + "syscall_rawsyscall": true, + "syscall_RawSyscall": true, + "syscall_rawsysvicall6": true, + 
"syscall_setgid": true, + "syscall_setgroups": true, + "syscall_setpgid": true, + "syscall_setsid": true, + "syscall_setuid": true, + "syscall_syscall6": true, + "syscall_syscall": true, + "syscall_Syscall": true, + "syscall_sysvicall6": true, + "syscall_wait4": true, + "syscall_write": true, + "traceback": true, + "tstart": true, + "usplitR0": true, + "wbBufFlush": true, + "write": true, + + // Other runtime functions that can get called in non-standard ways + "bgsweep": true, + "memhash_varlen": true, + "strhashFallback": true, + "asanregisterglobals": true, + "cgoUse": true, + "cgoCheckPointer": true, + "cgoCheckResult": true, + "_cgo_panic_internal": true, + "addExitHook": true, +} + +var runtimeCoverageFuncs = map[string]bool{ + "initHook": true, + "markProfileEmitted": true, + "processCoverTestDir": true, +} diff --git a/vendor/honnef.co/go/tools/unused/serialize.go b/vendor/honnef.co/go/tools/unused/serialize.go new file mode 100644 index 0000000000..126e7400aa --- /dev/null +++ b/vendor/honnef.co/go/tools/unused/serialize.go @@ -0,0 +1,99 @@ +package unused + +import ( + "fmt" + "go/token" + "os" + + "golang.org/x/tools/go/types/objectpath" +) + +type ObjectPath struct { + PkgPath string + ObjPath objectpath.Path +} + +// XXX make sure that node 0 always exists and is always the root + +type SerializedGraph struct { + nodes []Node + nodesByPath map[ObjectPath]NodeID + // XXX deduplicating on position is dubious for `switch x := foo.(type)`, where x will be declared many times for + // the different types, but all at the same position. On the other hand, merging these nodes is probably fine. + nodesByPosition map[token.Position]NodeID +} + +func trace(f string, args ...interface{}) { + fmt.Fprintf(os.Stderr, f, args...) + fmt.Fprintln(os.Stderr) +} + +func (g *SerializedGraph) Merge(nodes []Node) { + if g.nodesByPath == nil { + g.nodesByPath = map[ObjectPath]NodeID{} + } + if g.nodesByPosition == nil { + g.nodesByPosition = map[token.Position]NodeID{} + } + if len(g.nodes) == 0 { + // Seed nodes with a root node + g.nodes = append(g.nodes, Node{}) + } + // OPT(dh): reuse storage between calls to Merge + remapping := make([]NodeID, len(nodes)) + + // First pass: compute remapping of IDs of to-be-merged nodes + for _, n := range nodes { + // XXX Column is never 0. it's 1 if there is no column information in the export data. which sucks, because + // objects can also genuinely be in column 1. + if n.id != 0 && n.obj.Path == (ObjectPath{}) && n.obj.Position.Column == 0 { + // If the object has no path, then it couldn't have come from export data, which means it needs to have full + // position information including a column. 
+ panic(fmt.Sprintf("object %q has no path but also no column information", n.obj.Name)) + } + + if orig, ok := g.nodesByPath[n.obj.Path]; ok { + // We already have a node for this object + trace("deduplicating %d -> %d based on path %s", n.id, orig, n.obj.Path) + remapping[n.id] = orig + } else if orig, ok := g.nodesByPosition[n.obj.Position]; ok && n.obj.Position.Column != 0 { + // We already have a node for this object + trace("deduplicating %d -> %d based on position %s", n.id, orig, n.obj.Position) + remapping[n.id] = orig + } else { + // This object is new to us; change ID to avoid collision + newID := NodeID(len(g.nodes)) + trace("new node, remapping %d -> %d", n.id, newID) + remapping[n.id] = newID + g.nodes = append(g.nodes, Node{ + id: newID, + obj: n.obj, + uses: make([]NodeID, 0, len(n.uses)), + owns: make([]NodeID, 0, len(n.owns)), + }) + if n.id == 0 { + // Our root uses all the roots of the subgraphs + g.nodes[0].uses = append(g.nodes[0].uses, newID) + } + if n.obj.Path != (ObjectPath{}) { + g.nodesByPath[n.obj.Path] = newID + } + if n.obj.Position.Column != 0 { + g.nodesByPosition[n.obj.Position] = newID + } + } + } + + // Second step: apply remapping + for _, n := range nodes { + n.id = remapping[n.id] + for i := range n.uses { + n.uses[i] = remapping[n.uses[i]] + } + for i := range n.owns { + n.owns[i] = remapping[n.owns[i]] + } + g.nodes[n.id].uses = append(g.nodes[n.id].uses, n.uses...) + g.nodes[n.id].owns = append(g.nodes[n.id].owns, n.owns...) + } +} diff --git a/vendor/honnef.co/go/tools/unused/unused.go b/vendor/honnef.co/go/tools/unused/unused.go index f703ed3d1f..edd2630759 100644 --- a/vendor/honnef.co/go/tools/unused/unused.go +++ b/vendor/honnef.co/go/tools/unused/unused.go @@ -1,8 +1,6 @@ // Package unused contains code for finding unused code. package unused -// TODO(dh): don't add instantiated types/methods to the graph. add the origin types/methods. - import ( "fmt" "go/ast" @@ -12,66 +10,60 @@ import ( "reflect" "strings" - "honnef.co/go/tools/analysis/code" "honnef.co/go/tools/analysis/facts/directives" "honnef.co/go/tools/analysis/facts/generated" "honnef.co/go/tools/analysis/lint" "honnef.co/go/tools/analysis/report" "honnef.co/go/tools/go/ast/astutil" - "honnef.co/go/tools/go/ir" "honnef.co/go/tools/go/types/typeutil" - "honnef.co/go/tools/internal/passes/buildir" - "golang.org/x/exp/typeparams" "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/types/objectpath" ) -var Debug io.Writer +// OPT(dh): don't track local variables that can't have any interesting outgoing edges. For example, using a local +// variable of type int is meaningless; we don't care if `int` is used or not. +// +// Note that we do have to track variables with for example array types, because the array type could have involved a +// named constant. +// +// We probably have different culling needs depending on the mode of operation, too. If we analyze multiple packages in +// one graph (unused's "whole program" mode), we could remove further useless edges (e.g. into nodes that themselves +// have no outgoing edges and aren't meaningful objects on their own) after having analyzed a package, to keep the +// in-memory representation small on average. If we only analyze a single package, that step would just waste cycles, as +// we're about to throw the entire graph away, anyway. -// The graph we construct omits nodes along a path that do not -// contribute any new information to the solution. 
For example, the -// full graph for a function with a receiver would be Func -> -// Signature -> Var -> Type. However, since signatures cannot be -// unused, and receivers are always considered used, we can compact -// the graph down to Func -> Type. This makes the graph smaller, but -// harder to debug. +// TODO(dh): currently, types use methods that implement interfaces. However, this makes a method used even if the +// relevant interface is never used. What if instead interfaces used those methods? Right now we cannot do that, because +// methods use their receivers, so using a method uses the type. But do we need that edge? Is there a way to refer to a +// method without explicitly mentioning the type somewhere? If not, the edge from method to receiver is superfluous. -// TODO(dh): conversions between structs mark fields as used, but the -// conversion itself isn't part of that subgraph. even if the function -// containing the conversion is unused, the fields will be marked as -// used. +// XXX vet all code for proper use of core types // TODO(dh): we cannot observe function calls in assembly files. /* +This overview is true when using the default options. Different options may change individual behaviors. + - packages use: - (1.1) exported named types - - (1.2) exported functions + - (1.2) exported functions (but not methods!) - (1.3) exported variables - (1.4) exported constants - (1.5) init functions - (1.6) functions exported to cgo - (1.7) the main function iff in the main package - (1.8) symbols linked via go:linkname + - (1.9) objects in generated files - named types use: - (2.1) exported methods - (2.2) the type they're based on - - (2.3) all their aliases. we can't easily track uses of aliases - because go/types turns them into uses of the aliased types. assume - that if a type is used, so are all of its aliases. - - (2.4) the pointer type. this aids with eagerly implementing - interfaces. if a method that implements an interface is defined on - a pointer receiver, and the pointer type is never used, but the - named type is, then we still want to mark the method as used. - (2.5) all their type parameters. Unused type parameters are probably useless, but they're a brand new feature and we don't want to introduce false positives because we couldn't anticipate some novel use-case. - (2.6) all their type arguments -- variables and constants use: - - their types - - functions use: - (4.1) all their arguments, return parameters and receivers - (4.2) anonymous functions defined beneath them @@ -80,11 +72,14 @@ var Debug io.Writer that way we don't have to keep track of closures escaping functions. - (4.4) functions they return. we assume that someone else will call the returned function - (4.5) functions/interface methods they call - - types they instantiate or convert to + - (4.6) types they instantiate or convert to - (4.7) fields they access - - (4.8) types of all instructions - (4.9) package-level variables they assign to iff in tests (sinks for benchmarks) - (4.10) all their type parameters. See 2.5 for reasoning. + - (4.11) local variables + - Note that the majority of this is handled implicitly by seeing idents be used. In particular, unlike the old + IR-based implementation, the AST-based one doesn't care about closures, bound methods or anonymous functions. + They're all just additional nodes in the AST. 
- conversions use: - (5.1) when converting between two equivalent structs, the fields in @@ -107,7 +102,7 @@ var Debug io.Writer - (8.1) We do not technically care about interfaces that only consist of exported methods. Exported methods on concrete types are always marked as used. - - Any concrete type implements all known interfaces. Even if it isn't + - (8.2) Any concrete type implements all known interfaces. Even if it isn't assigned to any interfaces in our code, the user may receive a value of the type and expect to pass it back to us through an interface. @@ -126,20 +121,22 @@ var Debug io.Writer used by 8.3 just because it contributes A's methods to C. - Inherent uses: - - thunks and other generated wrappers call the real function - (9.2) variables use their types - (9.3) types use their underlying and element types - (9.4) conversions use the type they convert to - - (9.5) instructions use their operands - - (9.6) instructions use their operands' types - (9.7) variable _reads_ use variables, writes do not, except in tests - (9.8) runtime functions that may be called from user code via the compiler + - (9.9) objects named the blank identifier are used. They cannot be referred to and are usually used explicitly to + use something that would otherwise be unused. + - The majority of idents get marked as read by virtue of being in the AST. - const groups: - (10.1) if one constant out of a block of constants is used, mark all - of them used. a lot of the time, unused constants exist for the sake - of completeness. See also - https://github.com/dominikh/go-tools/issues/365 + - (10.1) if one constant out of a block of constants is used, mark all + of them used. a lot of the time, unused constants exist for the sake + of completeness. See also + https://github.com/dominikh/go-tools/issues/365 + + Do not, however, include constants named _ in constant groups. - (11.1) anonymous struct types use all their fields. we cannot @@ -155,274 +152,19 @@ var Debug io.Writer */ +var Debug io.Writer + func assert(b bool) { if !b { panic("failed assertion") } } -// /usr/lib/go/src/runtime/proc.go:433:6: func badmorestackg0 is unused (U1000) - -// Functions defined in the Go runtime that may be called through -// compiler magic or via assembly. 
-var runtimeFuncs = map[string]bool{ - // The first part of the list is copied from - // cmd/compile/internal/gc/builtin.go, var runtimeDecls - "newobject": true, - "panicindex": true, - "panicslice": true, - "panicdivide": true, - "panicmakeslicelen": true, - "throwinit": true, - "panicwrap": true, - "gopanic": true, - "gorecover": true, - "goschedguarded": true, - "printbool": true, - "printfloat": true, - "printint": true, - "printhex": true, - "printuint": true, - "printcomplex": true, - "printstring": true, - "printpointer": true, - "printiface": true, - "printeface": true, - "printslice": true, - "printnl": true, - "printsp": true, - "printlock": true, - "printunlock": true, - "concatstring2": true, - "concatstring3": true, - "concatstring4": true, - "concatstring5": true, - "concatstrings": true, - "cmpstring": true, - "intstring": true, - "slicebytetostring": true, - "slicebytetostringtmp": true, - "slicerunetostring": true, - "stringtoslicebyte": true, - "stringtoslicerune": true, - "slicecopy": true, - "slicestringcopy": true, - "decoderune": true, - "countrunes": true, - "convI2I": true, - "convT16": true, - "convT32": true, - "convT64": true, - "convTstring": true, - "convTslice": true, - "convT2E": true, - "convT2Enoptr": true, - "convT2I": true, - "convT2Inoptr": true, - "assertE2I": true, - "assertE2I2": true, - "assertI2I": true, - "assertI2I2": true, - "panicdottypeE": true, - "panicdottypeI": true, - "panicnildottype": true, - "ifaceeq": true, - "efaceeq": true, - "fastrand": true, - "makemap64": true, - "makemap": true, - "makemap_small": true, - "mapaccess1": true, - "mapaccess1_fast32": true, - "mapaccess1_fast64": true, - "mapaccess1_faststr": true, - "mapaccess1_fat": true, - "mapaccess2": true, - "mapaccess2_fast32": true, - "mapaccess2_fast64": true, - "mapaccess2_faststr": true, - "mapaccess2_fat": true, - "mapassign": true, - "mapassign_fast32": true, - "mapassign_fast32ptr": true, - "mapassign_fast64": true, - "mapassign_fast64ptr": true, - "mapassign_faststr": true, - "mapiterinit": true, - "mapdelete": true, - "mapdelete_fast32": true, - "mapdelete_fast64": true, - "mapdelete_faststr": true, - "mapiternext": true, - "mapclear": true, - "makechan64": true, - "makechan": true, - "chanrecv1": true, - "chanrecv2": true, - "chansend1": true, - "closechan": true, - "writeBarrier": true, - "typedmemmove": true, - "typedmemclr": true, - "typedslicecopy": true, - "selectnbsend": true, - "selectnbrecv": true, - "selectnbrecv2": true, - "selectsetpc": true, - "selectgo": true, - "block": true, - "makeslice": true, - "makeslice64": true, - "growslice": true, - "memmove": true, - "memclrNoHeapPointers": true, - "memclrHasPointers": true, - "memequal": true, - "memequal8": true, - "memequal16": true, - "memequal32": true, - "memequal64": true, - "memequal128": true, - "int64div": true, - "uint64div": true, - "int64mod": true, - "uint64mod": true, - "float64toint64": true, - "float64touint64": true, - "float64touint32": true, - "int64tofloat64": true, - "uint64tofloat64": true, - "uint32tofloat64": true, - "complex128div": true, - "racefuncenter": true, - "racefuncenterfp": true, - "racefuncexit": true, - "raceread": true, - "racewrite": true, - "racereadrange": true, - "racewriterange": true, - "msanread": true, - "msanwrite": true, - "x86HasPOPCNT": true, - "x86HasSSE41": true, - "arm64HasATOMICS": true, - - // The second part of the list is extracted from assembly code in - // the standard library, with the exception of the runtime package itself - "abort": true, - 
"aeshashbody": true, - "args": true, - "asminit": true, - "badctxt": true, - "badmcall2": true, - "badmcall": true, - "badmorestackg0": true, - "badmorestackgsignal": true, - "badsignal2": true, - "callbackasm1": true, - "callCfunction": true, - "cgocallback_gofunc": true, - "cgocallbackg": true, - "checkgoarm": true, - "check": true, - "debugCallCheck": true, - "debugCallWrap": true, - "emptyfunc": true, - "entersyscall": true, - "exit": true, - "exits": true, - "exitsyscall": true, - "externalthreadhandler": true, - "findnull": true, - "goexit1": true, - "gostring": true, - "i386_set_ldt": true, - "_initcgo": true, - "init_thread_tls": true, - "ldt0setup": true, - "libpreinit": true, - "load_g": true, - "morestack": true, - "mstart": true, - "nacl_sysinfo": true, - "nanotimeQPC": true, - "nanotime": true, - "newosproc0": true, - "newproc": true, - "newstack": true, - "noted": true, - "nowQPC": true, - "osinit": true, - "printf": true, - "racecallback": true, - "reflectcallmove": true, - "reginit": true, - "rt0_go": true, - "save_g": true, - "schedinit": true, - "setldt": true, - "settls": true, - "sighandler": true, - "sigprofNonGo": true, - "sigtrampgo": true, - "_sigtramp": true, - "sigtramp": true, - "stackcheck": true, - "syscall_chdir": true, - "syscall_chroot": true, - "syscall_close": true, - "syscall_dup2": true, - "syscall_execve": true, - "syscall_exit": true, - "syscall_fcntl": true, - "syscall_forkx": true, - "syscall_gethostname": true, - "syscall_getpid": true, - "syscall_ioctl": true, - "syscall_pipe": true, - "syscall_rawsyscall6": true, - "syscall_rawSyscall6": true, - "syscall_rawsyscall": true, - "syscall_RawSyscall": true, - "syscall_rawsysvicall6": true, - "syscall_setgid": true, - "syscall_setgroups": true, - "syscall_setpgid": true, - "syscall_setsid": true, - "syscall_setuid": true, - "syscall_syscall6": true, - "syscall_syscall": true, - "syscall_Syscall": true, - "syscall_sysvicall6": true, - "syscall_wait4": true, - "syscall_write": true, - "traceback": true, - "tstart": true, - "usplitR0": true, - "wbBufFlush": true, - "write": true, -} - -type pkg struct { - Fset *token.FileSet - Files []*ast.File - Pkg *types.Package - TypesInfo *types.Info - TypesSizes types.Sizes - IR *ir.Package - SrcFuncs []*ir.Function - Directives []lint.Directive -} - // TODO(dh): should we return a map instead of two slices? 
type Result struct { - Used []types.Object - Unused []types.Object -} - -type SerializedResult struct { - Used []SerializedObject - Unused []SerializedObject + Used []Object + Unused []Object + Quiet []Object } var Analyzer = &lint.Analyzer{ @@ -433,483 +175,326 @@ var Analyzer = &lint.Analyzer{ Name: "U1000", Doc: "Unused code", Run: run, - Requires: []*analysis.Analyzer{buildir.Analyzer, generated.Analyzer, directives.Analyzer}, + Requires: []*analysis.Analyzer{generated.Analyzer, directives.Analyzer}, ResultType: reflect.TypeOf(Result{}), }, } -type SerializedObject struct { - Name string - Position token.Position - DisplayPosition token.Position - Kind string - InGenerated bool -} - -func typString(obj types.Object) string { - switch obj := obj.(type) { - case *types.Func: - return "func" - case *types.Var: - if obj.IsField() { - return "field" - } - return "var" - case *types.Const: - return "const" - case *types.TypeName: - if _, ok := obj.Type().(*typeparams.TypeParam); ok { - return "type param" - } else { - return "type" - } - default: - return "identifier" - } +func newGraph( + fset *token.FileSet, + files []*ast.File, + pkg *types.Package, + info *types.Info, + directives []lint.Directive, + generated map[string]generated.Generator, + opts Options, +) *graph { + g := graph{ + pkg: pkg, + info: info, + files: files, + directives: directives, + generated: generated, + fset: fset, + nodes: []Node{{}}, + edges: map[edge]struct{}{}, + objects: map[types.Object]NodeID{}, + opts: opts, + } + + return &g } -func Serialize(pass *analysis.Pass, res Result, fset *token.FileSet) SerializedResult { - // OPT(dh): there's no point in serializing Used objects that are - // always used, such as exported names, blank identifiers, or - // anonymous struct fields. Used only exists to overrule Unused of - // a different package. If something can never be unused, then its - // presence in Used is useless. - // - // I'm not sure if this should happen when serializing, or when - // returning Result. - - out := SerializedResult{ - Used: make([]SerializedObject, len(res.Used)), - Unused: make([]SerializedObject, len(res.Unused)), - } - for i, obj := range res.Used { - out.Used[i] = serializeObject(pass, fset, obj) - } - for i, obj := range res.Unused { - out.Unused[i] = serializeObject(pass, fset, obj) - } - return out -} +func run(pass *analysis.Pass) (interface{}, error) { + g := newGraph( + pass.Fset, + pass.Files, + pass.Pkg, + pass.TypesInfo, + pass.ResultOf[directives.Analyzer].([]lint.Directive), + pass.ResultOf[generated.Analyzer].(map[string]generated.Generator), + DefaultOptions, + ) + g.entry() -func serializeObject(pass *analysis.Pass, fset *token.FileSet, obj types.Object) SerializedObject { - name := obj.Name() - if sig, ok := obj.Type().(*types.Signature); ok && sig.Recv() != nil { - switch sig.Recv().Type().(type) { - case *types.Named, *types.Pointer: - typ := types.TypeString(sig.Recv().Type(), func(*types.Package) string { return "" }) - if len(typ) > 0 && typ[0] == '*' { - name = fmt.Sprintf("(%s).%s", typ, obj.Name()) - } else if len(typ) > 0 { - name = fmt.Sprintf("%s.%s", typ, obj.Name()) - } - } - } - return SerializedObject{ - Name: name, - Position: fset.PositionFor(obj.Pos(), false), - DisplayPosition: report.DisplayPosition(fset, obj.Pos()), - Kind: typString(obj), - InGenerated: code.IsGenerated(pass, obj.Pos()), + sg := &SerializedGraph{ + nodes: g.nodes, } -} -func debugf(f string, v ...interface{}) { if Debug != nil { - fmt.Fprintf(Debug, f, v...) 
+ Debug.Write([]byte(sg.Dot())) } -} -func run(pass *analysis.Pass) (interface{}, error) { - irpkg := pass.ResultOf[buildir.Analyzer].(*buildir.IR) - dirs := pass.ResultOf[directives.Analyzer].([]lint.Directive) - pkg := &pkg{ - Fset: pass.Fset, - Files: pass.Files, - Pkg: pass.Pkg, - TypesInfo: pass.TypesInfo, - TypesSizes: pass.TypesSizes, - IR: irpkg.Pkg, - SrcFuncs: irpkg.SrcFuncs, - Directives: dirs, - } + return sg.Results(), nil +} - g := newGraph() - g.entry(pkg) - used, unused := results(g) +type Options struct { + FieldWritesAreUses bool + PostStatementsAreReads bool + ExportedIsUsed bool + ExportedFieldsAreUsed bool + ParametersAreUsed bool + LocalVariablesAreUsed bool + GeneratedIsUsed bool +} - if Debug != nil { - debugNode := func(n *node) { - if n.obj == nil { - debugf("n%d [label=\"Root\"];\n", n.id) - } else { - color := "red" - if n.seen { - color = "green" - } - debugf("n%d [label=%q, color=%q];\n", n.id, fmt.Sprintf("(%T) %s", n.obj, n.obj), color) - } - for _, e := range n.used { - for i := edgeKind(1); i < 64; i++ { - if e.kind.is(1 << i) { - debugf("n%d -> n%d [label=%q];\n", n.id, e.node.id, edgeKind(1< 0 && typ[0] == '*' { + name = fmt.Sprintf("(%s).%s", typ, obj.Name()) + } else if len(typ) > 0 { + name = fmt.Sprintf("%s.%s", typ, obj.Name()) + } + } + } + return Object{ + Name: name, + ShortName: obj.Name(), + Kind: typString(obj), + Path: path, + Position: g.fset.PositionFor(obj.Pos(), false), + DisplayPosition: report.DisplayPosition(g.fset, obj.Pos()), } - g.Root = g.newNode(nil) - return g } -func (g *graph) newPointer(typ types.Type) *types.Pointer { - if p, ok := g.pointers[typ]; ok { - return p - } else { - p := types.NewPointer(typ) - g.pointers[typ] = p - g.see(p) - return p +func typString(obj types.Object) string { + switch obj := obj.(type) { + case *types.Func: + return "func" + case *types.Var: + if obj.IsField() { + return "field" + } + return "var" + case *types.Const: + return "const" + case *types.TypeName: + if _, ok := obj.Type().(*types.TypeParam); ok { + return "type param" + } else { + return "type" + } + default: + return "identifier" } } -func (g *graph) color(root *node) { - if root.seen { - return +func (g *graph) newNode(obj types.Object) NodeID { + id := NodeID(len(g.nodes)) + n := Node{ + id: id, + obj: g.objectToObject(obj), } - root.seen = true - for _, e := range root.used { - g.color(e.node) + g.nodes = append(g.nodes, n) + if _, ok := g.objects[obj]; ok { + panic(fmt.Sprintf("already had a node for %s", obj)) } + g.objects[obj] = id + return id } -type constGroup struct { - // give the struct a size to get unique pointers - _ byte -} - -func (constGroup) String() string { return "const group" } - -type edge struct { - node *node - kind edgeKind -} - -type node struct { - obj interface{} - id uint64 - - // OPT(dh): evaluate using a map instead of a slice to avoid - // duplicate edges. 
- used []edge - - // set during final graph walk if node is reachable - seen bool - quiet bool -} - -func (g *graph) nodeMaybe(obj types.Object) (*node, bool) { - if node, ok := g.Nodes[obj]; ok { - return node, true +func (g *graph) node(obj types.Object) NodeID { + if obj == nil { + return 0 + } + obj = origin(obj) + if n, ok := g.objects[obj]; ok { + return n } - return nil, false + n := g.newNode(obj) + return n } -func (g *graph) node(obj interface{}) (n *node, new bool) { +func origin(obj types.Object) types.Object { switch obj := obj.(type) { - case types.Type: - if v := g.TypeNodes[obj]; v != nil { - return v, false - } - n = g.newNode(obj) - g.TypeNodes[obj] = n - return n, true - case types.Object: - // OPT(dh): the types.Object and default cases are identical - if node, ok := g.Nodes[obj]; ok { - return node, false - } - - n = g.newNode(obj) - g.Nodes[obj] = n - return n, true + case *types.Var: + return obj.Origin() + case *types.Func: + return obj.Origin() default: - if node, ok := g.Nodes[obj]; ok { - return node, false - } - - n = g.newNode(obj) - g.Nodes[obj] = n - return n, true + return obj } } -func (g *graph) newNode(obj interface{}) *node { - g.nodeCounter++ - return &node{ - obj: obj, - id: g.nodeCounter, +func (g *graph) addEdge(e edge) bool { + if _, ok := g.edges[e]; ok { + return false } + g.edges[e] = struct{}{} + return true } -func (n *node) use(n2 *node, kind edgeKind) { - assert(n2 != nil) - n.used = append(n.used, edge{node: n2, kind: kind}) +func (g *graph) addOwned(owner, owned NodeID) { + e := edge{owner, owned, edgeKindOwn} + if !g.addEdge(e) { + return + } + n := &g.nodes[owner] + n.owns = append(n.owns, owned) } -// isIrrelevant reports whether an object's presence in the graph is -// of any relevance. A lot of objects will never have outgoing edges, -// nor meaningful incoming ones. Examples are basic types and empty -// signatures, among many others. -// -// Dropping these objects should have no effect on correctness, but -// may improve performance. It also helps with debugging, as it -// greatly reduces the size of the graph. 
-func isIrrelevant(obj interface{}) bool { - if obj, ok := obj.(types.Object); ok { - switch obj := obj.(type) { - case *types.Var: - if obj.IsField() { - // We need to track package fields - return false - } - if obj.Pkg() != nil && obj.Parent() == obj.Pkg().Scope() { - // We need to track package-level variables - return false - } - return isIrrelevant(obj.Type()) - default: - return false - } - } - if T, ok := obj.(types.Type); ok { - switch T := T.(type) { - case *types.Array: - return isIrrelevant(T.Elem()) - case *types.Slice: - return isIrrelevant(T.Elem()) - case *types.Basic: - return true - case *types.Tuple: - for i := 0; i < T.Len(); i++ { - if !isIrrelevant(T.At(i).Type()) { - return false - } - } - return true - case *types.Signature: - if T.Recv() != nil { - return false - } - for i := 0; i < T.Params().Len(); i++ { - if !isIrrelevant(T.Params().At(i)) { - return false - } - } - for i := 0; i < T.Results().Len(); i++ { - if !isIrrelevant(T.Results().At(i)) { - return false - } - } - return true - case *types.Interface: - return T.NumMethods() == 0 && T.NumEmbeddeds() == 0 - case *types.Pointer: - return isIrrelevant(T.Elem()) - case *types.Map: - return isIrrelevant(T.Key()) && isIrrelevant(T.Elem()) - case *types.Struct: - return T.NumFields() == 0 - case *types.Chan: - return isIrrelevant(T.Elem()) - default: - return false - } +func (g *graph) addUse(by, used NodeID) { + e := edge{by, used, edgeKindUse} + if !g.addEdge(e) { + return } - return false + nBy := &g.nodes[by] + nBy.uses = append(nBy.uses, used) } -func (g *graph) see(obj interface{}) *node { - if isIrrelevant(obj) { - return nil +func (g *graph) see(obj, owner types.Object) { + if obj == nil { + panic("saw nil object") } - assert(obj != nil) - - if fn, ok := obj.(*types.Func); ok { - obj = typeparams.OriginMethod(fn) - } - if t, ok := obj.(*types.Named); ok { - obj = typeparams.NamedTypeOrigin(t) + if g.opts.ExportedIsUsed && obj.Pkg() != g.pkg || obj.Pkg() == nil { + return } - // add new node to graph - node, _ := g.node(obj) - - if p, ok := obj.(*types.Pointer); ok { - if pt, ok := g.pointers[p.Elem()]; ok { - // We've used graph.newPointer before we saw this pointer; add an edge that marks the two pointers as being - // identical - if p != pt { - g.use(p, pt, edgeSamePointer) - g.use(pt, p, edgeSamePointer) - } - } else { - g.pointers[p.Elem()] = p - } + nObj := g.node(obj) + if owner != nil { + nOwner := g.node(owner) + g.addOwned(nOwner, nObj) } - - return node } -func (g *graph) use(used, by interface{}, kind edgeKind) { - if isIrrelevant(used) { - return +func isIrrelevant(obj types.Object) bool { + switch obj.(type) { + case *types.PkgName: + return true + default: + return false } +} - assert(used != nil) - if obj, ok := by.(types.Object); ok && obj.Pkg() != nil { - if obj.Pkg() != g.pkg.Pkg { +func (g *graph) use(used, by types.Object) { + if g.opts.ExportedIsUsed { + if used.Pkg() != g.pkg || used.Pkg() == nil { + return + } + if by != nil && by.Pkg() != g.pkg { return } } - if fn, ok := used.(*types.Func); ok { - used = typeparams.OriginMethod(fn) - } - if fn, ok := by.(*types.Func); ok { - by = typeparams.OriginMethod(fn) - } - - if t, ok := used.(*types.Named); ok { - used = typeparams.NamedTypeOrigin(t) - } - if t, ok := by.(*types.Named); ok { - by = typeparams.NamedTypeOrigin(t) - } - - usedNode, new := g.node(used) - assert(!new) - if by == nil { - g.Root.use(usedNode, kind) - } else { - byNode, new := g.node(by) - assert(!new) - byNode.use(usedNode, kind) + if isIrrelevant(used) { + return 
} -} -func (g *graph) seeAndUse(used, by interface{}, kind edgeKind) *node { - n := g.see(used) - g.use(used, by, kind) - return n + nUsed := g.node(used) + nBy := g.node(by) + g.addUse(nBy, nUsed) } -func (g *graph) entry(pkg *pkg) { - g.pkg = pkg - scopes := map[*types.Scope]*ir.Function{} - for _, fn := range pkg.SrcFuncs { - if fn.Object() != nil { - scope := fn.Object().(*types.Func).Scope() - scopes[scope] = fn - } - } - - for _, f := range pkg.Files { +func (g *graph) entry() { + for _, f := range g.files { for _, cg := range f.Comments { for _, c := range cg.List { if strings.HasPrefix(c.Text, "//go:linkname ") { @@ -921,300 +506,69 @@ func (g *graph) entry(pkg *pkg) { // (1.8) packages use symbols linked via go:linkname fields := strings.Fields(c.Text) if len(fields) == 3 { - if m, ok := pkg.IR.Members[fields[1]]; ok { - var obj types.Object - switch m := m.(type) { - case *ir.Global: - obj = m.Object() - case *ir.Function: - obj = m.Object() - default: - panic(fmt.Sprintf("unhandled type: %T", m)) - } - assert(obj != nil) - g.seeAndUse(obj, nil, edgeLinkname) + obj := g.pkg.Scope().Lookup(fields[1]) + if obj == nil { + continue } + g.use(obj, nil) } } } } } - surroundingFunc := func(obj types.Object) *ir.Function { - scope := obj.Parent() - for scope != nil { - if fn := scopes[scope]; fn != nil { - return fn - } - scope = scope.Parent() + for _, f := range g.files { + for _, decl := range f.Decls { + g.decl(decl, nil) } - return nil } - // IR form won't tell us about locally scoped types that aren't - // being used. Walk the list of Defs to get all named types. - // - // IR form also won't tell us about constants; use Defs and Uses - // to determine which constants exist and which are being used. - for _, obj := range pkg.TypesInfo.Defs { - switch obj := obj.(type) { - case *types.TypeName: - // types are being handled by walking the AST - case *types.Const: - g.see(obj) - fn := surroundingFunc(obj) - if fn == nil && obj.Exported() { - // (1.4) packages use exported constants - g.use(obj, nil, edgeExportedConstant) + if g.opts.GeneratedIsUsed { + // OPT(dh): depending on the options used, we do not need to track all objects. For example, if local variables + // are always used, then it is enough to use their surrounding function. 
+ for obj := range g.objects { + path := g.fset.PositionFor(obj.Pos(), false).Filename + if _, ok := g.generated[path]; ok { + g.use(obj, nil) } - g.typ(obj.Type(), nil) - g.seeAndUse(obj.Type(), obj, edgeType) } } - // Find constants being used inside functions, find sinks in tests - for _, fn := range pkg.SrcFuncs { - if fn.Object() != nil { - g.see(fn.Object()) - } - n := fn.Source() - if n == nil { - continue - } - ast.Inspect(n, func(n ast.Node) bool { - switch n := n.(type) { - case *ast.Ident: - obj, ok := pkg.TypesInfo.Uses[n] - if !ok { - return true - } - switch obj := obj.(type) { - case *types.Const: - g.seeAndUse(obj, owningObject(fn), edgeUsedConstant) - } - case *ast.AssignStmt: - for _, expr := range n.Lhs { - ident, ok := expr.(*ast.Ident) - if !ok { - continue - } - obj := pkg.TypesInfo.ObjectOf(ident) - if obj == nil { - continue - } - path := pkg.Fset.File(obj.Pos()).Name() - if strings.HasSuffix(path, "_test.go") { - if obj.Parent() != nil && obj.Parent().Parent() != nil && obj.Parent().Parent().Parent() == nil { - // object's scope is the package, whose - // parent is the file, whose parent is nil - - // (4.9) functions use package-level variables they assign to iff in tests (sinks for benchmarks) - // (9.7) variable _reads_ use variables, writes do not, except in tests - g.seeAndUse(obj, owningObject(fn), edgeTestSink) - } - } + processMethodSet := func(named *types.TypeName, ms *types.MethodSet) { + if g.opts.ExportedIsUsed { + for i := 0; i < ms.Len(); i++ { + m := ms.At(i) + if token.IsExported(m.Obj().Name()) { + // (2.1) named types use exported methods + // (6.4) structs use embedded fields that have exported methods + // + // By reading the selection, we read all embedded fields that are part of the path + g.readSelection(m, named) } } - - return true - }) - } - // Find constants being used in non-function contexts - for _, obj := range pkg.TypesInfo.Uses { - _, ok := obj.(*types.Const) - if !ok { - continue } - g.seeAndUse(obj, nil, edgeUsedConstant) - } - var fns []*types.Func - var fn *types.Func - var stack []ast.Node - for _, f := range pkg.Files { - ast.Inspect(f, func(n ast.Node) bool { - if n == nil { - pop := stack[len(stack)-1] - stack = stack[:len(stack)-1] - if _, ok := pop.(*ast.FuncDecl); ok { - fns = fns[:len(fns)-1] - if len(fns) == 0 { - fn = nil - } else { - fn = fns[len(fns)-1] - } - } - return true - } - stack = append(stack, n) - switch n := n.(type) { - case *ast.FuncDecl: - fn = pkg.TypesInfo.ObjectOf(n.Name).(*types.Func) - fns = append(fns, fn) - g.see(fn) - case *ast.GenDecl: - switch n.Tok { - case token.CONST: - groups := astutil.GroupSpecs(pkg.Fset, n.Specs) - for _, specs := range groups { - if len(specs) > 1 { - cg := &constGroup{} - g.see(cg) - for _, spec := range specs { - for _, name := range spec.(*ast.ValueSpec).Names { - obj := pkg.TypesInfo.ObjectOf(name) - // (10.1) const groups - g.seeAndUse(obj, cg, edgeConstGroup) - g.use(cg, obj, edgeConstGroup) - } - } - } - } - case token.VAR: - for _, spec := range n.Specs { - v := spec.(*ast.ValueSpec) - for _, name := range v.Names { - T := pkg.TypesInfo.TypeOf(name) - if fn != nil { - g.seeAndUse(T, fn, edgeVarDecl) - } else { - // TODO(dh): we likely want to make - // the type used by the variable, not - // the package containing the - // variable. But then we have to take - // special care of blank identifiers. 
- g.seeAndUse(T, nil, edgeVarDecl) - } - g.typ(T, nil) - } - } - case token.TYPE: - for _, spec := range n.Specs { - // go/types doesn't provide a way to go from a - // types.Named to the named type it was based on - // (the t1 in type t2 t1). Therefore we walk the - // AST and process GenDecls. - // - // (2.2) named types use the type they're based on - v := spec.(*ast.TypeSpec) - T := pkg.TypesInfo.TypeOf(v.Type) - obj := pkg.TypesInfo.ObjectOf(v.Name) - g.see(obj) - g.see(T) - g.use(T, obj, edgeType) - g.typ(obj.Type(), nil) - g.typ(T, nil) - - if v.Assign != 0 { - aliasFor := obj.(*types.TypeName).Type() - // (2.3) named types use all their aliases. we can't easily track uses of aliases - if isIrrelevant(aliasFor) { - // We do not track the type this is an - // alias for (for example builtins), so - // just mark the alias used. - // - // FIXME(dh): what about aliases declared inside functions? - g.use(obj, nil, edgeAlias) - } else { - g.see(aliasFor) - g.seeAndUse(obj, aliasFor, edgeAlias) - } - } + if _, ok := named.Type().Underlying().(*types.Interface); !ok { + // (8.0) handle interfaces + // + // We don't care about interfaces implementing interfaces; all their methods are already used, anyway + for _, iface := range g.interfaceTypes { + if sels, ok := implements(named.Type(), iface, ms); ok { + for _, sel := range sels { + // (8.2) any concrete type implements all known interfaces + // (6.3) structs use embedded fields that help implement interfaces + g.readSelection(sel, named) } } } - return true - }) + } } - for _, m := range pkg.IR.Members { - switch m := m.(type) { - case *ir.NamedConst: - // nothing to do, we collect all constants from Defs - case *ir.Global: - if m.Object() != nil { - g.see(m.Object()) - if m.Object().Exported() { - // (1.3) packages use exported variables - g.use(m.Object(), nil, edgeExportedVariable) - } - } - case *ir.Function: - mObj := owningObject(m) - if mObj != nil { - g.see(mObj) - } - //lint:ignore SA9003 handled implicitly - if m.Name() == "init" { - // (1.5) packages use init functions - // - // This is handled implicitly. The generated init - // function has no object, thus everything in it will - // be owned by the package. - } - // This branch catches top-level functions, not methods. - if m.Object() != nil && m.Object().Exported() { - // (1.2) packages use exported functions - g.use(mObj, nil, edgeExportedFunction) - } - if m.Name() == "main" && pkg.Pkg.Name() == "main" { - // (1.7) packages use the main function iff in the main package - g.use(mObj, nil, edgeMainFunction) - } - if pkg.Pkg.Path() == "runtime" && runtimeFuncs[m.Name()] { - // (9.8) runtime functions that may be called from user code via the compiler - g.use(mObj, nil, edgeRuntimeFunction) - } - if m.Source() != nil { - doc := m.Source().(*ast.FuncDecl).Doc - if doc != nil { - for _, cmt := range doc.List { - if strings.HasPrefix(cmt.Text, "//go:cgo_export_") { - // (1.6) packages use functions exported to cgo - g.use(mObj, nil, edgeCgoExported) - } - } - } - } - g.function(m) - case *ir.Type: - g.see(m.Object()) - if m.Object().Exported() { - // (1.1) packages use exported named types - g.use(m.Object(), nil, edgeExportedType) - } - g.typ(m.Type(), nil) - default: - panic(fmt.Sprintf("unreachable: %T", m)) - } - } - - // OPT(dh): can we find meaningful initial capacities for these slices? 
- var ifaces []*types.Interface - var notIfaces []types.Type - - for t := range g.seenTypes { - switch t := t.(type) { - case *types.Interface: - // OPT(dh): (8.1) we only need interfaces that have unexported methods - ifaces = append(ifaces, t) - default: - if _, ok := t.Underlying().(*types.Interface); !ok { - notIfaces = append(notIfaces, t) - } - } - } + for _, named := range g.namedTypes { + // OPT(dh): do we already have the method set available? + processMethodSet(named, types.NewMethodSet(named.Type())) + processMethodSet(named, types.NewMethodSet(types.NewPointer(named.Type()))) - // (8.0) handle interfaces - for _, t := range notIfaces { - ms := pkg.IR.Prog.MethodSets.MethodSet(t) - for _, iface := range ifaces { - if sels, ok := g.implements(t, iface, ms); ok { - for _, sel := range sels { - g.useMethod(t, sel, t, edgeImplements) - } - } - } } type ignoredKey struct { @@ -1222,7 +576,7 @@ func (g *graph) entry(pkg *pkg) { line int } ignores := map[ignoredKey]struct{}{} - for _, dir := range pkg.Directives { + for _, dir := range g.directives { if dir.Command != "ignore" && dir.Command != "file-ignore" { continue } @@ -1231,7 +585,7 @@ func (g *graph) entry(pkg *pkg) { } for _, check := range strings.Split(dir.Arguments[0], ",") { if check == "U1000" { - pos := pkg.Fset.PositionFor(dir.Node.Pos(), false) + pos := g.fset.PositionFor(dir.Node.Pos(), false) var key ignoredKey switch dir.Command { case "ignore": @@ -1254,46 +608,43 @@ func (g *graph) entry(pkg *pkg) { if len(ignores) > 0 { // all objects annotated with a //lint:ignore U1000 are considered used - for obj := range g.Nodes { - if obj, ok := obj.(types.Object); ok { - pos := pkg.Fset.PositionFor(obj.Pos(), false) - key1 := ignoredKey{ - pos.Filename, - pos.Line, - } - key2 := ignoredKey{ - pos.Filename, - -1, - } - _, ok := ignores[key1] - if !ok { - _, ok = ignores[key2] - } - if ok { - g.use(obj, nil, edgeIgnored) - - // use methods and fields of ignored types - if obj, ok := obj.(*types.TypeName); ok { - if obj.IsAlias() { - if typ, ok := obj.Type().(*types.Named); ok && typ.Obj().Pkg() != obj.Pkg() { - // This is an alias of a named type in another package. - // Don't walk its fields or methods; we don't have to, - // and it breaks an assertion in graph.use because we're using an object that we haven't seen before. - // - // For aliases to types in the same package, we do want to ignore the fields and methods, - // because ignoring the alias should ignore the aliased type. - continue - } + for obj := range g.objects { + pos := g.fset.PositionFor(obj.Pos(), false) + key1 := ignoredKey{ + pos.Filename, + pos.Line, + } + key2 := ignoredKey{ + pos.Filename, + -1, + } + _, ok := ignores[key1] + if !ok { + _, ok = ignores[key2] + } + if ok { + g.use(obj, nil) + + // use methods and fields of ignored types + if obj, ok := obj.(*types.TypeName); ok { + if obj.IsAlias() { + if typ, ok := obj.Type().(*types.Named); ok && (g.opts.ExportedIsUsed && typ.Obj().Pkg() != obj.Pkg() || typ.Obj().Pkg() == nil) { + // This is an alias of a named type in another package. + // Don't walk its fields or methods; we don't have to. + // + // For aliases to types in the same package, we do want to ignore the fields and methods, + // because ignoring the alias should ignore the aliased type. 
+ continue } - if typ, ok := obj.Type().(*types.Named); ok { - for i := 0; i < typ.NumMethods(); i++ { - g.use(typ.Method(i), nil, edgeIgnored) - } + } + if typ, ok := obj.Type().(*types.Named); ok { + for i := 0; i < typ.NumMethods(); i++ { + g.use(typ.Method(i), nil) } - if typ, ok := obj.Type().Underlying().(*types.Struct); ok { - for i := 0; i < typ.NumFields(); i++ { - g.use(typ.Field(i), nil, edgeIgnored) - } + } + if typ, ok := obj.Type().Underlying().(*types.Struct); ok { + for i := 0; i < typ.NumFields(); i++ { + g.use(typ.Field(i), nil) } } } @@ -1302,497 +653,792 @@ func (g *graph) entry(pkg *pkg) { } } -func (g *graph) useMethod(t types.Type, sel *types.Selection, by interface{}, kind edgeKind) { - obj := sel.Obj().(*types.Func) - path := sel.Index() - assert(obj != nil) - if len(path) > 1 { - base := typeutil.Dereference(t).Underlying().(*types.Struct) - for _, idx := range path[:len(path)-1] { - next := base.Field(idx) - // (6.3) structs use embedded fields that help implement interfaces - g.see(base) - g.seeAndUse(next, base, edgeProvidesMethod) - base, _ = typeutil.Dereference(next.Type()).Underlying().(*types.Struct) - } - } - g.seeAndUse(obj, by, kind) -} - -func owningObject(fn *ir.Function) types.Object { - if fn.Object() != nil { - return fn.Object() - } - if fn.Parent() != nil { - return owningObject(fn.Parent()) - } - return nil +func isOfType[T any](x any) bool { + _, ok := x.(T) + return ok } -func (g *graph) function(fn *ir.Function) { - assert(fn != nil) - if fn.Package() != nil && fn.Package() != g.pkg.IR { +func (g *graph) read(node ast.Node, by types.Object) { + if node == nil { return } - if _, ok := g.seenFns[fn]; ok { - return - } - g.seenFns[fn] = struct{}{} + switch node := node.(type) { + case *ast.Ident: + // Among many other things, this handles + // (7.1) field accesses use fields - // (4.1) functions use all their arguments, return parameters and receivers - g.signature(fn.Signature, owningObject(fn)) - g.instructions(fn) - for _, anon := range fn.AnonFuncs { - // (4.2) functions use anonymous functions defined beneath them - // - // This fact is expressed implicitly. Anonymous functions have - // no types.Object, so their owner is the surrounding - // function. 
- g.function(anon) - } -} + obj := g.info.ObjectOf(node) + g.use(obj, by) -func (g *graph) typ(t types.Type, parent types.Type) { - if _, ok := g.seenTypes[t]; ok { - return - } + case *ast.BasicLit: + // Nothing to do - if t, ok := t.(*types.Named); ok && t.Obj().Pkg() != nil { - if t.Obj().Pkg() != g.pkg.Pkg { - return + case *ast.SliceExpr: + g.read(node.X, by) + g.read(node.Low, by) + g.read(node.High, by) + g.read(node.Max, by) + + case *ast.UnaryExpr: + g.read(node.X, by) + + case *ast.ParenExpr: + g.read(node.X, by) + + case *ast.ArrayType: + g.read(node.Len, by) + g.read(node.Elt, by) + + case *ast.SelectorExpr: + g.readSelectorExpr(node, by) + + case *ast.IndexExpr: + // Among many other things, this handles + // (2.6) named types use all their type arguments + g.read(node.X, by) + g.read(node.Index, by) + + case *ast.IndexListExpr: + // Among many other things, this handles + // (2.6) named types use all their type arguments + g.read(node.X, by) + for _, index := range node.Indices { + g.read(index, by) } - } - g.seenTypes[t] = struct{}{} - if isIrrelevant(t) { - return - } - - g.see(t) - switch t := t.(type) { - case *types.Struct: - for i := 0; i < t.NumFields(); i++ { - g.see(t.Field(i)) - if t.Field(i).Exported() { - // (6.2) structs use exported fields - g.use(t.Field(i), t, edgeExportedField) - } else if t.Field(i).Name() == "_" { - g.use(t.Field(i), t, edgeBlankField) - } else if isNoCopyType(t.Field(i).Type()) { - // (6.1) structs use fields of type NoCopy sentinel - g.use(t.Field(i), t, edgeNoCopySentinel) - } else if parent == nil { - // (11.1) anonymous struct types use all their fields. - g.use(t.Field(i), t, edgeAnonymousStruct) + case *ast.BinaryExpr: + g.read(node.X, by) + g.read(node.Y, by) + + case *ast.CompositeLit: + g.read(node.Type, by) + // We get the type of the node itself, not of node.Type, to handle nested composite literals of the kind + // T{{...}} + typ, isStruct := typeutil.CoreType(g.info.TypeOf(node)).(*types.Struct) + + if isStruct { + unkeyed := len(node.Elts) != 0 && !isOfType[*ast.KeyValueExpr](node.Elts[0]) + if g.opts.FieldWritesAreUses && unkeyed { + // Untagged struct literal that specifies all fields. We have to manually use the fields in the type, + // because the unkeyd literal doesn't contain any nodes referring to the fields. + for i := 0; i < typ.NumFields(); i++ { + g.use(typ.Field(i), by) + } } - if t.Field(i).Anonymous() { - // does the embedded field contribute exported methods to the method set? 
- T := t.Field(i).Type() - if _, ok := T.Underlying().(*types.Pointer); !ok { - // An embedded field is addressable, so check - // the pointer type to get the full method set - T = g.newPointer(T) + if g.opts.FieldWritesAreUses || unkeyed { + for _, elt := range node.Elts { + g.read(elt, by) } - ms := g.pkg.IR.Prog.MethodSets.MethodSet(T) - for j := 0; j < ms.Len(); j++ { - if ms.At(j).Obj().Exported() { - // (6.4) structs use embedded fields that have exported methods (recursively) - g.use(t.Field(i), t, edgeExtendsExportedMethodSet) - break - } + } else { + for _, elt := range node.Elts { + kv := elt.(*ast.KeyValueExpr) + g.write(kv.Key, by) + g.read(kv.Value, by) } + } + } else { + for _, elt := range node.Elts { + g.read(elt, by) + } + } - seen := map[*types.Struct]struct{}{} - var hasExportedField func(t types.Type) bool - hasExportedField = func(T types.Type) bool { - t, ok := typeutil.Dereference(T).Underlying().(*types.Struct) - if !ok { - return false - } - if _, ok := seen[t]; ok { - return false - } - seen[t] = struct{}{} - for i := 0; i < t.NumFields(); i++ { - field := t.Field(i) - if field.Exported() { - return true - } - if field.Embedded() && hasExportedField(field.Type()) { - return true - } - } - return false - } - // does the embedded field contribute exported fields? - if hasExportedField(t.Field(i).Type()) { - // (6.5) structs use embedded structs that have exported fields (recursively) - g.use(t.Field(i), t, edgeExtendsExportedFields) - } + case *ast.KeyValueExpr: + g.read(node.Key, by) + g.read(node.Value, by) + + case *ast.StarExpr: + g.read(node.X, by) + + case *ast.MapType: + g.read(node.Key, by) + g.read(node.Value, by) + case *ast.FuncLit: + g.read(node.Type, by) + + // See graph.decl's handling of ast.FuncDecl for why this bit of code is necessary. + fn := g.info.TypeOf(node).(*types.Signature) + for params, i := fn.Params(), 0; i < params.Len(); i++ { + g.see(params.At(i), by) + if params.At(i).Name() == "" { + g.use(params.At(i), by) } - g.variable(t.Field(i)) } - case *types.Basic: - // Nothing to do - case *types.Named: - // (9.3) types use their underlying and element types - origin := typeparams.NamedTypeOrigin(t) - g.seeAndUse(origin.Underlying(), t, edgeUnderlyingType) - g.seeAndUse(t.Obj(), t, edgeTypeName) - g.seeAndUse(t, t.Obj(), edgeNamedType) - - // (2.4) named types use the pointer type - if _, ok := t.Underlying().(*types.Interface); !ok && t.NumMethods() > 0 { - g.seeAndUse(g.newPointer(origin), t, edgePointerType) + + g.block(node.Body, by) + + case *ast.FuncType: + m := map[*types.Var]struct{}{} + if !g.opts.ParametersAreUsed { + m = map[*types.Var]struct{}{} + // seeScope marks all local variables in the scope as used, but we don't want to unconditionally use + // parameters, as this is controlled by Options.ParametersAreUsed. Pass seeScope a list of variables it + // should skip. 
+ for _, f := range node.Params.List { + for _, name := range f.Names { + m[g.info.ObjectOf(name).(*types.Var)] = struct{}{} + } + } } + g.seeScope(node, by, m) - // (2.5) named types use their type parameters + // (4.1) functions use all their arguments, return parameters and receivers + // (12.1) type parameters use their constraint type + g.read(node.TypeParams, by) + if g.opts.ParametersAreUsed { + g.read(node.Params, by) + } + g.read(node.Results, by) - for i := 0; i < typeparams.ForNamed(t).Len(); i++ { - tparam := typeparams.ForNamed(t).At(i) - g.seeAndUse(tparam, t, edgeTypeParam) - g.typ(tparam, nil) + case *ast.FieldList: + if node == nil { + return } - // (2.6) named types use their type arguments - for i := 0; i < typeparams.NamedTypeArgs(t).Len(); i++ { - targ := typeparams.NamedTypeArgs(t).At(i) - g.seeAndUse(targ, t, edgeTypeArg) - g.typ(t, nil) + // This branch is only hit for field lists enclosed by parentheses or square brackets, i.e. parameters. Fields + // (for structs) and method lists (for interfaces) are handled elsewhere. + + for _, field := range node.List { + if len(field.Names) == 0 { + g.read(field.Type, by) + } else { + for _, name := range field.Names { + // OPT(dh): instead of by -> name -> type, we could just emit by -> type. We don't care about the + // (un)usedness of parameters of any kind. + obj := g.info.ObjectOf(name) + g.use(obj, by) + g.read(field.Type, obj) + } + } } - for i := 0; i < t.NumMethods(); i++ { - g.see(t.Method(i)) - // don't use trackExportedIdentifier here, we care about - // all exported methods, even in package main or in tests. - if t.Method(i).Exported() { - // (2.1) named types use exported methods - g.use(t.Method(i), t, edgeExportedMethod) + case *ast.ChanType: + g.read(node.Value, by) + + case *ast.StructType: + // This is only used for anonymous struct types, not named ones. + + for _, field := range node.Fields.List { + if len(field.Names) == 0 { + // embedded field + + f := g.embeddedField(field.Type, by) + g.use(f, by) + } else { + for _, name := range field.Names { + // (11.1) anonymous struct types use all their fields + // OPT(dh): instead of by -> name -> type, we could just emit by -> type. If the type is used, then the fields are used. 
+ obj := g.info.ObjectOf(name) + g.see(obj, by) + g.use(obj, by) + g.read(field.Type, g.info.ObjectOf(name)) + } } - g.function(g.pkg.IR.Prog.FuncValue(t.Method(i))) } - g.typ(origin.Underlying(), t) - case *types.Slice: - // (9.3) types use their underlying and element types - g.seeAndUse(t.Elem(), t, edgeElementType) - g.typ(t.Elem(), nil) - case *types.Map: - // (9.3) types use their underlying and element types - g.seeAndUse(t.Elem(), t, edgeElementType) - // (9.3) types use their underlying and element types - g.seeAndUse(t.Key(), t, edgeKeyType) - g.typ(t.Elem(), nil) - g.typ(t.Key(), nil) - case *types.Signature: - g.signature(t, nil) - case *types.Interface: - for i := 0; i < t.NumMethods(); i++ { - m := t.Method(i) - // (8.3) All interface methods are marked as used - g.seeAndUse(m, t, edgeInterfaceMethod) - g.seeAndUse(m.Type().(*types.Signature), m, edgeSignature) - g.signature(m.Type().(*types.Signature), nil) + case *ast.TypeAssertExpr: + g.read(node.X, by) + g.read(node.Type, by) + + case *ast.InterfaceType: + if len(node.Methods.List) != 0 { + g.interfaceTypes = append(g.interfaceTypes, g.info.TypeOf(node).(*types.Interface)) } - for i := 0; i < t.NumEmbeddeds(); i++ { - tt := t.EmbeddedType(i) - // (8.4) All embedded interfaces are marked as used - g.typ(tt, nil) - g.seeAndUse(tt, t, edgeEmbeddedInterface) + for _, meth := range node.Methods.List { + switch len(meth.Names) { + case 0: + // Embedded type or type union + // (8.4) all embedded interfaces are marked as used + // (this also covers type sets) + + g.read(meth.Type, by) + case 1: + // Method + // (8.3) all interface methods are marked as used + obj := g.info.ObjectOf(meth.Names[0]) + g.see(obj, by) + g.use(obj, by) + g.read(meth.Type, obj) + default: + panic(fmt.Sprintf("unexpected number of names: %d", len(meth.Names))) + } } - case *types.Array: - // (9.3) types use their underlying and element types - g.seeAndUse(t.Elem(), t, edgeElementType) - g.typ(t.Elem(), nil) - case *types.Pointer: - // (9.3) types use their underlying and element types - g.seeAndUse(t.Elem(), t, edgeElementType) - g.typ(t.Elem(), nil) - case *types.Chan: - // (9.3) types use their underlying and element types - g.seeAndUse(t.Elem(), t, edgeElementType) - g.typ(t.Elem(), nil) - case *types.Tuple: - for i := 0; i < t.Len(); i++ { - // (9.3) types use their underlying and element types - g.seeAndUse(t.At(i).Type(), t, edgeTupleElement|edgeType) - g.typ(t.At(i).Type(), nil) + + case *ast.Ellipsis: + g.read(node.Elt, by) + + case *ast.CallExpr: + g.read(node.Fun, by) + for _, arg := range node.Args { + g.read(arg, by) } - case *typeutil.Iterator: - // (9.3) types use their underlying and element types - g.seeAndUse(t.Elem(), t, edgeElementType) - g.typ(t.Elem(), nil) - case *typeparams.TypeParam: - // (9.3) types use their underlying and element types - - g.seeAndUse(t.Obj(), t, edgeTypeName) - g.seeAndUse(t, t.Obj(), edgeNamedType) - g.seeAndUse(t.Constraint(), t, edgeElementType) - g.typ(t.Constraint(), t) - case *typeparams.Union: - for i := 0; i < t.Len(); i++ { - g.seeAndUse(t.Term(i).Type(), t, edgeUnionTerm) - g.typ(t.Term(i).Type(), nil) + + // Handle conversiosn + conv := node + if len(conv.Args) != 1 || conv.Ellipsis.IsValid() { + return + } + + dst := g.info.TypeOf(conv.Fun) + src := g.info.TypeOf(conv.Args[0]) + + // XXX use DereferenceR instead + // XXX guard against infinite recursion in DereferenceR + tSrc := typeutil.CoreType(typeutil.Dereference(src)) + tDst := typeutil.CoreType(typeutil.Dereference(dst)) + stSrc, okSrc := 
tSrc.(*types.Struct) + stDst, okDst := tDst.(*types.Struct) + if okDst && okSrc { + // Converting between two structs. The fields are + // relevant for the conversion, but only if the + // fields are also used outside of the conversion. + // Mark fields as used by each other. + + assert(stDst.NumFields() == stSrc.NumFields()) + for i := 0; i < stDst.NumFields(); i++ { + // (5.1) when converting between two equivalent structs, the fields in + // either struct use each other. the fields are relevant for the + // conversion, but only if the fields are also accessed outside the + // conversion. + g.use(stDst.Field(i), stSrc.Field(i)) + g.use(stSrc.Field(i), stDst.Field(i)) + } + } else if okSrc && tDst == types.Typ[types.UnsafePointer] { + // (5.2) when converting to or from unsafe.Pointer, mark all fields as used. + g.useAllFieldsRecursively(stSrc, by) + } else if okDst && tSrc == types.Typ[types.UnsafePointer] { + // (5.2) when converting to or from unsafe.Pointer, mark all fields as used. + g.useAllFieldsRecursively(stDst, by) } + default: - panic(fmt.Sprintf("unreachable: %T", t)) + lint.ExhaustiveTypeSwitch(node) } } -func (g *graph) variable(v *types.Var) { - // (9.2) variables use their types - g.seeAndUse(v.Type(), v, edgeType) - g.typ(v.Type(), nil) +func (g *graph) useAllFieldsRecursively(typ types.Type, by types.Object) { + switch typ := typ.Underlying().(type) { + case *types.Struct: + for i := 0; i < typ.NumFields(); i++ { + field := typ.Field(i) + g.use(field, by) + g.useAllFieldsRecursively(field.Type(), by) + } + case *types.Array: + g.useAllFieldsRecursively(typ.Elem(), by) + default: + return + } } -func (g *graph) signature(sig *types.Signature, fn types.Object) { - var user interface{} = fn - if fn == nil { - user = sig - g.see(sig) +func (g *graph) write(node ast.Node, by types.Object) { + if node == nil { + return } - if sig.Recv() != nil { - g.seeAndUse(sig.Recv().Type(), user, edgeReceiver|edgeType) - g.typ(sig.Recv().Type(), nil) + + switch node := node.(type) { + case *ast.Ident: + obj := g.info.ObjectOf(node) + if obj == nil { + // This can happen for `switch x := v.(type)`, where that x doesn't have an object + return + } + + // (4.9) functions use package-level variables they assign to iff in tests (sinks for benchmarks) + // (9.7) variable _reads_ use variables, writes do not, except in tests + path := g.fset.File(obj.Pos()).Name() + if strings.HasSuffix(path, "_test.go") { + if isGlobal(obj) { + g.use(obj, by) + } + } + + case *ast.IndexExpr: + g.read(node.X, by) + g.read(node.Index, by) + + case *ast.SelectorExpr: + if g.opts.FieldWritesAreUses { + // Writing to a field constitutes a use. See https://staticcheck.io/issues/288 for some discussion on that. + // + // This code can also get triggered by qualified package variables, in which case it doesn't matter what we do, + // because the object is in another package. + // + // FIXME(dh): ^ isn't true if we track usedness of exported identifiers + g.readSelectorExpr(node, by) + } else { + g.read(node.X, by) + g.write(node.Sel, by) + } + + case *ast.StarExpr: + g.read(node.X, by) + + case *ast.ParenExpr: + g.write(node.X, by) + + default: + lint.ExhaustiveTypeSwitch(node) } - for i := 0; i < sig.Params().Len(); i++ { - param := sig.Params().At(i) - g.seeAndUse(param.Type(), user, edgeFunctionArgument|edgeType) - g.typ(param.Type(), nil) +} + +// readSelectorExpr reads all elements of a selector expression, including implicit fields. 
+func (g *graph) readSelectorExpr(sel *ast.SelectorExpr, by types.Object) { + // cover AST-based accesses + g.read(sel.X, by) + g.read(sel.Sel, by) + + tsel, ok := g.info.Selections[sel] + if !ok { + return } - for i := 0; i < sig.Results().Len(); i++ { - param := sig.Results().At(i) - g.seeAndUse(param.Type(), user, edgeFunctionResult|edgeType) - g.typ(param.Type(), nil) + g.readSelection(tsel, by) +} + +func (g *graph) readSelection(sel *types.Selection, by types.Object) { + indices := sel.Index() + base := sel.Recv() + for _, idx := range indices[:len(indices)-1] { + // XXX do we need core types here? + field := typeutil.Dereference(base.Underlying()).Underlying().(*types.Struct).Field(idx) + g.use(field, by) + base = field.Type() } - for i := 0; i < typeparams.RecvTypeParams(sig).Len(); i++ { - // We track the type parameter's constraint, not the type parameter itself. - // We never want to flag an unused type parameter. - param := typeparams.RecvTypeParams(sig).At(i).Constraint() - g.seeAndUse(param, user, edgeFunctionArgument|edgeType) - g.typ(param, nil) + + g.use(sel.Obj(), by) +} + +func (g *graph) block(block *ast.BlockStmt, by types.Object) { + if block == nil { + return } - for i := 0; i < typeparams.ForSignature(sig).Len(); i++ { - // We track the type parameter's constraint, not the type parameter itself. - // We never want to flag an unused type parameter. - param := typeparams.ForSignature(sig).At(i).Constraint() - g.seeAndUse(param, user, edgeFunctionArgument|edgeType) - g.typ(param, nil) + + g.seeScope(block, by, nil) + for _, stmt := range block.List { + g.stmt(stmt, by) } } -func (g *graph) instructions(fn *ir.Function) { - fnObj := owningObject(fn) - for _, b := range fn.Blocks { - for _, instr := range b.Instrs { - ops := instr.Operands(nil) - switch instr.(type) { - case *ir.Store: - // (9.7) variable _reads_ use variables, writes do not - ops = ops[1:] - case *ir.DebugRef: - ops = nil +func isGlobal(obj types.Object) bool { + return obj.Parent() == obj.Pkg().Scope() +} + +func (g *graph) decl(decl ast.Decl, by types.Object) { + switch decl := decl.(type) { + case *ast.GenDecl: + switch decl.Tok { + case token.IMPORT: + // Nothing to do + + case token.CONST: + for _, spec := range decl.Specs { + vspec := spec.(*ast.ValueSpec) + assert(len(vspec.Values) == 0 || len(vspec.Values) == len(vspec.Names)) + for i, name := range vspec.Names { + obj := g.info.ObjectOf(name) + g.see(obj, by) + g.read(vspec.Type, obj) + + if len(vspec.Values) != 0 { + g.read(vspec.Values[i], obj) + } + + if name.Name == "_" { + // (9.9) objects named the blank identifier are used + g.use(obj, by) + } else if token.IsExported(name.Name) && isGlobal(obj) && g.opts.ExportedIsUsed { + g.use(obj, nil) + } + } } - for _, arg := range ops { - walkPhi(*arg, func(v ir.Value) { - switch v := v.(type) { - case *ir.Function: - // (4.3) functions use closures and bound methods. - // (4.5) functions use functions they call - // (9.5) instructions use their operands - // (4.4) functions use functions they return. we assume that someone else will call the returned function - if owningObject(v) != nil { - g.seeAndUse(owningObject(v), fnObj, edgeInstructionOperand) + + groups := astutil.GroupSpecs(g.fset, decl.Specs) + for _, group := range groups { + // (10.1) if one constant out of a block of constants is used, mark all of them used + // + // We encode this as a ring. If we have a constant group 'const ( a; b; c )', then we'll produce the + // following graph: a -> b -> c -> a. 
+ + var first, prev, last types.Object + for _, spec := range group { + for _, name := range spec.(*ast.ValueSpec).Names { + if name.Name == "_" { + // Having a blank constant in a group doesn't mark the whole group as used + continue } - g.function(v) - case *ir.Const: - // (9.6) instructions use their operands' types - g.seeAndUse(v.Type(), fnObj, edgeType) - g.typ(v.Type(), nil) - case *ir.Global: - if v.Object() != nil { - // (9.5) instructions use their operands - g.seeAndUse(v.Object(), fnObj, edgeInstructionOperand) + + obj := g.info.ObjectOf(name) + if first == nil { + first = obj + } else { + g.use(obj, prev) } + prev = obj + last = obj } - }) - } - if v, ok := instr.(ir.Value); ok { - if _, ok := v.(*ir.Range); !ok { - // See https://github.com/golang/go/issues/19670 - - // (4.8) instructions use their types - // (9.4) conversions use the type they convert to - g.seeAndUse(v.Type(), fnObj, edgeType) - g.typ(v.Type(), nil) + } + if first != nil && first != last { + g.use(first, last) } } - switch instr := instr.(type) { - case *ir.Field: - // Can't access fields via generics, for now. - - st := instr.X.Type().Underlying().(*types.Struct) - field := st.Field(instr.Field) - // (4.7) functions use fields they access - g.seeAndUse(field, fnObj, edgeFieldAccess) - case *ir.FieldAddr: - // User code can't access fields on type parameters, but composite literals are still possible, which - // compile to FieldAddr + Store. - - st := typeutil.CoreType(typeutil.Dereference(instr.X.Type())).(*types.Struct) - field := st.Field(instr.Field) - // (4.7) functions use fields they access - g.seeAndUse(field, fnObj, edgeFieldAccess) - case *ir.Store: - // nothing to do, handled generically by operands - case ir.CallInstruction: - c := instr.Common() - for _, targ := range c.TypeArgs { - g.seeAndUse(targ, fnObj, edgeTypeArg) + + case token.TYPE: + for _, spec := range decl.Specs { + tspec := spec.(*ast.TypeSpec) + obj := g.info.ObjectOf(tspec.Name).(*types.TypeName) + g.see(obj, by) + g.seeScope(tspec, obj, nil) + if !tspec.Assign.IsValid() { + g.namedTypes = append(g.namedTypes, obj) } - if !c.IsInvoke() { - // handled generically as an instruction operand - } else { - // (4.5) functions use functions/interface methods they call - g.seeAndUse(c.Method, fnObj, edgeInterfaceCall) + if token.IsExported(tspec.Name.Name) && isGlobal(obj) && g.opts.ExportedIsUsed { + // (1.1) packages use exported named types + g.use(g.info.ObjectOf(tspec.Name), nil) } - case *ir.Return: - // nothing to do, handled generically by operands - case *ir.ChangeType: - // conversion type handled generically - - s1, ok1 := typeutil.CoreType(typeutil.Dereference(instr.Type())).(*types.Struct) - s2, ok2 := typeutil.CoreType(typeutil.Dereference(instr.X.Type())).(*types.Struct) - if ok1 && ok2 { - // Converting between two structs. The fields are - // relevant for the conversion, but only if the - // fields are also used outside of the conversion. - // Mark fields as used by each other. - - assert(s1.NumFields() == s2.NumFields()) - for i := 0; i < s1.NumFields(); i++ { - g.see(s1.Field(i)) - g.see(s2.Field(i)) - // (5.1) when converting between two equivalent structs, the fields in - // either struct use each other. the fields are relevant for the - // conversion, but only if the fields are also accessed outside the - // conversion. 
- g.seeAndUse(s1.Field(i), s2.Field(i), edgeStructConversion) - g.seeAndUse(s2.Field(i), s1.Field(i), edgeStructConversion) - } + + // (2.5) named types use all their type parameters + g.read(tspec.TypeParams, obj) + + g.namedType(obj, tspec.Type) + + if tspec.Name.Name == "_" { + // (9.9) objects named the blank identifier are used + g.use(obj, by) } - case *ir.MakeInterface: - // nothing to do, handled generically by operands - case *ir.Slice: - // nothing to do, handled generically by operands - case *ir.RunDefers: - // nothing to do, the deferred functions are already marked use by deferring them. - case *ir.Convert: - // to unsafe.Pointer - if typ, ok := instr.Type().(*types.Basic); ok && typ.Kind() == types.UnsafePointer { - if ptr, ok := instr.X.Type().Underlying().(*types.Pointer); ok { - if st, ok := ptr.Elem().Underlying().(*types.Struct); ok { - for i := 0; i < st.NumFields(); i++ { - // (5.2) when converting to or from unsafe.Pointer, mark all fields as used. - g.seeAndUse(st.Field(i), fnObj, edgeUnsafeConversion) - } + } + + case token.VAR: + // We cannot rely on types.Initializer for package-level variables because + // - initializers are only tracked for variables that are actually initialized + // - we want to see the AST of the type, if specified, not just the rhs + + for _, spec := range decl.Specs { + vspec := spec.(*ast.ValueSpec) + for i, name := range vspec.Names { + obj := g.info.ObjectOf(name) + g.see(obj, by) + // variables and constants use their types + g.read(vspec.Type, obj) + + if len(vspec.Names) == len(vspec.Values) { + // One value per variable + g.read(vspec.Values[i], obj) + } else if len(vspec.Values) != 0 { + // Multiple variables initialized with a single rhs + // assert(len(vspec.Values) == 1) + if len(vspec.Values) != 1 { + panic(g.fset.PositionFor(vspec.Pos(), false)) } + g.read(vspec.Values[0], obj) } - } - // from unsafe.Pointer - if typ, ok := instr.X.Type().(*types.Basic); ok && typ.Kind() == types.UnsafePointer { - if ptr, ok := instr.Type().Underlying().(*types.Pointer); ok { - if st, ok := ptr.Elem().Underlying().(*types.Struct); ok { - for i := 0; i < st.NumFields(); i++ { - // (5.2) when converting to or from unsafe.Pointer, mark all fields as used. - g.seeAndUse(st.Field(i), fnObj, edgeUnsafeConversion) - } - } + + if token.IsExported(name.Name) && isGlobal(obj) && g.opts.ExportedIsUsed { + // (1.3) packages use exported variables + g.use(obj, nil) + } + + if name.Name == "_" { + // (9.9) objects named the blank identifier are used + g.use(obj, by) } } - case *ir.TypeAssert: - // nothing to do, handled generically by instruction - // type (possibly a tuple, which contains the asserted - // to type). 
redundantly handled by the type of - // ir.Extract, too - case *ir.MakeClosure: - // nothing to do, handled generically by operands - case *ir.Alloc: - // nothing to do - case *ir.UnOp: - // nothing to do - case *ir.BinOp: - // nothing to do - case *ir.If: - // nothing to do - case *ir.Jump: - // nothing to do - case *ir.Unreachable: - // nothing to do - case *ir.IndexAddr: - // nothing to do - case *ir.Extract: - // nothing to do - case *ir.Panic: - // nothing to do - case *ir.DebugRef: - // nothing to do - case *ir.BlankStore: - // nothing to do - case *ir.Phi: - // nothing to do - case *ir.Sigma: - // nothing to do - case *ir.MakeMap: - // nothing to do - case *ir.MapUpdate: - // nothing to do - case *ir.MapLookup: - // nothing to do - case *ir.StringLookup: - // nothing to do - case *ir.MakeSlice: - // nothing to do - case *ir.Send: - // nothing to do - case *ir.MakeChan: - // nothing to do - case *ir.Range: - // nothing to do - case *ir.Next: - // nothing to do - case *ir.Index: - // nothing to do - case *ir.Select: - // nothing to do - case *ir.ChangeInterface: - // nothing to do - case *ir.Load: - // nothing to do - case *ir.Parameter: - // nothing to do - case *ir.Const: - // nothing to do - case *ir.ArrayConst: - // nothing to do - case *ir.AggregateConst: - // nothing to do - case *ir.GenericConst: - // nothing to do - case *ir.Recv: - // nothing to do - case *ir.TypeSwitch: - // nothing to do - case *ir.ConstantSwitch: - // nothing to do - case *ir.SliceToArrayPointer: - // nothing to do + } + + default: + panic(fmt.Sprintf("unexpected token %s", decl.Tok)) + } + + case *ast.FuncDecl: + obj := g.info.ObjectOf(decl.Name).(*types.Func).Origin() + g.see(obj, nil) + + if token.IsExported(decl.Name.Name) && g.opts.ExportedIsUsed { + if decl.Recv == nil { + // (1.2) packages use exported functions + g.use(obj, nil) + } + } else if decl.Name.Name == "init" { + // (1.5) packages use init functions + g.use(obj, nil) + } else if decl.Name.Name == "main" && g.pkg.Name() == "main" { + // (1.7) packages use the main function iff in the main package + g.use(obj, nil) + } else if g.pkg.Path() == "runtime" && runtimeFuncs[decl.Name.Name] { + // (9.8) runtime functions that may be called from user code via the compiler + g.use(obj, nil) + } else if g.pkg.Path() == "runtime/coverage" && runtimeCoverageFuncs[decl.Name.Name] { + // (9.8) runtime functions that may be called from user code via the compiler + g.use(obj, nil) + } + + // (4.1) functions use their receivers + g.read(decl.Recv, obj) + g.read(decl.Type, obj) + g.block(decl.Body, obj) + + // g.read(decl.Type) will ultimately call g.seeScopes and see parameters that way. But because it relies + // entirely on the AST, it cannot resolve unnamed parameters to types.Object. For that reason we explicitly + // handle arguments here, as well as for FuncLits elsewhere. + // + // g.seeScopes can't get to the types.Signature for this function because there is no mapping from ast.FuncType to + // types.Signature, only from ast.Ident to types.Signature. + // + // This code is only really relevant when Options.ParametersAreUsed is false. Otherwise, all parameters are + // considered used, and if we never see a parameter then no harm done (we still see its type separately). 
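To make the comment above concrete, a hypothetical pair of declarations (not part of the patch) showing the two cases:

    package example

    // All parameters are unnamed: there is no ast.Ident to resolve them to a
    // types.Object, so the graph can only reach them via the *types.Signature,
    // where they are marked used up front (relevant when ParametersAreUsed is
    // false).
    func Discard([]byte, int) {}

    // b and n are ordinary *types.Var objects reachable from the AST and can be
    // tracked individually.
    func Keep(b []byte, n int) { _, _ = b, n }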
+ fn := g.info.TypeOf(decl.Name).(*types.Signature) + for params, i := fn.Params(), 0; i < params.Len(); i++ { + g.see(params.At(i), obj) + if params.At(i).Name() == "" { + g.use(params.At(i), obj) + } + } + + if decl.Name.Name == "_" { + // (9.9) objects named the blank identifier are used + g.use(obj, nil) + } + + if decl.Doc != nil { + for _, cmt := range decl.Doc.List { + if strings.HasPrefix(cmt.Text, "//go:cgo_export_") { + // (1.6) packages use functions exported to cgo + g.use(obj, nil) + } + } + } + + default: + // We do not cover BadDecl, but we shouldn't ever see one of those + lint.ExhaustiveTypeSwitch(decl) + } +} + +// seeScope sees all objects in node's scope. If Options.LocalVariablesAreUsed is true, all objects that aren't fields +// are marked as used. Variables set in skipLvars will not be marked as used. +func (g *graph) seeScope(node ast.Node, by types.Object, skipLvars map[*types.Var]struct{}) { + // A note on functions and scopes: for a function declaration, the body's BlockStmt can't be found in + // types.Info.Scopes. Instead, the FuncType can, and that scope will contain receivers, parameters, return + // parameters and immediate local variables. + + scope := g.info.Scopes[node] + if scope == nil { + return + } + for _, name := range scope.Names() { + obj := scope.Lookup(name) + g.see(obj, by) + + if g.opts.LocalVariablesAreUsed { + if obj, ok := obj.(*types.Var); ok && !obj.IsField() { + if _, ok := skipLvars[obj]; !ok { + g.use(obj, by) + } + } + } + } +} + +func (g *graph) stmt(stmt ast.Stmt, by types.Object) { + if stmt == nil { + return + } + + for { + // We don't care about labels, so unwrap LabeledStmts. Note that a label can itself be labeled. + if labeled, ok := stmt.(*ast.LabeledStmt); ok { + stmt = labeled.Stmt + } else { + break + } + } + + switch stmt := stmt.(type) { + case *ast.AssignStmt: + for _, lhs := range stmt.Lhs { + g.write(lhs, by) + } + for _, rhs := range stmt.Rhs { + // Note: it would be more accurate to have the rhs used by the lhs, but it ultimately doesn't matter, + // because local variables always end up used, anyway. + // + // TODO(dh): we'll have to change that once we allow tracking the usedness of parameters + g.read(rhs, by) + } + + case *ast.BlockStmt: + g.block(stmt, by) + + case *ast.BranchStmt: + // Nothing to do + + case *ast.DeclStmt: + g.decl(stmt.Decl, by) + + case *ast.DeferStmt: + g.read(stmt.Call, by) + + case *ast.ExprStmt: + g.read(stmt.X, by) + + case *ast.ForStmt: + g.seeScope(stmt, by, nil) + g.stmt(stmt.Init, by) + g.read(stmt.Cond, by) + g.stmt(stmt.Post, by) + g.block(stmt.Body, by) + + case *ast.GoStmt: + g.read(stmt.Call, by) + + case *ast.IfStmt: + g.seeScope(stmt, by, nil) + g.stmt(stmt.Init, by) + g.read(stmt.Cond, by) + g.block(stmt.Body, by) + g.stmt(stmt.Else, by) + + case *ast.IncDecStmt: + if g.opts.PostStatementsAreReads { + g.read(stmt.X, by) + g.write(stmt.X, by) + } else { + // We treat post-increment as a write only. This ends up using fields, and sinks in tests, but not other + // variables. 
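A hedged sketch of the difference this option makes (hypothetical package; the variable name is made up):

    package example

    var hits int // package-level, only ever incremented

    func Record() {
        // With PostStatementsAreReads disabled, hits++ is treated purely as a
        // write. Per the write rules earlier in this hunk, assignments to
        // package-level variables only count as uses in _test.go files, so this
        // statement alone does not keep hits alive; enabling the option turns it
        // into a read and therefore a use.
        hits++
    }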
+ g.write(stmt.X, by) + } + + case *ast.RangeStmt: + g.seeScope(stmt, by, nil) + + g.write(stmt.Key, by) + g.write(stmt.Value, by) + g.read(stmt.X, by) + g.block(stmt.Body, by) + + case *ast.ReturnStmt: + for _, ret := range stmt.Results { + g.read(ret, by) + } + + case *ast.SelectStmt: + for _, clause_ := range stmt.Body.List { + clause := clause_.(*ast.CommClause) + g.seeScope(clause, by, nil) + switch comm := clause.Comm.(type) { + case *ast.SendStmt: + g.read(comm.Chan, by) + g.read(comm.Value, by) + case *ast.ExprStmt: + g.read(astutil.Unparen(comm.X).(*ast.UnaryExpr).X, by) + case *ast.AssignStmt: + for _, lhs := range comm.Lhs { + g.write(lhs, by) + } + for _, rhs := range comm.Rhs { + g.read(rhs, by) + } + case nil: default: - lint.ExhaustiveTypeSwitch(instr) + lint.ExhaustiveTypeSwitch(comm) + } + for _, body := range clause.Body { + g.stmt(body, by) + } + } + + case *ast.SendStmt: + g.read(stmt.Chan, by) + g.read(stmt.Value, by) + + case *ast.SwitchStmt: + g.seeScope(stmt, by, nil) + g.stmt(stmt.Init, by) + g.read(stmt.Tag, by) + for _, clause_ := range stmt.Body.List { + clause := clause_.(*ast.CaseClause) + g.seeScope(clause, by, nil) + for _, expr := range clause.List { + g.read(expr, by) + } + for _, body := range clause.Body { + g.stmt(body, by) } } + + case *ast.TypeSwitchStmt: + g.seeScope(stmt, by, nil) + g.stmt(stmt.Init, by) + g.stmt(stmt.Assign, by) + for _, clause_ := range stmt.Body.List { + clause := clause_.(*ast.CaseClause) + g.seeScope(clause, by, nil) + for _, expr := range clause.List { + g.read(expr, by) + } + for _, body := range clause.Body { + g.stmt(body, by) + } + } + + case *ast.EmptyStmt: + // Nothing to do + + default: + lint.ExhaustiveTypeSwitch(stmt) + } +} + +// embeddedField sees the field declared by the embedded field node, and marks the type as used by the field. +// +// Embedded fields are special in two ways: they don't have names, so we don't have immediate access to an ast.Ident to +// resolve to the field's types.Var and need to instead walk the AST, and we cannot use g.read on the type because +// eventually we do get to an ast.Ident, and ObjectOf resolves embedded fields to the field they declare, not the type. +// That's why we have code specially for handling embedded fields. +func (g *graph) embeddedField(node ast.Node, by types.Object) *types.Var { + // We need to traverse the tree to find the ast.Ident, but all the nodes we traverse should be used by the object we + // get once we resolve the ident. Collect the nodes and process them once we've found the ident. + nodes := make([]ast.Node, 0, 4) + for { + switch node_ := node.(type) { + case *ast.Ident: + // obj is the field + obj := g.info.ObjectOf(node_).(*types.Var) + // the field is declared by the enclosing type + g.see(obj, by) + for _, n := range nodes { + g.read(n, obj) + } + + if tname, ok := g.info.Uses[node_].(*types.TypeName); ok && tname.IsAlias() { + // When embedding an alias we want to use the alias, not what the alias points to. 
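A hypothetical example (not part of the patch) of the alias case handled here:

    package example

    type impl struct{ n int }

    // handle is an alias. Embedding it below declares a field of the aliased
    // type, but the graph records a use of the alias name handle itself, not of
    // impl, which is what the g.use(tname, obj) call right below implements.
    type handle = impl

    type wrapper struct {
        handle
    }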
+ g.use(tname, obj) + } else { + switch typ := typeutil.Dereference(g.info.TypeOf(node_)).(type) { + case *types.Named: + // (7.2) fields use their types + g.use(typ.Obj(), obj) + case *types.Basic: + // Nothing to do + default: + // Other types are only possible for aliases, which we've already handled + lint.ExhaustiveTypeSwitch(typ) + } + } + return obj + case *ast.StarExpr: + node = node_.X + case *ast.SelectorExpr: + node = node_.Sel + nodes = append(nodes, node_.X) + case *ast.IndexExpr: + node = node_.X + nodes = append(nodes, node_.Index) + case *ast.IndexListExpr: + node = node_.X + default: + lint.ExhaustiveTypeSwitch(node_) + } } } @@ -1815,41 +1461,219 @@ func isNoCopyType(typ types.Type) bool { if !ok { return false } - if named.NumMethods() != 1 { - return false - } - meth := named.Method(0) - if meth.Name() != "Lock" { - return false - } - sig := meth.Type().(*types.Signature) - if sig.Params().Len() != 0 || sig.Results().Len() != 0 { + switch num := named.NumMethods(); num { + case 1, 2: + for i := 0; i < num; i++ { + meth := named.Method(i) + if meth.Name() != "Lock" && meth.Name() != "Unlock" { + return false + } + sig := meth.Type().(*types.Signature) + if sig.Params().Len() != 0 || sig.Results().Len() != 0 { + return false + } + } + default: return false } return true } -func walkPhi(v ir.Value, fn func(v ir.Value)) { - phi, ok := v.(*ir.Phi) - if !ok { - fn(v) +func (g *graph) namedType(typ *types.TypeName, spec ast.Expr) { + // (2.2) named types use the type they're based on + + if st, ok := spec.(*ast.StructType); ok { + // Named structs are special in that its unexported fields are only used if they're being written to. That is, + // the fields are not used by the named type itself, nor are the types of the fields. + for _, field := range st.Fields.List { + seen := map[*types.Struct]struct{}{} + // For `type x struct { *x; F int }`, don't visit the embedded x + seen[g.info.TypeOf(st).(*types.Struct)] = struct{}{} + var hasExportedField func(t types.Type) bool + hasExportedField = func(T types.Type) bool { + t, ok := typeutil.Dereference(T).Underlying().(*types.Struct) + if !ok { + return false + } + if _, ok := seen[t]; ok { + return false + } + seen[t] = struct{}{} + for i := 0; i < t.NumFields(); i++ { + field := t.Field(i) + if field.Exported() { + return true + } + if field.Embedded() && hasExportedField(field.Type()) { + return true + } + } + return false + } + + if len(field.Names) == 0 { + fieldVar := g.embeddedField(field.Type, typ) + if token.IsExported(fieldVar.Name()) && g.opts.ExportedIsUsed { + // (6.2) structs use exported fields + g.use(fieldVar, typ) + } + if g.opts.ExportedIsUsed && g.opts.ExportedFieldsAreUsed && hasExportedField(fieldVar.Type()) { + // (6.5) structs use embedded structs that have exported fields (recursively) + g.use(fieldVar, typ) + } + } else { + for _, name := range field.Names { + obj := g.info.ObjectOf(name) + g.see(obj, typ) + // (7.2) fields use their types + // + // This handles aliases correctly because ObjectOf(alias) returns the TypeName of the alias, not + // what the alias points to. 
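Stepping back to the relaxed isNoCopyType above, which now accepts one or two empty methods named Lock and/or Unlock: a hypothetical sentinel that matches it, and the kind of struct that relies on rule (6.1) further down (illustration only, not part of the patch):

    package example

    type noCopy struct{}

    func (*noCopy) Lock()   {}
    func (*noCopy) Unlock() {}

    // The sentinel field is never read; it exists only so that go vet's
    // copylocks check flags accidental copies of Pool. Rule (6.1) keeps it from
    // being reported as unused.
    type Pool struct {
        noCopy noCopy
        conns  []string
    }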
+ g.read(field.Type, obj) + if name.Name == "_" { + // (9.9) objects named the blank identifier are used + g.use(obj, typ) + } else if token.IsExported(name.Name) && g.opts.ExportedIsUsed { + // (6.2) structs use exported fields + g.use(obj, typ) + } + + if isNoCopyType(obj.Type()) { + // (6.1) structs use fields of type NoCopy sentinel + g.use(obj, typ) + } + } + } + + } + } else { + g.read(spec, typ) + } +} + +func (g *SerializedGraph) color(rootID NodeID, states []nodeState) { + root := g.nodes[rootID] + if states[rootID].seen() { return } + states[rootID] |= nodeStateSeen + for _, n := range root.uses { + g.color(n, states) + } +} - seen := map[ir.Value]struct{}{} - var impl func(v *ir.Phi) - impl = func(v *ir.Phi) { - if _, ok := seen[v]; ok { - return +type Object struct { + Name string + ShortName string + // OPT(dh): use an enum for the kind + Kind string + Path ObjectPath + Position token.Position + DisplayPosition token.Position +} + +func (g *SerializedGraph) Results() Result { + // XXX objectpath does not return paths for unexported objects, which means that if we analyze the same code twice + // (e.g. normal and test variant), then some objects will appear multiple times, but may not be used identically. we + // have to deduplicate based on the token.Position. Actually we have to do that, anyway, because we may flag types + // local to functions. Those are probably always both used or both unused, but we don't want to flag them twice, + // either. + // + // Note, however, that we still need objectpaths to deduplicate exported identifiers when analyzing independent + // packages in whole-program mode, because if package A uses an object from package B, B will have been imported + // from export data, and we will not have column information. + // + // XXX ^ document that design requirement. + + states := g.colorAndQuieten() + + var res Result + // OPT(dh): can we find meaningful initial capacities for the used and unused slices? + for _, n := range g.nodes[1:] { + state := states[n.id] + if state.seen() { + res.Used = append(res.Used, n.obj) + } else if state.quiet() { + res.Quiet = append(res.Quiet, n.obj) + } else { + res.Unused = append(res.Unused, n.obj) } - seen[v] = struct{}{} - for _, e := range v.Edges { - if ev, ok := e.(*ir.Phi); ok { - impl(ev) - } else { - fn(e) + } + + return res +} + +func (g *SerializedGraph) colorAndQuieten() []nodeState { + states := make([]nodeState, len(g.nodes)+1) + g.color(0, states) + + var quieten func(id NodeID) + quieten = func(id NodeID) { + states[id] |= nodeStateQuiet + for _, owned := range g.nodes[id].owns { + quieten(owned) + } + } + + for _, n := range g.nodes { + if states[n.id].seen() { + continue + } + for _, owned := range n.owns { + quieten(owned) + } + } + + return states +} + +// Dot formats a graph in Graphviz dot format. +func (g *SerializedGraph) Dot() string { + b := &strings.Builder{} + states := g.colorAndQuieten() + // Note: We use addresses in our node names. This only works as long as Go's garbage collector doesn't move + // memory around in the middle of our debug printing. 
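The Results/colorAndQuieten pair above is what turns the graph into a report. A hedged illustration of the three buckets (hypothetical type; the actual ownership edges come from the see calls earlier in the file):

    package example

    // Assuming nothing references config, its node is never colored from the
    // root, so it ends up in Result.Unused; the field nodes it owns are merely
    // quietened and land in Result.Quiet, so a report can flag the type once
    // instead of complaining about host and port separately.
    type config struct {
        host string
        port int
    }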
+ debugNode := func(n Node) { + if n.id == 0 { + fmt.Fprintf(b, "n%d [label=\"Root\"];\n", n.id) + } else { + color := "red" + if states[n.id].seen() { + color = "green" + } else if states[n.id].quiet() { + color = "grey" } + label := fmt.Sprintf("%s %s\n%s", n.obj.Kind, n.obj.Name, n.obj.Position) + fmt.Fprintf(b, "n%d [label=%q, color=%q];\n", n.id, label, color) } + for _, e := range n.uses { + fmt.Fprintf(b, "n%d -> n%d;\n", n.id, e) + } + + for _, owned := range n.owns { + fmt.Fprintf(b, "n%d -> n%d [style=dashed];\n", n.id, owned) + } + } + + fmt.Fprintf(b, "digraph{\n") + for _, v := range g.nodes { + debugNode(v) } - impl(phi) + + fmt.Fprintf(b, "}\n") + + return b.String() +} + +func Graph(fset *token.FileSet, + files []*ast.File, + pkg *types.Package, + info *types.Info, + directives []lint.Directive, + generated map[string]generated.Generator, + opts Options, +) []Node { + g := newGraph(fset, files, pkg, info, directives, generated, opts) + g.entry() + return g.nodes } diff --git a/vendor/modules.txt b/vendor/modules.txt index a0a0cb5d08..bc9fa9d0c2 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,35 +1,63 @@ -# 4d63.com/gochecknoglobals v0.1.0 +# 4d63.com/gocheckcompilerdirectives v1.2.1 +## explicit; go 1.19 +4d63.com/gocheckcompilerdirectives/checkcompilerdirectives +# 4d63.com/gochecknoglobals v0.2.1 ## explicit; go 1.15 4d63.com/gochecknoglobals/checknoglobals -# cloud.google.com/go v0.110.2 +# cloud.google.com/go v0.110.10 ## explicit; go 1.19 cloud.google.com/go/internal cloud.google.com/go/internal/optional cloud.google.com/go/internal/trace cloud.google.com/go/internal/version -# cloud.google.com/go/compute v1.20.1 +# cloud.google.com/go/compute v1.23.3 ## explicit; go 1.19 cloud.google.com/go/compute/internal # cloud.google.com/go/compute/metadata v0.2.3 ## explicit; go 1.19 cloud.google.com/go/compute/metadata -# cloud.google.com/go/iam v0.13.0 +# cloud.google.com/go/iam v1.1.5 ## explicit; go 1.19 cloud.google.com/go/iam cloud.google.com/go/iam/apiv1/iampb -# cloud.google.com/go/storage v1.29.0 +# cloud.google.com/go/storage v1.35.1 ## explicit; go 1.19 cloud.google.com/go/storage cloud.google.com/go/storage/internal cloud.google.com/go/storage/internal/apiv2 -cloud.google.com/go/storage/internal/apiv2/stubs -# github.com/BurntSushi/toml v1.2.1 +cloud.google.com/go/storage/internal/apiv2/storagepb +# github.com/4meepo/tagalign v1.3.3 +## explicit; go 1.19 +github.com/4meepo/tagalign +# github.com/Abirdcfly/dupword v0.0.13 +## explicit; go 1.20 +github.com/Abirdcfly/dupword +# github.com/Antonboom/errname v0.1.12 +## explicit; go 1.20 +github.com/Antonboom/errname/pkg/analyzer +# github.com/Antonboom/nilnil v0.1.7 +## explicit; go 1.20 +github.com/Antonboom/nilnil/pkg/analyzer +# github.com/Antonboom/testifylint v1.1.1 +## explicit; go 1.20 +github.com/Antonboom/testifylint/analyzer +github.com/Antonboom/testifylint/internal/analysisutil +github.com/Antonboom/testifylint/internal/checkers +github.com/Antonboom/testifylint/internal/config +github.com/Antonboom/testifylint/internal/testify +# github.com/BurntSushi/toml v1.3.2 ## explicit; go 1.16 github.com/BurntSushi/toml github.com/BurntSushi/toml/internal # github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 ## explicit; go 1.13 github.com/Djarvur/go-err113 +# github.com/GaijinEntertainment/go-exhaustruct/v3 v3.2.0 +## explicit; go 1.21 +github.com/GaijinEntertainment/go-exhaustruct/v3/analyzer +github.com/GaijinEntertainment/go-exhaustruct/v3/internal/comment 
+github.com/GaijinEntertainment/go-exhaustruct/v3/internal/pattern +github.com/GaijinEntertainment/go-exhaustruct/v3/internal/structure # github.com/Masterminds/goutils v1.1.1 ## explicit github.com/Masterminds/goutils @@ -39,15 +67,25 @@ github.com/Masterminds/semver # github.com/Masterminds/sprig v2.22.0+incompatible ## explicit github.com/Masterminds/sprig -# github.com/OpenPeeDeeP/depguard v1.1.1 -## explicit; go 1.13 -github.com/OpenPeeDeeP/depguard +# github.com/OpenPeeDeeP/depguard/v2 v2.2.0 +## explicit; go 1.20 +github.com/OpenPeeDeeP/depguard/v2 +github.com/OpenPeeDeeP/depguard/v2/internal/utils # github.com/agext/levenshtein v1.2.2 ## explicit github.com/agext/levenshtein +# github.com/alecthomas/go-check-sumtype v0.1.4 +## explicit; go 1.18 +github.com/alecthomas/go-check-sumtype +# github.com/alexkohler/nakedret/v2 v2.0.2 +## explicit; go 1.18 +github.com/alexkohler/nakedret/v2 # github.com/alexkohler/prealloc v1.0.0 ## explicit; go 1.15 github.com/alexkohler/prealloc/pkg +# github.com/alingse/asasalint v0.0.11 +## explicit; go 1.18 +github.com/alingse/asasalint # github.com/apparentlymart/go-cidr v1.1.0 ## explicit github.com/apparentlymart/go-cidr/cidr @@ -60,8 +98,8 @@ github.com/apparentlymart/go-textseg/v13/textseg # github.com/armon/go-radix v1.0.0 ## explicit github.com/armon/go-radix -# github.com/ashanbrown/forbidigo v1.3.0 -## explicit; go 1.12 +# github.com/ashanbrown/forbidigo v1.6.0 +## explicit; go 1.13 github.com/ashanbrown/forbidigo/forbidigo # github.com/ashanbrown/makezero v1.1.1 ## explicit; go 1.12 @@ -126,81 +164,134 @@ github.com/bgentry/go-netrc/netrc # github.com/bgentry/speakeasy v0.1.0 ## explicit github.com/bgentry/speakeasy -# github.com/bkielbasa/cyclop v1.2.0 -## explicit; go 1.15 +# github.com/bkielbasa/cyclop v1.2.1 +## explicit; go 1.20 github.com/bkielbasa/cyclop/pkg/analyzer -# github.com/bombsimon/wsl/v3 v3.3.0 -## explicit; go 1.12 -github.com/bombsimon/wsl/v3 +# github.com/blizzy78/varnamelen v0.8.0 +## explicit; go 1.16 +github.com/blizzy78/varnamelen +# github.com/bombsimon/wsl/v4 v4.2.0 +## explicit; go 1.19 +github.com/bombsimon/wsl/v4 +# github.com/breml/bidichk v0.2.7 +## explicit; go 1.20 +github.com/breml/bidichk/pkg/bidichk +# github.com/breml/errchkjson v0.3.6 +## explicit; go 1.20 +github.com/breml/errchkjson +# github.com/butuzov/ireturn v0.3.0 +## explicit; go 1.18 +github.com/butuzov/ireturn/analyzer +github.com/butuzov/ireturn/analyzer/internal/config +github.com/butuzov/ireturn/analyzer/internal/types +# github.com/butuzov/mirror v1.1.0 +## explicit; go 1.19 +github.com/butuzov/mirror +github.com/butuzov/mirror/internal/checker +# github.com/catenacyber/perfsprint v0.6.0 +## explicit; go 1.20 +github.com/catenacyber/perfsprint/analyzer +# github.com/ccojocar/zxcvbn-go v1.0.1 +## explicit; go 1.20 +github.com/ccojocar/zxcvbn-go +github.com/ccojocar/zxcvbn-go/adjacency +github.com/ccojocar/zxcvbn-go/data +github.com/ccojocar/zxcvbn-go/entropy +github.com/ccojocar/zxcvbn-go/frequency +github.com/ccojocar/zxcvbn-go/match +github.com/ccojocar/zxcvbn-go/matching +github.com/ccojocar/zxcvbn-go/scoring +github.com/ccojocar/zxcvbn-go/utils/math # github.com/cespare/xxhash/v2 v2.2.0 ## explicit; go 1.11 github.com/cespare/xxhash/v2 -# github.com/charithe/durationcheck v0.0.9 +# github.com/charithe/durationcheck v0.0.10 ## explicit; go 1.14 github.com/charithe/durationcheck -# github.com/chavacava/garif v0.0.0-20220630083739-93517212f375 +# github.com/chavacava/garif v0.1.0 ## explicit; go 1.16 github.com/chavacava/garif # 
github.com/client9/misspell v0.3.4 ## explicit github.com/client9/misspell github.com/client9/misspell/cmd/misspell -# github.com/daixiang0/gci v0.2.9 -## explicit; go 1.14 +# github.com/curioswitch/go-reassign v0.2.0 +## explicit; go 1.18 +github.com/curioswitch/go-reassign +github.com/curioswitch/go-reassign/internal/analyzer +# github.com/daixiang0/gci v0.12.1 +## explicit; go 1.18 +github.com/daixiang0/gci/pkg/config +github.com/daixiang0/gci/pkg/format github.com/daixiang0/gci/pkg/gci +github.com/daixiang0/gci/pkg/io +github.com/daixiang0/gci/pkg/log +github.com/daixiang0/gci/pkg/parse +github.com/daixiang0/gci/pkg/section +github.com/daixiang0/gci/pkg/specificity +github.com/daixiang0/gci/pkg/utils # github.com/davecgh/go-spew v1.1.1 ## explicit github.com/davecgh/go-spew/spew -# github.com/denis-tingajkin/go-header v0.4.2 -## explicit; go 1.15 -github.com/denis-tingajkin/go-header +# github.com/denis-tingaikin/go-header v0.4.3 +## explicit; go 1.17 +github.com/denis-tingaikin/go-header # github.com/esimonov/ifshort v1.0.4 ## explicit; go 1.17 github.com/esimonov/ifshort/pkg/analyzer -# github.com/ettle/strcase v0.1.1 +# github.com/ettle/strcase v0.2.0 ## explicit; go 1.12 github.com/ettle/strcase -# github.com/fatih/color v1.13.0 -## explicit; go 1.13 +# github.com/fatih/color v1.16.0 +## explicit; go 1.17 github.com/fatih/color # github.com/fatih/structtag v1.2.0 ## explicit; go 1.12 github.com/fatih/structtag +# github.com/firefart/nonamedreturns v1.0.4 +## explicit; go 1.18 +github.com/firefart/nonamedreturns/analyzer # github.com/fsnotify/fsnotify v1.5.4 ## explicit; go 1.16 github.com/fsnotify/fsnotify # github.com/fzipp/gocyclo v0.6.0 ## explicit; go 1.18 github.com/fzipp/gocyclo -# github.com/go-critic/go-critic v0.6.5 -## explicit; go 1.16 +# github.com/ghostiam/protogetter v0.3.4 +## explicit; go 1.19 +github.com/ghostiam/protogetter +# github.com/go-critic/go-critic v0.11.0 +## explicit; go 1.18 github.com/go-critic/go-critic/checkers github.com/go-critic/go-critic/checkers/internal/astwalk github.com/go-critic/go-critic/checkers/internal/lintutil github.com/go-critic/go-critic/checkers/rulesdata -github.com/go-critic/go-critic/framework/linter -# github.com/go-toolsmith/astcast v1.0.0 -## explicit +github.com/go-critic/go-critic/linter +# github.com/go-toolsmith/astcast v1.1.0 +## explicit; go 1.16 github.com/go-toolsmith/astcast -# github.com/go-toolsmith/astcopy v1.0.2 +# github.com/go-toolsmith/astcopy v1.1.0 ## explicit; go 1.16 github.com/go-toolsmith/astcopy -# github.com/go-toolsmith/astequal v1.0.3 +# github.com/go-toolsmith/astequal v1.1.0 ## explicit; go 1.16 github.com/go-toolsmith/astequal -# github.com/go-toolsmith/astfmt v1.0.0 -## explicit +# github.com/go-toolsmith/astfmt v1.1.0 +## explicit; go 1.16 github.com/go-toolsmith/astfmt -# github.com/go-toolsmith/astp v1.0.0 -## explicit +# github.com/go-toolsmith/astp v1.1.0 +## explicit; go 1.16 github.com/go-toolsmith/astp -# github.com/go-toolsmith/strparse v1.0.0 -## explicit +# github.com/go-toolsmith/strparse v1.1.0 +## explicit; go 1.16 github.com/go-toolsmith/strparse -# github.com/go-toolsmith/typep v1.0.2 -## explicit +# github.com/go-toolsmith/typep v1.1.0 +## explicit; go 1.16 github.com/go-toolsmith/typep -# github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b +# github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 +## explicit; go 1.18 +github.com/go-viper/mapstructure/v2 +# github.com/go-xmlfmt/xmlfmt v1.1.2 ## explicit github.com/go-xmlfmt/xmlfmt # github.com/gobwas/glob v0.2.3 @@ 
-243,14 +334,13 @@ github.com/golangci/dupl/syntax/golang # github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe ## explicit; go 1.17 github.com/golangci/go-misc/deadcode -# github.com/golangci/gofmt v0.0.0-20220901101216-f2edd75033f2 -## explicit; go 1.18 +# github.com/golangci/gofmt v0.0.0-20231018234816-f50ced29576e +## explicit; go 1.20 github.com/golangci/gofmt/gofmt github.com/golangci/gofmt/gofmt/internal/diff -github.com/golangci/gofmt/gofmt/internal/execabs github.com/golangci/gofmt/goimports -# github.com/golangci/golangci-lint v1.41.1 -## explicit; go 1.15 +# github.com/golangci/golangci-lint v1.56.1 +## explicit; go 1.21 github.com/golangci/golangci-lint/cmd/golangci-lint github.com/golangci/golangci-lint/internal/cache github.com/golangci/golangci-lint/internal/errorutil @@ -275,7 +365,6 @@ github.com/golangci/golangci-lint/pkg/printers github.com/golangci/golangci-lint/pkg/report github.com/golangci/golangci-lint/pkg/result github.com/golangci/golangci-lint/pkg/result/processors -github.com/golangci/golangci-lint/pkg/sliceutil github.com/golangci/golangci-lint/pkg/timeutils # github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0 ## explicit @@ -283,11 +372,11 @@ github.com/golangci/lint-1 # github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca ## explicit github.com/golangci/maligned -# github.com/golangci/misspell v0.3.5 -## explicit +# github.com/golangci/misspell v0.4.1 +## explicit; go 1.19 github.com/golangci/misspell -# github.com/golangci/revgrep v0.0.0-20220804021717-745bb2f7c2e6 -## explicit; go 1.17 +# github.com/golangci/revgrep v0.5.2 +## explicit; go 1.19 github.com/golangci/revgrep # github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4 ## explicit @@ -305,8 +394,8 @@ github.com/google/go-github/v57/github # github.com/google/go-querystring v1.1.0 ## explicit; go 1.10 github.com/google/go-querystring/query -# github.com/google/s2a-go v0.1.4 -## explicit; go 1.16 +# github.com/google/s2a-go v0.1.7 +## explicit; go 1.19 github.com/google/s2a-go github.com/google/s2a-go/fallback github.com/google/s2a-go/internal/authinfo @@ -326,21 +415,23 @@ github.com/google/s2a-go/internal/v2 github.com/google/s2a-go/internal/v2/certverifier github.com/google/s2a-go/internal/v2/remotesigner github.com/google/s2a-go/internal/v2/tlsconfigstore +github.com/google/s2a-go/retry github.com/google/s2a-go/stream # github.com/google/uuid v1.6.0 ## explicit github.com/google/uuid -# github.com/googleapis/enterprise-certificate-proxy v0.2.3 +# github.com/googleapis/enterprise-certificate-proxy v0.3.2 ## explicit; go 1.19 github.com/googleapis/enterprise-certificate-proxy/client github.com/googleapis/enterprise-certificate-proxy/client/util -# github.com/googleapis/gax-go/v2 v2.11.0 +# github.com/googleapis/gax-go/v2 v2.12.0 ## explicit; go 1.19 github.com/googleapis/gax-go/v2 github.com/googleapis/gax-go/v2/apierror github.com/googleapis/gax-go/v2/apierror/internal/proto +github.com/googleapis/gax-go/v2/callctx github.com/googleapis/gax-go/v2/internal -# github.com/gordonklaus/ineffassign v0.0.0-20210914165742-4cc7213b9bc8 +# github.com/gordonklaus/ineffassign v0.1.0 ## explicit; go 1.14 github.com/gordonklaus/ineffassign/pkg/ineffassign # github.com/gostaticanalysis/analysisutil v0.7.1 @@ -356,8 +447,6 @@ github.com/gostaticanalysis/forcetypeassert # github.com/gostaticanalysis/nilerr v0.1.1 ## explicit; go 1.15 github.com/gostaticanalysis/nilerr -# github.com/gostaticanalysis/testutil v0.4.0 -## explicit; go 1.16 # github.com/hashicorp/errwrap 
v1.0.0 ## explicit github.com/hashicorp/errwrap @@ -488,16 +577,21 @@ github.com/hashicorp/terraform-svchost/disco # github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d ## explicit github.com/hashicorp/yamux +# github.com/hexops/gotextdiff v1.0.3 +## explicit; go 1.16 +github.com/hexops/gotextdiff +github.com/hexops/gotextdiff/myers +github.com/hexops/gotextdiff/span # github.com/huandu/xstrings v1.3.2 ## explicit; go 1.12 github.com/huandu/xstrings # github.com/imdario/mergo v0.3.12 ## explicit; go 1.13 github.com/imdario/mergo -# github.com/inconshreveable/mousetrap v1.0.1 +# github.com/inconshreveable/mousetrap v1.1.0 ## explicit; go 1.18 github.com/inconshreveable/mousetrap -# github.com/jgautheron/goconst v1.5.1 +# github.com/jgautheron/goconst v1.7.0 ## explicit; go 1.13 github.com/jgautheron/goconst # github.com/jingyugao/rowserrcheck v1.1.1 @@ -506,19 +600,25 @@ github.com/jingyugao/rowserrcheck/passes/rowserr # github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af ## explicit; go 1.13 github.com/jirfag/go-printf-func-name/pkg/analyzer +# github.com/jjti/go-spancheck v0.5.2 +## explicit; go 1.20 +github.com/jjti/go-spancheck # github.com/jmespath/go-jmespath v0.4.0 ## explicit; go 1.14 github.com/jmespath/go-jmespath # github.com/julz/importas v0.1.0 ## explicit; go 1.15 github.com/julz/importas -# github.com/kisielk/errcheck v1.6.2 -## explicit; go 1.14 +# github.com/kisielk/errcheck v1.7.0 +## explicit; go 1.18 github.com/kisielk/errcheck/errcheck # github.com/kisielk/gotool v1.0.0 ## explicit github.com/kisielk/gotool github.com/kisielk/gotool/internal/load +# github.com/kkHAIKE/contextcheck v1.1.4 +## explicit; go 1.20 +github.com/kkHAIKE/contextcheck # github.com/klauspost/compress v1.15.11 ## explicit; go 1.17 github.com/klauspost/compress @@ -531,31 +631,48 @@ github.com/klauspost/compress/zstd/internal/xxhash # github.com/kulti/thelper v0.6.3 ## explicit; go 1.18 github.com/kulti/thelper/pkg/analyzer -# github.com/kunwardeep/paralleltest v1.0.6 +# github.com/kunwardeep/paralleltest v1.0.9 ## explicit; go 1.17 github.com/kunwardeep/paralleltest/pkg/paralleltest -# github.com/kyoh86/exportloopref v0.1.8 -## explicit; go 1.14 +# github.com/kyoh86/exportloopref v0.1.11 +## explicit; go 1.18 github.com/kyoh86/exportloopref # github.com/ldez/gomoddirectives v0.2.3 ## explicit; go 1.16 github.com/ldez/gomoddirectives -# github.com/ldez/tagliatelle v0.3.1 -## explicit; go 1.16 +# github.com/ldez/tagliatelle v0.5.0 +## explicit; go 1.19 github.com/ldez/tagliatelle +# github.com/leonklingele/grouper v1.1.1 +## explicit; go 1.17 +github.com/leonklingele/grouper/pkg/analyzer +github.com/leonklingele/grouper/pkg/analyzer/consts +github.com/leonklingele/grouper/pkg/analyzer/globals +github.com/leonklingele/grouper/pkg/analyzer/imports +github.com/leonklingele/grouper/pkg/analyzer/types +github.com/leonklingele/grouper/pkg/analyzer/vars +# github.com/lufeee/execinquery v1.2.1 +## explicit; go 1.17 +github.com/lufeee/execinquery +# github.com/macabu/inamedparam v0.1.3 +## explicit; go 1.20 +github.com/macabu/inamedparam # github.com/magiconair/properties v1.8.6 ## explicit; go 1.13 github.com/magiconair/properties -# github.com/maratori/testpackage v1.1.0 -## explicit; go 1.18 +# github.com/maratori/testableexamples v1.0.0 +## explicit; go 1.19 +github.com/maratori/testableexamples/pkg/testableexamples +# github.com/maratori/testpackage v1.1.1 +## explicit; go 1.20 github.com/maratori/testpackage/pkg/testpackage -# github.com/matoous/godox 
v0.0.0-20210227103229-6504466cf951 +# github.com/matoous/godox v0.0.0-20230222163458-006bad1f9d26 ## explicit; go 1.13 github.com/matoous/godox # github.com/mattn/go-colorable v0.1.13 ## explicit; go 1.15 github.com/mattn/go-colorable -# github.com/mattn/go-isatty v0.0.16 +# github.com/mattn/go-isatty v0.0.20 ## explicit; go 1.15 github.com/mattn/go-isatty # github.com/mattn/go-runewidth v0.0.9 @@ -567,13 +684,11 @@ github.com/matttproud/golang_protobuf_extensions/pbutil # github.com/mbilski/exhaustivestruct v1.2.0 ## explicit; go 1.15 github.com/mbilski/exhaustivestruct/pkg/analyzer -# github.com/mgechev/dots v0.0.0-20210922191527-e955255bf517 -## explicit -github.com/mgechev/dots -# github.com/mgechev/revive v1.2.4 -## explicit; go 1.19 +# github.com/mgechev/revive v1.3.7 +## explicit; go 1.20 github.com/mgechev/revive/config github.com/mgechev/revive/formatter +github.com/mgechev/revive/internal/ifelse github.com/mgechev/revive/internal/typeparams github.com/mgechev/revive/lint github.com/mgechev/revive/rule @@ -601,31 +716,29 @@ github.com/mitchellh/mapstructure # github.com/mitchellh/reflectwalk v1.0.1 ## explicit github.com/mitchellh/reflectwalk -# github.com/moricho/tparallel v0.2.1 -## explicit; go 1.15 +# github.com/moricho/tparallel v0.3.1 +## explicit; go 1.20 github.com/moricho/tparallel github.com/moricho/tparallel/pkg/ssafunc github.com/moricho/tparallel/pkg/ssainstr # github.com/nakabonne/nestif v0.3.1 ## explicit; go 1.15 github.com/nakabonne/nestif -# github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354 -## explicit; go 1.14 -github.com/nbutton23/zxcvbn-go -github.com/nbutton23/zxcvbn-go/adjacency -github.com/nbutton23/zxcvbn-go/data -github.com/nbutton23/zxcvbn-go/entropy -github.com/nbutton23/zxcvbn-go/frequency -github.com/nbutton23/zxcvbn-go/match -github.com/nbutton23/zxcvbn-go/matching -github.com/nbutton23/zxcvbn-go/scoring -github.com/nbutton23/zxcvbn-go/utils/math -# github.com/nishanths/exhaustive v0.8.3 -## explicit; go 1.14 +# github.com/nishanths/exhaustive v0.12.0 +## explicit; go 1.18 github.com/nishanths/exhaustive # github.com/nishanths/predeclared v0.2.2 ## explicit; go 1.14 github.com/nishanths/predeclared/passes/predeclared +# github.com/nunnatsa/ginkgolinter v0.15.2 +## explicit; go 1.20 +github.com/nunnatsa/ginkgolinter +github.com/nunnatsa/ginkgolinter/ginkgohandler +github.com/nunnatsa/ginkgolinter/gomegahandler +github.com/nunnatsa/ginkgolinter/interfaces +github.com/nunnatsa/ginkgolinter/reverseassertion +github.com/nunnatsa/ginkgolinter/types +github.com/nunnatsa/ginkgolinter/version # github.com/oklog/run v1.0.0 ## explicit github.com/oklog/run @@ -635,17 +748,17 @@ github.com/olekukonko/tablewriter # github.com/pelletier/go-toml v1.9.5 ## explicit; go 1.12 github.com/pelletier/go-toml -# github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d -## explicit -github.com/phayes/checkstyle -# github.com/pkg/errors v0.9.1 -## explicit -github.com/pkg/errors +# github.com/pelletier/go-toml/v2 v2.0.5 +## explicit; go 1.16 +github.com/pelletier/go-toml/v2 +github.com/pelletier/go-toml/v2/internal/ast +github.com/pelletier/go-toml/v2/internal/danger +github.com/pelletier/go-toml/v2/internal/tracker # github.com/pmezard/go-difflib v1.0.0 ## explicit github.com/pmezard/go-difflib/difflib -# github.com/polyfloyd/go-errorlint v1.0.5 -## explicit; go 1.13 +# github.com/polyfloyd/go-errorlint v1.4.8 +## explicit; go 1.20 github.com/polyfloyd/go-errorlint/errorlint # github.com/posener/complete v1.2.3 ## explicit; go 1.13 @@ -670,8 +783,8 
@@ github.com/prometheus/common/model github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs github.com/prometheus/procfs/internal/util -# github.com/quasilyte/go-ruleguard v0.3.18 -## explicit; go 1.17 +# github.com/quasilyte/go-ruleguard v0.4.0 +## explicit; go 1.19 github.com/quasilyte/go-ruleguard/internal/goenv github.com/quasilyte/go-ruleguard/internal/golist github.com/quasilyte/go-ruleguard/internal/xsrcimporter @@ -687,30 +800,39 @@ github.com/quasilyte/go-ruleguard/ruleguard/quasigo/stdlib/qstrconv github.com/quasilyte/go-ruleguard/ruleguard/quasigo/stdlib/qstrings github.com/quasilyte/go-ruleguard/ruleguard/textmatch github.com/quasilyte/go-ruleguard/ruleguard/typematch -# github.com/quasilyte/gogrep v0.0.0-20220828223005-86e4605de09f +# github.com/quasilyte/gogrep v0.5.0 ## explicit; go 1.16 github.com/quasilyte/gogrep github.com/quasilyte/gogrep/internal/stdinfo github.com/quasilyte/gogrep/nodetag -# github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95 +# github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727 ## explicit; go 1.14 github.com/quasilyte/regex/syntax # github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 ## explicit; go 1.17 github.com/quasilyte/stdinfo -# github.com/ryancurrah/gomodguard v1.2.4 -## explicit; go 1.16 +# github.com/ryancurrah/gomodguard v1.3.0 +## explicit; go 1.19 github.com/ryancurrah/gomodguard -# github.com/ryanrolds/sqlclosecheck v0.3.0 -## explicit; go 1.13 +# github.com/ryanrolds/sqlclosecheck v0.5.1 +## explicit; go 1.20 github.com/ryanrolds/sqlclosecheck/pkg/analyzer -# github.com/sanposhiho/wastedassign/v2 v2.0.6 +# github.com/sanposhiho/wastedassign/v2 v2.0.7 ## explicit; go 1.14 github.com/sanposhiho/wastedassign/v2 -# github.com/securego/gosec/v2 v2.13.1 -## explicit; go 1.19 +# github.com/sashamelentyev/interfacebloat v1.1.0 +## explicit; go 1.18 +github.com/sashamelentyev/interfacebloat/pkg/analyzer +# github.com/sashamelentyev/usestdlibvars v1.24.0 +## explicit; go 1.20 +github.com/sashamelentyev/usestdlibvars/pkg/analyzer +github.com/sashamelentyev/usestdlibvars/pkg/analyzer/internal/mapping +# github.com/securego/gosec/v2 v2.18.2 +## explicit; go 1.20 github.com/securego/gosec/v2 +github.com/securego/gosec/v2/analyzers github.com/securego/gosec/v2/cwe +github.com/securego/gosec/v2/issue github.com/securego/gosec/v2/rules # github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c ## explicit @@ -723,25 +845,35 @@ github.com/shurcooL/githubv4 github.com/shurcooL/graphql github.com/shurcooL/graphql/ident github.com/shurcooL/graphql/internal/jsonutil -# github.com/sirupsen/logrus v1.9.0 +# github.com/sirupsen/logrus v1.9.3 ## explicit; go 1.13 github.com/sirupsen/logrus -# github.com/sonatard/noctx v0.0.1 -## explicit; go 1.13 +# github.com/sivchari/containedctx v1.0.3 +## explicit; go 1.17 +github.com/sivchari/containedctx +# github.com/sivchari/nosnakecase v1.7.0 +## explicit; go 1.18 +github.com/sivchari/nosnakecase +# github.com/sivchari/tenv v1.7.1 +## explicit; go 1.18 +github.com/sivchari/tenv +# github.com/sonatard/noctx v0.0.2 +## explicit; go 1.20 github.com/sonatard/noctx github.com/sonatard/noctx/ngfunc github.com/sonatard/noctx/reqwithoutctx -# github.com/sourcegraph/go-diff v0.6.1 +# github.com/sourcegraph/go-diff v0.7.0 ## explicit; go 1.14 github.com/sourcegraph/go-diff/diff -# github.com/spf13/afero v1.4.1 -## explicit; go 1.13 +# github.com/spf13/afero v1.11.0 +## explicit; go 1.19 github.com/spf13/afero +github.com/spf13/afero/internal/common 
github.com/spf13/afero/mem # github.com/spf13/cast v1.5.0 ## explicit; go 1.18 github.com/spf13/cast -# github.com/spf13/cobra v1.6.0 +# github.com/spf13/cobra v1.7.0 ## explicit; go 1.15 github.com/spf13/cobra # github.com/spf13/jwalterweatherman v1.1.0 @@ -750,12 +882,23 @@ github.com/spf13/jwalterweatherman # github.com/spf13/pflag v1.0.5 ## explicit; go 1.12 github.com/spf13/pflag -# github.com/spf13/viper v1.7.1 -## explicit; go 1.12 +# github.com/spf13/viper v1.12.0 +## explicit; go 1.17 github.com/spf13/viper +github.com/spf13/viper/internal/encoding +github.com/spf13/viper/internal/encoding/dotenv +github.com/spf13/viper/internal/encoding/hcl +github.com/spf13/viper/internal/encoding/ini +github.com/spf13/viper/internal/encoding/javaproperties +github.com/spf13/viper/internal/encoding/json +github.com/spf13/viper/internal/encoding/toml +github.com/spf13/viper/internal/encoding/yaml # github.com/ssgreg/nlreturn/v2 v2.2.1 ## explicit; go 1.13 github.com/ssgreg/nlreturn/v2/pkg/nlreturn +# github.com/stbenjam/no-sprintf-host-port v0.1.1 +## explicit; go 1.16 +github.com/stbenjam/no-sprintf-host-port/pkg/analyzer # github.com/stretchr/objx v0.5.0 ## explicit; go 1.12 github.com/stretchr/objx @@ -766,17 +909,29 @@ github.com/stretchr/testify/mock # github.com/subosito/gotenv v1.4.1 ## explicit; go 1.18 github.com/subosito/gotenv -# github.com/tdakkota/asciicheck v0.1.1 -## explicit; go 1.13 +# github.com/t-yuki/gocover-cobertura v0.0.0-20180217150009-aaee18c8195c +## explicit +github.com/t-yuki/gocover-cobertura +# github.com/tdakkota/asciicheck v0.2.0 +## explicit; go 1.18 github.com/tdakkota/asciicheck -# github.com/tetafro/godot v1.4.11 -## explicit; go 1.16 +# github.com/tetafro/godot v1.4.16 +## explicit; go 1.20 github.com/tetafro/godot -# github.com/timakin/bodyclose v0.0.0-20210704033933-f49887972144 +# github.com/timakin/bodyclose v0.0.0-20230421092635-574207250966 ## explicit; go 1.12 github.com/timakin/bodyclose/passes/bodyclose -# github.com/tomarrell/wrapcheck/v2 v2.3.0 -## explicit; go 1.16 +# github.com/timonwong/loggercheck v0.9.4 +## explicit; go 1.18 +github.com/timonwong/loggercheck +github.com/timonwong/loggercheck/internal/bytebufferpool +github.com/timonwong/loggercheck/internal/checkers +github.com/timonwong/loggercheck/internal/checkers/printf +github.com/timonwong/loggercheck/internal/rules +github.com/timonwong/loggercheck/internal/sets +github.com/timonwong/loggercheck/internal/stringutil +# github.com/tomarrell/wrapcheck/v2 v2.8.1 +## explicit; go 1.18 github.com/tomarrell/wrapcheck/v2/wrapcheck # github.com/tommy-muehle/go-mnd/v2 v2.5.1 ## explicit; go 1.12 @@ -789,13 +944,13 @@ github.com/ulikunitz/xz github.com/ulikunitz/xz/internal/hash github.com/ulikunitz/xz/internal/xlog github.com/ulikunitz/xz/lzma -# github.com/ultraware/funlen v0.0.3 -## explicit +# github.com/ultraware/funlen v0.1.0 +## explicit; go 1.20 github.com/ultraware/funlen -# github.com/ultraware/whitespace v0.0.5 -## explicit +# github.com/ultraware/whitespace v0.1.0 +## explicit; go 1.20 github.com/ultraware/whitespace -# github.com/uudashr/gocognit v1.0.6 +# github.com/uudashr/gocognit v1.1.2 ## explicit; go 1.16 github.com/uudashr/gocognit # github.com/vmihailenco/msgpack/v4 v4.3.12 @@ -807,9 +962,20 @@ github.com/vmihailenco/msgpack/v4/codes github.com/vmihailenco/tagparser github.com/vmihailenco/tagparser/internal github.com/vmihailenco/tagparser/internal/parser +# github.com/xen0n/gosmopolitan v1.2.2 +## explicit; go 1.19 +github.com/xen0n/gosmopolitan +# github.com/yagipy/maintidx 
v1.0.0 +## explicit; go 1.17 +github.com/yagipy/maintidx +github.com/yagipy/maintidx/pkg/cyc +github.com/yagipy/maintidx/pkg/halstvol # github.com/yeya24/promlinter v0.2.0 ## explicit; go 1.16 github.com/yeya24/promlinter +# github.com/ykadowak/zerologlint v0.1.5 +## explicit; go 1.19 +github.com/ykadowak/zerologlint # github.com/zclconf/go-cty v1.8.2 ## explicit; go 1.12 github.com/zclconf/go-cty/cty @@ -823,6 +989,15 @@ github.com/zclconf/go-cty/cty/set # github.com/zclconf/go-cty-yaml v1.0.2 ## explicit github.com/zclconf/go-cty-yaml +# gitlab.com/bosi/decorder v0.4.1 +## explicit; go 1.20 +gitlab.com/bosi/decorder +# go-simpler.org/musttag v0.8.0 +## explicit; go 1.20 +go-simpler.org/musttag +# go-simpler.org/sloglint v0.4.0 +## explicit; go 1.20 +go-simpler.org/sloglint # go.opencensus.io v0.24.0 ## explicit; go 1.13 go.opencensus.io @@ -842,6 +1017,21 @@ go.opencensus.io/trace go.opencensus.io/trace/internal go.opencensus.io/trace/propagation go.opencensus.io/trace/tracestate +# go.uber.org/atomic v1.7.0 +## explicit; go 1.13 +go.uber.org/atomic +# go.uber.org/multierr v1.6.0 +## explicit; go 1.12 +go.uber.org/multierr +# go.uber.org/zap v1.24.0 +## explicit; go 1.19 +go.uber.org/zap +go.uber.org/zap/buffer +go.uber.org/zap/internal +go.uber.org/zap/internal/bufferpool +go.uber.org/zap/internal/color +go.uber.org/zap/internal/exit +go.uber.org/zap/zapcore # golang.org/x/crypto v0.18.0 ## explicit; go 1.18 golang.org/x/crypto/bcrypt @@ -871,11 +1061,14 @@ golang.org/x/crypto/salsa20/salsa golang.org/x/crypto/scrypt golang.org/x/crypto/ssh golang.org/x/crypto/ssh/internal/bcrypt_pbkdf -# golang.org/x/exp/typeparams v0.0.0-20220827204233-334a2380cb91 +# golang.org/x/exp v0.0.0-20240103183307-be819d1f06fc +## explicit; go 1.20 +golang.org/x/exp/maps +# golang.org/x/exp/typeparams v0.0.0-20231219180239-dc181d75b848 ## explicit; go 1.18 golang.org/x/exp/typeparams -# golang.org/x/mod v0.8.0 -## explicit; go 1.17 +# golang.org/x/mod v0.15.0 +## explicit; go 1.18 golang.org/x/mod/internal/lazyregexp golang.org/x/mod/modfile golang.org/x/mod/module @@ -901,25 +1094,30 @@ golang.org/x/oauth2/google/internal/stsexchange golang.org/x/oauth2/internal golang.org/x/oauth2/jws golang.org/x/oauth2/jwt -# golang.org/x/sync v0.2.0 -## explicit +# golang.org/x/sync v0.6.0 +## explicit; go 1.18 +golang.org/x/sync/errgroup golang.org/x/sync/semaphore # golang.org/x/sys v0.16.0 ## explicit; go 1.18 golang.org/x/sys/cpu -golang.org/x/sys/execabs golang.org/x/sys/unix golang.org/x/sys/windows # golang.org/x/text v0.14.0 ## explicit; go 1.18 +golang.org/x/text/runes golang.org/x/text/secure/bidirule golang.org/x/text/transform golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm golang.org/x/text/width -# golang.org/x/tools v0.6.0 +# golang.org/x/time v0.5.0 +## explicit; go 1.18 +golang.org/x/time/rate +# golang.org/x/tools v0.17.0 ## explicit; go 1.18 golang.org/x/tools/go/analysis +golang.org/x/tools/go/analysis/passes/appends golang.org/x/tools/go/analysis/passes/asmdecl golang.org/x/tools/go/analysis/passes/assign golang.org/x/tools/go/analysis/passes/atomic @@ -932,6 +1130,8 @@ golang.org/x/tools/go/analysis/passes/composite golang.org/x/tools/go/analysis/passes/copylock golang.org/x/tools/go/analysis/passes/ctrlflow golang.org/x/tools/go/analysis/passes/deepequalerrors +golang.org/x/tools/go/analysis/passes/defers +golang.org/x/tools/go/analysis/passes/directive golang.org/x/tools/go/analysis/passes/errorsas golang.org/x/tools/go/analysis/passes/fieldalignment 
golang.org/x/tools/go/analysis/passes/findcall @@ -950,12 +1150,14 @@ golang.org/x/tools/go/analysis/passes/reflectvaluecompare golang.org/x/tools/go/analysis/passes/shadow golang.org/x/tools/go/analysis/passes/shift golang.org/x/tools/go/analysis/passes/sigchanyzer +golang.org/x/tools/go/analysis/passes/slog golang.org/x/tools/go/analysis/passes/sortslice golang.org/x/tools/go/analysis/passes/stdmethods golang.org/x/tools/go/analysis/passes/stringintconv golang.org/x/tools/go/analysis/passes/structtag golang.org/x/tools/go/analysis/passes/testinggoroutine golang.org/x/tools/go/analysis/passes/tests +golang.org/x/tools/go/analysis/passes/timeformat golang.org/x/tools/go/analysis/passes/unmarshal golang.org/x/tools/go/analysis/passes/unreachable golang.org/x/tools/go/analysis/passes/unsafeptr @@ -980,7 +1182,7 @@ golang.org/x/tools/internal/event golang.org/x/tools/internal/event/core golang.org/x/tools/internal/event/keys golang.org/x/tools/internal/event/label -golang.org/x/tools/internal/fastwalk +golang.org/x/tools/internal/event/tag golang.org/x/tools/internal/gcimporter golang.org/x/tools/internal/gocommand golang.org/x/tools/internal/gopathwalk @@ -990,11 +1192,12 @@ golang.org/x/tools/internal/pkgbits golang.org/x/tools/internal/tokeninternal golang.org/x/tools/internal/typeparams golang.org/x/tools/internal/typesinternal +golang.org/x/tools/internal/versions # golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 ## explicit; go 1.17 golang.org/x/xerrors golang.org/x/xerrors/internal -# google.golang.org/api v0.126.0 +# google.golang.org/api v0.152.0 ## explicit; go 1.19 google.golang.org/api/googleapi google.golang.org/api/googleapi/transport @@ -1025,27 +1228,24 @@ google.golang.org/appengine/internal/datastore google.golang.org/appengine/internal/log google.golang.org/appengine/internal/modules google.golang.org/appengine/internal/remote_api -google.golang.org/appengine/internal/socket google.golang.org/appengine/internal/urlfetch -google.golang.org/appengine/socket google.golang.org/appengine/urlfetch -# google.golang.org/genproto v0.0.0-20230530153820-e85fd2cbaebc +# google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17 ## explicit; go 1.19 -google.golang.org/genproto/googleapis/iam/v1 google.golang.org/genproto/googleapis/type/date google.golang.org/genproto/googleapis/type/expr google.golang.org/genproto/internal -# google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc +# google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17 ## explicit; go 1.19 google.golang.org/genproto/googleapis/api google.golang.org/genproto/googleapis/api/annotations -# google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc +# google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f ## explicit; go 1.19 google.golang.org/genproto/googleapis/rpc/code google.golang.org/genproto/googleapis/rpc/errdetails google.golang.org/genproto/googleapis/rpc/status -# google.golang.org/grpc v1.56.3 -## explicit; go 1.17 +# google.golang.org/grpc v1.59.0 +## explicit; go 1.19 google.golang.org/grpc google.golang.org/grpc/attributes google.golang.org/grpc/backoff @@ -1089,6 +1289,7 @@ google.golang.org/grpc/internal/grpclog google.golang.org/grpc/internal/grpcrand google.golang.org/grpc/internal/grpcsync google.golang.org/grpc/internal/grpcutil +google.golang.org/grpc/internal/idle google.golang.org/grpc/internal/metadata google.golang.org/grpc/internal/pretty google.golang.org/grpc/internal/resolver @@ -1104,8 +1305,10 @@ 
google.golang.org/grpc/keepalive google.golang.org/grpc/metadata google.golang.org/grpc/peer google.golang.org/grpc/reflection +google.golang.org/grpc/reflection/grpc_reflection_v1 google.golang.org/grpc/reflection/grpc_reflection_v1alpha google.golang.org/grpc/resolver +google.golang.org/grpc/resolver/manual google.golang.org/grpc/serviceconfig google.golang.org/grpc/stats google.golang.org/grpc/status @@ -1162,8 +1365,8 @@ gopkg.in/yaml.v2 # gopkg.in/yaml.v3 v3.0.1 ## explicit gopkg.in/yaml.v3 -# honnef.co/go/tools v0.3.3 -## explicit; go 1.17 +# honnef.co/go/tools v0.4.6 +## explicit; go 1.19 honnef.co/go/tools/analysis/code honnef.co/go/tools/analysis/edit honnef.co/go/tools/analysis/facts/deprecated @@ -1192,9 +1395,12 @@ honnef.co/go/tools/staticcheck/fakereflect honnef.co/go/tools/staticcheck/fakexml honnef.co/go/tools/stylecheck honnef.co/go/tools/unused -# mvdan.cc/gofumpt v0.4.0 -## explicit; go 1.18 +# mvdan.cc/gofumpt v0.6.0 +## explicit; go 1.20 mvdan.cc/gofumpt/format +mvdan.cc/gofumpt/internal/govendor/go/doc/comment +mvdan.cc/gofumpt/internal/govendor/go/format +mvdan.cc/gofumpt/internal/govendor/go/printer mvdan.cc/gofumpt/internal/version # mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed ## explicit @@ -1202,6 +1408,6 @@ mvdan.cc/interfacer/check # mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b ## explicit mvdan.cc/lint -# mvdan.cc/unparam v0.0.0-20220706161116-678bad134442 -## explicit; go 1.17 +# mvdan.cc/unparam v0.0.0-20240104100049-c549a3470d14 +## explicit; go 1.20 mvdan.cc/unparam/check diff --git a/vendor/mvdan.cc/gofumpt/format/format.go b/vendor/mvdan.cc/gofumpt/format/format.go index fa4f2fcb6d..7316eb78b8 100644 --- a/vendor/mvdan.cc/gofumpt/format/format.go +++ b/vendor/mvdan.cc/gofumpt/format/format.go @@ -10,7 +10,6 @@ import ( "bytes" "fmt" "go/ast" - "go/format" "go/parser" "go/token" "os" @@ -26,22 +25,26 @@ import ( "golang.org/x/mod/semver" "golang.org/x/tools/go/ast/astutil" + "mvdan.cc/gofumpt/internal/govendor/go/format" "mvdan.cc/gofumpt/internal/version" ) // Options is the set of formatting options which affect gofumpt. type Options struct { - // LangVersion corresponds to the Go language version a piece of code is - // written in. The version is used to decide whether to apply formatting - // rules which require new language features. When inside a Go module, - // LangVersion should be: - // - // go mod edit -json | jq -r '.Go' + // TODO: link to the go/version docs once Go 1.22 is out. + // The old semver docs said: // // LangVersion is treated as a semantic version, which may start with a "v" // prefix. Like Go versions, it may also be incomplete; "1.14" is equivalent // to "1.14.0". When empty, it is equivalent to "v1", to not use language // features which could break programs. + + // LangVersion is the Go version a piece of code is written in. + // The version is used to decide whether to apply formatting + // rules which require new language features. + // When inside a Go module, LangVersion should typically be: + // + // go mod edit -json | jq -r '.Go' LangVersion string // ModulePath corresponds to the Go module path which contains the source @@ -82,22 +85,28 @@ func Source(src []byte, opts Options) ([]byte, error) { return buf.Bytes(), nil } +var rxGoVersionMajorMinor = regexp.MustCompile(`^(v|go)?([1-9]+)\.([0-9]+)`) + // File modifies a file and fset in place to follow gofumpt's format. 
The // changes might include manipulating adding or removing newlines in fset, // modifying the position of nodes, or modifying literal values. func File(fset *token.FileSet, file *ast.File, opts Options) { simplify(file) + // TODO: replace this hacky mess with go/version once we can rely on Go 1.22, + // as well as replacing our uses of the semver package. + // In particular, we likely want to allow any of 1.21, 1.21.2, or go1.21rc3, + // but we can rely on go/version.Lang to validate and normalize. if opts.LangVersion == "" { - opts.LangVersion = "v1" - } else if opts.LangVersion[0] != 'v' { - opts.LangVersion = "v" + opts.LangVersion + opts.LangVersion = "v1.0" } - if !semver.IsValid(opts.LangVersion) { - panic(fmt.Sprintf("invalid semver string: %q", opts.LangVersion)) + m := rxGoVersionMajorMinor.FindStringSubmatch(opts.LangVersion) + if m == nil { + panic(fmt.Sprintf("invalid Go version: %q", opts.LangVersion)) } + opts.LangVersion = "v" + m[2] + "." + m[3] f := &fumpter{ - File: fset.File(file.Pos()), + file: fset.File(file.Pos()), fset: fset, astFile: file, Options: opts, @@ -170,7 +179,7 @@ var rxOctalInteger = regexp.MustCompile(`\A0[0-7_]+\z`) type fumpter struct { Options - *token.File + file *token.File fset *token.FileSet astFile *ast.File @@ -217,7 +226,8 @@ func (f *fumpter) inlineComment(pos token.Pos) *ast.Comment { func (f *fumpter) addNewline(at token.Pos) { offset := f.Offset(at) - field := reflect.ValueOf(f.File).Elem().FieldByName("lines") + // TODO: replace with the new Lines method once we require Go 1.21 or later + field := reflect.ValueOf(f.file).Elem().FieldByName("lines") n := field.Len() lines := make([]int, 0, n+1) for i := 0; i < n; i++ { @@ -236,7 +246,7 @@ func (f *fumpter) addNewline(at token.Pos) { if offset >= 0 { lines = append(lines, offset) } - if !f.SetLines(lines) { + if !f.file.SetLines(lines) { panic(fmt.Sprintf("could not set lines to %v", lines)) } } @@ -245,7 +255,7 @@ func (f *fumpter) addNewline(at token.Pos) { // up on the same line. func (f *fumpter) removeLines(fromLine, toLine int) { for fromLine < toLine { - f.MergeLine(fromLine) + f.file.MergeLine(fromLine) toLine-- } } @@ -256,6 +266,18 @@ func (f *fumpter) removeLinesBetween(from, to token.Pos) { f.removeLines(f.Line(from)+1, f.Line(to)) } +func (f *fumpter) Position(p token.Pos) token.Position { + return f.file.PositionFor(p, false) +} + +func (f *fumpter) Line(p token.Pos) int { + return f.Position(p).Line +} + +func (f *fumpter) Offset(p token.Pos) int { + return f.file.Offset(p) +} + type byteCounter int func (b *byteCounter) Write(p []byte) (n int, err error) { @@ -285,14 +307,14 @@ func (f *fumpter) lineEnd(line int) token.Pos { if line < 1 { panic("illegal line number") } - total := f.LineCount() + total := f.file.LineCount() if line > total { panic("illegal line number") } if line == total { return f.astFile.End() } - return f.LineStart(line+1) - 1 + return f.file.LineStart(line+1) - 1 } // rxCommentDirective covers all common Go comment directives: @@ -305,10 +327,11 @@ func (f *fumpter) lineEnd(line int) token.Pos { // //sys(nb)? | syscall function wrapper prototypes // //nolint | nolint directive for golangci // //noinspection | noinspection directive for GoLand and friends +// //NOSONAR | NOSONAR directive for SonarQube // // Note that the "some-words:" matching expects a letter afterward, such as // "go:generate", to prevent matching false positives like "https://site". 
-var rxCommentDirective = regexp.MustCompile(`^([a-z-]+:[a-z]+|line\b|export\b|extern\b|sys(nb)?\b|no(lint|inspection)\b)`) +var rxCommentDirective = regexp.MustCompile(`^([a-z-]+:[a-z]+|line\b|export\b|extern\b|sys(nb)?\b|no(lint|inspection)\b)|NOSONAR\b`) func (f *fumpter) applyPre(c *astutil.Cursor) { f.splitLongLine(c) @@ -395,7 +418,7 @@ func (f *fumpter) applyPre(c *astutil.Cursor) { slc := []string{ "//gofumpt:diagnose", "version:", - version.String(), + version.String(""), "flags:", "-lang=" + f.LangVersion, "-modpath=" + f.ModulePath, @@ -467,13 +490,19 @@ func (f *fumpter) applyPre(c *astutil.Cursor) { specEnd := node.Specs[0].End() if len(f.commentsBetween(node.TokPos, specPos)) > 0 { - // If the single spec has any comment, it must - // go before the entire declaration now. + // If the single spec has a comment on the line above, + // the comment must go before the entire declaration now. node.TokPos = specPos } else { f.removeLines(f.Line(node.TokPos), f.Line(specPos)) } - f.removeLines(f.Line(specEnd), f.Line(node.Rparen)) + if len(f.commentsBetween(specEnd, node.Rparen)) > 0 { + // Leave one newline to not force a comment on the next line to + // become an inline comment. + f.removeLines(f.Line(specEnd)+1, f.Line(node.Rparen)) + } else { + f.removeLines(f.Line(specEnd), f.Line(node.Rparen)) + } // Remove the parentheses. go/printer will automatically // get rid of the newlines. @@ -546,12 +575,19 @@ func (f *fumpter) applyPre(c *astutil.Cursor) { if f.Line(sign.Pos()) != endLine { handleMultiLine := func(fl *ast.FieldList) { + // Refuse to insert a newline before the closing token + // if the list is empty or all in one line. if fl == nil || len(fl.List) == 0 { return } + fieldOpeningLine := f.Line(fl.Opening) + fieldClosingLine := f.Line(fl.Closing) + if fieldOpeningLine == fieldClosingLine { + return + } + lastFieldEnd := fl.List[len(fl.List)-1].End() lastFieldLine := f.Line(lastFieldEnd) - fieldClosingLine := f.Line(fl.Closing) isLastFieldOnFieldClosingLine := lastFieldLine == fieldClosingLine isLastFieldOnSigClosingLine := lastFieldLine == endLine @@ -852,9 +888,9 @@ func (f *fumpter) stmts(list []ast.Stmt) { continue // not an if following another statement } as, ok := list[i-1].(*ast.AssignStmt) - if !ok || as.Tok != token.DEFINE || + if !ok || (as.Tok != token.DEFINE && as.Tok != token.ASSIGN) || !identEqual(as.Lhs[len(as.Lhs)-1], "err") { - continue // not "..., err := ..." + continue // not ", err :=" nor ", err =" } be, ok := ifs.Cond.(*ast.BinaryExpr) if !ok || ifs.Init != nil || ifs.Else != nil { diff --git a/vendor/mvdan.cc/gofumpt/internal/govendor/go/doc/comment/doc.go b/vendor/mvdan.cc/gofumpt/internal/govendor/go/doc/comment/doc.go new file mode 100644 index 0000000000..45a476aa9a --- /dev/null +++ b/vendor/mvdan.cc/gofumpt/internal/govendor/go/doc/comment/doc.go @@ -0,0 +1,36 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package comment implements parsing and reformatting of Go doc comments, +(documentation comments), which are comments that immediately precede +a top-level declaration of a package, const, func, type, or var. + +Go doc comment syntax is a simplified subset of Markdown that supports +links, headings, paragraphs, lists (without nesting), and preformatted text blocks. +The details of the syntax are documented at https://go.dev/doc/comment. 
+ +To parse the text associated with a doc comment (after removing comment markers), +use a [Parser]: + + var p comment.Parser + doc := p.Parse(text) + +The result is a [*Doc]. +To reformat it as a doc comment, HTML, Markdown, or plain text, +use a [Printer]: + + var pr comment.Printer + os.Stdout.Write(pr.Text(doc)) + +The [Parser] and [Printer] types are structs whose fields can be +modified to customize the operations. +For details, see the documentation for those types. + +Use cases that need additional control over reformatting can +implement their own logic by inspecting the parsed syntax itself. +See the documentation for [Doc], [Block], [Text] for an overview +and links to additional types. +*/ +package comment diff --git a/vendor/mvdan.cc/gofumpt/internal/govendor/go/doc/comment/html.go b/vendor/mvdan.cc/gofumpt/internal/govendor/go/doc/comment/html.go new file mode 100644 index 0000000000..bc076f6a58 --- /dev/null +++ b/vendor/mvdan.cc/gofumpt/internal/govendor/go/doc/comment/html.go @@ -0,0 +1,169 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package comment + +import ( + "bytes" + "fmt" + "strconv" +) + +// An htmlPrinter holds the state needed for printing a Doc as HTML. +type htmlPrinter struct { + *Printer + tight bool +} + +// HTML returns an HTML formatting of the Doc. +// See the [Printer] documentation for ways to customize the HTML output. +func (p *Printer) HTML(d *Doc) []byte { + hp := &htmlPrinter{Printer: p} + var out bytes.Buffer + for _, x := range d.Content { + hp.block(&out, x) + } + return out.Bytes() +} + +// block prints the block x to out. +func (p *htmlPrinter) block(out *bytes.Buffer, x Block) { + switch x := x.(type) { + default: + fmt.Fprintf(out, "?%T", x) + + case *Paragraph: + if !p.tight { + out.WriteString("

") + } + p.text(out, x.Text) + out.WriteString("\n") + + case *Heading: + out.WriteString("") + p.text(out, x.Text) + out.WriteString("\n") + + case *Code: + out.WriteString("

")
+		p.escape(out, x.Text)
+		out.WriteString("
\n") + + case *List: + kind := "ol>\n" + if x.Items[0].Number == "" { + kind = "ul>\n" + } + out.WriteString("<") + out.WriteString(kind) + next := "1" + for _, item := range x.Items { + out.WriteString("") + p.tight = !x.BlankBetween() + for _, blk := range item.Content { + p.block(out, blk) + } + p.tight = false + } + out.WriteString("= 0; i-- { + if b[i] < '9' { + b[i]++ + return string(b) + } + b[i] = '0' + } + return "1" + string(b) +} + +// text prints the text sequence x to out. +func (p *htmlPrinter) text(out *bytes.Buffer, x []Text) { + for _, t := range x { + switch t := t.(type) { + case Plain: + p.escape(out, string(t)) + case Italic: + out.WriteString("") + p.escape(out, string(t)) + out.WriteString("") + case *Link: + out.WriteString(``) + p.text(out, t.Text) + out.WriteString("") + case *DocLink: + url := p.docLinkURL(t) + if url != "" { + out.WriteString(``) + } + p.text(out, t.Text) + if url != "" { + out.WriteString("") + } + } + } +} + +// escape prints s to out as plain text, +// escaping < & " ' and > to avoid being misinterpreted +// in larger HTML constructs. +func (p *htmlPrinter) escape(out *bytes.Buffer, s string) { + start := 0 + for i := 0; i < len(s); i++ { + switch s[i] { + case '<': + out.WriteString(s[start:i]) + out.WriteString("<") + start = i + 1 + case '&': + out.WriteString(s[start:i]) + out.WriteString("&") + start = i + 1 + case '"': + out.WriteString(s[start:i]) + out.WriteString(""") + start = i + 1 + case '\'': + out.WriteString(s[start:i]) + out.WriteString("'") + start = i + 1 + case '>': + out.WriteString(s[start:i]) + out.WriteString(">") + start = i + 1 + } + } + out.WriteString(s[start:]) +} diff --git a/vendor/mvdan.cc/gofumpt/internal/govendor/go/doc/comment/markdown.go b/vendor/mvdan.cc/gofumpt/internal/govendor/go/doc/comment/markdown.go new file mode 100644 index 0000000000..d8550f2e39 --- /dev/null +++ b/vendor/mvdan.cc/gofumpt/internal/govendor/go/doc/comment/markdown.go @@ -0,0 +1,188 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package comment + +import ( + "bytes" + "fmt" + "strings" +) + +// An mdPrinter holds the state needed for printing a Doc as Markdown. +type mdPrinter struct { + *Printer + headingPrefix string + raw bytes.Buffer +} + +// Markdown returns a Markdown formatting of the Doc. +// See the [Printer] documentation for ways to customize the Markdown output. +func (p *Printer) Markdown(d *Doc) []byte { + mp := &mdPrinter{ + Printer: p, + headingPrefix: strings.Repeat("#", p.headingLevel()) + " ", + } + + var out bytes.Buffer + for i, x := range d.Content { + if i > 0 { + out.WriteByte('\n') + } + mp.block(&out, x) + } + return out.Bytes() +} + +// block prints the block x to out. 
+func (p *mdPrinter) block(out *bytes.Buffer, x Block) { + switch x := x.(type) { + default: + fmt.Fprintf(out, "?%T", x) + + case *Paragraph: + p.text(out, x.Text) + out.WriteString("\n") + + case *Heading: + out.WriteString(p.headingPrefix) + p.text(out, x.Text) + if id := p.headingID(x); id != "" { + out.WriteString(" {#") + out.WriteString(id) + out.WriteString("}") + } + out.WriteString("\n") + + case *Code: + md := x.Text + for md != "" { + var line string + line, md, _ = strings.Cut(md, "\n") + if line != "" { + out.WriteString("\t") + out.WriteString(line) + } + out.WriteString("\n") + } + + case *List: + loose := x.BlankBetween() + for i, item := range x.Items { + if i > 0 && loose { + out.WriteString("\n") + } + if n := item.Number; n != "" { + out.WriteString(" ") + out.WriteString(n) + out.WriteString(". ") + } else { + out.WriteString(" - ") // SP SP - SP + } + for i, blk := range item.Content { + const fourSpace = " " + if i > 0 { + out.WriteString("\n" + fourSpace) + } + p.text(out, blk.(*Paragraph).Text) + out.WriteString("\n") + } + } + } +} + +// text prints the text sequence x to out. +func (p *mdPrinter) text(out *bytes.Buffer, x []Text) { + p.raw.Reset() + p.rawText(&p.raw, x) + line := bytes.TrimSpace(p.raw.Bytes()) + if len(line) == 0 { + return + } + switch line[0] { + case '+', '-', '*', '#': + // Escape what would be the start of an unordered list or heading. + out.WriteByte('\\') + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + i := 1 + for i < len(line) && '0' <= line[i] && line[i] <= '9' { + i++ + } + if i < len(line) && (line[i] == '.' || line[i] == ')') { + // Escape what would be the start of an ordered list. + out.Write(line[:i]) + out.WriteByte('\\') + line = line[i:] + } + } + out.Write(line) +} + +// rawText prints the text sequence x to out, +// without worrying about escaping characters +// that have special meaning at the start of a Markdown line. +func (p *mdPrinter) rawText(out *bytes.Buffer, x []Text) { + for _, t := range x { + switch t := t.(type) { + case Plain: + p.escape(out, string(t)) + case Italic: + out.WriteString("*") + p.escape(out, string(t)) + out.WriteString("*") + case *Link: + out.WriteString("[") + p.rawText(out, t.Text) + out.WriteString("](") + out.WriteString(t.URL) + out.WriteString(")") + case *DocLink: + url := p.docLinkURL(t) + if url != "" { + out.WriteString("[") + } + p.rawText(out, t.Text) + if url != "" { + out.WriteString("](") + url = strings.ReplaceAll(url, "(", "%28") + url = strings.ReplaceAll(url, ")", "%29") + out.WriteString(url) + out.WriteString(")") + } + } + } +} + +// escape prints s to out as plain text, +// escaping special characters to avoid being misinterpreted +// as Markdown markup sequences. +func (p *mdPrinter) escape(out *bytes.Buffer, s string) { + start := 0 + for i := 0; i < len(s); i++ { + switch s[i] { + case '\n': + // Turn all \n into spaces, for a few reasons: + // - Avoid introducing paragraph breaks accidentally. + // - Avoid the need to reindent after the newline. + // - Avoid problems with Markdown renderers treating + // every mid-paragraph newline as a
<br>
. + out.WriteString(s[start:i]) + out.WriteByte(' ') + start = i + 1 + continue + case '`', '_', '*', '[', '<', '\\': + // Not all of these need to be escaped all the time, + // but is valid and easy to do so. + // We assume the Markdown is being passed to a + // Markdown renderer, not edited by a person, + // so it's fine to have escapes that are not strictly + // necessary in some cases. + out.WriteString(s[start:i]) + out.WriteByte('\\') + out.WriteByte(s[i]) + start = i + 1 + } + } + out.WriteString(s[start:]) +} diff --git a/vendor/mvdan.cc/gofumpt/internal/govendor/go/doc/comment/parse.go b/vendor/mvdan.cc/gofumpt/internal/govendor/go/doc/comment/parse.go new file mode 100644 index 0000000000..372577b2b3 --- /dev/null +++ b/vendor/mvdan.cc/gofumpt/internal/govendor/go/doc/comment/parse.go @@ -0,0 +1,1262 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package comment + +import ( + "sort" + "strings" + "unicode" + "unicode/utf8" +) + +// A Doc is a parsed Go doc comment. +type Doc struct { + // Content is the sequence of content blocks in the comment. + Content []Block + + // Links is the link definitions in the comment. + Links []*LinkDef +} + +// A LinkDef is a single link definition. +type LinkDef struct { + Text string // the link text + URL string // the link URL + Used bool // whether the comment uses the definition +} + +// A Block is block-level content in a doc comment, +// one of [*Code], [*Heading], [*List], or [*Paragraph]. +type Block interface { + block() +} + +// A Heading is a doc comment heading. +type Heading struct { + Text []Text // the heading text +} + +func (*Heading) block() {} + +// A List is a numbered or bullet list. +// Lists are always non-empty: len(Items) > 0. +// In a numbered list, every Items[i].Number is a non-empty string. +// In a bullet list, every Items[i].Number is an empty string. +type List struct { + // Items is the list items. + Items []*ListItem + + // ForceBlankBefore indicates that the list must be + // preceded by a blank line when reformatting the comment, + // overriding the usual conditions. See the BlankBefore method. + // + // The comment parser sets ForceBlankBefore for any list + // that is preceded by a blank line, to make sure + // the blank line is preserved when printing. + ForceBlankBefore bool + + // ForceBlankBetween indicates that list items must be + // separated by blank lines when reformatting the comment, + // overriding the usual conditions. See the BlankBetween method. + // + // The comment parser sets ForceBlankBetween for any list + // that has a blank line between any two of its items, to make sure + // the blank lines are preserved when printing. + ForceBlankBetween bool +} + +func (*List) block() {} + +// BlankBefore reports whether a reformatting of the comment +// should include a blank line before the list. +// The default rule is the same as for [BlankBetween]: +// if the list item content contains any blank lines +// (meaning at least one item has multiple paragraphs) +// then the list itself must be preceded by a blank line. +// A preceding blank line can be forced by setting [List].ForceBlankBefore. +func (l *List) BlankBefore() bool { + return l.ForceBlankBefore || l.BlankBetween() +} + +// BlankBetween reports whether a reformatting of the comment +// should include a blank line between each pair of list items. 
+// The default rule is that if the list item content contains any blank lines +// (meaning at least one item has multiple paragraphs) +// then list items must themselves be separated by blank lines. +// Blank line separators can be forced by setting [List].ForceBlankBetween. +func (l *List) BlankBetween() bool { + if l.ForceBlankBetween { + return true + } + for _, item := range l.Items { + if len(item.Content) != 1 { + // Unreachable for parsed comments today, + // since the only way to get multiple item.Content + // is multiple paragraphs, which must have been + // separated by a blank line. + return true + } + } + return false +} + +// A ListItem is a single item in a numbered or bullet list. +type ListItem struct { + // Number is a decimal string in a numbered list + // or an empty string in a bullet list. + Number string // "1", "2", ...; "" for bullet list + + // Content is the list content. + // Currently, restrictions in the parser and printer + // require every element of Content to be a *Paragraph. + Content []Block // Content of this item. +} + +// A Paragraph is a paragraph of text. +type Paragraph struct { + Text []Text +} + +func (*Paragraph) block() {} + +// A Code is a preformatted code block. +type Code struct { + // Text is the preformatted text, ending with a newline character. + // It may be multiple lines, each of which ends with a newline character. + // It is never empty, nor does it start or end with a blank line. + Text string +} + +func (*Code) block() {} + +// A Text is text-level content in a doc comment, +// one of [Plain], [Italic], [*Link], or [*DocLink]. +type Text interface { + text() +} + +// A Plain is a string rendered as plain text (not italicized). +type Plain string + +func (Plain) text() {} + +// An Italic is a string rendered as italicized text. +type Italic string + +func (Italic) text() {} + +// A Link is a link to a specific URL. +type Link struct { + Auto bool // is this an automatic (implicit) link of a literal URL? + Text []Text // text of link + URL string // target URL of link +} + +func (*Link) text() {} + +// A DocLink is a link to documentation for a Go package or symbol. +type DocLink struct { + Text []Text // text of link + + // ImportPath, Recv, and Name identify the Go package or symbol + // that is the link target. The potential combinations of + // non-empty fields are: + // - ImportPath: a link to another package + // - ImportPath, Name: a link to a const, func, type, or var in another package + // - ImportPath, Recv, Name: a link to a method in another package + // - Name: a link to a const, func, type, or var in this package + // - Recv, Name: a link to a method in this package + ImportPath string // import path + Recv string // receiver type, without any pointer star, for methods + Name string // const, func, type, var, or method name +} + +func (*DocLink) text() {} + +// A Parser is a doc comment parser. +// The fields in the struct can be filled in before calling Parse +// in order to customize the details of the parsing process. +type Parser struct { + // Words is a map of Go identifier words that + // should be italicized and potentially linked. + // If Words[w] is the empty string, then the word w + // is only italicized. Otherwise it is linked, using + // Words[w] as the link target. + // Words corresponds to the [go/doc.ToHTML] words parameter. + Words map[string]string + + // LookupPackage resolves a package name to an import path. 
+ // + // If LookupPackage(name) returns ok == true, then [name] + // (or [name.Sym] or [name.Sym.Method]) + // is considered a documentation link to importPath's package docs. + // It is valid to return "", true, in which case name is considered + // to refer to the current package. + // + // If LookupPackage(name) returns ok == false, + // then [name] (or [name.Sym] or [name.Sym.Method]) + // will not be considered a documentation link, + // except in the case where name is the full (but single-element) import path + // of a package in the standard library, such as in [math] or [io.Reader]. + // LookupPackage is still called for such names, + // in order to permit references to imports of other packages + // with the same package names. + // + // Setting LookupPackage to nil is equivalent to setting it to + // a function that always returns "", false. + LookupPackage func(name string) (importPath string, ok bool) + + // LookupSym reports whether a symbol name or method name + // exists in the current package. + // + // If LookupSym("", "Name") returns true, then [Name] + // is considered a documentation link for a const, func, type, or var. + // + // Similarly, if LookupSym("Recv", "Name") returns true, + // then [Recv.Name] is considered a documentation link for + // type Recv's method Name. + // + // Setting LookupSym to nil is equivalent to setting it to a function + // that always returns false. + LookupSym func(recv, name string) (ok bool) +} + +// parseDoc is parsing state for a single doc comment. +type parseDoc struct { + *Parser + *Doc + links map[string]*LinkDef + lines []string + lookupSym func(recv, name string) bool +} + +// lookupPkg is called to look up the pkg in [pkg], [pkg.Name], and [pkg.Name.Recv]. +// If pkg has a slash, it is assumed to be the full import path and is returned with ok = true. +// +// Otherwise, pkg is probably a simple package name like "rand" (not "crypto/rand" or "math/rand"). +// d.LookupPackage provides a way for the caller to allow resolving such names with reference +// to the imports in the surrounding package. +// +// There is one collision between these two cases: single-element standard library names +// like "math" are full import paths but don't contain slashes. We let d.LookupPackage have +// the first chance to resolve it, in case there's a different package imported as math, +// and otherwise we refer to a built-in list of single-element standard library package names. +func (d *parseDoc) lookupPkg(pkg string) (importPath string, ok bool) { + if strings.Contains(pkg, "/") { // assume a full import path + if validImportPath(pkg) { + return pkg, true + } + return "", false + } + if d.LookupPackage != nil { + // Give LookupPackage a chance. + if path, ok := d.LookupPackage(pkg); ok { + return path, true + } + } + return DefaultLookupPackage(pkg) +} + +func isStdPkg(path string) bool { + // TODO(rsc): Use sort.Find once we don't have to worry about + // copying this code into older Go environments. + i := sort.Search(len(stdPkgs), func(i int) bool { return stdPkgs[i] >= path }) + return i < len(stdPkgs) && stdPkgs[i] == path +} + +// DefaultLookupPackage is the default package lookup +// function, used when [Parser].LookupPackage is nil. +// It recognizes names of the packages from the standard +// library with single-element import paths, such as math, +// which would otherwise be impossible to name. +// +// Note that the go/doc package provides a more sophisticated +// lookup based on the imports used in the current package. 
+func DefaultLookupPackage(name string) (importPath string, ok bool) { + if isStdPkg(name) { + return name, true + } + return "", false +} + +// Parse parses the doc comment text and returns the *Doc form. +// Comment markers (/* // and */) in the text must have already been removed. +func (p *Parser) Parse(text string) *Doc { + lines := unindent(strings.Split(text, "\n")) + d := &parseDoc{ + Parser: p, + Doc: new(Doc), + links: make(map[string]*LinkDef), + lines: lines, + lookupSym: func(recv, name string) bool { return false }, + } + if p.LookupSym != nil { + d.lookupSym = p.LookupSym + } + + // First pass: break into block structure and collect known links. + // The text is all recorded as Plain for now. + var prev span + for _, s := range parseSpans(lines) { + var b Block + switch s.kind { + default: + panic("mvdan.cc/gofumpt/internal/govendor/go/doc/comment: internal error: unknown span kind") + case spanList: + b = d.list(lines[s.start:s.end], prev.end < s.start) + case spanCode: + b = d.code(lines[s.start:s.end]) + case spanOldHeading: + b = d.oldHeading(lines[s.start]) + case spanHeading: + b = d.heading(lines[s.start]) + case spanPara: + b = d.paragraph(lines[s.start:s.end]) + } + if b != nil { + d.Content = append(d.Content, b) + } + prev = s + } + + // Second pass: interpret all the Plain text now that we know the links. + for _, b := range d.Content { + switch b := b.(type) { + case *Paragraph: + b.Text = d.parseLinkedText(string(b.Text[0].(Plain))) + case *List: + for _, i := range b.Items { + for _, c := range i.Content { + p := c.(*Paragraph) + p.Text = d.parseLinkedText(string(p.Text[0].(Plain))) + } + } + } + } + + return d.Doc +} + +// A span represents a single span of comment lines (lines[start:end]) +// of an identified kind (code, heading, paragraph, and so on). +type span struct { + start int + end int + kind spanKind +} + +// A spanKind describes the kind of span. +type spanKind int + +const ( + _ spanKind = iota + spanCode + spanHeading + spanList + spanOldHeading + spanPara +) + +func parseSpans(lines []string) []span { + var spans []span + + // The loop may process a line twice: once as unindented + // and again forced indented. So the maximum expected + // number of iterations is 2*len(lines). The repeating logic + // can be subtle, though, and to protect against introduction + // of infinite loops in future changes, we watch to see that + // we are not looping too much. A panic is better than a + // quiet infinite loop. + watchdog := 2 * len(lines) + + i := 0 + forceIndent := 0 +Spans: + for { + // Skip blank lines. + for i < len(lines) && lines[i] == "" { + i++ + } + if i >= len(lines) { + break + } + if watchdog--; watchdog < 0 { + panic("mvdan.cc/gofumpt/internal/govendor/go/doc/comment: internal error: not making progress") + } + + var kind spanKind + start := i + end := i + if i < forceIndent || indented(lines[i]) { + // Indented (or force indented). + // Ends before next unindented. (Blank lines are OK.) + // If this is an unindented list that we are heuristically treating as indented, + // then accept unindented list item lines up to the first blank lines. + // The heuristic is disabled at blank lines to contain its effect + // to non-gofmt'ed sections of the comment. + unindentedListOK := isList(lines[i]) && i < forceIndent + i++ + for i < len(lines) && (lines[i] == "" || i < forceIndent || indented(lines[i]) || (unindentedListOK && isList(lines[i]))) { + if lines[i] == "" { + unindentedListOK = false + } + i++ + } + + // Drop trailing blank lines. 
+ end = i + for end > start && lines[end-1] == "" { + end-- + } + + // If indented lines are followed (without a blank line) + // by an unindented line ending in a brace, + // take that one line too. This fixes the common mistake + // of pasting in something like + // + // func main() { + // fmt.Println("hello, world") + // } + // + // and forgetting to indent it. + // The heuristic will never trigger on a gofmt'ed comment, + // because any gofmt'ed code block or list would be + // followed by a blank line or end of comment. + if end < len(lines) && strings.HasPrefix(lines[end], "}") { + end++ + } + + if isList(lines[start]) { + kind = spanList + } else { + kind = spanCode + } + } else { + // Unindented. Ends at next blank or indented line. + i++ + for i < len(lines) && lines[i] != "" && !indented(lines[i]) { + i++ + } + end = i + + // If unindented lines are followed (without a blank line) + // by an indented line that would start a code block, + // check whether the final unindented lines + // should be left for the indented section. + // This can happen for the common mistakes of + // unindented code or unindented lists. + // The heuristic will never trigger on a gofmt'ed comment, + // because any gofmt'ed code block would have a blank line + // preceding it after the unindented lines. + if i < len(lines) && lines[i] != "" && !isList(lines[i]) { + switch { + case isList(lines[i-1]): + // If the final unindented line looks like a list item, + // this may be the first indented line wrap of + // a mistakenly unindented list. + // Leave all the unindented list items. + forceIndent = end + end-- + for end > start && isList(lines[end-1]) { + end-- + } + + case strings.HasSuffix(lines[i-1], "{") || strings.HasSuffix(lines[i-1], `\`): + // If the final unindented line ended in { or \ + // it is probably the start of a misindented code block. + // Give the user a single line fix. + // Often that's enough; if not, the user can fix the others themselves. + forceIndent = end + end-- + } + + if start == end && forceIndent > start { + i = start + continue Spans + } + } + + // Span is either paragraph or heading. + if end-start == 1 && isHeading(lines[start]) { + kind = spanHeading + } else if end-start == 1 && isOldHeading(lines[start], lines, start) { + kind = spanOldHeading + } else { + kind = spanPara + } + } + + spans = append(spans, span{start, end, kind}) + i = end + } + + return spans +} + +// indented reports whether line is indented +// (starts with a leading space or tab). +func indented(line string) bool { + return line != "" && (line[0] == ' ' || line[0] == '\t') +} + +// unindent removes any common space/tab prefix +// from each line in lines, returning a copy of lines in which +// those prefixes have been trimmed from each line. +// It also replaces any lines containing only spaces with blank lines (empty strings). +func unindent(lines []string) []string { + // Trim leading and trailing blank lines. + for len(lines) > 0 && isBlank(lines[0]) { + lines = lines[1:] + } + for len(lines) > 0 && isBlank(lines[len(lines)-1]) { + lines = lines[:len(lines)-1] + } + if len(lines) == 0 { + return nil + } + + // Compute and remove common indentation. 
+ prefix := leadingSpace(lines[0]) + for _, line := range lines[1:] { + if !isBlank(line) { + prefix = commonPrefix(prefix, leadingSpace(line)) + } + } + + out := make([]string, len(lines)) + for i, line := range lines { + line = strings.TrimPrefix(line, prefix) + if strings.TrimSpace(line) == "" { + line = "" + } + out[i] = line + } + for len(out) > 0 && out[0] == "" { + out = out[1:] + } + for len(out) > 0 && out[len(out)-1] == "" { + out = out[:len(out)-1] + } + return out +} + +// isBlank reports whether s is a blank line. +func isBlank(s string) bool { + return len(s) == 0 || (len(s) == 1 && s[0] == '\n') +} + +// commonPrefix returns the longest common prefix of a and b. +func commonPrefix(a, b string) string { + i := 0 + for i < len(a) && i < len(b) && a[i] == b[i] { + i++ + } + return a[0:i] +} + +// leadingSpace returns the longest prefix of s consisting of spaces and tabs. +func leadingSpace(s string) string { + i := 0 + for i < len(s) && (s[i] == ' ' || s[i] == '\t') { + i++ + } + return s[:i] +} + +// isOldHeading reports whether line is an old-style section heading. +// line is all[off]. +func isOldHeading(line string, all []string, off int) bool { + if off <= 0 || all[off-1] != "" || off+2 >= len(all) || all[off+1] != "" || leadingSpace(all[off+2]) != "" { + return false + } + + line = strings.TrimSpace(line) + + // a heading must start with an uppercase letter + r, _ := utf8.DecodeRuneInString(line) + if !unicode.IsLetter(r) || !unicode.IsUpper(r) { + return false + } + + // it must end in a letter or digit: + r, _ = utf8.DecodeLastRuneInString(line) + if !unicode.IsLetter(r) && !unicode.IsDigit(r) { + return false + } + + // exclude lines with illegal characters. we allow "()," + if strings.ContainsAny(line, ";:!?+*/=[]{}_^°&§~%#@<\">\\") { + return false + } + + // allow "'" for possessive "'s" only + for b := line; ; { + var ok bool + if _, b, ok = strings.Cut(b, "'"); !ok { + break + } + if b != "s" && !strings.HasPrefix(b, "s ") { + return false // ' not followed by s and then end-of-word + } + } + + // allow "." when followed by non-space + for b := line; ; { + var ok bool + if _, b, ok = strings.Cut(b, "."); !ok { + break + } + if b == "" || strings.HasPrefix(b, " ") { + return false // not followed by non-space + } + } + + return true +} + +// oldHeading returns the *Heading for the given old-style section heading line. +func (d *parseDoc) oldHeading(line string) Block { + return &Heading{Text: []Text{Plain(strings.TrimSpace(line))}} +} + +// isHeading reports whether line is a new-style section heading. +func isHeading(line string) bool { + return len(line) >= 2 && + line[0] == '#' && + (line[1] == ' ' || line[1] == '\t') && + strings.TrimSpace(line) != "#" +} + +// heading returns the *Heading for the given new-style section heading line. +func (d *parseDoc) heading(line string) Block { + return &Heading{Text: []Text{Plain(strings.TrimSpace(line[1:]))}} +} + +// code returns a code block built from the lines. +func (d *parseDoc) code(lines []string) *Code { + body := unindent(lines) + body = append(body, "") // to get final \n from Join + return &Code{Text: strings.Join(body, "\n")} +} + +// paragraph returns a paragraph block built from the lines. +// If the lines are link definitions, paragraph adds them to d and returns nil. +func (d *parseDoc) paragraph(lines []string) Block { + // Is this a block of known links? Handle. 
+ var defs []*LinkDef + for _, line := range lines { + def, ok := parseLink(line) + if !ok { + goto NoDefs + } + defs = append(defs, def) + } + for _, def := range defs { + d.Links = append(d.Links, def) + if d.links[def.Text] == nil { + d.links[def.Text] = def + } + } + return nil +NoDefs: + + return &Paragraph{Text: []Text{Plain(strings.Join(lines, "\n"))}} +} + +// parseLink parses a single link definition line: +// +// [text]: url +// +// It returns the link definition and whether the line was well formed. +func parseLink(line string) (*LinkDef, bool) { + if line == "" || line[0] != '[' { + return nil, false + } + i := strings.Index(line, "]:") + if i < 0 || i+3 >= len(line) || (line[i+2] != ' ' && line[i+2] != '\t') { + return nil, false + } + + text := line[1:i] + url := strings.TrimSpace(line[i+3:]) + j := strings.Index(url, "://") + if j < 0 || !isScheme(url[:j]) { + return nil, false + } + + // Line has right form and has valid scheme://. + // That's good enough for us - we are not as picky + // about the characters beyond the :// as we are + // when extracting inline URLs from text. + return &LinkDef{Text: text, URL: url}, true +} + +// list returns a list built from the indented lines, +// using forceBlankBefore as the value of the List's ForceBlankBefore field. +func (d *parseDoc) list(lines []string, forceBlankBefore bool) *List { + num, _, _ := listMarker(lines[0]) + var ( + list *List = &List{ForceBlankBefore: forceBlankBefore} + item *ListItem + text []string + ) + flush := func() { + if item != nil { + if para := d.paragraph(text); para != nil { + item.Content = append(item.Content, para) + } + } + text = nil + } + + for _, line := range lines { + if n, after, ok := listMarker(line); ok && (n != "") == (num != "") { + // start new list item + flush() + + item = &ListItem{Number: n} + list.Items = append(list.Items, item) + line = after + } + line = strings.TrimSpace(line) + if line == "" { + list.ForceBlankBetween = true + flush() + continue + } + text = append(text, strings.TrimSpace(line)) + } + flush() + return list +} + +// listMarker parses the line as beginning with a list marker. +// If it can do that, it returns the numeric marker ("" for a bullet list), +// the rest of the line, and ok == true. +// Otherwise, it returns "", "", false. +func listMarker(line string) (num, rest string, ok bool) { + line = strings.TrimSpace(line) + if line == "" { + return "", "", false + } + + // Can we find a marker? + if r, n := utf8.DecodeRuneInString(line); r == '•' || r == '*' || r == '+' || r == '-' { + num, rest = "", line[n:] + } else if '0' <= line[0] && line[0] <= '9' { + n := 1 + for n < len(line) && '0' <= line[n] && line[n] <= '9' { + n++ + } + if n >= len(line) || (line[n] != '.' && line[n] != ')') { + return "", "", false + } + num, rest = line[:n], line[n+1:] + } else { + return "", "", false + } + + if !indented(rest) || strings.TrimSpace(rest) == "" { + return "", "", false + } + + return num, rest, true +} + +// isList reports whether the line is the first line of a list, +// meaning starts with a list marker after any indentation. +// (The caller is responsible for checking the line is indented, as appropriate.) +func isList(line string) bool { + _, _, ok := listMarker(line) + return ok +} + +// parseLinkedText parses text that is allowed to contain explicit links, +// such as [math.Sin] or [Go home page], into a slice of Text items. 
+// +// A “pkg” is only assumed to be a full import path if it starts with +// a domain name (a path element with a dot) or is one of the packages +// from the standard library (“[os]”, “[encoding/json]”, and so on). +// To avoid problems with maps, generics, and array types, doc links +// must be both preceded and followed by punctuation, spaces, tabs, +// or the start or end of a line. An example problem would be treating +// map[ast.Expr]TypeAndValue as containing a link. +func (d *parseDoc) parseLinkedText(text string) []Text { + var out []Text + wrote := 0 + flush := func(i int) { + if wrote < i { + out = d.parseText(out, text[wrote:i], true) + wrote = i + } + } + + start := -1 + var buf []byte + for i := 0; i < len(text); i++ { + c := text[i] + if c == '\n' || c == '\t' { + c = ' ' + } + switch c { + case '[': + start = i + case ']': + if start >= 0 { + if def, ok := d.links[string(buf)]; ok { + def.Used = true + flush(start) + out = append(out, &Link{ + Text: d.parseText(nil, text[start+1:i], false), + URL: def.URL, + }) + wrote = i + 1 + } else if link, ok := d.docLink(text[start+1:i], text[:start], text[i+1:]); ok { + flush(start) + link.Text = d.parseText(nil, text[start+1:i], false) + out = append(out, link) + wrote = i + 1 + } + } + start = -1 + buf = buf[:0] + } + if start >= 0 && i != start { + buf = append(buf, c) + } + } + + flush(len(text)) + return out +} + +// docLink parses text, which was found inside [ ] brackets, +// as a doc link if possible, returning the DocLink and ok == true +// or else nil, false. +// The before and after strings are the text before the [ and after the ] +// on the same line. Doc links must be preceded and followed by +// punctuation, spaces, tabs, or the start or end of a line. +func (d *parseDoc) docLink(text, before, after string) (link *DocLink, ok bool) { + if before != "" { + r, _ := utf8.DecodeLastRuneInString(before) + if !unicode.IsPunct(r) && r != ' ' && r != '\t' && r != '\n' { + return nil, false + } + } + if after != "" { + r, _ := utf8.DecodeRuneInString(after) + if !unicode.IsPunct(r) && r != ' ' && r != '\t' && r != '\n' { + return nil, false + } + } + text = strings.TrimPrefix(text, "*") + pkg, name, ok := splitDocName(text) + var recv string + if ok { + pkg, recv, _ = splitDocName(pkg) + } + if pkg != "" { + if pkg, ok = d.lookupPkg(pkg); !ok { + return nil, false + } + } else { + if ok = d.lookupSym(recv, name); !ok { + return nil, false + } + } + link = &DocLink{ + ImportPath: pkg, + Recv: recv, + Name: name, + } + return link, true +} + +// If text is of the form before.Name, where Name is a capitalized Go identifier, +// then splitDocName returns before, name, true. +// Otherwise it returns text, "", false. +func splitDocName(text string) (before, name string, foundDot bool) { + i := strings.LastIndex(text, ".") + name = text[i+1:] + if !isName(name) { + return text, "", false + } + if i >= 0 { + before = text[:i] + } + return before, name, true +} + +// parseText parses s as text and returns the result of appending +// those parsed Text elements to out. +// parseText does not handle explicit links like [math.Sin] or [Go home page]: +// those are handled by parseLinkedText. +// If autoLink is true, then parseText recognizes URLs and words from d.Words +// and converts those to links as appropriate. 
+func (d *parseDoc) parseText(out []Text, s string, autoLink bool) []Text { + var w strings.Builder + wrote := 0 + writeUntil := func(i int) { + w.WriteString(s[wrote:i]) + wrote = i + } + flush := func(i int) { + writeUntil(i) + if w.Len() > 0 { + out = append(out, Plain(w.String())) + w.Reset() + } + } + for i := 0; i < len(s); { + t := s[i:] + if autoLink { + if url, ok := autoURL(t); ok { + flush(i) + // Note: The old comment parser would look up the URL in words + // and replace the target with words[URL] if it was non-empty. + // That would allow creating links that display as one URL but + // when clicked go to a different URL. Not sure what the point + // of that is, so we're not doing that lookup here. + out = append(out, &Link{Auto: true, Text: []Text{Plain(url)}, URL: url}) + i += len(url) + wrote = i + continue + } + if id, ok := ident(t); ok { + url, italics := d.Words[id] + if !italics { + i += len(id) + continue + } + flush(i) + if url == "" { + out = append(out, Italic(id)) + } else { + out = append(out, &Link{Auto: true, Text: []Text{Italic(id)}, URL: url}) + } + i += len(id) + wrote = i + continue + } + } + switch { + case strings.HasPrefix(t, "``"): + if len(t) >= 3 && t[2] == '`' { + // Do not convert `` inside ```, in case people are mistakenly writing Markdown. + i += 3 + for i < len(t) && t[i] == '`' { + i++ + } + break + } + writeUntil(i) + w.WriteRune('“') + i += 2 + wrote = i + case strings.HasPrefix(t, "''"): + writeUntil(i) + w.WriteRune('”') + i += 2 + wrote = i + default: + i++ + } + } + flush(len(s)) + return out +} + +// autoURL checks whether s begins with a URL that should be hyperlinked. +// If so, it returns the URL, which is a prefix of s, and ok == true. +// Otherwise it returns "", false. +// The caller should skip over the first len(url) bytes of s +// before further processing. +func autoURL(s string) (url string, ok bool) { + // Find the ://. Fast path to pick off non-URL, + // since we call this at every position in the string. + // The shortest possible URL is ftp://x, 7 bytes. + var i int + switch { + case len(s) < 7: + return "", false + case s[3] == ':': + i = 3 + case s[4] == ':': + i = 4 + case s[5] == ':': + i = 5 + case s[6] == ':': + i = 6 + default: + return "", false + } + if i+3 > len(s) || s[i:i+3] != "://" { + return "", false + } + + // Check valid scheme. + if !isScheme(s[:i]) { + return "", false + } + + // Scan host part. Must have at least one byte, + // and must start and end in non-punctuation. + i += 3 + if i >= len(s) || !isHost(s[i]) || isPunct(s[i]) { + return "", false + } + i++ + end := i + for i < len(s) && isHost(s[i]) { + if !isPunct(s[i]) { + end = i + 1 + } + i++ + } + i = end + + // At this point we are definitely returning a URL (scheme://host). + // We just have to find the longest path we can add to it. + // Heuristics abound. + // We allow parens, braces, and brackets, + // but only if they match (#5043, #22285). + // We allow .,:;?! in the path but not at the end, + // to avoid end-of-sentence punctuation (#18139, #16565). 
+ stk := []byte{} + end = i +Path: + for ; i < len(s); i++ { + if isPunct(s[i]) { + continue + } + if !isPath(s[i]) { + break + } + switch s[i] { + case '(': + stk = append(stk, ')') + case '{': + stk = append(stk, '}') + case '[': + stk = append(stk, ']') + case ')', '}', ']': + if len(stk) == 0 || stk[len(stk)-1] != s[i] { + break Path + } + stk = stk[:len(stk)-1] + } + if len(stk) == 0 { + end = i + 1 + } + } + + return s[:end], true +} + +// isScheme reports whether s is a recognized URL scheme. +// Note that if strings of new length (beyond 3-7) +// are added here, the fast path at the top of autoURL will need updating. +func isScheme(s string) bool { + switch s { + case "file", + "ftp", + "gopher", + "http", + "https", + "mailto", + "nntp": + return true + } + return false +} + +// isHost reports whether c is a byte that can appear in a URL host, +// like www.example.com or user@[::1]:8080 +func isHost(c byte) bool { + // mask is a 128-bit bitmap with 1s for allowed bytes, + // so that the byte c can be tested with a shift and an and. + // If c > 128, then 1<>64)) != 0 +} + +// isPunct reports whether c is a punctuation byte that can appear +// inside a path but not at the end. +func isPunct(c byte) bool { + // mask is a 128-bit bitmap with 1s for allowed bytes, + // so that the byte c can be tested with a shift and an and. + // If c > 128, then 1<>64)) != 0 +} + +// isPath reports whether c is a (non-punctuation) path byte. +func isPath(c byte) bool { + // mask is a 128-bit bitmap with 1s for allowed bytes, + // so that the byte c can be tested with a shift and an and. + // If c > 128, then 1<>64)) != 0 +} + +// isName reports whether s is a capitalized Go identifier (like Name). +func isName(s string) bool { + t, ok := ident(s) + if !ok || t != s { + return false + } + r, _ := utf8.DecodeRuneInString(s) + return unicode.IsUpper(r) +} + +// ident checks whether s begins with a Go identifier. +// If so, it returns the identifier, which is a prefix of s, and ok == true. +// Otherwise it returns "", false. +// The caller should skip over the first len(id) bytes of s +// before further processing. +func ident(s string) (id string, ok bool) { + // Scan [\pL_][\pL_0-9]* + n := 0 + for n < len(s) { + if c := s[n]; c < utf8.RuneSelf { + if isIdentASCII(c) && (n > 0 || c < '0' || c > '9') { + n++ + continue + } + break + } + r, nr := utf8.DecodeRuneInString(s[n:]) + if unicode.IsLetter(r) { + n += nr + continue + } + break + } + return s[:n], n > 0 +} + +// isIdentASCII reports whether c is an ASCII identifier byte. +func isIdentASCII(c byte) bool { + // mask is a 128-bit bitmap with 1s for allowed bytes, + // so that the byte c can be tested with a shift and an and. + // If c > 128, then 1<>64)) != 0 +} + +// validImportPath reports whether path is a valid import path. +// It is a lightly edited copy of golang.org/x/mod/module.CheckImportPath. +func validImportPath(path string) bool { + if !utf8.ValidString(path) { + return false + } + if path == "" { + return false + } + if path[0] == '-' { + return false + } + if strings.Contains(path, "//") { + return false + } + if path[len(path)-1] == '/' { + return false + } + elemStart := 0 + for i, r := range path { + if r == '/' { + if !validImportPathElem(path[elemStart:i]) { + return false + } + elemStart = i + 1 + } + } + return validImportPathElem(path[elemStart:]) +} + +func validImportPathElem(elem string) bool { + if elem == "" || elem[0] == '.' || elem[len(elem)-1] == '.' 
{ + return false + } + for i := 0; i < len(elem); i++ { + if !importPathOK(elem[i]) { + return false + } + } + return true +} + +func importPathOK(c byte) bool { + // mask is a 128-bit bitmap with 1s for allowed bytes, + // so that the byte c can be tested with a shift and an and. + // If c > 128, then 1<>64)) != 0 +} diff --git a/vendor/mvdan.cc/gofumpt/internal/govendor/go/doc/comment/print.go b/vendor/mvdan.cc/gofumpt/internal/govendor/go/doc/comment/print.go new file mode 100644 index 0000000000..e1c070d5a5 --- /dev/null +++ b/vendor/mvdan.cc/gofumpt/internal/govendor/go/doc/comment/print.go @@ -0,0 +1,288 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package comment + +import ( + "bytes" + "fmt" + "strings" +) + +// A Printer is a doc comment printer. +// The fields in the struct can be filled in before calling +// any of the printing methods +// in order to customize the details of the printing process. +type Printer struct { + // HeadingLevel is the nesting level used for + // HTML and Markdown headings. + // If HeadingLevel is zero, it defaults to level 3, + // meaning to use
<h3>
and ###. + HeadingLevel int + + // HeadingID is a function that computes the heading ID + // (anchor tag) to use for the heading h when generating + // HTML and Markdown. If HeadingID returns an empty string, + // then the heading ID is omitted. + // If HeadingID is nil, h.DefaultID is used. + HeadingID func(h *Heading) string + + // DocLinkURL is a function that computes the URL for the given DocLink. + // If DocLinkURL is nil, then link.DefaultURL(p.DocLinkBaseURL) is used. + DocLinkURL func(link *DocLink) string + + // DocLinkBaseURL is used when DocLinkURL is nil, + // passed to [DocLink.DefaultURL] to construct a DocLink's URL. + // See that method's documentation for details. + DocLinkBaseURL string + + // TextPrefix is a prefix to print at the start of every line + // when generating text output using the Text method. + TextPrefix string + + // TextCodePrefix is the prefix to print at the start of each + // preformatted (code block) line when generating text output, + // instead of (not in addition to) TextPrefix. + // If TextCodePrefix is the empty string, it defaults to TextPrefix+"\t". + TextCodePrefix string + + // TextWidth is the maximum width text line to generate, + // measured in Unicode code points, + // excluding TextPrefix and the newline character. + // If TextWidth is zero, it defaults to 80 minus the number of code points in TextPrefix. + // If TextWidth is negative, there is no limit. + TextWidth int +} + +func (p *Printer) headingLevel() int { + if p.HeadingLevel <= 0 { + return 3 + } + return p.HeadingLevel +} + +func (p *Printer) headingID(h *Heading) string { + if p.HeadingID == nil { + return h.DefaultID() + } + return p.HeadingID(h) +} + +func (p *Printer) docLinkURL(link *DocLink) string { + if p.DocLinkURL != nil { + return p.DocLinkURL(link) + } + return link.DefaultURL(p.DocLinkBaseURL) +} + +// DefaultURL constructs and returns the documentation URL for l, +// using baseURL as a prefix for links to other packages. +// +// The possible forms returned by DefaultURL are: +// - baseURL/ImportPath, for a link to another package +// - baseURL/ImportPath#Name, for a link to a const, func, type, or var in another package +// - baseURL/ImportPath#Recv.Name, for a link to a method in another package +// - #Name, for a link to a const, func, type, or var in this package +// - #Recv.Name, for a link to a method in this package +// +// If baseURL ends in a trailing slash, then DefaultURL inserts +// a slash between ImportPath and # in the anchored forms. +// For example, here are some baseURL values and URLs they can generate: +// +// "/pkg/" → "/pkg/math/#Sqrt" +// "/pkg" → "/pkg/math#Sqrt" +// "/" → "/math/#Sqrt" +// "" → "/math#Sqrt" +func (l *DocLink) DefaultURL(baseURL string) string { + if l.ImportPath != "" { + slash := "" + if strings.HasSuffix(baseURL, "/") { + slash = "/" + } else { + baseURL += "/" + } + switch { + case l.Name == "": + return baseURL + l.ImportPath + slash + case l.Recv != "": + return baseURL + l.ImportPath + slash + "#" + l.Recv + "." + l.Name + default: + return baseURL + l.ImportPath + slash + "#" + l.Name + } + } + if l.Recv != "" { + return "#" + l.Recv + "." + l.Name + } + return "#" + l.Name +} + +// DefaultID returns the default anchor ID for the heading h. +// +// The default anchor ID is constructed by converting every +// rune that is not alphanumeric ASCII to an underscore +// and then adding the prefix “hdr-”. +// For example, if the heading text is “Go Doc Comments”, +// the default ID is “hdr-Go_Doc_Comments”. 
+func (h *Heading) DefaultID() string { + // Note: The “hdr-” prefix is important to avoid DOM clobbering attacks. + // See https://pkg.go.dev/github.com/google/safehtml#Identifier. + var out strings.Builder + var p textPrinter + p.oneLongLine(&out, h.Text) + s := strings.TrimSpace(out.String()) + if s == "" { + return "" + } + out.Reset() + out.WriteString("hdr-") + for _, r := range s { + if r < 0x80 && isIdentASCII(byte(r)) { + out.WriteByte(byte(r)) + } else { + out.WriteByte('_') + } + } + return out.String() +} + +type commentPrinter struct { + *Printer +} + +// Comment returns the standard Go formatting of the Doc, +// without any comment markers. +func (p *Printer) Comment(d *Doc) []byte { + cp := &commentPrinter{Printer: p} + var out bytes.Buffer + for i, x := range d.Content { + if i > 0 && blankBefore(x) { + out.WriteString("\n") + } + cp.block(&out, x) + } + + // Print one block containing all the link definitions that were used, + // and then a second block containing all the unused ones. + // This makes it easy to clean up the unused ones: gofmt and + // delete the final block. And it's a nice visual signal without + // affecting the way the comment formats for users. + for i := 0; i < 2; i++ { + used := i == 0 + first := true + for _, def := range d.Links { + if def.Used == used { + if first { + out.WriteString("\n") + first = false + } + out.WriteString("[") + out.WriteString(def.Text) + out.WriteString("]: ") + out.WriteString(def.URL) + out.WriteString("\n") + } + } + } + + return out.Bytes() +} + +// blankBefore reports whether the block x requires a blank line before it. +// All blocks do, except for Lists that return false from x.BlankBefore(). +func blankBefore(x Block) bool { + if x, ok := x.(*List); ok { + return x.BlankBefore() + } + return true +} + +// block prints the block x to out. +func (p *commentPrinter) block(out *bytes.Buffer, x Block) { + switch x := x.(type) { + default: + fmt.Fprintf(out, "?%T", x) + + case *Paragraph: + p.text(out, "", x.Text) + out.WriteString("\n") + + case *Heading: + out.WriteString("# ") + p.text(out, "", x.Text) + out.WriteString("\n") + + case *Code: + md := x.Text + for md != "" { + var line string + line, md, _ = strings.Cut(md, "\n") + if line != "" { + out.WriteString("\t") + out.WriteString(line) + } + out.WriteString("\n") + } + + case *List: + loose := x.BlankBetween() + for i, item := range x.Items { + if i > 0 && loose { + out.WriteString("\n") + } + out.WriteString(" ") + if item.Number == "" { + out.WriteString(" - ") + } else { + out.WriteString(item.Number) + out.WriteString(". ") + } + for i, blk := range item.Content { + const fourSpace = " " + if i > 0 { + out.WriteString("\n" + fourSpace) + } + p.text(out, fourSpace, blk.(*Paragraph).Text) + out.WriteString("\n") + } + } + } +} + +// text prints the text sequence x to out. +func (p *commentPrinter) text(out *bytes.Buffer, indent string, x []Text) { + for _, t := range x { + switch t := t.(type) { + case Plain: + p.indent(out, indent, string(t)) + case Italic: + p.indent(out, indent, string(t)) + case *Link: + if t.Auto { + p.text(out, indent, t.Text) + } else { + out.WriteString("[") + p.text(out, indent, t.Text) + out.WriteString("]") + } + case *DocLink: + out.WriteString("[") + p.text(out, indent, t.Text) + out.WriteString("]") + } + } +} + +// indent prints s to out, indenting with the indent string +// after each newline in s. 
+func (p *commentPrinter) indent(out *bytes.Buffer, indent, s string) { + for s != "" { + line, rest, ok := strings.Cut(s, "\n") + out.WriteString(line) + if ok { + out.WriteString("\n") + out.WriteString(indent) + } + s = rest + } +} diff --git a/vendor/mvdan.cc/gofumpt/internal/govendor/go/doc/comment/std.go b/vendor/mvdan.cc/gofumpt/internal/govendor/go/doc/comment/std.go new file mode 100644 index 0000000000..d128eda8c5 --- /dev/null +++ b/vendor/mvdan.cc/gofumpt/internal/govendor/go/doc/comment/std.go @@ -0,0 +1,47 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by 'go generate' DO NOT EDIT. +//disabled go:generate ./mkstd.sh + +package comment + +var stdPkgs = []string{ + "bufio", + "bytes", + "cmp", + "context", + "crypto", + "embed", + "encoding", + "errors", + "expvar", + "flag", + "fmt", + "hash", + "html", + "image", + "io", + "log", + "maps", + "math", + "mime", + "net", + "os", + "path", + "plugin", + "reflect", + "regexp", + "runtime", + "slices", + "sort", + "strconv", + "strings", + "sync", + "syscall", + "testing", + "time", + "unicode", + "unsafe", +} diff --git a/vendor/mvdan.cc/gofumpt/internal/govendor/go/doc/comment/text.go b/vendor/mvdan.cc/gofumpt/internal/govendor/go/doc/comment/text.go new file mode 100644 index 0000000000..6f9c2e201d --- /dev/null +++ b/vendor/mvdan.cc/gofumpt/internal/govendor/go/doc/comment/text.go @@ -0,0 +1,337 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package comment + +import ( + "bytes" + "fmt" + "sort" + "strings" + "unicode/utf8" +) + +// A textPrinter holds the state needed for printing a Doc as plain text. +type textPrinter struct { + *Printer + long strings.Builder + prefix string + codePrefix string + width int +} + +// Text returns a textual formatting of the Doc. +// See the [Printer] documentation for ways to customize the text output. +func (p *Printer) Text(d *Doc) []byte { + tp := &textPrinter{ + Printer: p, + prefix: p.TextPrefix, + codePrefix: p.TextCodePrefix, + width: p.TextWidth, + } + if tp.codePrefix == "" { + tp.codePrefix = p.TextPrefix + "\t" + } + if tp.width == 0 { + tp.width = 80 - utf8.RuneCountInString(tp.prefix) + } + + var out bytes.Buffer + for i, x := range d.Content { + if i > 0 && blankBefore(x) { + out.WriteString(tp.prefix) + writeNL(&out) + } + tp.block(&out, x) + } + anyUsed := false + for _, def := range d.Links { + if def.Used { + anyUsed = true + break + } + } + if anyUsed { + writeNL(&out) + for _, def := range d.Links { + if def.Used { + fmt.Fprintf(&out, "[%s]: %s\n", def.Text, def.URL) + } + } + } + return out.Bytes() +} + +// writeNL calls out.WriteByte('\n') +// but first trims trailing spaces on the previous line. +func writeNL(out *bytes.Buffer) { + // Trim trailing spaces. + data := out.Bytes() + n := 0 + for n < len(data) && (data[len(data)-n-1] == ' ' || data[len(data)-n-1] == '\t') { + n++ + } + if n > 0 { + out.Truncate(len(data) - n) + } + out.WriteByte('\n') +} + +// block prints the block x to out. 
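
As a usage sketch for the text renderer introduced above, the public go/doc/comment package (which this vendored copy mirrors) can parse a doc comment and re-render it as wrapped plain text; the comment text below is invented for illustration.

package main

import (
	"go/doc/comment"
	"os"
)

func main() {
	var p comment.Parser
	doc := p.Parse("Sqrt returns the square root of x.\n\nSee [Pow] for raising to arbitrary powers.\n")

	pr := comment.Printer{
		TextPrefix: "\t", // written at the start of every output line
		TextWidth:  60,   // wrap at 60 code points, not counting the prefix
	}
	os.Stdout.Write(pr.Text(doc))
}
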
+func (p *textPrinter) block(out *bytes.Buffer, x Block) { + switch x := x.(type) { + default: + fmt.Fprintf(out, "?%T\n", x) + + case *Paragraph: + out.WriteString(p.prefix) + p.text(out, "", x.Text) + + case *Heading: + out.WriteString(p.prefix) + out.WriteString("# ") + p.text(out, "", x.Text) + + case *Code: + text := x.Text + for text != "" { + var line string + line, text, _ = strings.Cut(text, "\n") + if line != "" { + out.WriteString(p.codePrefix) + out.WriteString(line) + } + writeNL(out) + } + + case *List: + loose := x.BlankBetween() + for i, item := range x.Items { + if i > 0 && loose { + out.WriteString(p.prefix) + writeNL(out) + } + out.WriteString(p.prefix) + out.WriteString(" ") + if item.Number == "" { + out.WriteString(" - ") + } else { + out.WriteString(item.Number) + out.WriteString(". ") + } + for i, blk := range item.Content { + const fourSpace = " " + if i > 0 { + writeNL(out) + out.WriteString(p.prefix) + out.WriteString(fourSpace) + } + p.text(out, fourSpace, blk.(*Paragraph).Text) + } + } + } +} + +// text prints the text sequence x to out. +func (p *textPrinter) text(out *bytes.Buffer, indent string, x []Text) { + p.oneLongLine(&p.long, x) + words := strings.Fields(p.long.String()) + p.long.Reset() + + var seq []int + if p.width < 0 || len(words) == 0 { + seq = []int{0, len(words)} // one long line + } else { + seq = wrap(words, p.width-utf8.RuneCountInString(indent)) + } + for i := 0; i+1 < len(seq); i++ { + if i > 0 { + out.WriteString(p.prefix) + out.WriteString(indent) + } + for j, w := range words[seq[i]:seq[i+1]] { + if j > 0 { + out.WriteString(" ") + } + out.WriteString(w) + } + writeNL(out) + } +} + +// oneLongLine prints the text sequence x to out as one long line, +// without worrying about line wrapping. +// Explicit links have the [ ] dropped to improve readability. +func (p *textPrinter) oneLongLine(out *strings.Builder, x []Text) { + for _, t := range x { + switch t := t.(type) { + case Plain: + out.WriteString(string(t)) + case Italic: + out.WriteString(string(t)) + case *Link: + p.oneLongLine(out, t.Text) + case *DocLink: + p.oneLongLine(out, t.Text) + } + } +} + +// wrap wraps words into lines of at most max runes, +// minimizing the sum of the squares of the leftover lengths +// at the end of each line (except the last, of course), +// with a preference for ending lines at punctuation (.,:;). +// +// The returned slice gives the indexes of the first words +// on each line in the wrapped text with a final entry of len(words). +// Thus the lines are words[seq[0]:seq[1]], words[seq[1]:seq[2]], +// ..., words[seq[len(seq)-2]:seq[len(seq)-1]]. +// +// The implementation runs in O(n log n) time, where n = len(words), +// using the algorithm described in D. S. Hirschberg and L. L. Larmore, +// “[The least weight subsequence problem],” FOCS 1985, pp. 137-143. +// +// [The least weight subsequence problem]: https://doi.org/10.1109/SFCS.1985.60 +func wrap(words []string, max int) (seq []int) { + // The algorithm requires that our scoring function be concave, + // meaning that for all i₀ ≤ i₁ < j₀ ≤ j₁, + // weight(i₀, j₀) + weight(i₁, j₁) ≤ weight(i₀, j₁) + weight(i₁, j₀). + // + // Our weights are two-element pairs [hi, lo] + // ordered by elementwise comparison. + // The hi entry counts the weight for lines that are longer than max, + // and the lo entry counts the weight for lines that are not. + // This forces the algorithm to first minimize the number of lines + // that are longer than max, which correspond to lines with + // single very long words. 
Having done that, it can move on to + // minimizing the lo score, which is more interesting. + // + // The lo score is the sum for each line of the square of the + // number of spaces remaining at the end of the line and a + // penalty of 64 given out for not ending the line in a + // punctuation character (.,:;). + // The penalty is somewhat arbitrarily chosen by trying + // different amounts and judging how nice the wrapped text looks. + // Roughly speaking, using 64 means that we are willing to + // end a line with eight blank spaces in order to end at a + // punctuation character, even if the next word would fit in + // those spaces. + // + // We care about ending in punctuation characters because + // it makes the text easier to skim if not too many sentences + // or phrases begin with a single word on the previous line. + + // A score is the score (also called weight) for a given line. + // add and cmp add and compare scores. + type score struct { + hi int64 + lo int64 + } + add := func(s, t score) score { return score{s.hi + t.hi, s.lo + t.lo} } + cmp := func(s, t score) int { + switch { + case s.hi < t.hi: + return -1 + case s.hi > t.hi: + return +1 + case s.lo < t.lo: + return -1 + case s.lo > t.lo: + return +1 + } + return 0 + } + + // total[j] is the total number of runes + // (including separating spaces) in words[:j]. + total := make([]int, len(words)+1) + total[0] = 0 + for i, s := range words { + total[1+i] = total[i] + utf8.RuneCountInString(s) + 1 + } + + // weight returns weight(i, j). + weight := func(i, j int) score { + // On the last line, there is zero weight for being too short. + n := total[j] - 1 - total[i] + if j == len(words) && n <= max { + return score{0, 0} + } + + // Otherwise the weight is the penalty plus the square of the number of + // characters remaining on the line or by which the line goes over. + // In the latter case, that value goes in the hi part of the score. + // (See note above.) + p := wrapPenalty(words[j-1]) + v := int64(max-n) * int64(max-n) + if n > max { + return score{v, p} + } + return score{0, v + p} + } + + // The rest of this function is “The Basic Algorithm” from + // Hirschberg and Larmore's conference paper, + // using the same names as in the paper. + f := []score{{0, 0}} + g := func(i, j int) score { return add(f[i], weight(i, j)) } + + bridge := func(a, b, c int) bool { + k := c + sort.Search(len(words)+1-c, func(k int) bool { + k += c + return cmp(g(a, k), g(b, k)) > 0 + }) + if k > len(words) { + return true + } + return cmp(g(c, k), g(b, k)) <= 0 + } + + // d is a one-ended deque implemented as a slice. + d := make([]int, 1, len(words)) + d[0] = 0 + bestleft := make([]int, 1, len(words)) + bestleft[0] = -1 + for m := 1; m < len(words); m++ { + f = append(f, g(d[0], m)) + bestleft = append(bestleft, d[0]) + for len(d) > 1 && cmp(g(d[1], m+1), g(d[0], m+1)) <= 0 { + d = d[1:] // “Retire” + } + for len(d) > 1 && bridge(d[len(d)-2], d[len(d)-1], m) { + d = d[:len(d)-1] // “Fire” + } + if cmp(g(m, len(words)), g(d[len(d)-1], len(words))) < 0 { + d = append(d, m) // “Hire” + // The next few lines are not in the paper but are necessary + // to handle two-word inputs correctly. It appears to be + // just a bug in the paper's pseudocode. + if len(d) == 2 && cmp(g(d[1], m+1), g(d[0], m+1)) <= 0 { + d = d[1:] + } + } + } + bestleft = append(bestleft, d[0]) + + // Recover least weight sequence from bestleft. 
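	// Illustrative note, not part of the upstream source: seq encodes line
	// breaks by word index, as the function comment above describes. For
	// example, for words ["the", "quick", "brown", "fox"], a result of
	// seq = [0, 2, 4] means two lines, words[0:2] ("the quick") and
	// words[2:4] ("brown fox"), with the final entry always equal to
	// len(words).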
+ n := 1 + for m := len(words); m > 0; m = bestleft[m] { + n++ + } + seq = make([]int, n) + for m := len(words); m > 0; m = bestleft[m] { + n-- + seq[n] = m + } + return seq +} + +// wrapPenalty is the penalty for inserting a line break after word s. +func wrapPenalty(s string) int64 { + switch s[len(s)-1] { + case '.', ',', ':', ';': + return 0 + } + return 64 +} diff --git a/vendor/mvdan.cc/gofumpt/internal/govendor/go/format/format.go b/vendor/mvdan.cc/gofumpt/internal/govendor/go/format/format.go new file mode 100644 index 0000000000..5540686ed0 --- /dev/null +++ b/vendor/mvdan.cc/gofumpt/internal/govendor/go/format/format.go @@ -0,0 +1,134 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package format implements standard formatting of Go source. +// +// Note that formatting of Go source code changes over time, so tools relying on +// consistent formatting should execute a specific version of the gofmt binary +// instead of using this package. That way, the formatting will be stable, and +// the tools won't need to be recompiled each time gofmt changes. +// +// For example, pre-submit checks that use this package directly would behave +// differently depending on what Go version each developer uses, causing the +// check to be inherently fragile. +package format + +import ( + "bytes" + "fmt" + "go/ast" + "go/parser" + "go/token" + "io" + + "mvdan.cc/gofumpt/internal/govendor/go/printer" +) + +// Keep these in sync with cmd/gofmt/gofmt.go. +const ( + tabWidth = 8 + printerMode = printer.UseSpaces | printer.TabIndent | printerNormalizeNumbers + + // printerNormalizeNumbers means to canonicalize number literal prefixes + // and exponents while printing. See https://golang.org/doc/go1.13#gofmt. + // + // This value is defined in mvdan.cc/gofumpt/internal/govendor/go/printer specifically for mvdan.cc/gofumpt/internal/govendor/go/format and cmd/gofmt. + printerNormalizeNumbers = 1 << 30 +) + +var config = printer.Config{Mode: printerMode, Tabwidth: tabWidth} + +const parserMode = parser.ParseComments | parser.SkipObjectResolution + +// Node formats node in canonical gofmt style and writes the result to dst. +// +// The node type must be *ast.File, *printer.CommentedNode, []ast.Decl, +// []ast.Stmt, or assignment-compatible to ast.Expr, ast.Decl, ast.Spec, +// or ast.Stmt. Node does not modify node. Imports are not sorted for +// nodes representing partial source files (for instance, if the node is +// not an *ast.File or a *printer.CommentedNode not wrapping an *ast.File). +// +// The function may return early (before the entire result is written) +// and return a formatting error, for instance due to an incorrect AST. +func Node(dst io.Writer, fset *token.FileSet, node any) error { + // Determine if we have a complete source file (file != nil). + var file *ast.File + var cnode *printer.CommentedNode + switch n := node.(type) { + case *ast.File: + file = n + case *printer.CommentedNode: + if f, ok := n.Node.(*ast.File); ok { + file = f + cnode = n + } + } + + // Sort imports if necessary. + if file != nil && hasUnsortedImports(file) { + // Make a copy of the AST because ast.SortImports is destructive. + // TODO(gri) Do this more efficiently. + var buf bytes.Buffer + err := config.Fprint(&buf, fset, file) + if err != nil { + return err + } + file, err = parser.ParseFile(fset, "", buf.Bytes(), parserMode) + if err != nil { + // We should never get here. 
If we do, provide good diagnostic. + return fmt.Errorf("format.Node internal error (%s)", err) + } + ast.SortImports(fset, file) + + // Use new file with sorted imports. + node = file + if cnode != nil { + node = &printer.CommentedNode{Node: file, Comments: cnode.Comments} + } + } + + return config.Fprint(dst, fset, node) +} + +// Source formats src in canonical gofmt style and returns the result +// or an (I/O or syntax) error. src is expected to be a syntactically +// correct Go source file, or a list of Go declarations or statements. +// +// If src is a partial source file, the leading and trailing space of src +// is applied to the result (such that it has the same leading and trailing +// space as src), and the result is indented by the same amount as the first +// line of src containing code. Imports are not sorted for partial source files. +func Source(src []byte) ([]byte, error) { + fset := token.NewFileSet() + file, sourceAdj, indentAdj, err := parse(fset, "", src, true) + if err != nil { + return nil, err + } + + if sourceAdj == nil { + // Complete source file. + // TODO(gri) consider doing this always. + ast.SortImports(fset, file) + } + + return format(fset, file, sourceAdj, indentAdj, src, config) +} + +func hasUnsortedImports(file *ast.File) bool { + for _, d := range file.Decls { + d, ok := d.(*ast.GenDecl) + if !ok || d.Tok != token.IMPORT { + // Not an import declaration, so we're done. + // Imports are always first. + return false + } + if d.Lparen.IsValid() { + // For now assume all grouped imports are unsorted. + // TODO(gri) Should check if they are sorted already. + return true + } + // Ungrouped imports are sorted by default. + } + return false +} diff --git a/vendor/mvdan.cc/gofumpt/internal/govendor/go/format/internal.go b/vendor/mvdan.cc/gofumpt/internal/govendor/go/format/internal.go new file mode 100644 index 0000000000..df03587143 --- /dev/null +++ b/vendor/mvdan.cc/gofumpt/internal/govendor/go/format/internal.go @@ -0,0 +1,177 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// TODO(gri): This file and the file src/cmd/gofmt/internal.go are +// the same (but for this comment and the package name). Do not modify +// one without the other. Determine if we can factor out functionality +// in a public API. See also #11844 for context. + +package format + +import ( + "bytes" + "go/ast" + "go/parser" + "go/token" + "strings" + + "mvdan.cc/gofumpt/internal/govendor/go/printer" +) + +// parse parses src, which was read from the named file, +// as a Go source file, declaration, or statement list. +func parse(fset *token.FileSet, filename string, src []byte, fragmentOk bool) ( + file *ast.File, + sourceAdj func(src []byte, indent int) []byte, + indentAdj int, + err error, +) { + // Try as whole source file. + file, err = parser.ParseFile(fset, filename, src, parserMode) + // If there's no error, return. If the error is that the source file didn't begin with a + // package line and source fragments are ok, fall through to + // try as a source fragment. Stop and return on any other error. + if err == nil || !fragmentOk || !strings.Contains(err.Error(), "expected 'package'") { + return + } + + // If this is a declaration list, make it a source file + // by inserting a package clause. + // Insert using a ';', not a newline, so that the line numbers + // in psrc match the ones in src. + psrc := append([]byte("package p;"), src...) 
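	// Illustrative note, not part of the upstream source: for a declaration
	// fragment such as src = "const k = 1\n", psrc is "package p;const k = 1\n".
	// Inserting ';' rather than '\n' keeps line numbers in psrc identical to
	// those in src; the printer later turns the ';' into a newline, which is
	// why sourceAdj below strips the "package p\n" prefix.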
+ file, err = parser.ParseFile(fset, filename, psrc, parserMode) + if err == nil { + sourceAdj = func(src []byte, indent int) []byte { + // Remove the package clause. + // Gofmt has turned the ';' into a '\n'. + src = src[indent+len("package p\n"):] + return bytes.TrimSpace(src) + } + return + } + // If the error is that the source file didn't begin with a + // declaration, fall through to try as a statement list. + // Stop and return on any other error. + if !strings.Contains(err.Error(), "expected declaration") { + return + } + + // If this is a statement list, make it a source file + // by inserting a package clause and turning the list + // into a function body. This handles expressions too. + // Insert using a ';', not a newline, so that the line numbers + // in fsrc match the ones in src. Add an extra '\n' before the '}' + // to make sure comments are flushed before the '}'. + fsrc := append(append([]byte("package p; func _() {"), src...), '\n', '\n', '}') + file, err = parser.ParseFile(fset, filename, fsrc, parserMode) + if err == nil { + sourceAdj = func(src []byte, indent int) []byte { + // Cap adjusted indent to zero. + if indent < 0 { + indent = 0 + } + // Remove the wrapping. + // Gofmt has turned the "; " into a "\n\n". + // There will be two non-blank lines with indent, hence 2*indent. + src = src[2*indent+len("package p\n\nfunc _() {"):] + // Remove only the "}\n" suffix: remaining whitespaces will be trimmed anyway + src = src[:len(src)-len("}\n")] + return bytes.TrimSpace(src) + } + // Gofmt has also indented the function body one level. + // Adjust that with indentAdj. + indentAdj = -1 + } + + // Succeeded, or out of options. + return +} + +// format formats the given package file originally obtained from src +// and adjusts the result based on the original source via sourceAdj +// and indentAdj. +func format( + fset *token.FileSet, + file *ast.File, + sourceAdj func(src []byte, indent int) []byte, + indentAdj int, + src []byte, + cfg printer.Config, +) ([]byte, error) { + if sourceAdj == nil { + // Complete source file. + var buf bytes.Buffer + err := cfg.Fprint(&buf, fset, file) + if err != nil { + return nil, err + } + return buf.Bytes(), nil + } + + // Partial source file. + // Determine and prepend leading space. + i, j := 0, 0 + for j < len(src) && isSpace(src[j]) { + if src[j] == '\n' { + i = j + 1 // byte offset of last line in leading space + } + j++ + } + var res []byte + res = append(res, src[:i]...) + + // Determine and prepend indentation of first code line. + // Spaces are ignored unless there are no tabs, + // in which case spaces count as one tab. + indent := 0 + hasSpace := false + for _, b := range src[i:j] { + switch b { + case ' ': + hasSpace = true + case '\t': + indent++ + } + } + if indent == 0 && hasSpace { + indent = 1 + } + for i := 0; i < indent; i++ { + res = append(res, '\t') + } + + // Format the source. + // Write it without any leading and trailing space. + cfg.Indent = indent + indentAdj + var buf bytes.Buffer + err := cfg.Fprint(&buf, fset, file) + if err != nil { + return nil, err + } + out := sourceAdj(buf.Bytes(), cfg.Indent) + + // If the adjusted output is empty, the source + // was empty but (possibly) for white space. + // The result is the incoming source. + if len(out) == 0 { + return src, nil + } + + // Otherwise, append output to leading space. + res = append(res, out...) + + // Determine and append trailing space. 
+ i = len(src) + for i > 0 && isSpace(src[i-1]) { + i-- + } + return append(res, src[i:]...), nil +} + +// isSpace reports whether the byte is a space character. +// isSpace defines a space as being among the following bytes: ' ', '\t', '\n' and '\r'. +func isSpace(b byte) bool { + return b == ' ' || b == '\t' || b == '\n' || b == '\r' +} diff --git a/vendor/mvdan.cc/gofumpt/internal/govendor/go/printer/comment.go b/vendor/mvdan.cc/gofumpt/internal/govendor/go/printer/comment.go new file mode 100644 index 0000000000..1f0e7df9dd --- /dev/null +++ b/vendor/mvdan.cc/gofumpt/internal/govendor/go/printer/comment.go @@ -0,0 +1,156 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package printer + +import ( + "go/ast" + "strings" + + "mvdan.cc/gofumpt/internal/govendor/go/doc/comment" +) + +// formatDocComment reformats the doc comment list, +// returning the canonical formatting. +func formatDocComment(list []*ast.Comment) []*ast.Comment { + // Extract comment text (removing comment markers). + var kind, text string + var directives []*ast.Comment + if len(list) == 1 && strings.HasPrefix(list[0].Text, "/*") { + kind = "/*" + text = list[0].Text + if !strings.Contains(text, "\n") || allStars(text) { + // Single-line /* .. */ comment in doc comment position, + // or multiline old-style comment like + // /* + // * Comment + // * text here. + // */ + // Should not happen, since it will not work well as a + // doc comment, but if it does, just ignore: + // reformatting it will only make the situation worse. + return list + } + text = text[2 : len(text)-2] // cut /* and */ + } else if strings.HasPrefix(list[0].Text, "//") { + kind = "//" + var b strings.Builder + for _, c := range list { + after, found := strings.CutPrefix(c.Text, "//") + if !found { + return list + } + // Accumulate //go:build etc lines separately. + if isDirective(after) { + directives = append(directives, c) + continue + } + b.WriteString(strings.TrimPrefix(after, " ")) + b.WriteString("\n") + } + text = b.String() + } else { + // Not sure what this is, so leave alone. + return list + } + + if text == "" { + return list + } + + // Parse comment and reformat as text. + var p comment.Parser + d := p.Parse(text) + + var pr comment.Printer + text = string(pr.Comment(d)) + + // For /* */ comment, return one big comment with text inside. + slash := list[0].Slash + if kind == "/*" { + c := &ast.Comment{ + Slash: slash, + Text: "/*\n" + text + "*/", + } + return []*ast.Comment{c} + } + + // For // comment, return sequence of // lines. + var out []*ast.Comment + for text != "" { + var line string + line, text, _ = strings.Cut(text, "\n") + if line == "" { + line = "//" + } else if strings.HasPrefix(line, "\t") { + line = "//" + line + } else { + line = "// " + line + } + out = append(out, &ast.Comment{ + Slash: slash, + Text: line, + }) + } + if len(directives) > 0 { + out = append(out, &ast.Comment{ + Slash: slash, + Text: "//", + }) + for _, c := range directives { + out = append(out, &ast.Comment{ + Slash: slash, + Text: c.Text, + }) + } + } + return out +} + +// isDirective reports whether c is a comment directive. +// See go.dev/issue/37974. +// This code is also in go/ast. +func isDirective(c string) bool { + // "//line " is a line directive. + // "//extern " is for gccgo. + // "//export " is for cgo. + // (The // has been removed.) 
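	// Illustrative note, not part of the upstream source: with the leading
	// "//" already stripped, "line 10", "export mycfunc", and "go:build linux"
	// are all reported as directives (the last by the [a-z0-9]+:[a-z0-9] check
	// below), while ordinary prose such as "returns the sum of x and y" is not.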
+ if strings.HasPrefix(c, "line ") || strings.HasPrefix(c, "extern ") || strings.HasPrefix(c, "export ") { + return true + } + + // "//[a-z0-9]+:[a-z0-9]" + // (The // has been removed.) + colon := strings.Index(c, ":") + if colon <= 0 || colon+1 >= len(c) { + return false + } + for i := 0; i <= colon+1; i++ { + if i == colon { + continue + } + b := c[i] + if !('a' <= b && b <= 'z' || '0' <= b && b <= '9') { + return false + } + } + return true +} + +// allStars reports whether text is the interior of an +// old-style /* */ comment with a star at the start of each line. +func allStars(text string) bool { + for i := 0; i < len(text); i++ { + if text[i] == '\n' { + j := i + 1 + for j < len(text) && (text[j] == ' ' || text[j] == '\t') { + j++ + } + if j < len(text) && text[j] != '*' { + return false + } + } + } + return true +} diff --git a/vendor/mvdan.cc/gofumpt/internal/govendor/go/printer/gobuild.go b/vendor/mvdan.cc/gofumpt/internal/govendor/go/printer/gobuild.go new file mode 100644 index 0000000000..f00492d077 --- /dev/null +++ b/vendor/mvdan.cc/gofumpt/internal/govendor/go/printer/gobuild.go @@ -0,0 +1,170 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package printer + +import ( + "go/build/constraint" + "sort" + "text/tabwriter" +) + +func (p *printer) fixGoBuildLines() { + if len(p.goBuild)+len(p.plusBuild) == 0 { + return + } + + // Find latest possible placement of //go:build and // +build comments. + // That's just after the last blank line before we find a non-comment. + // (We'll add another blank line after our comment block.) + // When we start dropping // +build comments, we can skip over /* */ comments too. + // Note that we are processing tabwriter input, so every comment + // begins and ends with a tabwriter.Escape byte. + // And some newlines have turned into \f bytes. + insert := 0 + for pos := 0; ; { + // Skip leading space at beginning of line. + blank := true + for pos < len(p.output) && (p.output[pos] == ' ' || p.output[pos] == '\t') { + pos++ + } + // Skip over // comment if any. + if pos+3 < len(p.output) && p.output[pos] == tabwriter.Escape && p.output[pos+1] == '/' && p.output[pos+2] == '/' { + blank = false + for pos < len(p.output) && !isNL(p.output[pos]) { + pos++ + } + } + // Skip over \n at end of line. + if pos >= len(p.output) || !isNL(p.output[pos]) { + break + } + pos++ + + if blank { + insert = pos + } + } + + // If there is a //go:build comment before the place we identified, + // use that point instead. (Earlier in the file is always fine.) + if len(p.goBuild) > 0 && p.goBuild[0] < insert { + insert = p.goBuild[0] + } else if len(p.plusBuild) > 0 && p.plusBuild[0] < insert { + insert = p.plusBuild[0] + } + + var x constraint.Expr + switch len(p.goBuild) { + case 0: + // Synthesize //go:build expression from // +build lines. + for _, pos := range p.plusBuild { + y, err := constraint.Parse(p.commentTextAt(pos)) + if err != nil { + x = nil + break + } + if x == nil { + x = y + } else { + x = &constraint.AndExpr{X: x, Y: y} + } + } + case 1: + // Parse //go:build expression. + x, _ = constraint.Parse(p.commentTextAt(p.goBuild[0])) + } + + var block []byte + if x == nil { + // Don't have a valid //go:build expression to treat as truth. + // Bring all the lines together but leave them alone. + // Note that these are already tabwriter-escaped. + for _, pos := range p.goBuild { + block = append(block, p.lineAt(pos)...) 
+ } + for _, pos := range p.plusBuild { + block = append(block, p.lineAt(pos)...) + } + } else { + block = append(block, tabwriter.Escape) + block = append(block, "//go:build "...) + block = append(block, x.String()...) + block = append(block, tabwriter.Escape, '\n') + if len(p.plusBuild) > 0 { + lines, err := constraint.PlusBuildLines(x) + if err != nil { + lines = []string{"// +build error: " + err.Error()} + } + for _, line := range lines { + block = append(block, tabwriter.Escape) + block = append(block, line...) + block = append(block, tabwriter.Escape, '\n') + } + } + } + block = append(block, '\n') + + // Build sorted list of lines to delete from remainder of output. + toDelete := append(p.goBuild, p.plusBuild...) + sort.Ints(toDelete) + + // Collect output after insertion point, with lines deleted, into after. + var after []byte + start := insert + for _, end := range toDelete { + if end < start { + continue + } + after = appendLines(after, p.output[start:end]) + start = end + len(p.lineAt(end)) + } + after = appendLines(after, p.output[start:]) + if n := len(after); n >= 2 && isNL(after[n-1]) && isNL(after[n-2]) { + after = after[:n-1] + } + + p.output = p.output[:insert] + p.output = append(p.output, block...) + p.output = append(p.output, after...) +} + +// appendLines is like append(x, y...) +// but it avoids creating doubled blank lines, +// which would not be gofmt-standard output. +// It assumes that only whole blocks of lines are being appended, +// not line fragments. +func appendLines(x, y []byte) []byte { + if len(y) > 0 && isNL(y[0]) && // y starts in blank line + (len(x) == 0 || len(x) >= 2 && isNL(x[len(x)-1]) && isNL(x[len(x)-2])) { // x is empty or ends in blank line + y = y[1:] // delete y's leading blank line + } + return append(x, y...) +} + +func (p *printer) lineAt(start int) []byte { + pos := start + for pos < len(p.output) && !isNL(p.output[pos]) { + pos++ + } + if pos < len(p.output) { + pos++ + } + return p.output[start:pos] +} + +func (p *printer) commentTextAt(start int) string { + if start < len(p.output) && p.output[start] == tabwriter.Escape { + start++ + } + pos := start + for pos < len(p.output) && p.output[pos] != tabwriter.Escape && !isNL(p.output[pos]) { + pos++ + } + return string(p.output[start:pos]) +} + +func isNL(b byte) bool { + return b == '\n' || b == '\f' +} diff --git a/vendor/mvdan.cc/gofumpt/internal/govendor/go/printer/nodes.go b/vendor/mvdan.cc/gofumpt/internal/govendor/go/printer/nodes.go new file mode 100644 index 0000000000..a58525b855 --- /dev/null +++ b/vendor/mvdan.cc/gofumpt/internal/govendor/go/printer/nodes.go @@ -0,0 +1,2001 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file implements printing of AST nodes; specifically +// expressions, statements, declarations, and files. It uses +// the print functionality implemented in printer.go. + +package printer + +import ( + "go/ast" + "go/token" + "math" + "strconv" + "strings" + "unicode" + "unicode/utf8" +) + +// Formatting issues: +// - better comment formatting for /*-style comments at the end of a line (e.g. 
a declaration) +// when the comment spans multiple lines; if such a comment is just two lines, formatting is +// not idempotent +// - formatting of expression lists +// - should use blank instead of tab to separate one-line function bodies from +// the function header unless there is a group of consecutive one-liners + +// ---------------------------------------------------------------------------- +// Common AST nodes. + +// Print as many newlines as necessary (but at least min newlines) to get to +// the current line. ws is printed before the first line break. If newSection +// is set, the first line break is printed as formfeed. Returns 0 if no line +// breaks were printed, returns 1 if there was exactly one newline printed, +// and returns a value > 1 if there was a formfeed or more than one newline +// printed. +// +// TODO(gri): linebreak may add too many lines if the next statement at "line" +// is preceded by comments because the computation of n assumes +// the current position before the comment and the target position +// after the comment. Thus, after interspersing such comments, the +// space taken up by them is not considered to reduce the number of +// linebreaks. At the moment there is no easy way to know about +// future (not yet interspersed) comments in this function. +func (p *printer) linebreak(line, min int, ws whiteSpace, newSection bool) (nbreaks int) { + n := nlimit(line - p.pos.Line) + if n < min { + n = min + } + if n > 0 { + p.print(ws) + if newSection { + p.print(formfeed) + n-- + nbreaks = 2 + } + nbreaks += n + for ; n > 0; n-- { + p.print(newline) + } + } + return +} + +// setComment sets g as the next comment if g != nil and if node comments +// are enabled - this mode is used when printing source code fragments such +// as exports only. It assumes that there is no pending comment in p.comments +// and at most one pending comment in the p.comment cache. +func (p *printer) setComment(g *ast.CommentGroup) { + if g == nil || !p.useNodeComments { + return + } + if p.comments == nil { + // initialize p.comments lazily + p.comments = make([]*ast.CommentGroup, 1) + } else if p.cindex < len(p.comments) { + // for some reason there are pending comments; this + // should never happen - handle gracefully and flush + // all comments up to g, ignore anything after that + p.flush(p.posFor(g.List[0].Pos()), token.ILLEGAL) + p.comments = p.comments[0:1] + // in debug mode, report error + p.internalError("setComment found pending comments") + } + p.comments[0] = g + p.cindex = 0 + // don't overwrite any pending comment in the p.comment cache + // (there may be a pending comment when a line comment is + // immediately followed by a lead comment with no other + // tokens between) + if p.commentOffset == infinity { + p.nextComment() // get comment ready for use + } +} + +type exprListMode uint + +const ( + commaTerm exprListMode = 1 << iota // list is optionally terminated by a comma + noIndent // no extra indentation in multi-line lists +) + +// If indent is set, a multi-line identifier list is indented after the +// first linebreak encountered. 
+func (p *printer) identList(list []*ast.Ident, indent bool) { + // convert into an expression list so we can re-use exprList formatting + xlist := make([]ast.Expr, len(list)) + for i, x := range list { + xlist[i] = x + } + var mode exprListMode + if !indent { + mode = noIndent + } + p.exprList(token.NoPos, xlist, 1, mode, token.NoPos, false) +} + +const filteredMsg = "contains filtered or unexported fields" + +// Print a list of expressions. If the list spans multiple +// source lines, the original line breaks are respected between +// expressions. +// +// TODO(gri) Consider rewriting this to be independent of []ast.Expr +// so that we can use the algorithm for any kind of list +// +// (e.g., pass list via a channel over which to range). +func (p *printer) exprList(prev0 token.Pos, list []ast.Expr, depth int, mode exprListMode, next0 token.Pos, isIncomplete bool) { + if len(list) == 0 { + if isIncomplete { + prev := p.posFor(prev0) + next := p.posFor(next0) + if prev.IsValid() && prev.Line == next.Line { + p.print("/* " + filteredMsg + " */") + } else { + p.print(newline) + p.print(indent, "// "+filteredMsg, unindent, newline) + } + } + return + } + + prev := p.posFor(prev0) + next := p.posFor(next0) + line := p.lineFor(list[0].Pos()) + endLine := p.lineFor(list[len(list)-1].End()) + + if prev.IsValid() && prev.Line == line && line == endLine { + // all list entries on a single line + for i, x := range list { + if i > 0 { + // use position of expression following the comma as + // comma position for correct comment placement + p.setPos(x.Pos()) + p.print(token.COMMA, blank) + } + p.expr0(x, depth) + } + if isIncomplete { + p.print(token.COMMA, blank, "/* "+filteredMsg+" */") + } + return + } + + // list entries span multiple lines; + // use source code positions to guide line breaks + + // Don't add extra indentation if noIndent is set; + // i.e., pretend that the first line is already indented. + ws := ignore + if mode&noIndent == 0 { + ws = indent + } + + // The first linebreak is always a formfeed since this section must not + // depend on any previous formatting. + prevBreak := -1 // index of last expression that was followed by a linebreak + if prev.IsValid() && prev.Line < line && p.linebreak(line, 0, ws, true) > 0 { + ws = ignore + prevBreak = 0 + } + + // initialize expression/key size: a zero value indicates expr/key doesn't fit on a single line + size := 0 + + // We use the ratio between the geometric mean of the previous key sizes and + // the current size to determine if there should be a break in the alignment. + // To compute the geometric mean we accumulate the ln(size) values (lnsum) + // and the number of sizes included (count). + lnsum := 0.0 + count := 0 + + // print all list elements + prevLine := prev.Line + for i, x := range list { + line = p.lineFor(x.Pos()) + + // Determine if the next linebreak, if any, needs to use formfeed: + // in general, use the entire node size to make the decision; for + // key:value expressions, use the key size. + // TODO(gri) for a better result, should probably incorporate both + // the key and the node size into the decision process + useFF := true + + // Determine element size: All bets are off if we don't have + // position information for the previous and next token (likely + // generated code - simply ignore the size in this case by setting + // it to 0). 
+ prevSize := size + const infinity = 1e6 // larger than any source line + size = p.nodeSize(x, infinity) + pair, isPair := x.(*ast.KeyValueExpr) + if size <= infinity && prev.IsValid() && next.IsValid() { + // x fits on a single line + if isPair { + size = p.nodeSize(pair.Key, infinity) // size <= infinity + } + } else { + // size too large or we don't have good layout information + size = 0 + } + + // If the previous line and the current line had single- + // line-expressions and the key sizes are small or the + // ratio between the current key and the geometric mean + // if the previous key sizes does not exceed a threshold, + // align columns and do not use formfeed. + if prevSize > 0 && size > 0 { + const smallSize = 40 + if count == 0 || prevSize <= smallSize && size <= smallSize { + useFF = false + } else { + const r = 2.5 // threshold + geomean := math.Exp(lnsum / float64(count)) // count > 0 + ratio := float64(size) / geomean + useFF = r*ratio <= 1 || r <= ratio + } + } + + needsLinebreak := 0 < prevLine && prevLine < line + if i > 0 { + // Use position of expression following the comma as + // comma position for correct comment placement, but + // only if the expression is on the same line. + if !needsLinebreak { + p.setPos(x.Pos()) + } + p.print(token.COMMA) + needsBlank := true + if needsLinebreak { + // Lines are broken using newlines so comments remain aligned + // unless useFF is set or there are multiple expressions on + // the same line in which case formfeed is used. + nbreaks := p.linebreak(line, 0, ws, useFF || prevBreak+1 < i) + if nbreaks > 0 { + ws = ignore + prevBreak = i + needsBlank = false // we got a line break instead + } + // If there was a new section or more than one new line + // (which means that the tabwriter will implicitly break + // the section), reset the geomean variables since we are + // starting a new group of elements with the next element. + if nbreaks > 1 { + lnsum = 0 + count = 0 + } + } + if needsBlank { + p.print(blank) + } + } + + if len(list) > 1 && isPair && size > 0 && needsLinebreak { + // We have a key:value expression that fits onto one line + // and it's not on the same line as the prior expression: + // Use a column for the key such that consecutive entries + // can align if possible. + // (needsLinebreak is set if we started a new line before) + p.expr(pair.Key) + p.setPos(pair.Colon) + p.print(token.COLON, vtab) + p.expr(pair.Value) + } else { + p.expr0(x, depth) + } + + if size > 0 { + lnsum += math.Log(float64(size)) + count++ + } + + prevLine = line + } + + if mode&commaTerm != 0 && next.IsValid() && p.pos.Line < next.Line { + // Print a terminating comma if the next token is on a new line. 
+ p.print(token.COMMA) + if isIncomplete { + p.print(newline) + p.print("// " + filteredMsg) + } + if ws == ignore && mode&noIndent == 0 { + // unindent if we indented + p.print(unindent) + } + p.print(formfeed) // terminating comma needs a line break to look good + return + } + + if isIncomplete { + p.print(token.COMMA, newline) + p.print("// "+filteredMsg, newline) + } + + if ws == ignore && mode&noIndent == 0 { + // unindent if we indented + p.print(unindent) + } +} + +type paramMode int + +const ( + funcParam paramMode = iota + funcTParam + typeTParam +) + +func (p *printer) parameters(fields *ast.FieldList, mode paramMode) { + openTok, closeTok := token.LPAREN, token.RPAREN + if mode != funcParam { + openTok, closeTok = token.LBRACK, token.RBRACK + } + p.setPos(fields.Opening) + p.print(openTok) + if len(fields.List) > 0 { + prevLine := p.lineFor(fields.Opening) + ws := indent + for i, par := range fields.List { + // determine par begin and end line (may be different + // if there are multiple parameter names for this par + // or the type is on a separate line) + parLineBeg := p.lineFor(par.Pos()) + parLineEnd := p.lineFor(par.End()) + // separating "," if needed + needsLinebreak := 0 < prevLine && prevLine < parLineBeg + if i > 0 { + // use position of parameter following the comma as + // comma position for correct comma placement, but + // only if the next parameter is on the same line + if !needsLinebreak { + p.setPos(par.Pos()) + } + p.print(token.COMMA) + } + // separator if needed (linebreak or blank) + if needsLinebreak && p.linebreak(parLineBeg, 0, ws, true) > 0 { + // break line if the opening "(" or previous parameter ended on a different line + ws = ignore + } else if i > 0 { + p.print(blank) + } + // parameter names + if len(par.Names) > 0 { + // Very subtle: If we indented before (ws == ignore), identList + // won't indent again. If we didn't (ws == indent), identList will + // indent if the identList spans multiple lines, and it will outdent + // again at the end (and still ws == indent). Thus, a subsequent indent + // by a linebreak call after a type, or in the next multi-line identList + // will do the right thing. + p.identList(par.Names, ws == indent) + p.print(blank) + } + // parameter type + p.expr(stripParensAlways(par.Type)) + prevLine = parLineEnd + } + + // if the closing ")" is on a separate line from the last parameter, + // print an additional "," and line break + if closing := p.lineFor(fields.Closing); 0 < prevLine && prevLine < closing { + p.print(token.COMMA) + p.linebreak(closing, 0, ignore, true) + } else if mode == typeTParam && fields.NumFields() == 1 && combinesWithName(fields.List[0].Type) { + // A type parameter list [P T] where the name P and the type expression T syntactically + // combine to another valid (value) expression requires a trailing comma, as in [P *T,] + // (or an enclosing interface as in [P interface(*T)]), so that the type parameter list + // is not parsed as an array length [P*T]. + p.print(token.COMMA) + } + + // unindent if we indented + if ws == ignore { + p.print(unindent) + } + } + + p.setPos(fields.Closing) + p.print(closeTok) +} + +// combinesWithName reports whether a name followed by the expression x +// syntactically combines to another valid (value) expression. For instance +// using *T for x, "name *T" syntactically appears as the expression x*T. +// On the other hand, using P|Q or *P|~Q for x, "name P|Q" or name *P|~Q" +// cannot be combined into a valid (value) expression. 
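
A short sketch (not part of the vendored file) of the parsing ambiguity that the trailing comma resolves; the type names are invented:

type A[P *int,] struct{}             // trailing comma: one type parameter P constrained by *int
type B[P interface{ *int }] struct{} // equivalent, using an enclosing interface
// Without either workaround, "type C[P *int] struct{}" would be parsed as an
// array type with length expression P*int rather than as a generic type.
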
+func combinesWithName(x ast.Expr) bool { + switch x := x.(type) { + case *ast.StarExpr: + // name *x.X combines to name*x.X if x.X is not a type element + return !isTypeElem(x.X) + case *ast.BinaryExpr: + return combinesWithName(x.X) && !isTypeElem(x.Y) + case *ast.ParenExpr: + // name(x) combines but we are making sure at + // the call site that x is never parenthesized. + panic("unexpected parenthesized expression") + } + return false +} + +// isTypeElem reports whether x is a (possibly parenthesized) type element expression. +// The result is false if x could be a type element OR an ordinary (value) expression. +func isTypeElem(x ast.Expr) bool { + switch x := x.(type) { + case *ast.ArrayType, *ast.StructType, *ast.FuncType, *ast.InterfaceType, *ast.MapType, *ast.ChanType: + return true + case *ast.UnaryExpr: + return x.Op == token.TILDE + case *ast.BinaryExpr: + return isTypeElem(x.X) || isTypeElem(x.Y) + case *ast.ParenExpr: + return isTypeElem(x.X) + } + return false +} + +func (p *printer) signature(sig *ast.FuncType) { + if sig.TypeParams != nil { + p.parameters(sig.TypeParams, funcTParam) + } + if sig.Params != nil { + p.parameters(sig.Params, funcParam) + } else { + p.print(token.LPAREN, token.RPAREN) + } + res := sig.Results + n := res.NumFields() + if n > 0 { + // res != nil + p.print(blank) + if n == 1 && res.List[0].Names == nil { + // single anonymous res; no ()'s + p.expr(stripParensAlways(res.List[0].Type)) + return + } + p.parameters(res, funcParam) + } +} + +func identListSize(list []*ast.Ident, maxSize int) (size int) { + for i, x := range list { + if i > 0 { + size += len(", ") + } + size += utf8.RuneCountInString(x.Name) + if size >= maxSize { + break + } + } + return +} + +func (p *printer) isOneLineFieldList(list []*ast.Field) bool { + if len(list) != 1 { + return false // allow only one field + } + f := list[0] + if f.Tag != nil || f.Comment != nil { + return false // don't allow tags or comments + } + // only name(s) and type + const maxSize = 30 // adjust as appropriate, this is an approximate value + namesSize := identListSize(f.Names, maxSize) + if namesSize > 0 { + namesSize = 1 // blank between names and types + } + typeSize := p.nodeSize(f.Type, maxSize) + return namesSize+typeSize <= maxSize +} + +func (p *printer) setLineComment(text string) { + p.setComment(&ast.CommentGroup{List: []*ast.Comment{{Slash: token.NoPos, Text: text}}}) +} + +func (p *printer) fieldList(fields *ast.FieldList, isStruct, isIncomplete bool) { + lbrace := fields.Opening + list := fields.List + rbrace := fields.Closing + hasComments := isIncomplete || p.commentBefore(p.posFor(rbrace)) + srcIsOneLine := lbrace.IsValid() && rbrace.IsValid() && p.lineFor(lbrace) == p.lineFor(rbrace) + + if !hasComments && srcIsOneLine { + // possibly a one-line struct/interface + if len(list) == 0 { + // no blank between keyword and {} in this case + p.setPos(lbrace) + p.print(token.LBRACE) + p.setPos(rbrace) + p.print(token.RBRACE) + return + } else if p.isOneLineFieldList(list) { + // small enough - print on one line + // (don't use identList and ignore source line breaks) + p.setPos(lbrace) + p.print(token.LBRACE, blank) + f := list[0] + if isStruct { + for i, x := range f.Names { + if i > 0 { + // no comments so no need for comma position + p.print(token.COMMA, blank) + } + p.expr(x) + } + if len(f.Names) > 0 { + p.print(blank) + } + p.expr(f.Type) + } else { // interface + if len(f.Names) > 0 { + name := f.Names[0] // method name + p.expr(name) + p.signature(f.Type.(*ast.FuncType)) // don't print 
"func" + } else { + // embedded interface + p.expr(f.Type) + } + } + p.print(blank) + p.setPos(rbrace) + p.print(token.RBRACE) + return + } + } + // hasComments || !srcIsOneLine + + p.print(blank) + p.setPos(lbrace) + p.print(token.LBRACE, indent) + if hasComments || len(list) > 0 { + p.print(formfeed) + } + + if isStruct { + + sep := vtab + if len(list) == 1 { + sep = blank + } + var line int + for i, f := range list { + if i > 0 { + p.linebreak(p.lineFor(f.Pos()), 1, ignore, p.linesFrom(line) > 0) + } + extraTabs := 0 + p.setComment(f.Doc) + p.recordLine(&line) + if len(f.Names) > 0 { + // named fields + p.identList(f.Names, false) + p.print(sep) + p.expr(f.Type) + extraTabs = 1 + } else { + // anonymous field + p.expr(f.Type) + extraTabs = 2 + } + if f.Tag != nil { + if len(f.Names) > 0 && sep == vtab { + p.print(sep) + } + p.print(sep) + p.expr(f.Tag) + extraTabs = 0 + } + if f.Comment != nil { + for ; extraTabs > 0; extraTabs-- { + p.print(sep) + } + p.setComment(f.Comment) + } + } + if isIncomplete { + if len(list) > 0 { + p.print(formfeed) + } + p.flush(p.posFor(rbrace), token.RBRACE) // make sure we don't lose the last line comment + p.setLineComment("// " + filteredMsg) + } + + } else { // interface + + var line int + var prev *ast.Ident // previous "type" identifier + for i, f := range list { + var name *ast.Ident // first name, or nil + if len(f.Names) > 0 { + name = f.Names[0] + } + if i > 0 { + // don't do a line break (min == 0) if we are printing a list of types + // TODO(gri) this doesn't work quite right if the list of types is + // spread across multiple lines + min := 1 + if prev != nil && name == prev { + min = 0 + } + p.linebreak(p.lineFor(f.Pos()), min, ignore, p.linesFrom(line) > 0) + } + p.setComment(f.Doc) + p.recordLine(&line) + if name != nil { + // method + p.expr(name) + p.signature(f.Type.(*ast.FuncType)) // don't print "func" + prev = nil + } else { + // embedded interface + p.expr(f.Type) + prev = nil + } + p.setComment(f.Comment) + } + if isIncomplete { + if len(list) > 0 { + p.print(formfeed) + } + p.flush(p.posFor(rbrace), token.RBRACE) // make sure we don't lose the last line comment + p.setLineComment("// contains filtered or unexported methods") + } + + } + p.print(unindent, formfeed) + p.setPos(rbrace) + p.print(token.RBRACE) +} + +// ---------------------------------------------------------------------------- +// Expressions + +func walkBinary(e *ast.BinaryExpr) (has4, has5 bool, maxProblem int) { + switch e.Op.Precedence() { + case 4: + has4 = true + case 5: + has5 = true + } + + switch l := e.X.(type) { + case *ast.BinaryExpr: + if l.Op.Precedence() < e.Op.Precedence() { + // parens will be inserted. + // pretend this is an *ast.ParenExpr and do nothing. + break + } + h4, h5, mp := walkBinary(l) + has4 = has4 || h4 + has5 = has5 || h5 + if maxProblem < mp { + maxProblem = mp + } + } + + switch r := e.Y.(type) { + case *ast.BinaryExpr: + if r.Op.Precedence() <= e.Op.Precedence() { + // parens will be inserted. + // pretend this is an *ast.ParenExpr and do nothing. 
+ break + } + h4, h5, mp := walkBinary(r) + has4 = has4 || h4 + has5 = has5 || h5 + if maxProblem < mp { + maxProblem = mp + } + + case *ast.StarExpr: + if e.Op == token.QUO { // `*/` + maxProblem = 5 + } + + case *ast.UnaryExpr: + switch e.Op.String() + r.Op.String() { + case "/*", "&&", "&^": + maxProblem = 5 + case "++", "--": + if maxProblem < 4 { + maxProblem = 4 + } + } + } + return +} + +func cutoff(e *ast.BinaryExpr, depth int) int { + has4, has5, maxProblem := walkBinary(e) + if maxProblem > 0 { + return maxProblem + 1 + } + if has4 && has5 { + if depth == 1 { + return 5 + } + return 4 + } + if depth == 1 { + return 6 + } + return 4 +} + +func diffPrec(expr ast.Expr, prec int) int { + x, ok := expr.(*ast.BinaryExpr) + if !ok || prec != x.Op.Precedence() { + return 1 + } + return 0 +} + +func reduceDepth(depth int) int { + depth-- + if depth < 1 { + depth = 1 + } + return depth +} + +// Format the binary expression: decide the cutoff and then format. +// Let's call depth == 1 Normal mode, and depth > 1 Compact mode. +// (Algorithm suggestion by Russ Cox.) +// +// The precedences are: +// +// 5 * / % << >> & &^ +// 4 + - | ^ +// 3 == != < <= > >= +// 2 && +// 1 || +// +// The only decision is whether there will be spaces around levels 4 and 5. +// There are never spaces at level 6 (unary), and always spaces at levels 3 and below. +// +// To choose the cutoff, look at the whole expression but excluding primary +// expressions (function calls, parenthesized exprs), and apply these rules: +// +// 1. If there is a binary operator with a right side unary operand +// that would clash without a space, the cutoff must be (in order): +// +// /* 6 +// && 6 +// &^ 6 +// ++ 5 +// -- 5 +// +// (Comparison operators always have spaces around them.) +// +// 2. If there is a mix of level 5 and level 4 operators, then the cutoff +// is 5 (use spaces to distinguish precedence) in Normal mode +// and 4 (never use spaces) in Compact mode. +// +// 3. If there are no level 4 operators or no level 5 operators, then the +// cutoff is 6 (always use spaces) in Normal mode +// and 4 (never use spaces) in Compact mode. +func (p *printer) binaryExpr(x *ast.BinaryExpr, prec1, cutoff, depth int) { + prec := x.Op.Precedence() + if prec < prec1 { + // parenthesis needed + // Note: The parser inserts an ast.ParenExpr node; thus this case + // can only occur if the AST is created in a different way. + p.print(token.LPAREN) + p.expr0(x, reduceDepth(depth)) // parentheses undo one level of depth + p.print(token.RPAREN) + return + } + + printBlank := prec < cutoff + + ws := indent + p.expr1(x.X, prec, depth+diffPrec(x.X, prec)) + if printBlank { + p.print(blank) + } + xline := p.pos.Line // before the operator (it may be on the next line!) 
+ yline := p.lineFor(x.Y.Pos()) + p.setPos(x.OpPos) + p.print(x.Op) + if xline != yline && xline > 0 && yline > 0 { + // at least one line break, but respect an extra empty line + // in the source + if p.linebreak(yline, 1, ws, true) > 0 { + ws = ignore + printBlank = false // no blank after line break + } + } + if printBlank { + p.print(blank) + } + p.expr1(x.Y, prec+1, depth+1) + if ws == ignore { + p.print(unindent) + } +} + +func isBinary(expr ast.Expr) bool { + _, ok := expr.(*ast.BinaryExpr) + return ok +} + +func (p *printer) expr1(expr ast.Expr, prec1, depth int) { + p.setPos(expr.Pos()) + + switch x := expr.(type) { + case *ast.BadExpr: + p.print("BadExpr") + + case *ast.Ident: + p.print(x) + + case *ast.BinaryExpr: + if depth < 1 { + p.internalError("depth < 1:", depth) + depth = 1 + } + p.binaryExpr(x, prec1, cutoff(x, depth), depth) + + case *ast.KeyValueExpr: + p.expr(x.Key) + p.setPos(x.Colon) + p.print(token.COLON, blank) + p.expr(x.Value) + + case *ast.StarExpr: + const prec = token.UnaryPrec + if prec < prec1 { + // parenthesis needed + p.print(token.LPAREN) + p.print(token.MUL) + p.expr(x.X) + p.print(token.RPAREN) + } else { + // no parenthesis needed + p.print(token.MUL) + p.expr(x.X) + } + + case *ast.UnaryExpr: + const prec = token.UnaryPrec + if prec < prec1 { + // parenthesis needed + p.print(token.LPAREN) + p.expr(x) + p.print(token.RPAREN) + } else { + // no parenthesis needed + p.print(x.Op) + if x.Op == token.RANGE { + // TODO(gri) Remove this code if it cannot be reached. + p.print(blank) + } + p.expr1(x.X, prec, depth) + } + + case *ast.BasicLit: + if p.Config.Mode&normalizeNumbers != 0 { + x = normalizedNumber(x) + } + p.print(x) + + case *ast.FuncLit: + p.setPos(x.Type.Pos()) + p.print(token.FUNC) + // See the comment in funcDecl about how the header size is computed. 
+ startCol := p.out.Column - len("func") + p.signature(x.Type) + p.funcBody(p.distanceFrom(x.Type.Pos(), startCol), blank, x.Body) + + case *ast.ParenExpr: + if _, hasParens := x.X.(*ast.ParenExpr); hasParens { + // don't print parentheses around an already parenthesized expression + // TODO(gri) consider making this more general and incorporate precedence levels + p.expr0(x.X, depth) + } else { + p.print(token.LPAREN) + p.expr0(x.X, reduceDepth(depth)) // parentheses undo one level of depth + p.setPos(x.Rparen) + p.print(token.RPAREN) + } + + case *ast.SelectorExpr: + p.selectorExpr(x, depth, false) + + case *ast.TypeAssertExpr: + p.expr1(x.X, token.HighestPrec, depth) + p.print(token.PERIOD) + p.setPos(x.Lparen) + p.print(token.LPAREN) + if x.Type != nil { + p.expr(x.Type) + } else { + p.print(token.TYPE) + } + p.setPos(x.Rparen) + p.print(token.RPAREN) + + case *ast.IndexExpr: + // TODO(gri): should treat[] like parentheses and undo one level of depth + p.expr1(x.X, token.HighestPrec, 1) + p.setPos(x.Lbrack) + p.print(token.LBRACK) + p.expr0(x.Index, depth+1) + p.setPos(x.Rbrack) + p.print(token.RBRACK) + + case *ast.IndexListExpr: + // TODO(gri): as for IndexExpr, should treat [] like parentheses and undo + // one level of depth + p.expr1(x.X, token.HighestPrec, 1) + p.setPos(x.Lbrack) + p.print(token.LBRACK) + p.exprList(x.Lbrack, x.Indices, depth+1, commaTerm, x.Rbrack, false) + p.setPos(x.Rbrack) + p.print(token.RBRACK) + + case *ast.SliceExpr: + // TODO(gri): should treat[] like parentheses and undo one level of depth + p.expr1(x.X, token.HighestPrec, 1) + p.setPos(x.Lbrack) + p.print(token.LBRACK) + indices := []ast.Expr{x.Low, x.High} + if x.Max != nil { + indices = append(indices, x.Max) + } + // determine if we need extra blanks around ':' + var needsBlanks bool + if depth <= 1 { + var indexCount int + var hasBinaries bool + for _, x := range indices { + if x != nil { + indexCount++ + if isBinary(x) { + hasBinaries = true + } + } + } + if indexCount > 1 && hasBinaries { + needsBlanks = true + } + } + for i, x := range indices { + if i > 0 { + if indices[i-1] != nil && needsBlanks { + p.print(blank) + } + p.print(token.COLON) + if x != nil && needsBlanks { + p.print(blank) + } + } + if x != nil { + p.expr0(x, depth+1) + } + } + p.setPos(x.Rbrack) + p.print(token.RBRACK) + + case *ast.CallExpr: + if len(x.Args) > 1 { + depth++ + } + var wasIndented bool + if _, ok := x.Fun.(*ast.FuncType); ok { + // conversions to literal function types require parentheses around the type + p.print(token.LPAREN) + wasIndented = p.possibleSelectorExpr(x.Fun, token.HighestPrec, depth) + p.print(token.RPAREN) + } else { + wasIndented = p.possibleSelectorExpr(x.Fun, token.HighestPrec, depth) + } + p.setPos(x.Lparen) + p.print(token.LPAREN) + if x.Ellipsis.IsValid() { + p.exprList(x.Lparen, x.Args, depth, 0, x.Ellipsis, false) + p.setPos(x.Ellipsis) + p.print(token.ELLIPSIS) + if x.Rparen.IsValid() && p.lineFor(x.Ellipsis) < p.lineFor(x.Rparen) { + p.print(token.COMMA, formfeed) + } + } else { + p.exprList(x.Lparen, x.Args, depth, commaTerm, x.Rparen, false) + } + p.setPos(x.Rparen) + p.print(token.RPAREN) + if wasIndented { + p.print(unindent) + } + + case *ast.CompositeLit: + // composite literal elements that are composite literals themselves may have the type omitted + if x.Type != nil { + p.expr1(x.Type, token.HighestPrec, depth) + } + p.level++ + p.setPos(x.Lbrace) + p.print(token.LBRACE) + p.exprList(x.Lbrace, x.Elts, 1, commaTerm, x.Rbrace, x.Incomplete) + // do not insert extra line break 
following a /*-style comment + // before the closing '}' as it might break the code if there + // is no trailing ',' + mode := noExtraLinebreak + // do not insert extra blank following a /*-style comment + // before the closing '}' unless the literal is empty + if len(x.Elts) > 0 { + mode |= noExtraBlank + } + // need the initial indent to print lone comments with + // the proper level of indentation + p.print(indent, unindent, mode) + p.setPos(x.Rbrace) + p.print(token.RBRACE, mode) + p.level-- + + case *ast.Ellipsis: + p.print(token.ELLIPSIS) + if x.Elt != nil { + p.expr(x.Elt) + } + + case *ast.ArrayType: + p.print(token.LBRACK) + if x.Len != nil { + p.expr(x.Len) + } + p.print(token.RBRACK) + p.expr(x.Elt) + + case *ast.StructType: + p.print(token.STRUCT) + p.fieldList(x.Fields, true, x.Incomplete) + + case *ast.FuncType: + p.print(token.FUNC) + p.signature(x) + + case *ast.InterfaceType: + p.print(token.INTERFACE) + p.fieldList(x.Methods, false, x.Incomplete) + + case *ast.MapType: + p.print(token.MAP, token.LBRACK) + p.expr(x.Key) + p.print(token.RBRACK) + p.expr(x.Value) + + case *ast.ChanType: + switch x.Dir { + case ast.SEND | ast.RECV: + p.print(token.CHAN) + case ast.RECV: + p.print(token.ARROW, token.CHAN) // x.Arrow and x.Pos() are the same + case ast.SEND: + p.print(token.CHAN) + p.setPos(x.Arrow) + p.print(token.ARROW) + } + p.print(blank) + p.expr(x.Value) + + default: + panic("unreachable") + } +} + +// normalizedNumber rewrites base prefixes and exponents +// of numbers to use lower-case letters (0X123 to 0x123 and 1.2E3 to 1.2e3), +// and removes leading 0's from integer imaginary literals (0765i to 765i). +// It leaves hexadecimal digits alone. +// +// normalizedNumber doesn't modify the ast.BasicLit value lit points to. +// If lit is not a number or a number in canonical format already, +// lit is returned as is. Otherwise a new ast.BasicLit is created. +func normalizedNumber(lit *ast.BasicLit) *ast.BasicLit { + if lit.Kind != token.INT && lit.Kind != token.FLOAT && lit.Kind != token.IMAG { + return lit // not a number - nothing to do + } + if len(lit.Value) < 2 { + return lit // only one digit (common case) - nothing to do + } + // len(lit.Value) >= 2 + + // We ignore lit.Kind because for lit.Kind == token.IMAG the literal may be an integer + // or floating-point value, decimal or not. Instead, just consider the literal pattern. 
+ x := lit.Value + switch x[:2] { + default: + // 0-prefix octal, decimal int, or float (possibly with 'i' suffix) + if i := strings.LastIndexByte(x, 'E'); i >= 0 { + x = x[:i] + "e" + x[i+1:] + break + } + // remove leading 0's from integer (but not floating-point) imaginary literals + if x[len(x)-1] == 'i' && !strings.ContainsAny(x, ".e") { + x = strings.TrimLeft(x, "0_") + if x == "i" { + x = "0i" + } + } + case "0X": + x = "0x" + x[2:] + // possibly a hexadecimal float + if i := strings.LastIndexByte(x, 'P'); i >= 0 { + x = x[:i] + "p" + x[i+1:] + } + case "0x": + // possibly a hexadecimal float + i := strings.LastIndexByte(x, 'P') + if i == -1 { + return lit // nothing to do + } + x = x[:i] + "p" + x[i+1:] + case "0O": + x = "0o" + x[2:] + case "0o": + return lit // nothing to do + case "0B": + x = "0b" + x[2:] + case "0b": + return lit // nothing to do + } + + return &ast.BasicLit{ValuePos: lit.ValuePos, Kind: lit.Kind, Value: x} +} + +func (p *printer) possibleSelectorExpr(expr ast.Expr, prec1, depth int) bool { + if x, ok := expr.(*ast.SelectorExpr); ok { + return p.selectorExpr(x, depth, true) + } + p.expr1(expr, prec1, depth) + return false +} + +// selectorExpr handles an *ast.SelectorExpr node and reports whether x spans +// multiple lines. +func (p *printer) selectorExpr(x *ast.SelectorExpr, depth int, isMethod bool) bool { + p.expr1(x.X, token.HighestPrec, depth) + p.print(token.PERIOD) + if line := p.lineFor(x.Sel.Pos()); p.pos.IsValid() && p.pos.Line < line { + p.print(indent, newline) + p.setPos(x.Sel.Pos()) + p.print(x.Sel) + if !isMethod { + p.print(unindent) + } + return true + } + p.setPos(x.Sel.Pos()) + p.print(x.Sel) + return false +} + +func (p *printer) expr0(x ast.Expr, depth int) { + p.expr1(x, token.LowestPrec, depth) +} + +func (p *printer) expr(x ast.Expr) { + const depth = 1 + p.expr1(x, token.LowestPrec, depth) +} + +// ---------------------------------------------------------------------------- +// Statements + +// Print the statement list indented, but without a newline after the last statement. +// Extra line breaks between statements in the source are respected but at most one +// empty line is printed between statements. +func (p *printer) stmtList(list []ast.Stmt, nindent int, nextIsRBrace bool) { + if nindent > 0 { + p.print(indent) + } + var line int + i := 0 + for _, s := range list { + // ignore empty statements (was issue 3466) + if _, isEmpty := s.(*ast.EmptyStmt); !isEmpty { + // nindent == 0 only for lists of switch/select case clauses; + // in those cases each clause is a new section + if len(p.output) > 0 { + // only print line break if we are not at the beginning of the output + // (i.e., we are not printing only a partial program) + p.linebreak(p.lineFor(s.Pos()), 1, ignore, i == 0 || nindent == 0 || p.linesFrom(line) > 0) + } + p.recordLine(&line) + p.stmt(s, nextIsRBrace && i == len(list)-1) + // labeled statements put labels on a separate line, but here + // we only care about the start line of the actual statement + // without label - correct line for each label + for t := s; ; { + lt, _ := t.(*ast.LabeledStmt) + if lt == nil { + break + } + line++ + t = lt.Stmt + } + i++ + } + } + if nindent > 0 { + p.print(unindent) + } +} + +// block prints an *ast.BlockStmt; it always spans at least two lines. 
+func (p *printer) block(b *ast.BlockStmt, nindent int) { + p.setPos(b.Lbrace) + p.print(token.LBRACE) + p.stmtList(b.List, nindent, true) + p.linebreak(p.lineFor(b.Rbrace), 1, ignore, true) + p.setPos(b.Rbrace) + p.print(token.RBRACE) +} + +func isTypeName(x ast.Expr) bool { + switch t := x.(type) { + case *ast.Ident: + return true + case *ast.SelectorExpr: + return isTypeName(t.X) + } + return false +} + +func stripParens(x ast.Expr) ast.Expr { + if px, strip := x.(*ast.ParenExpr); strip { + // parentheses must not be stripped if there are any + // unparenthesized composite literals starting with + // a type name + ast.Inspect(px.X, func(node ast.Node) bool { + switch x := node.(type) { + case *ast.ParenExpr: + // parentheses protect enclosed composite literals + return false + case *ast.CompositeLit: + if isTypeName(x.Type) { + strip = false // do not strip parentheses + } + return false + } + // in all other cases, keep inspecting + return true + }) + if strip { + return stripParens(px.X) + } + } + return x +} + +func stripParensAlways(x ast.Expr) ast.Expr { + if x, ok := x.(*ast.ParenExpr); ok { + return stripParensAlways(x.X) + } + return x +} + +func (p *printer) controlClause(isForStmt bool, init ast.Stmt, expr ast.Expr, post ast.Stmt) { + p.print(blank) + needsBlank := false + if init == nil && post == nil { + // no semicolons required + if expr != nil { + p.expr(stripParens(expr)) + needsBlank = true + } + } else { + // all semicolons required + // (they are not separators, print them explicitly) + if init != nil { + p.stmt(init, false) + } + p.print(token.SEMICOLON, blank) + if expr != nil { + p.expr(stripParens(expr)) + needsBlank = true + } + if isForStmt { + p.print(token.SEMICOLON, blank) + needsBlank = false + if post != nil { + p.stmt(post, false) + needsBlank = true + } + } + } + if needsBlank { + p.print(blank) + } +} + +// indentList reports whether an expression list would look better if it +// were indented wholesale (starting with the very first element, rather +// than starting at the first line break). +func (p *printer) indentList(list []ast.Expr) bool { + // Heuristic: indentList reports whether there are more than one multi- + // line element in the list, or if there is any element that is not + // starting on the same line as the previous one ends. 
+ if len(list) >= 2 { + b := p.lineFor(list[0].Pos()) + e := p.lineFor(list[len(list)-1].End()) + if 0 < b && b < e { + // list spans multiple lines + n := 0 // multi-line element count + line := b + for _, x := range list { + xb := p.lineFor(x.Pos()) + xe := p.lineFor(x.End()) + if line < xb { + // x is not starting on the same + // line as the previous one ended + return true + } + if xb < xe { + // x is a multi-line element + n++ + } + line = xe + } + return n > 1 + } + } + return false +} + +func (p *printer) stmt(stmt ast.Stmt, nextIsRBrace bool) { + p.setPos(stmt.Pos()) + + switch s := stmt.(type) { + case *ast.BadStmt: + p.print("BadStmt") + + case *ast.DeclStmt: + p.decl(s.Decl) + + case *ast.EmptyStmt: + // nothing to do + + case *ast.LabeledStmt: + // a "correcting" unindent immediately following a line break + // is applied before the line break if there is no comment + // between (see writeWhitespace) + p.print(unindent) + p.expr(s.Label) + p.setPos(s.Colon) + p.print(token.COLON, indent) + if e, isEmpty := s.Stmt.(*ast.EmptyStmt); isEmpty { + if !nextIsRBrace { + p.print(newline) + p.setPos(e.Pos()) + p.print(token.SEMICOLON) + break + } + } else { + p.linebreak(p.lineFor(s.Stmt.Pos()), 1, ignore, true) + } + p.stmt(s.Stmt, nextIsRBrace) + + case *ast.ExprStmt: + const depth = 1 + p.expr0(s.X, depth) + + case *ast.SendStmt: + const depth = 1 + p.expr0(s.Chan, depth) + p.print(blank) + p.setPos(s.Arrow) + p.print(token.ARROW, blank) + p.expr0(s.Value, depth) + + case *ast.IncDecStmt: + const depth = 1 + p.expr0(s.X, depth+1) + p.setPos(s.TokPos) + p.print(s.Tok) + + case *ast.AssignStmt: + depth := 1 + if len(s.Lhs) > 1 && len(s.Rhs) > 1 { + depth++ + } + p.exprList(s.Pos(), s.Lhs, depth, 0, s.TokPos, false) + p.print(blank) + p.setPos(s.TokPos) + p.print(s.Tok, blank) + p.exprList(s.TokPos, s.Rhs, depth, 0, token.NoPos, false) + + case *ast.GoStmt: + p.print(token.GO, blank) + p.expr(s.Call) + + case *ast.DeferStmt: + p.print(token.DEFER, blank) + p.expr(s.Call) + + case *ast.ReturnStmt: + p.print(token.RETURN) + if s.Results != nil { + p.print(blank) + // Use indentList heuristic to make corner cases look + // better (issue 1207). A more systematic approach would + // always indent, but this would cause significant + // reformatting of the code base and not necessarily + // lead to more nicely formatted code in general. + if p.indentList(s.Results) { + p.print(indent) + // Use NoPos so that a newline never goes before + // the results (see issue #32854). + p.exprList(token.NoPos, s.Results, 1, noIndent, token.NoPos, false) + p.print(unindent) + } else { + p.exprList(token.NoPos, s.Results, 1, 0, token.NoPos, false) + } + } + + case *ast.BranchStmt: + p.print(s.Tok) + if s.Label != nil { + p.print(blank) + p.expr(s.Label) + } + + case *ast.BlockStmt: + p.block(s, 1) + + case *ast.IfStmt: + p.print(token.IF) + p.controlClause(false, s.Init, s.Cond, nil) + p.block(s.Body, 1) + if s.Else != nil { + p.print(blank, token.ELSE, blank) + switch s.Else.(type) { + case *ast.BlockStmt, *ast.IfStmt: + p.stmt(s.Else, nextIsRBrace) + default: + // This can only happen with an incorrectly + // constructed AST. Permit it but print so + // that it can be parsed without errors. 
+ p.print(token.LBRACE, indent, formfeed) + p.stmt(s.Else, true) + p.print(unindent, formfeed, token.RBRACE) + } + } + + case *ast.CaseClause: + if s.List != nil { + p.print(token.CASE, blank) + p.exprList(s.Pos(), s.List, 1, 0, s.Colon, false) + } else { + p.print(token.DEFAULT) + } + p.setPos(s.Colon) + p.print(token.COLON) + p.stmtList(s.Body, 1, nextIsRBrace) + + case *ast.SwitchStmt: + p.print(token.SWITCH) + p.controlClause(false, s.Init, s.Tag, nil) + p.block(s.Body, 0) + + case *ast.TypeSwitchStmt: + p.print(token.SWITCH) + if s.Init != nil { + p.print(blank) + p.stmt(s.Init, false) + p.print(token.SEMICOLON) + } + p.print(blank) + p.stmt(s.Assign, false) + p.print(blank) + p.block(s.Body, 0) + + case *ast.CommClause: + if s.Comm != nil { + p.print(token.CASE, blank) + p.stmt(s.Comm, false) + } else { + p.print(token.DEFAULT) + } + p.setPos(s.Colon) + p.print(token.COLON) + p.stmtList(s.Body, 1, nextIsRBrace) + + case *ast.SelectStmt: + p.print(token.SELECT, blank) + body := s.Body + if len(body.List) == 0 && !p.commentBefore(p.posFor(body.Rbrace)) { + // print empty select statement w/o comments on one line + p.setPos(body.Lbrace) + p.print(token.LBRACE) + p.setPos(body.Rbrace) + p.print(token.RBRACE) + } else { + p.block(body, 0) + } + + case *ast.ForStmt: + p.print(token.FOR) + p.controlClause(true, s.Init, s.Cond, s.Post) + p.block(s.Body, 1) + + case *ast.RangeStmt: + p.print(token.FOR, blank) + if s.Key != nil { + p.expr(s.Key) + if s.Value != nil { + // use position of value following the comma as + // comma position for correct comment placement + p.setPos(s.Value.Pos()) + p.print(token.COMMA, blank) + p.expr(s.Value) + } + p.print(blank) + p.setPos(s.TokPos) + p.print(s.Tok, blank) + } + p.print(token.RANGE, blank) + p.expr(stripParens(s.X)) + p.print(blank) + p.block(s.Body, 1) + + default: + panic("unreachable") + } +} + +// ---------------------------------------------------------------------------- +// Declarations + +// The keepTypeColumn function determines if the type column of a series of +// consecutive const or var declarations must be kept, or if initialization +// values (V) can be placed in the type column (T) instead. The i'th entry +// in the result slice is true if the type column in spec[i] must be kept. +// +// For example, the declaration: +// +// const ( +// foobar int = 42 // comment +// x = 7 // comment +// foo +// bar = 991 +// ) +// +// leads to the type/values matrix below. A run of value columns (V) can +// be moved into the type column if there is no type for any of the values +// in that column (we only move entire columns so that they align properly). 
+// +// matrix formatted result +// matrix +// T V -> T V -> true there is a T and so the type +// - V - V true column must be kept +// - - - - false +// - V V - false V is moved into T column +func keepTypeColumn(specs []ast.Spec) []bool { + m := make([]bool, len(specs)) + + populate := func(i, j int, keepType bool) { + if keepType { + for ; i < j; i++ { + m[i] = true + } + } + } + + i0 := -1 // if i0 >= 0 we are in a run and i0 is the start of the run + var keepType bool + for i, s := range specs { + t := s.(*ast.ValueSpec) + if t.Values != nil { + if i0 < 0 { + // start of a run of ValueSpecs with non-nil Values + i0 = i + keepType = false + } + } else { + if i0 >= 0 { + // end of a run + populate(i0, i, keepType) + i0 = -1 + } + } + if t.Type != nil { + keepType = true + } + } + if i0 >= 0 { + // end of a run + populate(i0, len(specs), keepType) + } + + return m +} + +func (p *printer) valueSpec(s *ast.ValueSpec, keepType bool) { + p.setComment(s.Doc) + p.identList(s.Names, false) // always present + extraTabs := 3 + if s.Type != nil || keepType { + p.print(vtab) + extraTabs-- + } + if s.Type != nil { + p.expr(s.Type) + } + if s.Values != nil { + p.print(vtab, token.ASSIGN, blank) + p.exprList(token.NoPos, s.Values, 1, 0, token.NoPos, false) + extraTabs-- + } + if s.Comment != nil { + for ; extraTabs > 0; extraTabs-- { + p.print(vtab) + } + p.setComment(s.Comment) + } +} + +func sanitizeImportPath(lit *ast.BasicLit) *ast.BasicLit { + // Note: An unmodified AST generated by go/parser will already + // contain a backward- or double-quoted path string that does + // not contain any invalid characters, and most of the work + // here is not needed. However, a modified or generated AST + // may possibly contain non-canonical paths. Do the work in + // all cases since it's not too hard and not speed-critical. + + // if we don't have a proper string, be conservative and return whatever we have + if lit.Kind != token.STRING { + return lit + } + s, err := strconv.Unquote(lit.Value) + if err != nil { + return lit + } + + // if the string is an invalid path, return whatever we have + // + // spec: "Implementation restriction: A compiler may restrict + // ImportPaths to non-empty strings using only characters belonging + // to Unicode's L, M, N, P, and S general categories (the Graphic + // characters without spaces) and may also exclude the characters + // !"#$%&'()*,:;<=>?[\]^`{|} and the Unicode replacement character + // U+FFFD." + if s == "" { + return lit + } + const illegalChars = `!"#$%&'()*,:;<=>?[\]^{|}` + "`\uFFFD" + for _, r := range s { + if !unicode.IsGraphic(r) || unicode.IsSpace(r) || strings.ContainsRune(illegalChars, r) { + return lit + } + } + + // otherwise, return the double-quoted path + s = strconv.Quote(s) + if s == lit.Value { + return lit // nothing wrong with lit + } + return &ast.BasicLit{ValuePos: lit.ValuePos, Kind: token.STRING, Value: s} +} + +// The parameter n is the number of specs in the group. If doIndent is set, +// multi-line identifier lists in the spec are indented when the first +// linebreak is encountered. 
+func (p *printer) spec(spec ast.Spec, n int, doIndent bool) { + switch s := spec.(type) { + case *ast.ImportSpec: + p.setComment(s.Doc) + if s.Name != nil { + p.expr(s.Name) + p.print(blank) + } + p.expr(sanitizeImportPath(s.Path)) + p.setComment(s.Comment) + p.setPos(s.EndPos) + + case *ast.ValueSpec: + if n != 1 { + p.internalError("expected n = 1; got", n) + } + p.setComment(s.Doc) + p.identList(s.Names, doIndent) // always present + if s.Type != nil { + p.print(blank) + p.expr(s.Type) + } + if s.Values != nil { + p.print(blank, token.ASSIGN, blank) + p.exprList(token.NoPos, s.Values, 1, 0, token.NoPos, false) + } + p.setComment(s.Comment) + + case *ast.TypeSpec: + p.setComment(s.Doc) + p.expr(s.Name) + if s.TypeParams != nil { + p.parameters(s.TypeParams, typeTParam) + } + if n == 1 { + p.print(blank) + } else { + p.print(vtab) + } + if s.Assign.IsValid() { + p.print(token.ASSIGN, blank) + } + p.expr(s.Type) + p.setComment(s.Comment) + + default: + panic("unreachable") + } +} + +func (p *printer) genDecl(d *ast.GenDecl) { + p.setComment(d.Doc) + p.setPos(d.Pos()) + p.print(d.Tok, blank) + + if d.Lparen.IsValid() || len(d.Specs) > 1 { + // group of parenthesized declarations + p.setPos(d.Lparen) + p.print(token.LPAREN) + if n := len(d.Specs); n > 0 { + p.print(indent, formfeed) + if n > 1 && (d.Tok == token.CONST || d.Tok == token.VAR) { + // two or more grouped const/var declarations: + // determine if the type column must be kept + keepType := keepTypeColumn(d.Specs) + var line int + for i, s := range d.Specs { + if i > 0 { + p.linebreak(p.lineFor(s.Pos()), 1, ignore, p.linesFrom(line) > 0) + } + p.recordLine(&line) + p.valueSpec(s.(*ast.ValueSpec), keepType[i]) + } + } else { + var line int + for i, s := range d.Specs { + if i > 0 { + p.linebreak(p.lineFor(s.Pos()), 1, ignore, p.linesFrom(line) > 0) + } + p.recordLine(&line) + p.spec(s, n, false) + } + } + p.print(unindent, formfeed) + } + p.setPos(d.Rparen) + p.print(token.RPAREN) + + } else if len(d.Specs) > 0 { + // single declaration + p.spec(d.Specs[0], 1, true) + } +} + +// sizeCounter is an io.Writer which counts the number of bytes written, +// as well as whether a newline character was seen. +type sizeCounter struct { + hasNewline bool + size int +} + +func (c *sizeCounter) Write(p []byte) (int, error) { + if !c.hasNewline { + for _, b := range p { + if b == '\n' || b == '\f' { + c.hasNewline = true + break + } + } + } + c.size += len(p) + return len(p), nil +} + +// nodeSize determines the size of n in chars after formatting. +// The result is <= maxSize if the node fits on one line with at +// most maxSize chars and the formatted output doesn't contain +// any control chars. Otherwise, the result is > maxSize. +func (p *printer) nodeSize(n ast.Node, maxSize int) (size int) { + // nodeSize invokes the printer, which may invoke nodeSize + // recursively. For deep composite literal nests, this can + // lead to an exponential algorithm. Remember previous + // results to prune the recursion (was issue 1628). 
+ if size, found := p.nodeSizes[n]; found { + return size + } + + size = maxSize + 1 // assume n doesn't fit + p.nodeSizes[n] = size + + // nodeSize computation must be independent of particular + // style so that we always get the same decision; print + // in RawFormat + cfg := Config{Mode: RawFormat} + var counter sizeCounter + if err := cfg.fprint(&counter, p.fset, n, p.nodeSizes); err != nil { + return + } + if counter.size <= maxSize && !counter.hasNewline { + // n fits in a single line + size = counter.size + p.nodeSizes[n] = size + } + return +} + +// numLines returns the number of lines spanned by node n in the original source. +func (p *printer) numLines(n ast.Node) int { + if from := n.Pos(); from.IsValid() { + if to := n.End(); to.IsValid() { + return p.lineFor(to) - p.lineFor(from) + 1 + } + } + return infinity +} + +// bodySize is like nodeSize but it is specialized for *ast.BlockStmt's. +func (p *printer) bodySize(b *ast.BlockStmt, maxSize int) int { + pos1 := b.Pos() + pos2 := b.Rbrace + if pos1.IsValid() && pos2.IsValid() && p.lineFor(pos1) != p.lineFor(pos2) { + // opening and closing brace are on different lines - don't make it a one-liner + return maxSize + 1 + } + if len(b.List) > 5 { + // too many statements - don't make it a one-liner + return maxSize + 1 + } + // otherwise, estimate body size + bodySize := p.commentSizeBefore(p.posFor(pos2)) + for i, s := range b.List { + if bodySize > maxSize { + break // no need to continue + } + if i > 0 { + bodySize += 2 // space for a semicolon and blank + } + bodySize += p.nodeSize(s, maxSize) + } + return bodySize +} + +// funcBody prints a function body following a function header of given headerSize. +// If the header's and block's size are "small enough" and the block is "simple enough", +// the block is printed on the current line, without line breaks, spaced from the header +// by sep. Otherwise the block's opening "{" is printed on the current line, followed by +// lines for the block's statements and its closing "}". +func (p *printer) funcBody(headerSize int, sep whiteSpace, b *ast.BlockStmt) { + if b == nil { + return + } + + // save/restore composite literal nesting level + defer func(level int) { + p.level = level + }(p.level) + p.level = 0 + + const maxSize = 100 + if headerSize+p.bodySize(b, maxSize) <= maxSize { + p.print(sep) + p.setPos(b.Lbrace) + p.print(token.LBRACE) + if len(b.List) > 0 { + p.print(blank) + for i, s := range b.List { + if i > 0 { + p.print(token.SEMICOLON, blank) + } + p.stmt(s, i == len(b.List)-1) + } + p.print(blank) + } + p.print(noExtraLinebreak) + p.setPos(b.Rbrace) + p.print(token.RBRACE, noExtraLinebreak) + return + } + + if sep != ignore { + p.print(blank) // always use blank + } + p.block(b, 1) +} + +// distanceFrom returns the column difference between p.out (the current output +// position) and startOutCol. If the start position is on a different line from +// the current position (or either is unknown), the result is infinity. +func (p *printer) distanceFrom(startPos token.Pos, startOutCol int) int { + if startPos.IsValid() && p.pos.IsValid() && p.posFor(startPos).Line == p.pos.Line { + return p.out.Column - startOutCol + } + return infinity +} + +func (p *printer) funcDecl(d *ast.FuncDecl) { + p.setComment(d.Doc) + p.setPos(d.Pos()) + p.print(token.FUNC, blank) + // We have to save startCol only after emitting FUNC; otherwise it can be on a + // different line (all whitespace preceding the FUNC is emitted only when the + // FUNC is emitted). 
+ startCol := p.out.Column - len("func ") + if d.Recv != nil { + p.parameters(d.Recv, funcParam) // method: print receiver + p.print(blank) + } + p.expr(d.Name) + p.signature(d.Type) + p.funcBody(p.distanceFrom(d.Pos(), startCol), vtab, d.Body) +} + +func (p *printer) decl(decl ast.Decl) { + switch d := decl.(type) { + case *ast.BadDecl: + p.setPos(d.Pos()) + p.print("BadDecl") + case *ast.GenDecl: + p.genDecl(d) + case *ast.FuncDecl: + p.funcDecl(d) + default: + panic("unreachable") + } +} + +// ---------------------------------------------------------------------------- +// Files + +func declToken(decl ast.Decl) (tok token.Token) { + tok = token.ILLEGAL + switch d := decl.(type) { + case *ast.GenDecl: + tok = d.Tok + case *ast.FuncDecl: + tok = token.FUNC + } + return +} + +func (p *printer) declList(list []ast.Decl) { + tok := token.ILLEGAL + for _, d := range list { + prev := tok + tok = declToken(d) + // If the declaration token changed (e.g., from CONST to TYPE) + // or the next declaration has documentation associated with it, + // print an empty line between top-level declarations. + // (because p.linebreak is called with the position of d, which + // is past any documentation, the minimum requirement is satisfied + // even w/o the extra getDoc(d) nil-check - leave it in case the + // linebreak logic improves - there's already a TODO). + if len(p.output) > 0 { + // only print line break if we are not at the beginning of the output + // (i.e., we are not printing only a partial program) + min := 1 + if prev != tok || getDoc(d) != nil { + min = 2 + } + // start a new section if the next declaration is a function + // that spans multiple lines (see also issue #19544) + p.linebreak(p.lineFor(d.Pos()), min, ignore, tok == token.FUNC && p.numLines(d) > 1) + } + p.decl(d) + } +} + +func (p *printer) file(src *ast.File) { + p.setComment(src.Doc) + p.setPos(src.Pos()) + p.print(token.PACKAGE, blank) + p.expr(src.Name) + p.declList(src.Decls) + p.print(newline) +} diff --git a/vendor/mvdan.cc/gofumpt/internal/govendor/go/printer/printer.go b/vendor/mvdan.cc/gofumpt/internal/govendor/go/printer/printer.go new file mode 100644 index 0000000000..2ab0278b08 --- /dev/null +++ b/vendor/mvdan.cc/gofumpt/internal/govendor/go/printer/printer.go @@ -0,0 +1,1435 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package printer implements printing of AST nodes. +package printer + +import ( + "fmt" + "go/ast" + "go/build/constraint" + "go/token" + "io" + "os" + "strings" + "sync" + "text/tabwriter" + "unicode" +) + +const ( + maxNewlines = 2 // max. number of newlines between source text + debug = false // enable for debugging + infinity = 1 << 30 +) + +type whiteSpace byte + +const ( + ignore = whiteSpace(0) + blank = whiteSpace(' ') + vtab = whiteSpace('\v') + newline = whiteSpace('\n') + formfeed = whiteSpace('\f') + indent = whiteSpace('>') + unindent = whiteSpace('<') +) + +// A pmode value represents the current printer mode. 
+type pmode int + +const ( + noExtraBlank pmode = 1 << iota // disables extra blank after /*-style comment + noExtraLinebreak // disables extra line break after /*-style comment +) + +type commentInfo struct { + cindex int // current comment index + comment *ast.CommentGroup // = printer.comments[cindex]; or nil + commentOffset int // = printer.posFor(printer.comments[cindex].List[0].Pos()).Offset; or infinity + commentNewline bool // true if the comment group contains newlines +} + +type printer struct { + // Configuration (does not change after initialization) + Config + fset *token.FileSet + + // Current state + output []byte // raw printer result + indent int // current indentation + level int // level == 0: outside composite literal; level > 0: inside composite literal + mode pmode // current printer mode + endAlignment bool // if set, terminate alignment immediately + impliedSemi bool // if set, a linebreak implies a semicolon + lastTok token.Token // last token printed (token.ILLEGAL if it's whitespace) + prevOpen token.Token // previous non-brace "open" token (, [, or token.ILLEGAL + wsbuf []whiteSpace // delayed white space + goBuild []int // start index of all //go:build comments in output + plusBuild []int // start index of all // +build comments in output + + // Positions + // The out position differs from the pos position when the result + // formatting differs from the source formatting (in the amount of + // white space). If there's a difference and SourcePos is set in + // ConfigMode, //line directives are used in the output to restore + // original source positions for a reader. + pos token.Position // current position in AST (source) space + out token.Position // current position in output space + last token.Position // value of pos after calling writeString + linePtr *int // if set, record out.Line for the next token in *linePtr + sourcePosErr error // if non-nil, the first error emitting a //line directive + + // The list of all source comments, in order of appearance. + comments []*ast.CommentGroup // may be nil + useNodeComments bool // if not set, ignore lead and line comments of nodes + + // Information about p.comments[p.cindex]; set up by nextComment. + commentInfo + + // Cache of already computed node sizes. + nodeSizes map[ast.Node]int + + // Cache of most recently computed line position. + cachedPos token.Pos + cachedLine int // line corresponding to cachedPos +} + +func (p *printer) internalError(msg ...any) { + if debug { + fmt.Print(p.pos.String() + ": ") + fmt.Println(msg...) + panic("mvdan.cc/gofumpt/internal/govendor/go/printer") + } +} + +// commentsHaveNewline reports whether a list of comments belonging to +// an *ast.CommentGroup contains newlines. Because the position information +// may only be partially correct, we also have to read the comment text. 
+func (p *printer) commentsHaveNewline(list []*ast.Comment) bool { + // len(list) > 0 + line := p.lineFor(list[0].Pos()) + for i, c := range list { + if i > 0 && p.lineFor(list[i].Pos()) != line { + // not all comments on the same line + return true + } + if t := c.Text; len(t) >= 2 && (t[1] == '/' || strings.Contains(t, "\n")) { + return true + } + } + _ = line + return false +} + +func (p *printer) nextComment() { + for p.cindex < len(p.comments) { + c := p.comments[p.cindex] + p.cindex++ + if list := c.List; len(list) > 0 { + p.comment = c + p.commentOffset = p.posFor(list[0].Pos()).Offset + p.commentNewline = p.commentsHaveNewline(list) + return + } + // we should not reach here (correct ASTs don't have empty + // ast.CommentGroup nodes), but be conservative and try again + } + // no more comments + p.commentOffset = infinity +} + +// commentBefore reports whether the current comment group occurs +// before the next position in the source code and printing it does +// not introduce implicit semicolons. +func (p *printer) commentBefore(next token.Position) bool { + return p.commentOffset < next.Offset && (!p.impliedSemi || !p.commentNewline) +} + +// commentSizeBefore returns the estimated size of the +// comments on the same line before the next position. +func (p *printer) commentSizeBefore(next token.Position) int { + // save/restore current p.commentInfo (p.nextComment() modifies it) + defer func(info commentInfo) { + p.commentInfo = info + }(p.commentInfo) + + size := 0 + for p.commentBefore(next) { + for _, c := range p.comment.List { + size += len(c.Text) + } + p.nextComment() + } + return size +} + +// recordLine records the output line number for the next non-whitespace +// token in *linePtr. It is used to compute an accurate line number for a +// formatted construct, independent of pending (not yet emitted) whitespace +// or comments. +func (p *printer) recordLine(linePtr *int) { + p.linePtr = linePtr +} + +// linesFrom returns the number of output lines between the current +// output line and the line argument, ignoring any pending (not yet +// emitted) whitespace or comments. It is used to compute an accurate +// size (in number of lines) for a formatted construct. +func (p *printer) linesFrom(line int) int { + return p.out.Line - line +} + +func (p *printer) posFor(pos token.Pos) token.Position { + // not used frequently enough to cache entire token.Position + return p.fset.PositionFor(pos, false /* absolute position */) +} + +func (p *printer) lineFor(pos token.Pos) int { + if pos != p.cachedPos { + p.cachedPos = pos + p.cachedLine = p.fset.PositionFor(pos, false /* absolute position */).Line + } + return p.cachedLine +} + +// writeLineDirective writes a //line directive if necessary. +func (p *printer) writeLineDirective(pos token.Position) { + if pos.IsValid() && (p.out.Line != pos.Line || p.out.Filename != pos.Filename) { + if strings.ContainsAny(pos.Filename, "\r\n") { + if p.sourcePosErr == nil { + p.sourcePosErr = fmt.Errorf("mvdan.cc/gofumpt/internal/govendor/go/printer: source filename contains unexpected newline character: %q", pos.Filename) + } + return + } + + p.output = append(p.output, tabwriter.Escape) // protect '\n' in //line from tabwriter interpretation + p.output = append(p.output, fmt.Sprintf("//line %s:%d\n", pos.Filename, pos.Line)...) + p.output = append(p.output, tabwriter.Escape) + // p.out must match the //line directive + p.out.Filename = pos.Filename + p.out.Line = pos.Line + } +} + +// writeIndent writes indentation. 
+func (p *printer) writeIndent() { + // use "hard" htabs - indentation columns + // must not be discarded by the tabwriter + n := p.Config.Indent + p.indent // include base indentation + for i := 0; i < n; i++ { + p.output = append(p.output, '\t') + } + + // update positions + p.pos.Offset += n + p.pos.Column += n + p.out.Column += n +} + +// writeByte writes ch n times to p.output and updates p.pos. +// Only used to write formatting (white space) characters. +func (p *printer) writeByte(ch byte, n int) { + if p.endAlignment { + // Ignore any alignment control character; + // and at the end of the line, break with + // a formfeed to indicate termination of + // existing columns. + switch ch { + case '\t', '\v': + ch = ' ' + case '\n', '\f': + ch = '\f' + p.endAlignment = false + } + } + + if p.out.Column == 1 { + // no need to write line directives before white space + p.writeIndent() + } + + for i := 0; i < n; i++ { + p.output = append(p.output, ch) + } + + // update positions + p.pos.Offset += n + if ch == '\n' || ch == '\f' { + p.pos.Line += n + p.out.Line += n + p.pos.Column = 1 + p.out.Column = 1 + return + } + p.pos.Column += n + p.out.Column += n +} + +// writeString writes the string s to p.output and updates p.pos, p.out, +// and p.last. If isLit is set, s is escaped w/ tabwriter.Escape characters +// to protect s from being interpreted by the tabwriter. +// +// Note: writeString is only used to write Go tokens, literals, and +// comments, all of which must be written literally. Thus, it is correct +// to always set isLit = true. However, setting it explicitly only when +// needed (i.e., when we don't know that s contains no tabs or line breaks) +// avoids processing extra escape characters and reduces run time of the +// printer benchmark by up to 10%. +func (p *printer) writeString(pos token.Position, s string, isLit bool) { + if p.out.Column == 1 { + if p.Config.Mode&SourcePos != 0 { + p.writeLineDirective(pos) + } + p.writeIndent() + } + + if pos.IsValid() { + // update p.pos (if pos is invalid, continue with existing p.pos) + // Note: Must do this after handling line beginnings because + // writeIndent updates p.pos if there's indentation, but p.pos + // is the position of s. + p.pos = pos + } + + if isLit { + // Protect s such that is passes through the tabwriter + // unchanged. Note that valid Go programs cannot contain + // tabwriter.Escape bytes since they do not appear in legal + // UTF-8 sequences. + p.output = append(p.output, tabwriter.Escape) + } + + if debug { + p.output = append(p.output, fmt.Sprintf("/*%s*/", pos)...) // do not update p.pos! + } + p.output = append(p.output, s...) + + // update positions + nlines := 0 + var li int // index of last newline; valid if nlines > 0 + for i := 0; i < len(s); i++ { + // Raw string literals may contain any character except back quote (`). + if ch := s[i]; ch == '\n' || ch == '\f' { + // account for line break + nlines++ + li = i + // A line break inside a literal will break whatever column + // formatting is in place; ignore any further alignment through + // the end of the line. + p.endAlignment = true + } + } + p.pos.Offset += len(s) + if nlines > 0 { + p.pos.Line += nlines + p.out.Line += nlines + c := len(s) - li + p.pos.Column = c + p.out.Column = c + } else { + p.pos.Column += len(s) + p.out.Column += len(s) + } + + if isLit { + p.output = append(p.output, tabwriter.Escape) + } + + p.last = p.pos +} + +// writeCommentPrefix writes the whitespace before a comment. 
+// If there is any pending whitespace, it consumes as much of +// it as is likely to help position the comment nicely. +// pos is the comment position, next the position of the item +// after all pending comments, prev is the previous comment in +// a group of comments (or nil), and tok is the next token. +func (p *printer) writeCommentPrefix(pos, next token.Position, prev *ast.Comment, tok token.Token) { + if len(p.output) == 0 { + // the comment is the first item to be printed - don't write any whitespace + return + } + + if pos.IsValid() && pos.Filename != p.last.Filename { + // comment in a different file - separate with newlines + p.writeByte('\f', maxNewlines) + return + } + + if pos.Line == p.last.Line && (prev == nil || prev.Text[1] != '/') { + // comment on the same line as last item: + // separate with at least one separator + hasSep := false + if prev == nil { + // first comment of a comment group + j := 0 + for i, ch := range p.wsbuf { + switch ch { + case blank: + // ignore any blanks before a comment + p.wsbuf[i] = ignore + continue + case vtab: + // respect existing tabs - important + // for proper formatting of commented structs + hasSep = true + continue + case indent: + // apply pending indentation + continue + } + j = i + break + } + p.writeWhitespace(j) + } + // make sure there is at least one separator + if !hasSep { + sep := byte('\t') + if pos.Line == next.Line { + // next item is on the same line as the comment + // (which must be a /*-style comment): separate + // with a blank instead of a tab + sep = ' ' + } + p.writeByte(sep, 1) + } + + } else { + // comment on a different line: + // separate with at least one line break + droppedLinebreak := false + j := 0 + for i, ch := range p.wsbuf { + switch ch { + case blank, vtab: + // ignore any horizontal whitespace before line breaks + p.wsbuf[i] = ignore + continue + case indent: + // apply pending indentation + continue + case unindent: + // if this is not the last unindent, apply it + // as it is (likely) belonging to the last + // construct (e.g., a multi-line expression list) + // and is not part of closing a block + if i+1 < len(p.wsbuf) && p.wsbuf[i+1] == unindent { + continue + } + // if the next token is not a closing }, apply the unindent + // if it appears that the comment is aligned with the + // token; otherwise assume the unindent is part of a + // closing block and stop (this scenario appears with + // comments before a case label where the comments + // apply to the next case instead of the current one) + if tok != token.RBRACE && pos.Column == next.Column { + continue + } + case newline, formfeed: + p.wsbuf[i] = ignore + droppedLinebreak = prev == nil // record only if first comment of a group + } + j = i + break + } + p.writeWhitespace(j) + + // determine number of linebreaks before the comment + n := 0 + if pos.IsValid() && p.last.IsValid() { + n = pos.Line - p.last.Line + if n < 0 { // should never happen + n = 0 + } + } + + // at the package scope level only (p.indent == 0), + // add an extra newline if we dropped one before: + // this preserves a blank line before documentation + // comments at the package scope level (issue 2570) + if p.indent == 0 && droppedLinebreak { + n++ + } + + // make sure there is at least one line break + // if the previous comment was a line comment + if n == 0 && prev != nil && prev.Text[1] == '/' { + n = 1 + } + + if n > 0 { + // use formfeeds to break columns before a comment; + // this is analogous to using formfeeds to separate + // individual lines of /*-style 
comments + p.writeByte('\f', nlimit(n)) + } + } +} + +// Returns true if s contains only white space +// (only tabs and blanks can appear in the printer's context). +func isBlank(s string) bool { + for i := 0; i < len(s); i++ { + if s[i] > ' ' { + return false + } + } + return true +} + +// commonPrefix returns the common prefix of a and b. +func commonPrefix(a, b string) string { + i := 0 + for i < len(a) && i < len(b) && a[i] == b[i] && (a[i] <= ' ' || a[i] == '*') { + i++ + } + return a[0:i] +} + +// trimRight returns s with trailing whitespace removed. +func trimRight(s string) string { + return strings.TrimRightFunc(s, unicode.IsSpace) +} + +// stripCommonPrefix removes a common prefix from /*-style comment lines (unless no +// comment line is indented, all but the first line have some form of space prefix). +// The prefix is computed using heuristics such that is likely that the comment +// contents are nicely laid out after re-printing each line using the printer's +// current indentation. +func stripCommonPrefix(lines []string) { + if len(lines) <= 1 { + return // at most one line - nothing to do + } + // len(lines) > 1 + + // The heuristic in this function tries to handle a few + // common patterns of /*-style comments: Comments where + // the opening /* and closing */ are aligned and the + // rest of the comment text is aligned and indented with + // blanks or tabs, cases with a vertical "line of stars" + // on the left, and cases where the closing */ is on the + // same line as the last comment text. + + // Compute maximum common white prefix of all but the first, + // last, and blank lines, and replace blank lines with empty + // lines (the first line starts with /* and has no prefix). + // In cases where only the first and last lines are not blank, + // such as two-line comments, or comments where all inner lines + // are blank, consider the last line for the prefix computation + // since otherwise the prefix would be empty. + // + // Note that the first and last line are never empty (they + // contain the opening /* and closing */ respectively) and + // thus they can be ignored by the blank line check. + prefix := "" + prefixSet := false + if len(lines) > 2 { + for i, line := range lines[1 : len(lines)-1] { + if isBlank(line) { + lines[1+i] = "" // range starts with lines[1] + } else { + if !prefixSet { + prefix = line + prefixSet = true + } + prefix = commonPrefix(prefix, line) + } + } + } + // If we don't have a prefix yet, consider the last line. + if !prefixSet { + line := lines[len(lines)-1] + prefix = commonPrefix(line, line) + } + + /* + * Check for vertical "line of stars" and correct prefix accordingly. + */ + lineOfStars := false + if p, _, ok := strings.Cut(prefix, "*"); ok { + // remove trailing blank from prefix so stars remain aligned + prefix = strings.TrimSuffix(p, " ") + lineOfStars = true + } else { + // No line of stars present. + // Determine the white space on the first line after the /* + // and before the beginning of the comment text, assume two + // blanks instead of the /* unless the first character after + // the /* is a tab. If the first comment line is empty but + // for the opening /*, assume up to 3 blanks or a tab. This + // whitespace may be found as suffix in the common prefix. 
+ first := lines[0] + if isBlank(first[2:]) { + // no comment text on the first line: + // reduce prefix by up to 3 blanks or a tab + // if present - this keeps comment text indented + // relative to the /* and */'s if it was indented + // in the first place + i := len(prefix) + for n := 0; n < 3 && i > 0 && prefix[i-1] == ' '; n++ { + i-- + } + if i == len(prefix) && i > 0 && prefix[i-1] == '\t' { + i-- + } + prefix = prefix[0:i] + } else { + // comment text on the first line + suffix := make([]byte, len(first)) + n := 2 // start after opening /* + for n < len(first) && first[n] <= ' ' { + suffix[n] = first[n] + n++ + } + if n > 2 && suffix[2] == '\t' { + // assume the '\t' compensates for the /* + suffix = suffix[2:n] + } else { + // otherwise assume two blanks + suffix[0], suffix[1] = ' ', ' ' + suffix = suffix[0:n] + } + // Shorten the computed common prefix by the length of + // suffix, if it is found as suffix of the prefix. + prefix = strings.TrimSuffix(prefix, string(suffix)) + } + } + + // Handle last line: If it only contains a closing */, align it + // with the opening /*, otherwise align the text with the other + // lines. + last := lines[len(lines)-1] + closing := "*/" + before, _, _ := strings.Cut(last, closing) // closing always present + if isBlank(before) { + // last line only contains closing */ + if lineOfStars { + closing = " */" // add blank to align final star + } + lines[len(lines)-1] = prefix + closing + } else { + // last line contains more comment text - assume + // it is aligned like the other lines and include + // in prefix computation + prefix = commonPrefix(prefix, last) + } + + // Remove the common prefix from all but the first and empty lines. + for i, line := range lines { + if i > 0 && line != "" { + lines[i] = line[len(prefix):] + } + } +} + +func (p *printer) writeComment(comment *ast.Comment) { + text := comment.Text + pos := p.posFor(comment.Pos()) + + const linePrefix = "//line " + if strings.HasPrefix(text, linePrefix) && (!pos.IsValid() || pos.Column == 1) { + // Possibly a //-style line directive. + // Suspend indentation temporarily to keep line directive valid. + defer func(indent int) { p.indent = indent }(p.indent) + p.indent = 0 + } + + // shortcut common case of //-style comments + if text[1] == '/' { + if constraint.IsGoBuild(text) { + p.goBuild = append(p.goBuild, len(p.output)) + } else if constraint.IsPlusBuild(text) { + p.plusBuild = append(p.plusBuild, len(p.output)) + } + p.writeString(pos, trimRight(text), true) + return + } + + // for /*-style comments, print line by line and let the + // write function take care of the proper indentation + lines := strings.Split(text, "\n") + + // The comment started in the first column but is going + // to be indented. For an idempotent result, add indentation + // to all lines such that they look like they were indented + // before - this will make sure the common prefix computation + // is the same independent of how many times formatting is + // applied (was issue 1835). 
+ if pos.IsValid() && pos.Column == 1 && p.indent > 0 { + for i, line := range lines[1:] { + lines[1+i] = " " + line + } + } + + stripCommonPrefix(lines) + + // write comment lines, separated by formfeed, + // without a line break after the last line + for i, line := range lines { + if i > 0 { + p.writeByte('\f', 1) + pos = p.pos + } + if len(line) > 0 { + p.writeString(pos, trimRight(line), true) + } + } +} + +// writeCommentSuffix writes a line break after a comment if indicated +// and processes any leftover indentation information. If a line break +// is needed, the kind of break (newline vs formfeed) depends on the +// pending whitespace. The writeCommentSuffix result indicates if a +// newline was written or if a formfeed was dropped from the whitespace +// buffer. +func (p *printer) writeCommentSuffix(needsLinebreak bool) (wroteNewline, droppedFF bool) { + for i, ch := range p.wsbuf { + switch ch { + case blank, vtab: + // ignore trailing whitespace + p.wsbuf[i] = ignore + case indent, unindent: + // don't lose indentation information + case newline, formfeed: + // if we need a line break, keep exactly one + // but remember if we dropped any formfeeds + if needsLinebreak { + needsLinebreak = false + wroteNewline = true + } else { + if ch == formfeed { + droppedFF = true + } + p.wsbuf[i] = ignore + } + } + } + p.writeWhitespace(len(p.wsbuf)) + + // make sure we have a line break + if needsLinebreak { + p.writeByte('\n', 1) + wroteNewline = true + } + + return +} + +// containsLinebreak reports whether the whitespace buffer contains any line breaks. +func (p *printer) containsLinebreak() bool { + for _, ch := range p.wsbuf { + if ch == newline || ch == formfeed { + return true + } + } + return false +} + +// intersperseComments consumes all comments that appear before the next token +// tok and prints it together with the buffered whitespace (i.e., the whitespace +// that needs to be written before the next token). A heuristic is used to mix +// the comments and whitespace. The intersperseComments result indicates if a +// newline was written or if a formfeed was dropped from the whitespace buffer. +func (p *printer) intersperseComments(next token.Position, tok token.Token) (wroteNewline, droppedFF bool) { + var last *ast.Comment + for p.commentBefore(next) { + list := p.comment.List + changed := false + if p.lastTok != token.IMPORT && // do not rewrite cgo's import "C" comments + p.posFor(p.comment.Pos()).Column == 1 && + p.posFor(p.comment.End()+1) == next { + // Unindented comment abutting next token position: + // a top-level doc comment. + list = formatDocComment(list) + changed = true + + if len(p.comment.List) > 0 && len(list) == 0 { + // The doc comment was removed entirely. + // Keep preceding whitespace. + p.writeCommentPrefix(p.posFor(p.comment.Pos()), next, last, tok) + // Change print state to continue at next. + p.pos = next + p.last = next + // There can't be any more comments. + p.nextComment() + return p.writeCommentSuffix(false) + } + } + for _, c := range list { + p.writeCommentPrefix(p.posFor(c.Pos()), next, last, tok) + p.writeComment(c) + last = c + } + // In case list was rewritten, change print state to where + // the original list would have ended. 
+ if len(p.comment.List) > 0 && changed { + last = p.comment.List[len(p.comment.List)-1] + p.pos = p.posFor(last.End()) + p.last = p.pos + } + p.nextComment() + } + + if last != nil { + // If the last comment is a /*-style comment and the next item + // follows on the same line but is not a comma, and not a "closing" + // token immediately following its corresponding "opening" token, + // add an extra separator unless explicitly disabled. Use a blank + // as separator unless we have pending linebreaks, they are not + // disabled, and we are outside a composite literal, in which case + // we want a linebreak (issue 15137). + // TODO(gri) This has become overly complicated. We should be able + // to track whether we're inside an expression or statement and + // use that information to decide more directly. + needsLinebreak := false + if p.mode&noExtraBlank == 0 && + last.Text[1] == '*' && p.lineFor(last.Pos()) == next.Line && + tok != token.COMMA && + (tok != token.RPAREN || p.prevOpen == token.LPAREN) && + (tok != token.RBRACK || p.prevOpen == token.LBRACK) { + if p.containsLinebreak() && p.mode&noExtraLinebreak == 0 && p.level == 0 { + needsLinebreak = true + } else { + p.writeByte(' ', 1) + } + } + // Ensure that there is a line break after a //-style comment, + // before EOF, and before a closing '}' unless explicitly disabled. + if last.Text[1] == '/' || + tok == token.EOF || + tok == token.RBRACE && p.mode&noExtraLinebreak == 0 { + needsLinebreak = true + } + return p.writeCommentSuffix(needsLinebreak) + } + + // no comment was written - we should never reach here since + // intersperseComments should not be called in that case + p.internalError("intersperseComments called without pending comments") + return +} + +// writeWhitespace writes the first n whitespace entries. +func (p *printer) writeWhitespace(n int) { + // write entries + for i := 0; i < n; i++ { + switch ch := p.wsbuf[i]; ch { + case ignore: + // ignore! + case indent: + p.indent++ + case unindent: + p.indent-- + if p.indent < 0 { + p.internalError("negative indentation:", p.indent) + p.indent = 0 + } + case newline, formfeed: + // A line break immediately followed by a "correcting" + // unindent is swapped with the unindent - this permits + // proper label positioning. If a comment is between + // the line break and the label, the unindent is not + // part of the comment whitespace prefix and the comment + // will be positioned correctly indented. + if i+1 < n && p.wsbuf[i+1] == unindent { + // Use a formfeed to terminate the current section. + // Otherwise, a long label name on the next line leading + // to a wide column may increase the indentation column + // of lines before the label; effectively leading to wrong + // indentation. + p.wsbuf[i], p.wsbuf[i+1] = unindent, formfeed + i-- // do it again + continue + } + fallthrough + default: + p.writeByte(byte(ch), 1) + } + } + + // shift remaining entries down + l := copy(p.wsbuf, p.wsbuf[n:]) + p.wsbuf = p.wsbuf[:l] +} + +// ---------------------------------------------------------------------------- +// Printing interface + +// nlimit limits n to maxNewlines. +func nlimit(n int) int { + if n > maxNewlines { + n = maxNewlines + } + return n +} + +func mayCombine(prev token.Token, next byte) (b bool) { + switch prev { + case token.INT: + b = next == '.' // 1. 
+ case token.ADD: + b = next == '+' // ++ + case token.SUB: + b = next == '-' // -- + case token.QUO: + b = next == '*' // /* + case token.LSS: + b = next == '-' || next == '<' // <- or << + case token.AND: + b = next == '&' || next == '^' // && or &^ + } + return +} + +func (p *printer) setPos(pos token.Pos) { + if pos.IsValid() { + p.pos = p.posFor(pos) // accurate position of next item + } +} + +// print prints a list of "items" (roughly corresponding to syntactic +// tokens, but also including whitespace and formatting information). +// It is the only print function that should be called directly from +// any of the AST printing functions in nodes.go. +// +// Whitespace is accumulated until a non-whitespace token appears. Any +// comments that need to appear before that token are printed first, +// taking into account the amount and structure of any pending white- +// space for best comment placement. Then, any leftover whitespace is +// printed, followed by the actual token. +func (p *printer) print(args ...any) { + for _, arg := range args { + // information about the current arg + var data string + var isLit bool + var impliedSemi bool // value for p.impliedSemi after this arg + + // record previous opening token, if any + switch p.lastTok { + case token.ILLEGAL: + // ignore (white space) + case token.LPAREN, token.LBRACK: + p.prevOpen = p.lastTok + default: + // other tokens followed any opening token + p.prevOpen = token.ILLEGAL + } + + switch x := arg.(type) { + case pmode: + // toggle printer mode + p.mode ^= x + continue + + case whiteSpace: + if x == ignore { + // don't add ignore's to the buffer; they + // may screw up "correcting" unindents (see + // LabeledStmt) + continue + } + i := len(p.wsbuf) + if i == cap(p.wsbuf) { + // Whitespace sequences are very short so this should + // never happen. Handle gracefully (but possibly with + // bad comment placement) if it does happen. + p.writeWhitespace(i) + i = 0 + } + p.wsbuf = p.wsbuf[0 : i+1] + p.wsbuf[i] = x + if x == newline || x == formfeed { + // newlines affect the current state (p.impliedSemi) + // and not the state after printing arg (impliedSemi) + // because comments can be interspersed before the arg + // in this case + p.impliedSemi = false + } + p.lastTok = token.ILLEGAL + continue + + case *ast.Ident: + data = x.Name + impliedSemi = true + p.lastTok = token.IDENT + + case *ast.BasicLit: + data = x.Value + isLit = true + impliedSemi = true + p.lastTok = x.Kind + + case token.Token: + s := x.String() + if mayCombine(p.lastTok, s[0]) { + // the previous and the current token must be + // separated by a blank otherwise they combine + // into a different incorrect token sequence + // (except for token.INT followed by a '.' 
this + // should never happen because it is taken care + // of via binary expression formatting) + if len(p.wsbuf) != 0 { + p.internalError("whitespace buffer not empty") + } + p.wsbuf = p.wsbuf[0:1] + p.wsbuf[0] = ' ' + } + data = s + // some keywords followed by a newline imply a semicolon + switch x { + case token.BREAK, token.CONTINUE, token.FALLTHROUGH, token.RETURN, + token.INC, token.DEC, token.RPAREN, token.RBRACK, token.RBRACE: + impliedSemi = true + } + p.lastTok = x + + case string: + // incorrect AST - print error message + data = x + isLit = true + impliedSemi = true + p.lastTok = token.STRING + + default: + fmt.Fprintf(os.Stderr, "print: unsupported argument %v (%T)\n", arg, arg) + panic("mvdan.cc/gofumpt/internal/govendor/go/printer type") + } + // data != "" + + next := p.pos // estimated/accurate position of next item + wroteNewline, droppedFF := p.flush(next, p.lastTok) + + // intersperse extra newlines if present in the source and + // if they don't cause extra semicolons (don't do this in + // flush as it will cause extra newlines at the end of a file) + if !p.impliedSemi { + n := nlimit(next.Line - p.pos.Line) + // don't exceed maxNewlines if we already wrote one + if wroteNewline && n == maxNewlines { + n = maxNewlines - 1 + } + if n > 0 { + ch := byte('\n') + if droppedFF { + ch = '\f' // use formfeed since we dropped one before + } + p.writeByte(ch, n) + impliedSemi = false + } + } + + // the next token starts now - record its line number if requested + if p.linePtr != nil { + *p.linePtr = p.out.Line + p.linePtr = nil + } + + p.writeString(next, data, isLit) + p.impliedSemi = impliedSemi + } +} + +// flush prints any pending comments and whitespace occurring textually +// before the position of the next token tok. The flush result indicates +// if a newline was written or if a formfeed was dropped from the whitespace +// buffer. +func (p *printer) flush(next token.Position, tok token.Token) (wroteNewline, droppedFF bool) { + if p.commentBefore(next) { + // if there are comments before the next item, intersperse them + wroteNewline, droppedFF = p.intersperseComments(next, tok) + } else { + // otherwise, write any leftover whitespace + p.writeWhitespace(len(p.wsbuf)) + } + return +} + +// getDoc returns the ast.CommentGroup associated with n, if any. 
+func getDoc(n ast.Node) *ast.CommentGroup { + switch n := n.(type) { + case *ast.Field: + return n.Doc + case *ast.ImportSpec: + return n.Doc + case *ast.ValueSpec: + return n.Doc + case *ast.TypeSpec: + return n.Doc + case *ast.GenDecl: + return n.Doc + case *ast.FuncDecl: + return n.Doc + case *ast.File: + return n.Doc + } + return nil +} + +func getLastComment(n ast.Node) *ast.CommentGroup { + switch n := n.(type) { + case *ast.Field: + return n.Comment + case *ast.ImportSpec: + return n.Comment + case *ast.ValueSpec: + return n.Comment + case *ast.TypeSpec: + return n.Comment + case *ast.GenDecl: + if len(n.Specs) > 0 { + return getLastComment(n.Specs[len(n.Specs)-1]) + } + case *ast.File: + if len(n.Comments) > 0 { + return n.Comments[len(n.Comments)-1] + } + } + return nil +} + +func (p *printer) printNode(node any) error { + // unpack *CommentedNode, if any + var comments []*ast.CommentGroup + if cnode, ok := node.(*CommentedNode); ok { + node = cnode.Node + comments = cnode.Comments + } + + if comments != nil { + // commented node - restrict comment list to relevant range + n, ok := node.(ast.Node) + if !ok { + goto unsupported + } + beg := n.Pos() + end := n.End() + // if the node has associated documentation, + // include that commentgroup in the range + // (the comment list is sorted in the order + // of the comment appearance in the source code) + if doc := getDoc(n); doc != nil { + beg = doc.Pos() + } + if com := getLastComment(n); com != nil { + if e := com.End(); e > end { + end = e + } + } + // token.Pos values are global offsets, we can + // compare them directly + i := 0 + for i < len(comments) && comments[i].End() < beg { + i++ + } + j := i + for j < len(comments) && comments[j].Pos() < end { + j++ + } + if i < j { + p.comments = comments[i:j] + } + } else if n, ok := node.(*ast.File); ok { + // use ast.File comments, if any + p.comments = n.Comments + } + + // if there are no comments, use node comments + p.useNodeComments = p.comments == nil + + // get comments ready for use + p.nextComment() + + p.print(pmode(0)) + + // format node + switch n := node.(type) { + case ast.Expr: + p.expr(n) + case ast.Stmt: + // A labeled statement will un-indent to position the label. + // Set p.indent to 1 so we don't get indent "underflow". + if _, ok := n.(*ast.LabeledStmt); ok { + p.indent = 1 + } + p.stmt(n, false) + case ast.Decl: + p.decl(n) + case ast.Spec: + p.spec(n, 1, false) + case []ast.Stmt: + // A labeled statement will un-indent to position the label. + // Set p.indent to 1 so we don't get indent "underflow". + for _, s := range n { + if _, ok := s.(*ast.LabeledStmt); ok { + p.indent = 1 + } + } + p.stmtList(n, 0, false) + case []ast.Decl: + p.declList(n) + case *ast.File: + p.file(n) + default: + goto unsupported + } + + return p.sourcePosErr + +unsupported: + return fmt.Errorf("mvdan.cc/gofumpt/internal/govendor/go/printer: unsupported node type %T", node) +} + +// ---------------------------------------------------------------------------- +// Trimmer + +// A trimmer is an io.Writer filter for stripping tabwriter.Escape +// characters, trailing blanks and tabs, and for converting formfeed +// and vtab characters into newlines and htabs (in case no tabwriter +// is used). Text bracketed by tabwriter.Escape characters is passed +// through unchanged. +type trimmer struct { + output io.Writer + state int + space []byte +} + +// trimmer is implemented as a state machine. 
+// It can be in one of the following states: +const ( + inSpace = iota // inside space + inEscape // inside text bracketed by tabwriter.Escapes + inText // inside text +) + +func (p *trimmer) resetSpace() { + p.state = inSpace + p.space = p.space[0:0] +} + +// Design note: It is tempting to eliminate extra blanks occurring in +// whitespace in this function as it could simplify some +// of the blanks logic in the node printing functions. +// However, this would mess up any formatting done by +// the tabwriter. + +var aNewline = []byte("\n") + +func (p *trimmer) Write(data []byte) (n int, err error) { + // invariants: + // p.state == inSpace: + // p.space is unwritten + // p.state == inEscape, inText: + // data[m:n] is unwritten + m := 0 + var b byte + for n, b = range data { + if b == '\v' { + b = '\t' // convert to htab + } + switch p.state { + case inSpace: + switch b { + case '\t', ' ': + p.space = append(p.space, b) + case '\n', '\f': + p.resetSpace() // discard trailing space + _, err = p.output.Write(aNewline) + case tabwriter.Escape: + _, err = p.output.Write(p.space) + p.state = inEscape + m = n + 1 // +1: skip tabwriter.Escape + default: + _, err = p.output.Write(p.space) + p.state = inText + m = n + } + case inEscape: + if b == tabwriter.Escape { + _, err = p.output.Write(data[m:n]) + p.resetSpace() + } + case inText: + switch b { + case '\t', ' ': + _, err = p.output.Write(data[m:n]) + p.resetSpace() + p.space = append(p.space, b) + case '\n', '\f': + _, err = p.output.Write(data[m:n]) + p.resetSpace() + if err == nil { + _, err = p.output.Write(aNewline) + } + case tabwriter.Escape: + _, err = p.output.Write(data[m:n]) + p.state = inEscape + m = n + 1 // +1: skip tabwriter.Escape + } + default: + panic("unreachable") + } + if err != nil { + return + } + } + n = len(data) + + switch p.state { + case inEscape, inText: + _, err = p.output.Write(data[m:n]) + p.resetSpace() + } + + return +} + +// ---------------------------------------------------------------------------- +// Public interface + +// A Mode value is a set of flags (or 0). They control printing. +type Mode uint + +const ( + RawFormat Mode = 1 << iota // do not use a tabwriter; if set, UseSpaces is ignored + TabIndent // use tabs for indentation independent of UseSpaces + UseSpaces // use spaces instead of tabs for alignment + SourcePos // emit //line directives to preserve original source positions +) + +// The mode below is not included in printer's public API because +// editing code text is deemed out of scope. Because this mode is +// unexported, it's also possible to modify or remove it based on +// the evolving needs of mvdan.cc/gofumpt/internal/govendor/go/format and cmd/gofmt without breaking +// users. See discussion in CL 240683. +const ( + // normalizeNumbers means to canonicalize number + // literal prefixes and exponents while printing. + // + // This value is known in and used by mvdan.cc/gofumpt/internal/govendor/go/format and cmd/gofmt. + // It is currently more convenient and performant for those + // packages to apply number normalization during printing, + // rather than by modifying the AST in advance. + normalizeNumbers Mode = 1 << 30 +) + +// A Config node controls the output of Fprint. +type Config struct { + Mode Mode // default: 0 + Tabwidth int // default: 8 + Indent int // default: 0 (all code is indented at least by this much) +} + +var printerPool = sync.Pool{ + New: func() any { + return &printer{ + // Whitespace sequences are short. 
+ wsbuf: make([]whiteSpace, 0, 16), + // We start the printer with a 16K output buffer, which is currently + // larger than about 80% of Go files in the standard library. + output: make([]byte, 0, 16<<10), + } + }, +} + +func newPrinter(cfg *Config, fset *token.FileSet, nodeSizes map[ast.Node]int) *printer { + p := printerPool.Get().(*printer) + *p = printer{ + Config: *cfg, + fset: fset, + pos: token.Position{Line: 1, Column: 1}, + out: token.Position{Line: 1, Column: 1}, + wsbuf: p.wsbuf[:0], + nodeSizes: nodeSizes, + cachedPos: -1, + output: p.output[:0], + } + return p +} + +func (p *printer) free() { + // Hard limit on buffer size; see https://golang.org/issue/23199. + if cap(p.output) > 64<<10 { + return + } + + printerPool.Put(p) +} + +// fprint implements Fprint and takes a nodesSizes map for setting up the printer state. +func (cfg *Config) fprint(output io.Writer, fset *token.FileSet, node any, nodeSizes map[ast.Node]int) (err error) { + // print node + p := newPrinter(cfg, fset, nodeSizes) + defer p.free() + if err = p.printNode(node); err != nil { + return + } + // print outstanding comments + p.impliedSemi = false // EOF acts like a newline + p.flush(token.Position{Offset: infinity, Line: infinity}, token.EOF) + + // output is buffered in p.output now. + // fix //go:build and // +build comments if needed. + p.fixGoBuildLines() + + // redirect output through a trimmer to eliminate trailing whitespace + // (Input to a tabwriter must be untrimmed since trailing tabs provide + // formatting information. The tabwriter could provide trimming + // functionality but no tabwriter is used when RawFormat is set.) + output = &trimmer{output: output} + + // redirect output through a tabwriter if necessary + if cfg.Mode&RawFormat == 0 { + minwidth := cfg.Tabwidth + + padchar := byte('\t') + if cfg.Mode&UseSpaces != 0 { + padchar = ' ' + } + + twmode := tabwriter.DiscardEmptyColumns + if cfg.Mode&TabIndent != 0 { + minwidth = 0 + twmode |= tabwriter.TabIndent + } + + output = tabwriter.NewWriter(output, minwidth, cfg.Tabwidth, 1, padchar, twmode) + } + + // write printer result via tabwriter/trimmer to output + if _, err = output.Write(p.output); err != nil { + return + } + + // flush tabwriter, if any + if tw, _ := output.(*tabwriter.Writer); tw != nil { + err = tw.Flush() + } + + return +} + +// A CommentedNode bundles an AST node and corresponding comments. +// It may be provided as argument to any of the Fprint functions. +type CommentedNode struct { + Node any // *ast.File, or ast.Expr, ast.Decl, ast.Spec, or ast.Stmt + Comments []*ast.CommentGroup +} + +// Fprint "pretty-prints" an AST node to output for a given configuration cfg. +// Position information is interpreted relative to the file set fset. +// The node type must be *ast.File, *CommentedNode, []ast.Decl, []ast.Stmt, +// or assignment-compatible to ast.Expr, ast.Decl, ast.Spec, or ast.Stmt. +func (cfg *Config) Fprint(output io.Writer, fset *token.FileSet, node any) error { + return cfg.fprint(output, fset, node, make(map[ast.Node]int)) +} + +// Fprint "pretty-prints" an AST node to output. +// It calls Config.Fprint with default settings. +// Note that gofmt uses tabs for indentation but spaces for alignment; +// use format.Node (package mvdan.cc/gofumpt/internal/govendor/go/format) for output that matches gofmt. 
+func Fprint(output io.Writer, fset *token.FileSet, node any) error { + return (&Config{Tabwidth: 8}).Fprint(output, fset, node) +} diff --git a/vendor/mvdan.cc/gofumpt/internal/version/version.go b/vendor/mvdan.cc/gofumpt/internal/version/version.go index 992930480f..785b6b317d 100644 --- a/vendor/mvdan.cc/gofumpt/internal/version/version.go +++ b/vendor/mvdan.cc/gofumpt/internal/version/version.go @@ -97,6 +97,9 @@ func goVersion() string { return runtime.Version() } -func String() string { +func String(injected string) string { + if injected != "" { + return fmt.Sprintf("%s (%s)", injected, goVersion()) + } return fmt.Sprintf("%s (%s)", gofumptVersion(), goVersion()) } diff --git a/vendor/mvdan.cc/unparam/check/check.go b/vendor/mvdan.cc/unparam/check/check.go index dcc541064c..39de800d95 100644 --- a/vendor/mvdan.cc/unparam/check/check.go +++ b/vendor/mvdan.cc/unparam/check/check.go @@ -21,7 +21,6 @@ import ( "sort" "strings" - "golang.org/x/exp/typeparams" "golang.org/x/tools/go/packages" "golang.org/x/tools/go/ssa" "golang.org/x/tools/go/ssa/ssautil" @@ -582,6 +581,8 @@ resLoop: c.addIssue(fn, res.Pos(), "result %s is never used", name) } + fnIsGeneric := fn.TypeParams().Len() > 0 + for i, par := range fn.Params { if paramsBy != "" { continue // we can't change the params @@ -590,14 +591,19 @@ resLoop: continue } c.debug("%s\n", par.String()) - switch par.Object().Name() { - case "", "_": // unnamed - c.debug(" skip - unnamed\n") + if name := par.Object().Name(); name == "" || name[0] == '_' { + c.debug(" skip - no name or underscore name\n") continue } - if stdSizes.Sizeof(par.Type()) == 0 { - c.debug(" skip - zero size\n") - continue + t := par.Type() + // asking for the size of a type param would panic, as it is unknowable + if !fnIsGeneric || !containsTypeParam(t) { + if stdSizes.Sizeof(par.Type()) == 0 { + c.debug(" skip - zero size\n") + continue + } + } else { + c.debug(" examine - type parameter\n") } reason := "is unused" constStr := c.alwaysReceivedConst(callSites, par, i) @@ -611,6 +617,30 @@ resLoop: } } +func containsTypeParam(t types.Type) bool { + switch t := t.(type) { + case *types.TypeParam, *types.Union: + return true + case *types.Struct: + nf := t.NumFields() + for i := 0; i < nf; i++ { + if containsTypeParam(t.Field(nf).Type()) { + return true + } + } + case *types.Array: + return containsTypeParam(t.Elem()) + case *types.Named: + args := t.TypeArgs() + for i := 0; i < args.Len(); i++ { + if containsTypeParam(args.At(i)) { + return true + } + } + } + return false +} + // nodeStr stringifies a syntax tree node. It is only meant for simple nodes, // such as short value expressions. func nodeStr(node ast.Node) string { @@ -881,19 +911,12 @@ func recvPrefix(recv *ast.FieldList) string { switch expr := expr.(type) { case *ast.Ident: return expr.Name + "." + case *ast.IndexExpr: + return expr.X.(*ast.Ident).Name + "." + case *ast.IndexListExpr: + return expr.X.(*ast.Ident).Name + "." default: - x, _, _, _ := typeparams.UnpackIndexExpr(expr) - if x == nil { - panic(fmt.Sprintf("unexepected receiver AST node: %T", expr)) - } - return x.(*ast.Ident).Name + "." - // TODO: remove the use of x/exp/typeparams once we drop Go 1.17 - // case *ast.IndexExpr: - // return expr.X.(*ast.Ident).Name + "." - // case *ast.IndexListExpr: - // return expr.X.(*ast.Ident).Name + "." - // default: - // panic(fmt.Sprintf("unexepected receiver AST node: %T", expr)) + panic(fmt.Sprintf("unexpected receiver AST node: %T", expr)) } }
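The unparam hunk above guards the zero-size parameter check behind containsTypeParam, so stdSizes.Sizeof is never consulted for a type that mentions a type parameter (the in-diff comment notes that asking for such a size would panic, as it is unknowable). Below is a minimal, hypothetical sketch of that walk using only go/types; containsTypeParamSketch and the hand-built struct type are illustrative names for this note and are not part of unparam or of this change.

package main

import (
	"fmt"
	"go/token"
	"go/types"
)

// containsTypeParamSketch mirrors the shape of unparam's new check: report
// whether t mentions a type parameter anywhere a size computation would
// have to look, so callers can skip Sizeof for such types.
func containsTypeParamSketch(t types.Type) bool {
	switch t := t.(type) {
	case *types.TypeParam, *types.Union:
		return true
	case *types.Struct:
		for i := 0; i < t.NumFields(); i++ {
			if containsTypeParamSketch(t.Field(i).Type()) {
				return true
			}
		}
	case *types.Array:
		return containsTypeParamSketch(t.Elem())
	case *types.Named:
		args := t.TypeArgs()
		for i := 0; i < args.Len(); i++ {
			if containsTypeParamSketch(args.At(i)) {
				return true
			}
		}
	}
	return false
}

func main() {
	// Hand-build "struct { v T }" where T is a type parameter constrained
	// by any, roughly what a parameter of a generic function can look like.
	anyType := types.Universe.Lookup("any").Type()
	tname := types.NewTypeName(token.NoPos, nil, "T", nil)
	tparam := types.NewTypeParam(tname, anyType)
	field := types.NewField(token.NoPos, nil, "v", tparam, false)
	structWithTP := types.NewStruct([]*types.Var{field}, nil)

	fmt.Println(containsTypeParamSketch(structWithTP))         // true: size is unknowable, skip Sizeof
	fmt.Println(containsTypeParamSketch(types.Typ[types.Int])) // false: safe to ask Sizeof
}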