`.
+if `$schema` attribute is missing in schema, it uses latest version. this can be overridden by passing `-draft` flag
+
+exit-code is 1, if there are any validation errors
+
+`jv` can also validate yaml files. It also accepts schema from yaml files.
+
+## Validating YAML Documents
+
+since yaml supports non-string keys, such yaml documents are rendered as invalid json documents.
+
+most yaml parser use `map[interface{}]interface{}` for object,
+whereas json parser uses `map[string]interface{}`.
+
+so we need to manually convert them to `map[string]interface{}`.
+below code shows such conversion by `toStringKeys` function.
+
+https://play.golang.org/p/Hhax3MrtD8r
+
+NOTE: if you are using `gopkg.in/yaml.v3`, then you do not need such conversion. since this library
+returns `map[string]interface{}` if all keys are strings.
\ No newline at end of file
diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v5/compiler.go b/vendor/github.com/santhosh-tekuri/jsonschema/v5/compiler.go
new file mode 100644
index 0000000000..fdb68e6480
--- /dev/null
+++ b/vendor/github.com/santhosh-tekuri/jsonschema/v5/compiler.go
@@ -0,0 +1,812 @@
+package jsonschema
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "math/big"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+// A Compiler represents a json-schema compiler.
+type Compiler struct {
+ // Draft represents the draft used when '$schema' attribute is missing.
+ //
+ // This defaults to latest supported draft (currently 2020-12).
+ Draft *Draft
+ resources map[string]*resource
+
+ // Extensions is used to register extensions.
+ extensions map[string]extension
+
+ // ExtractAnnotations tells whether schema annotations has to be extracted
+ // in compiled Schema or not.
+ ExtractAnnotations bool
+
+ // LoadURL loads the document at given absolute URL.
+ //
+ // If nil, package global LoadURL is used.
+ LoadURL func(s string) (io.ReadCloser, error)
+
+ // Formats can be registered by adding to this map. Key is format name,
+ // value is function that knows how to validate that format.
+ Formats map[string]func(interface{}) bool
+
+ // AssertFormat for specifications >= draft2019-09.
+ AssertFormat bool
+
+ // Decoders can be registered by adding to this map. Key is encoding name,
+ // value is function that knows how to decode string in that format.
+ Decoders map[string]func(string) ([]byte, error)
+
+ // MediaTypes can be registered by adding to this map. Key is mediaType name,
+ // value is function that knows how to validate that mediaType.
+ MediaTypes map[string]func([]byte) error
+
+ // AssertContent for specifications >= draft2019-09.
+ AssertContent bool
+}
+
+// Compile parses json-schema at given url returns, if successful,
+// a Schema object that can be used to match against json.
+//
+// Returned error can be *SchemaError
+func Compile(url string) (*Schema, error) {
+ return NewCompiler().Compile(url)
+}
+
+// MustCompile is like Compile but panics if the url cannot be compiled to *Schema.
+// It simplifies safe initialization of global variables holding compiled Schemas.
+func MustCompile(url string) *Schema {
+ return NewCompiler().MustCompile(url)
+}
+
+// CompileString parses and compiles the given schema with given base url.
+func CompileString(url, schema string) (*Schema, error) {
+ c := NewCompiler()
+ if err := c.AddResource(url, strings.NewReader(schema)); err != nil {
+ return nil, err
+ }
+ return c.Compile(url)
+}
+
+// MustCompileString is like CompileString but panics on error.
+// It simplified safe initialization of global variables holding compiled Schema.
+func MustCompileString(url, schema string) *Schema {
+ c := NewCompiler()
+ if err := c.AddResource(url, strings.NewReader(schema)); err != nil {
+ panic(err)
+ }
+ return c.MustCompile(url)
+}
+
+// NewCompiler returns a json-schema Compiler object.
+// if '$schema' attribute is missing, it is treated as draft7. to change this
+// behavior change Compiler.Draft value
+func NewCompiler() *Compiler {
+ return &Compiler{
+ Draft: latest,
+ resources: make(map[string]*resource),
+ Formats: make(map[string]func(interface{}) bool),
+ Decoders: make(map[string]func(string) ([]byte, error)),
+ MediaTypes: make(map[string]func([]byte) error),
+ extensions: make(map[string]extension),
+ }
+}
+
+// AddResource adds in-memory resource to the compiler.
+//
+// Note that url must not have fragment
+func (c *Compiler) AddResource(url string, r io.Reader) error {
+ res, err := newResource(url, r)
+ if err != nil {
+ return err
+ }
+ c.resources[res.url] = res
+ return nil
+}
+
+// MustCompile is like Compile but panics if the url cannot be compiled to *Schema.
+// It simplifies safe initialization of global variables holding compiled Schemas.
+func (c *Compiler) MustCompile(url string) *Schema {
+ s, err := c.Compile(url)
+ if err != nil {
+ panic(fmt.Sprintf("jsonschema: %#v", err))
+ }
+ return s
+}
+
+// Compile parses json-schema at given url returns, if successful,
+// a Schema object that can be used to match against json.
+//
+// error returned will be of type *SchemaError
+func (c *Compiler) Compile(url string) (*Schema, error) {
+ // make url absolute
+ u, err := toAbs(url)
+ if err != nil {
+ return nil, &SchemaError{url, err}
+ }
+ url = u
+
+ sch, err := c.compileURL(url, nil, "#")
+ if err != nil {
+ err = &SchemaError{url, err}
+ }
+ return sch, err
+}
+
+func (c *Compiler) findResource(url string) (*resource, error) {
+ if _, ok := c.resources[url]; !ok {
+ // load resource
+ var rdr io.Reader
+ if sch, ok := vocabSchemas[url]; ok {
+ rdr = strings.NewReader(sch)
+ } else {
+ loadURL := LoadURL
+ if c.LoadURL != nil {
+ loadURL = c.LoadURL
+ }
+ r, err := loadURL(url)
+ if err != nil {
+ return nil, err
+ }
+ defer r.Close()
+ rdr = r
+ }
+ if err := c.AddResource(url, rdr); err != nil {
+ return nil, err
+ }
+ }
+
+ r := c.resources[url]
+ if r.draft != nil {
+ return r, nil
+ }
+
+ // set draft
+ r.draft = c.Draft
+ if m, ok := r.doc.(map[string]interface{}); ok {
+ if sch, ok := m["$schema"]; ok {
+ sch, ok := sch.(string)
+ if !ok {
+ return nil, fmt.Errorf("jsonschema: invalid $schema in %s", url)
+ }
+ if !isURI(sch) {
+ return nil, fmt.Errorf("jsonschema: $schema must be uri in %s", url)
+ }
+ r.draft = findDraft(sch)
+ if r.draft == nil {
+ sch, _ := split(sch)
+ if sch == url {
+ return nil, fmt.Errorf("jsonschema: unsupported draft in %s", url)
+ }
+ mr, err := c.findResource(sch)
+ if err != nil {
+ return nil, err
+ }
+ r.draft = mr.draft
+ }
+ }
+ }
+
+ id, err := r.draft.resolveID(r.url, r.doc)
+ if err != nil {
+ return nil, err
+ }
+ if id != "" {
+ r.url = id
+ }
+
+ if err := r.fillSubschemas(c, r); err != nil {
+ return nil, err
+ }
+
+ return r, nil
+}
+
+func (c *Compiler) compileURL(url string, stack []schemaRef, ptr string) (*Schema, error) {
+ // if url points to a draft, return Draft.meta
+ if d := findDraft(url); d != nil && d.meta != nil {
+ return d.meta, nil
+ }
+
+ b, f := split(url)
+ r, err := c.findResource(b)
+ if err != nil {
+ return nil, err
+ }
+ return c.compileRef(r, stack, ptr, r, f)
+}
+
+func (c *Compiler) compileRef(r *resource, stack []schemaRef, refPtr string, res *resource, ref string) (*Schema, error) {
+ base := r.baseURL(res.floc)
+ ref, err := resolveURL(base, ref)
+ if err != nil {
+ return nil, err
+ }
+
+ u, f := split(ref)
+ sr := r.findResource(u)
+ if sr == nil {
+ // external resource
+ return c.compileURL(ref, stack, refPtr)
+ }
+
+ // ensure root resource is always compiled first.
+ // this is required to get schema.meta from root resource
+ if r.schema == nil {
+ r.schema = newSchema(r.url, r.floc, r.draft, r.doc)
+ if _, err := c.compile(r, nil, schemaRef{"#", r.schema, false}, r); err != nil {
+ return nil, err
+ }
+ }
+
+ sr, err = r.resolveFragment(c, sr, f)
+ if err != nil {
+ return nil, err
+ }
+ if sr == nil {
+ return nil, fmt.Errorf("jsonschema: %s not found", ref)
+ }
+
+ if sr.schema != nil {
+ if err := checkLoop(stack, schemaRef{refPtr, sr.schema, false}); err != nil {
+ return nil, err
+ }
+ return sr.schema, nil
+ }
+
+ sr.schema = newSchema(r.url, sr.floc, r.draft, sr.doc)
+ return c.compile(r, stack, schemaRef{refPtr, sr.schema, false}, sr)
+}
+
+func (c *Compiler) compileDynamicAnchors(r *resource, res *resource) error {
+ if r.draft.version < 2020 {
+ return nil
+ }
+
+ rr := r.listResources(res)
+ rr = append(rr, res)
+ for _, sr := range rr {
+ if m, ok := sr.doc.(map[string]interface{}); ok {
+ if _, ok := m["$dynamicAnchor"]; ok {
+ sch, err := c.compileRef(r, nil, "IGNORED", r, sr.floc)
+ if err != nil {
+ return err
+ }
+ res.schema.dynamicAnchors = append(res.schema.dynamicAnchors, sch)
+ }
+ }
+ }
+ return nil
+}
+
+func (c *Compiler) compile(r *resource, stack []schemaRef, sref schemaRef, res *resource) (*Schema, error) {
+ if err := c.compileDynamicAnchors(r, res); err != nil {
+ return nil, err
+ }
+
+ switch v := res.doc.(type) {
+ case bool:
+ res.schema.Always = &v
+ return res.schema, nil
+ default:
+ return res.schema, c.compileMap(r, stack, sref, res)
+ }
+}
+
+func (c *Compiler) compileMap(r *resource, stack []schemaRef, sref schemaRef, res *resource) error {
+ m := res.doc.(map[string]interface{})
+
+ if err := checkLoop(stack, sref); err != nil {
+ return err
+ }
+ stack = append(stack, sref)
+
+ var s = res.schema
+ var err error
+
+ if r == res { // root schema
+ if sch, ok := m["$schema"]; ok {
+ sch := sch.(string)
+ if d := findDraft(sch); d != nil {
+ s.meta = d.meta
+ } else {
+ if s.meta, err = c.compileRef(r, stack, "$schema", res, sch); err != nil {
+ return err
+ }
+ }
+ }
+ }
+
+ if ref, ok := m["$ref"]; ok {
+ s.Ref, err = c.compileRef(r, stack, "$ref", res, ref.(string))
+ if err != nil {
+ return err
+ }
+ if r.draft.version < 2019 {
+ // All other properties in a "$ref" object MUST be ignored
+ return nil
+ }
+ }
+
+ if r.draft.version >= 2019 {
+ if r == res { // root schema
+ if vocab, ok := m["$vocabulary"]; ok {
+ for url, reqd := range vocab.(map[string]interface{}) {
+ if reqd, ok := reqd.(bool); ok && !reqd {
+ continue
+ }
+ if !r.draft.isVocab(url) {
+ return fmt.Errorf("jsonschema: unsupported vocab %q in %s", url, res)
+ }
+ s.vocab = append(s.vocab, url)
+ }
+ } else {
+ s.vocab = r.draft.defaultVocab
+ }
+ }
+
+ if ref, ok := m["$recursiveRef"]; ok {
+ s.RecursiveRef, err = c.compileRef(r, stack, "$recursiveRef", res, ref.(string))
+ if err != nil {
+ return err
+ }
+ }
+ }
+ if r.draft.version >= 2020 {
+ if dref, ok := m["$dynamicRef"]; ok {
+ s.DynamicRef, err = c.compileRef(r, stack, "$dynamicRef", res, dref.(string))
+ if err != nil {
+ return err
+ }
+ if dref, ok := dref.(string); ok {
+ _, frag := split(dref)
+ if frag != "#" && !strings.HasPrefix(frag, "#/") {
+ // frag is anchor
+ s.dynamicRefAnchor = frag[1:]
+ }
+ }
+ }
+ }
+
+ loadInt := func(pname string) int {
+ if num, ok := m[pname]; ok {
+ i, _ := num.(json.Number).Float64()
+ return int(i)
+ }
+ return -1
+ }
+
+ loadRat := func(pname string) *big.Rat {
+ if num, ok := m[pname]; ok {
+ r, _ := new(big.Rat).SetString(string(num.(json.Number)))
+ return r
+ }
+ return nil
+ }
+
+ if r.draft.version < 2019 || r.schema.meta.hasVocab("validation") {
+ if t, ok := m["type"]; ok {
+ switch t := t.(type) {
+ case string:
+ s.Types = []string{t}
+ case []interface{}:
+ s.Types = toStrings(t)
+ }
+ }
+
+ if e, ok := m["enum"]; ok {
+ s.Enum = e.([]interface{})
+ allPrimitives := true
+ for _, item := range s.Enum {
+ switch jsonType(item) {
+ case "object", "array":
+ allPrimitives = false
+ break
+ }
+ }
+ s.enumError = "enum failed"
+ if allPrimitives {
+ if len(s.Enum) == 1 {
+ s.enumError = fmt.Sprintf("value must be %#v", s.Enum[0])
+ } else {
+ strEnum := make([]string, len(s.Enum))
+ for i, item := range s.Enum {
+ strEnum[i] = fmt.Sprintf("%#v", item)
+ }
+ s.enumError = fmt.Sprintf("value must be one of %s", strings.Join(strEnum, ", "))
+ }
+ }
+ }
+
+ s.Minimum = loadRat("minimum")
+ if exclusive, ok := m["exclusiveMinimum"]; ok {
+ if exclusive, ok := exclusive.(bool); ok {
+ if exclusive {
+ s.Minimum, s.ExclusiveMinimum = nil, s.Minimum
+ }
+ } else {
+ s.ExclusiveMinimum = loadRat("exclusiveMinimum")
+ }
+ }
+
+ s.Maximum = loadRat("maximum")
+ if exclusive, ok := m["exclusiveMaximum"]; ok {
+ if exclusive, ok := exclusive.(bool); ok {
+ if exclusive {
+ s.Maximum, s.ExclusiveMaximum = nil, s.Maximum
+ }
+ } else {
+ s.ExclusiveMaximum = loadRat("exclusiveMaximum")
+ }
+ }
+
+ s.MultipleOf = loadRat("multipleOf")
+
+ s.MinProperties, s.MaxProperties = loadInt("minProperties"), loadInt("maxProperties")
+
+ if req, ok := m["required"]; ok {
+ s.Required = toStrings(req.([]interface{}))
+ }
+
+ s.MinItems, s.MaxItems = loadInt("minItems"), loadInt("maxItems")
+
+ if unique, ok := m["uniqueItems"]; ok {
+ s.UniqueItems = unique.(bool)
+ }
+
+ s.MinLength, s.MaxLength = loadInt("minLength"), loadInt("maxLength")
+
+ if pattern, ok := m["pattern"]; ok {
+ s.Pattern = regexp.MustCompile(pattern.(string))
+ }
+
+ if r.draft.version >= 2019 {
+ s.MinContains, s.MaxContains = loadInt("minContains"), loadInt("maxContains")
+ if s.MinContains == -1 {
+ s.MinContains = 1
+ }
+
+ if deps, ok := m["dependentRequired"]; ok {
+ deps := deps.(map[string]interface{})
+ s.DependentRequired = make(map[string][]string, len(deps))
+ for pname, pvalue := range deps {
+ s.DependentRequired[pname] = toStrings(pvalue.([]interface{}))
+ }
+ }
+ }
+ }
+
+ compile := func(stack []schemaRef, ptr string) (*Schema, error) {
+ return c.compileRef(r, stack, ptr, res, r.url+res.floc+"/"+ptr)
+ }
+
+ loadSchema := func(pname string, stack []schemaRef) (*Schema, error) {
+ if _, ok := m[pname]; ok {
+ return compile(stack, escape(pname))
+ }
+ return nil, nil
+ }
+
+ loadSchemas := func(pname string, stack []schemaRef) ([]*Schema, error) {
+ if pvalue, ok := m[pname]; ok {
+ pvalue := pvalue.([]interface{})
+ schemas := make([]*Schema, len(pvalue))
+ for i := range pvalue {
+ sch, err := compile(stack, escape(pname)+"/"+strconv.Itoa(i))
+ if err != nil {
+ return nil, err
+ }
+ schemas[i] = sch
+ }
+ return schemas, nil
+ }
+ return nil, nil
+ }
+
+ if r.draft.version < 2019 || r.schema.meta.hasVocab("applicator") {
+ if s.Not, err = loadSchema("not", stack); err != nil {
+ return err
+ }
+ if s.AllOf, err = loadSchemas("allOf", stack); err != nil {
+ return err
+ }
+ if s.AnyOf, err = loadSchemas("anyOf", stack); err != nil {
+ return err
+ }
+ if s.OneOf, err = loadSchemas("oneOf", stack); err != nil {
+ return err
+ }
+
+ if props, ok := m["properties"]; ok {
+ props := props.(map[string]interface{})
+ s.Properties = make(map[string]*Schema, len(props))
+ for pname := range props {
+ s.Properties[pname], err = compile(nil, "properties/"+escape(pname))
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ if regexProps, ok := m["regexProperties"]; ok {
+ s.RegexProperties = regexProps.(bool)
+ }
+
+ if patternProps, ok := m["patternProperties"]; ok {
+ patternProps := patternProps.(map[string]interface{})
+ s.PatternProperties = make(map[*regexp.Regexp]*Schema, len(patternProps))
+ for pattern := range patternProps {
+ s.PatternProperties[regexp.MustCompile(pattern)], err = compile(nil, "patternProperties/"+escape(pattern))
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ if additionalProps, ok := m["additionalProperties"]; ok {
+ switch additionalProps := additionalProps.(type) {
+ case bool:
+ s.AdditionalProperties = additionalProps
+ case map[string]interface{}:
+ s.AdditionalProperties, err = compile(nil, "additionalProperties")
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ if deps, ok := m["dependencies"]; ok {
+ deps := deps.(map[string]interface{})
+ s.Dependencies = make(map[string]interface{}, len(deps))
+ for pname, pvalue := range deps {
+ switch pvalue := pvalue.(type) {
+ case []interface{}:
+ s.Dependencies[pname] = toStrings(pvalue)
+ default:
+ s.Dependencies[pname], err = compile(stack, "dependencies/"+escape(pname))
+ if err != nil {
+ return err
+ }
+ }
+ }
+ }
+
+ if r.draft.version >= 6 {
+ if s.PropertyNames, err = loadSchema("propertyNames", nil); err != nil {
+ return err
+ }
+ if s.Contains, err = loadSchema("contains", nil); err != nil {
+ return err
+ }
+ }
+
+ if r.draft.version >= 7 {
+ if m["if"] != nil {
+ if s.If, err = loadSchema("if", stack); err != nil {
+ return err
+ }
+ if s.Then, err = loadSchema("then", stack); err != nil {
+ return err
+ }
+ if s.Else, err = loadSchema("else", stack); err != nil {
+ return err
+ }
+ }
+ }
+ if r.draft.version >= 2019 {
+ if deps, ok := m["dependentSchemas"]; ok {
+ deps := deps.(map[string]interface{})
+ s.DependentSchemas = make(map[string]*Schema, len(deps))
+ for pname := range deps {
+ s.DependentSchemas[pname], err = compile(stack, "dependentSchemas/"+escape(pname))
+ if err != nil {
+ return err
+ }
+ }
+ }
+ }
+
+ if r.draft.version >= 2020 {
+ if s.PrefixItems, err = loadSchemas("prefixItems", nil); err != nil {
+ return err
+ }
+ if s.Items2020, err = loadSchema("items", nil); err != nil {
+ return err
+ }
+ } else {
+ if items, ok := m["items"]; ok {
+ switch items.(type) {
+ case []interface{}:
+ s.Items, err = loadSchemas("items", nil)
+ if err != nil {
+ return err
+ }
+ if additionalItems, ok := m["additionalItems"]; ok {
+ switch additionalItems := additionalItems.(type) {
+ case bool:
+ s.AdditionalItems = additionalItems
+ case map[string]interface{}:
+ s.AdditionalItems, err = compile(nil, "additionalItems")
+ if err != nil {
+ return err
+ }
+ }
+ }
+ default:
+ s.Items, err = compile(nil, "items")
+ if err != nil {
+ return err
+ }
+ }
+ }
+ }
+
+ }
+
+ // unevaluatedXXX keywords were in "applicator" vocab in 2019, but moved to new vocab "unevaluated" in 2020
+ if (r.draft.version == 2019 && r.schema.meta.hasVocab("applicator")) || (r.draft.version >= 2020 && r.schema.meta.hasVocab("unevaluated")) {
+ if s.UnevaluatedProperties, err = loadSchema("unevaluatedProperties", nil); err != nil {
+ return err
+ }
+ if s.UnevaluatedItems, err = loadSchema("unevaluatedItems", nil); err != nil {
+ return err
+ }
+ if r.draft.version >= 2020 {
+ // any item in an array that passes validation of the contains schema is considered "evaluated"
+ s.ContainsEval = true
+ }
+ }
+
+ if format, ok := m["format"]; ok {
+ s.Format = format.(string)
+ if r.draft.version < 2019 || c.AssertFormat || r.schema.meta.hasVocab("format-assertion") {
+ if format, ok := c.Formats[s.Format]; ok {
+ s.format = format
+ } else {
+ s.format, _ = Formats[s.Format]
+ }
+ }
+ }
+
+ if c.ExtractAnnotations {
+ if title, ok := m["title"]; ok {
+ s.Title = title.(string)
+ }
+ if description, ok := m["description"]; ok {
+ s.Description = description.(string)
+ }
+ s.Default = m["default"]
+ }
+
+ if r.draft.version >= 6 {
+ if c, ok := m["const"]; ok {
+ s.Constant = []interface{}{c}
+ }
+ }
+
+ if r.draft.version >= 7 {
+ if encoding, ok := m["contentEncoding"]; ok {
+ s.ContentEncoding = encoding.(string)
+ if decoder, ok := c.Decoders[s.ContentEncoding]; ok {
+ s.decoder = decoder
+ } else {
+ s.decoder, _ = Decoders[s.ContentEncoding]
+ }
+ }
+ if mediaType, ok := m["contentMediaType"]; ok {
+ s.ContentMediaType = mediaType.(string)
+ if mediaType, ok := c.MediaTypes[s.ContentMediaType]; ok {
+ s.mediaType = mediaType
+ } else {
+ s.mediaType, _ = MediaTypes[s.ContentMediaType]
+ }
+ if s.ContentSchema, err = loadSchema("contentSchema", stack); err != nil {
+ return err
+ }
+ }
+ if c.ExtractAnnotations {
+ if comment, ok := m["$comment"]; ok {
+ s.Comment = comment.(string)
+ }
+ if readOnly, ok := m["readOnly"]; ok {
+ s.ReadOnly = readOnly.(bool)
+ }
+ if writeOnly, ok := m["writeOnly"]; ok {
+ s.WriteOnly = writeOnly.(bool)
+ }
+ if examples, ok := m["examples"]; ok {
+ s.Examples = examples.([]interface{})
+ }
+ }
+ }
+
+ if r.draft.version >= 2019 {
+ if !c.AssertContent {
+ s.decoder = nil
+ s.mediaType = nil
+ s.ContentSchema = nil
+ }
+ if c.ExtractAnnotations {
+ if deprecated, ok := m["deprecated"]; ok {
+ s.Deprecated = deprecated.(bool)
+ }
+ }
+ }
+
+ for name, ext := range c.extensions {
+ es, err := ext.compiler.Compile(CompilerContext{c, r, stack, res}, m)
+ if err != nil {
+ return err
+ }
+ if es != nil {
+ if s.Extensions == nil {
+ s.Extensions = make(map[string]ExtSchema)
+ }
+ s.Extensions[name] = es
+ }
+ }
+
+ return nil
+}
+
+func (c *Compiler) validateSchema(r *resource, v interface{}, vloc string) error {
+ validate := func(meta *Schema) error {
+ if meta == nil {
+ return nil
+ }
+ return meta.validateValue(v, vloc)
+ }
+
+ if err := validate(r.draft.meta); err != nil {
+ return err
+ }
+ for _, ext := range c.extensions {
+ if err := validate(ext.meta); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func toStrings(arr []interface{}) []string {
+ s := make([]string, len(arr))
+ for i, v := range arr {
+ s[i] = v.(string)
+ }
+ return s
+}
+
+// SchemaRef captures schema and the path referring to it.
+type schemaRef struct {
+ path string // relative-json-pointer to schema
+ schema *Schema // target schema
+ discard bool // true when scope left
+}
+
+func (sr schemaRef) String() string {
+ return fmt.Sprintf("(%s)%v", sr.path, sr.schema)
+}
+
+func checkLoop(stack []schemaRef, sref schemaRef) error {
+ for _, ref := range stack {
+ if ref.schema == sref.schema {
+ return infiniteLoopError(stack, sref)
+ }
+ }
+ return nil
+}
+
+func keywordLocation(stack []schemaRef, path string) string {
+ var loc string
+ for _, ref := range stack[1:] {
+ loc += "/" + ref.path
+ }
+ if path != "" {
+ loc = loc + "/" + path
+ }
+ return loc
+}
diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v5/content.go b/vendor/github.com/santhosh-tekuri/jsonschema/v5/content.go
new file mode 100644
index 0000000000..7570b8b5a9
--- /dev/null
+++ b/vendor/github.com/santhosh-tekuri/jsonschema/v5/content.go
@@ -0,0 +1,29 @@
+package jsonschema
+
+import (
+ "encoding/base64"
+ "encoding/json"
+)
+
+// Decoders is a registry of functions, which know how to decode
+// string encoded in specific format.
+//
+// New Decoders can be registered by adding to this map. Key is encoding name,
+// value is function that knows how to decode string in that format.
+var Decoders = map[string]func(string) ([]byte, error){
+ "base64": base64.StdEncoding.DecodeString,
+}
+
+// MediaTypes is a registry of functions, which know how to validate
+// whether the bytes represent data of that mediaType.
+//
+// New mediaTypes can be registered by adding to this map. Key is mediaType name,
+// value is function that knows how to validate that mediaType.
+var MediaTypes = map[string]func([]byte) error{
+ "application/json": validateJSON,
+}
+
+func validateJSON(b []byte) error {
+ var v interface{}
+ return json.Unmarshal(b, &v)
+}
diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v5/doc.go b/vendor/github.com/santhosh-tekuri/jsonschema/v5/doc.go
new file mode 100644
index 0000000000..a124262a51
--- /dev/null
+++ b/vendor/github.com/santhosh-tekuri/jsonschema/v5/doc.go
@@ -0,0 +1,49 @@
+/*
+Package jsonschema provides json-schema compilation and validation.
+
+Features:
+ - implements draft 2020-12, 2019-09, draft-7, draft-6, draft-4
+ - fully compliant with JSON-Schema-Test-Suite, (excluding some optional)
+ - list of optional tests that are excluded can be found in schema_test.go(variable skipTests)
+ - validates schemas against meta-schema
+ - full support of remote references
+ - support of recursive references between schemas
+ - detects infinite loop in schemas
+ - thread safe validation
+ - rich, intuitive hierarchial error messages with json-pointers to exact location
+ - supports output formats flag, basic and detailed
+ - supports enabling format and content Assertions in draft2019-09 or above
+ - change Compiler.AssertFormat, Compiler.AssertContent to true
+ - compiled schema can be introspected. easier to develop tools like generating go structs given schema
+ - supports user-defined keywords via extensions
+ - implements following formats (supports user-defined)
+ - date-time, date, time, duration (supports leap-second)
+ - uuid, hostname, email
+ - ip-address, ipv4, ipv6
+ - uri, uriref, uri-template(limited validation)
+ - json-pointer, relative-json-pointer
+ - regex, format
+ - implements following contentEncoding (supports user-defined)
+ - base64
+ - implements following contentMediaType (supports user-defined)
+ - application/json
+ - can load from files/http/https/string/[]byte/io.Reader (supports user-defined)
+
+The schema is compiled against the version specified in "$schema" property.
+If "$schema" property is missing, it uses latest draft which currently implemented
+by this library.
+
+You can force to use specific draft, when "$schema" is missing, as follows:
+
+ compiler := jsonschema.NewCompiler()
+ compiler.Draft = jsonschema.Draft4
+
+This package supports loading json-schema from filePath and fileURL.
+
+To load json-schema from HTTPURL, add following import:
+
+ import _ "github.com/santhosh-tekuri/jsonschema/v5/httploader"
+
+you can validate yaml documents. see https://play.golang.org/p/sJy1qY7dXgA
+*/
+package jsonschema
diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v5/draft.go b/vendor/github.com/santhosh-tekuri/jsonschema/v5/draft.go
new file mode 100644
index 0000000000..154fa5837d
--- /dev/null
+++ b/vendor/github.com/santhosh-tekuri/jsonschema/v5/draft.go
@@ -0,0 +1,1454 @@
+package jsonschema
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// A Draft represents json-schema draft
+type Draft struct {
+ version int
+ meta *Schema
+ id string // property name used to represent schema id.
+ boolSchema bool // is boolean valid schema
+ vocab []string // built-in vocab
+ defaultVocab []string // vocabs when $vocabulary is not used
+ subschemas map[string]position
+}
+
+func (d *Draft) URL() string {
+ switch d.version {
+ case 2020:
+ return "https://json-schema.org/draft/2020-12/schema"
+ case 2019:
+ return "https://json-schema.org/draft/2019-09/schema"
+ case 7:
+ return "https://json-schema.org/draft-07/schema"
+ case 6:
+ return "https://json-schema.org/draft-06/schema"
+ case 4:
+ return "https://json-schema.org/draft-04/schema"
+ }
+ return ""
+}
+
+func (d *Draft) String() string {
+ return fmt.Sprintf("Draft%d", d.version)
+}
+
+func (d *Draft) loadMeta(url, schema string) {
+ c := NewCompiler()
+ c.AssertFormat = true
+ if err := c.AddResource(url, strings.NewReader(schema)); err != nil {
+ panic(err)
+ }
+ d.meta = c.MustCompile(url)
+ d.meta.meta = d.meta
+}
+
+func (d *Draft) getID(sch interface{}) string {
+ m, ok := sch.(map[string]interface{})
+ if !ok {
+ return ""
+ }
+ if _, ok := m["$ref"]; ok && d.version <= 7 {
+ // $ref prevents a sibling id from changing the base uri
+ return ""
+ }
+ v, ok := m[d.id]
+ if !ok {
+ return ""
+ }
+ id, ok := v.(string)
+ if !ok {
+ return ""
+ }
+ return id
+}
+
+func (d *Draft) resolveID(base string, sch interface{}) (string, error) {
+ id, _ := split(d.getID(sch)) // strip fragment
+ if id == "" {
+ return "", nil
+ }
+ url, err := resolveURL(base, id)
+ url, _ = split(url) // strip fragment
+ return url, err
+}
+
+func (d *Draft) anchors(sch interface{}) []string {
+ m, ok := sch.(map[string]interface{})
+ if !ok {
+ return nil
+ }
+
+ var anchors []string
+
+ // before draft2019, anchor is specified in id
+ _, f := split(d.getID(m))
+ if f != "#" {
+ anchors = append(anchors, f[1:])
+ }
+
+ if v, ok := m["$anchor"]; ok && d.version >= 2019 {
+ anchors = append(anchors, v.(string))
+ }
+ if v, ok := m["$dynamicAnchor"]; ok && d.version >= 2020 {
+ anchors = append(anchors, v.(string))
+ }
+ return anchors
+}
+
+// listSubschemas collects subschemas in r into rr.
+func (d *Draft) listSubschemas(r *resource, base string, rr map[string]*resource) error {
+ add := func(loc string, sch interface{}) error {
+ url, err := d.resolveID(base, sch)
+ if err != nil {
+ return err
+ }
+ floc := r.floc + "/" + loc
+ sr := &resource{url: url, floc: floc, doc: sch}
+ rr[floc] = sr
+
+ base := base
+ if url != "" {
+ base = url
+ }
+ return d.listSubschemas(sr, base, rr)
+ }
+
+ sch, ok := r.doc.(map[string]interface{})
+ if !ok {
+ return nil
+ }
+ for kw, pos := range d.subschemas {
+ v, ok := sch[kw]
+ if !ok {
+ continue
+ }
+ if pos&self != 0 {
+ switch v := v.(type) {
+ case map[string]interface{}:
+ if err := add(kw, v); err != nil {
+ return err
+ }
+ case bool:
+ if d.boolSchema {
+ if err := add(kw, v); err != nil {
+ return err
+ }
+ }
+ }
+ }
+ if pos&item != 0 {
+ if v, ok := v.([]interface{}); ok {
+ for i, item := range v {
+ if err := add(kw+"/"+strconv.Itoa(i), item); err != nil {
+ return err
+ }
+ }
+ }
+ }
+ if pos&prop != 0 {
+ if v, ok := v.(map[string]interface{}); ok {
+ for pname, pval := range v {
+ if err := add(kw+"/"+escape(pname), pval); err != nil {
+ return err
+ }
+ }
+ }
+ }
+ }
+ return nil
+}
+
+// isVocab tells whether url is built-in vocab.
+func (d *Draft) isVocab(url string) bool {
+ for _, v := range d.vocab {
+ if url == v {
+ return true
+ }
+ }
+ return false
+}
+
+type position uint
+
+const (
+ self position = 1 << iota
+ prop
+ item
+)
+
+// supported drafts
+var (
+ Draft4 = &Draft{version: 4, id: "id", boolSchema: false}
+ Draft6 = &Draft{version: 6, id: "$id", boolSchema: true}
+ Draft7 = &Draft{version: 7, id: "$id", boolSchema: true}
+ Draft2019 = &Draft{
+ version: 2019,
+ id: "$id",
+ boolSchema: true,
+ vocab: []string{
+ "https://json-schema.org/draft/2019-09/vocab/core",
+ "https://json-schema.org/draft/2019-09/vocab/applicator",
+ "https://json-schema.org/draft/2019-09/vocab/validation",
+ "https://json-schema.org/draft/2019-09/vocab/meta-data",
+ "https://json-schema.org/draft/2019-09/vocab/format",
+ "https://json-schema.org/draft/2019-09/vocab/content",
+ },
+ defaultVocab: []string{
+ "https://json-schema.org/draft/2019-09/vocab/core",
+ "https://json-schema.org/draft/2019-09/vocab/applicator",
+ "https://json-schema.org/draft/2019-09/vocab/validation",
+ },
+ }
+ Draft2020 = &Draft{
+ version: 2020,
+ id: "$id",
+ boolSchema: true,
+ vocab: []string{
+ "https://json-schema.org/draft/2020-12/vocab/core",
+ "https://json-schema.org/draft/2020-12/vocab/applicator",
+ "https://json-schema.org/draft/2020-12/vocab/unevaluated",
+ "https://json-schema.org/draft/2020-12/vocab/validation",
+ "https://json-schema.org/draft/2020-12/vocab/meta-data",
+ "https://json-schema.org/draft/2020-12/vocab/format-annotation",
+ "https://json-schema.org/draft/2020-12/vocab/format-assertion",
+ "https://json-schema.org/draft/2020-12/vocab/content",
+ },
+ defaultVocab: []string{
+ "https://json-schema.org/draft/2020-12/vocab/core",
+ "https://json-schema.org/draft/2020-12/vocab/applicator",
+ "https://json-schema.org/draft/2020-12/vocab/unevaluated",
+ "https://json-schema.org/draft/2020-12/vocab/validation",
+ },
+ }
+
+ latest = Draft2020
+)
+
+func findDraft(url string) *Draft {
+ if strings.HasPrefix(url, "http://") {
+ url = "https://" + strings.TrimPrefix(url, "http://")
+ }
+ if strings.HasSuffix(url, "#") || strings.HasSuffix(url, "#/") {
+ url = url[:strings.IndexByte(url, '#')]
+ }
+ switch url {
+ case "https://json-schema.org/schema":
+ return latest
+ case "https://json-schema.org/draft/2020-12/schema":
+ return Draft2020
+ case "https://json-schema.org/draft/2019-09/schema":
+ return Draft2019
+ case "https://json-schema.org/draft-07/schema":
+ return Draft7
+ case "https://json-schema.org/draft-06/schema":
+ return Draft6
+ case "https://json-schema.org/draft-04/schema":
+ return Draft4
+ }
+ return nil
+}
+
+func init() {
+ subschemas := map[string]position{
+ // type agnostic
+ "definitions": prop,
+ "not": self,
+ "allOf": item,
+ "anyOf": item,
+ "oneOf": item,
+ // object
+ "properties": prop,
+ "additionalProperties": self,
+ "patternProperties": prop,
+ // array
+ "items": self | item,
+ "additionalItems": self,
+ "dependencies": prop,
+ }
+ Draft4.subschemas = clone(subschemas)
+
+ subschemas["propertyNames"] = self
+ subschemas["contains"] = self
+ Draft6.subschemas = clone(subschemas)
+
+ subschemas["if"] = self
+ subschemas["then"] = self
+ subschemas["else"] = self
+ Draft7.subschemas = clone(subschemas)
+
+ subschemas["$defs"] = prop
+ subschemas["dependentSchemas"] = prop
+ subschemas["unevaluatedProperties"] = self
+ subschemas["unevaluatedItems"] = self
+ subschemas["contentSchema"] = self
+ Draft2019.subschemas = clone(subschemas)
+
+ subschemas["prefixItems"] = item
+ Draft2020.subschemas = clone(subschemas)
+
+ Draft4.loadMeta("http://json-schema.org/draft-04/schema", `{
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "description": "Core schema meta-schema",
+ "definitions": {
+ "schemaArray": {
+ "type": "array",
+ "minItems": 1,
+ "items": { "$ref": "#" }
+ },
+ "positiveInteger": {
+ "type": "integer",
+ "minimum": 0
+ },
+ "positiveIntegerDefault0": {
+ "allOf": [ { "$ref": "#/definitions/positiveInteger" }, { "default": 0 } ]
+ },
+ "simpleTypes": {
+ "enum": [ "array", "boolean", "integer", "null", "number", "object", "string" ]
+ },
+ "stringArray": {
+ "type": "array",
+ "items": { "type": "string" },
+ "minItems": 1,
+ "uniqueItems": true
+ }
+ },
+ "type": "object",
+ "properties": {
+ "id": {
+ "type": "string",
+ "format": "uriref"
+ },
+ "$schema": {
+ "type": "string",
+ "format": "uri"
+ },
+ "title": {
+ "type": "string"
+ },
+ "description": {
+ "type": "string"
+ },
+ "default": {},
+ "multipleOf": {
+ "type": "number",
+ "minimum": 0,
+ "exclusiveMinimum": true
+ },
+ "maximum": {
+ "type": "number"
+ },
+ "exclusiveMaximum": {
+ "type": "boolean",
+ "default": false
+ },
+ "minimum": {
+ "type": "number"
+ },
+ "exclusiveMinimum": {
+ "type": "boolean",
+ "default": false
+ },
+ "maxLength": { "$ref": "#/definitions/positiveInteger" },
+ "minLength": { "$ref": "#/definitions/positiveIntegerDefault0" },
+ "pattern": {
+ "type": "string",
+ "format": "regex"
+ },
+ "additionalItems": {
+ "anyOf": [
+ { "type": "boolean" },
+ { "$ref": "#" }
+ ],
+ "default": {}
+ },
+ "items": {
+ "anyOf": [
+ { "$ref": "#" },
+ { "$ref": "#/definitions/schemaArray" }
+ ],
+ "default": {}
+ },
+ "maxItems": { "$ref": "#/definitions/positiveInteger" },
+ "minItems": { "$ref": "#/definitions/positiveIntegerDefault0" },
+ "uniqueItems": {
+ "type": "boolean",
+ "default": false
+ },
+ "maxProperties": { "$ref": "#/definitions/positiveInteger" },
+ "minProperties": { "$ref": "#/definitions/positiveIntegerDefault0" },
+ "required": { "$ref": "#/definitions/stringArray" },
+ "additionalProperties": {
+ "anyOf": [
+ { "type": "boolean" },
+ { "$ref": "#" }
+ ],
+ "default": {}
+ },
+ "definitions": {
+ "type": "object",
+ "additionalProperties": { "$ref": "#" },
+ "default": {}
+ },
+ "properties": {
+ "type": "object",
+ "additionalProperties": { "$ref": "#" },
+ "default": {}
+ },
+ "patternProperties": {
+ "type": "object",
+ "regexProperties": true,
+ "additionalProperties": { "$ref": "#" },
+ "default": {}
+ },
+ "regexProperties": { "type": "boolean" },
+ "dependencies": {
+ "type": "object",
+ "additionalProperties": {
+ "anyOf": [
+ { "$ref": "#" },
+ { "$ref": "#/definitions/stringArray" }
+ ]
+ }
+ },
+ "enum": {
+ "type": "array",
+ "minItems": 1,
+ "uniqueItems": true
+ },
+ "type": {
+ "anyOf": [
+ { "$ref": "#/definitions/simpleTypes" },
+ {
+ "type": "array",
+ "items": { "$ref": "#/definitions/simpleTypes" },
+ "minItems": 1,
+ "uniqueItems": true
+ }
+ ]
+ },
+ "allOf": { "$ref": "#/definitions/schemaArray" },
+ "anyOf": { "$ref": "#/definitions/schemaArray" },
+ "oneOf": { "$ref": "#/definitions/schemaArray" },
+ "not": { "$ref": "#" },
+ "format": { "type": "string" },
+ "$ref": { "type": "string" }
+ },
+ "dependencies": {
+ "exclusiveMaximum": [ "maximum" ],
+ "exclusiveMinimum": [ "minimum" ]
+ },
+ "default": {}
+ }`)
+ Draft6.loadMeta("http://json-schema.org/draft-06/schema", `{
+ "$schema": "http://json-schema.org/draft-06/schema#",
+ "$id": "http://json-schema.org/draft-06/schema#",
+ "title": "Core schema meta-schema",
+ "definitions": {
+ "schemaArray": {
+ "type": "array",
+ "minItems": 1,
+ "items": { "$ref": "#" }
+ },
+ "nonNegativeInteger": {
+ "type": "integer",
+ "minimum": 0
+ },
+ "nonNegativeIntegerDefault0": {
+ "allOf": [
+ { "$ref": "#/definitions/nonNegativeInteger" },
+ { "default": 0 }
+ ]
+ },
+ "simpleTypes": {
+ "enum": [
+ "array",
+ "boolean",
+ "integer",
+ "null",
+ "number",
+ "object",
+ "string"
+ ]
+ },
+ "stringArray": {
+ "type": "array",
+ "items": { "type": "string" },
+ "uniqueItems": true,
+ "default": []
+ }
+ },
+ "type": ["object", "boolean"],
+ "properties": {
+ "$id": {
+ "type": "string",
+ "format": "uri-reference"
+ },
+ "$schema": {
+ "type": "string",
+ "format": "uri"
+ },
+ "$ref": {
+ "type": "string",
+ "format": "uri-reference"
+ },
+ "title": {
+ "type": "string"
+ },
+ "description": {
+ "type": "string"
+ },
+ "default": {},
+ "multipleOf": {
+ "type": "number",
+ "exclusiveMinimum": 0
+ },
+ "maximum": {
+ "type": "number"
+ },
+ "exclusiveMaximum": {
+ "type": "number"
+ },
+ "minimum": {
+ "type": "number"
+ },
+ "exclusiveMinimum": {
+ "type": "number"
+ },
+ "maxLength": { "$ref": "#/definitions/nonNegativeInteger" },
+ "minLength": { "$ref": "#/definitions/nonNegativeIntegerDefault0" },
+ "pattern": {
+ "type": "string",
+ "format": "regex"
+ },
+ "additionalItems": { "$ref": "#" },
+ "items": {
+ "anyOf": [
+ { "$ref": "#" },
+ { "$ref": "#/definitions/schemaArray" }
+ ],
+ "default": {}
+ },
+ "maxItems": { "$ref": "#/definitions/nonNegativeInteger" },
+ "minItems": { "$ref": "#/definitions/nonNegativeIntegerDefault0" },
+ "uniqueItems": {
+ "type": "boolean",
+ "default": false
+ },
+ "contains": { "$ref": "#" },
+ "maxProperties": { "$ref": "#/definitions/nonNegativeInteger" },
+ "minProperties": { "$ref": "#/definitions/nonNegativeIntegerDefault0" },
+ "required": { "$ref": "#/definitions/stringArray" },
+ "additionalProperties": { "$ref": "#" },
+ "definitions": {
+ "type": "object",
+ "additionalProperties": { "$ref": "#" },
+ "default": {}
+ },
+ "properties": {
+ "type": "object",
+ "additionalProperties": { "$ref": "#" },
+ "default": {}
+ },
+ "patternProperties": {
+ "type": "object",
+ "regexProperties": true,
+ "additionalProperties": { "$ref": "#" },
+ "default": {}
+ },
+ "dependencies": {
+ "type": "object",
+ "additionalProperties": {
+ "anyOf": [
+ { "$ref": "#" },
+ { "$ref": "#/definitions/stringArray" }
+ ]
+ }
+ },
+ "propertyNames": { "$ref": "#" },
+ "const": {},
+ "enum": {
+ "type": "array",
+ "minItems": 1,
+ "uniqueItems": true
+ },
+ "type": {
+ "anyOf": [
+ { "$ref": "#/definitions/simpleTypes" },
+ {
+ "type": "array",
+ "items": { "$ref": "#/definitions/simpleTypes" },
+ "minItems": 1,
+ "uniqueItems": true
+ }
+ ]
+ },
+ "format": { "type": "string" },
+ "allOf": { "$ref": "#/definitions/schemaArray" },
+ "anyOf": { "$ref": "#/definitions/schemaArray" },
+ "oneOf": { "$ref": "#/definitions/schemaArray" },
+ "not": { "$ref": "#" }
+ },
+ "default": {}
+ }`)
+ Draft7.loadMeta("http://json-schema.org/draft-07/schema", `{
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "$id": "http://json-schema.org/draft-07/schema#",
+ "title": "Core schema meta-schema",
+ "definitions": {
+ "schemaArray": {
+ "type": "array",
+ "minItems": 1,
+ "items": { "$ref": "#" }
+ },
+ "nonNegativeInteger": {
+ "type": "integer",
+ "minimum": 0
+ },
+ "nonNegativeIntegerDefault0": {
+ "allOf": [
+ { "$ref": "#/definitions/nonNegativeInteger" },
+ { "default": 0 }
+ ]
+ },
+ "simpleTypes": {
+ "enum": [
+ "array",
+ "boolean",
+ "integer",
+ "null",
+ "number",
+ "object",
+ "string"
+ ]
+ },
+ "stringArray": {
+ "type": "array",
+ "items": { "type": "string" },
+ "uniqueItems": true,
+ "default": []
+ }
+ },
+ "type": ["object", "boolean"],
+ "properties": {
+ "$id": {
+ "type": "string",
+ "format": "uri-reference"
+ },
+ "$schema": {
+ "type": "string",
+ "format": "uri"
+ },
+ "$ref": {
+ "type": "string",
+ "format": "uri-reference"
+ },
+ "$comment": {
+ "type": "string"
+ },
+ "title": {
+ "type": "string"
+ },
+ "description": {
+ "type": "string"
+ },
+ "default": true,
+ "readOnly": {
+ "type": "boolean",
+ "default": false
+ },
+ "writeOnly": {
+ "type": "boolean",
+ "default": false
+ },
+ "examples": {
+ "type": "array",
+ "items": true
+ },
+ "multipleOf": {
+ "type": "number",
+ "exclusiveMinimum": 0
+ },
+ "maximum": {
+ "type": "number"
+ },
+ "exclusiveMaximum": {
+ "type": "number"
+ },
+ "minimum": {
+ "type": "number"
+ },
+ "exclusiveMinimum": {
+ "type": "number"
+ },
+ "maxLength": { "$ref": "#/definitions/nonNegativeInteger" },
+ "minLength": { "$ref": "#/definitions/nonNegativeIntegerDefault0" },
+ "pattern": {
+ "type": "string",
+ "format": "regex"
+ },
+ "additionalItems": { "$ref": "#" },
+ "items": {
+ "anyOf": [
+ { "$ref": "#" },
+ { "$ref": "#/definitions/schemaArray" }
+ ],
+ "default": true
+ },
+ "maxItems": { "$ref": "#/definitions/nonNegativeInteger" },
+ "minItems": { "$ref": "#/definitions/nonNegativeIntegerDefault0" },
+ "uniqueItems": {
+ "type": "boolean",
+ "default": false
+ },
+ "contains": { "$ref": "#" },
+ "maxProperties": { "$ref": "#/definitions/nonNegativeInteger" },
+ "minProperties": { "$ref": "#/definitions/nonNegativeIntegerDefault0" },
+ "required": { "$ref": "#/definitions/stringArray" },
+ "additionalProperties": { "$ref": "#" },
+ "definitions": {
+ "type": "object",
+ "additionalProperties": { "$ref": "#" },
+ "default": {}
+ },
+ "properties": {
+ "type": "object",
+ "additionalProperties": { "$ref": "#" },
+ "default": {}
+ },
+ "patternProperties": {
+ "type": "object",
+ "additionalProperties": { "$ref": "#" },
+ "propertyNames": { "format": "regex" },
+ "default": {}
+ },
+ "dependencies": {
+ "type": "object",
+ "additionalProperties": {
+ "anyOf": [
+ { "$ref": "#" },
+ { "$ref": "#/definitions/stringArray" }
+ ]
+ }
+ },
+ "propertyNames": { "$ref": "#" },
+ "const": true,
+ "enum": {
+ "type": "array",
+ "items": true,
+ "minItems": 1,
+ "uniqueItems": true
+ },
+ "type": {
+ "anyOf": [
+ { "$ref": "#/definitions/simpleTypes" },
+ {
+ "type": "array",
+ "items": { "$ref": "#/definitions/simpleTypes" },
+ "minItems": 1,
+ "uniqueItems": true
+ }
+ ]
+ },
+ "format": { "type": "string" },
+ "contentMediaType": { "type": "string" },
+ "contentEncoding": { "type": "string" },
+ "if": { "$ref": "#" },
+ "then": { "$ref": "#" },
+ "else": { "$ref": "#" },
+ "allOf": { "$ref": "#/definitions/schemaArray" },
+ "anyOf": { "$ref": "#/definitions/schemaArray" },
+ "oneOf": { "$ref": "#/definitions/schemaArray" },
+ "not": { "$ref": "#" }
+ },
+ "default": true
+ }`)
+ Draft2019.loadMeta("https://json-schema.org/draft/2019-09/schema", `{
+ "$schema": "https://json-schema.org/draft/2019-09/schema",
+ "$id": "https://json-schema.org/draft/2019-09/schema",
+ "$vocabulary": {
+ "https://json-schema.org/draft/2019-09/vocab/core": true,
+ "https://json-schema.org/draft/2019-09/vocab/applicator": true,
+ "https://json-schema.org/draft/2019-09/vocab/validation": true,
+ "https://json-schema.org/draft/2019-09/vocab/meta-data": true,
+ "https://json-schema.org/draft/2019-09/vocab/format": false,
+ "https://json-schema.org/draft/2019-09/vocab/content": true
+ },
+ "$recursiveAnchor": true,
+
+ "title": "Core and Validation specifications meta-schema",
+ "allOf": [
+ {"$ref": "meta/core"},
+ {"$ref": "meta/applicator"},
+ {"$ref": "meta/validation"},
+ {"$ref": "meta/meta-data"},
+ {"$ref": "meta/format"},
+ {"$ref": "meta/content"}
+ ],
+ "type": ["object", "boolean"],
+ "properties": {
+ "definitions": {
+ "$comment": "While no longer an official keyword as it is replaced by $defs, this keyword is retained in the meta-schema to prevent incompatible extensions as it remains in common use.",
+ "type": "object",
+ "additionalProperties": { "$recursiveRef": "#" },
+ "default": {}
+ },
+ "dependencies": {
+ "$comment": "\"dependencies\" is no longer a keyword, but schema authors should avoid redefining it to facilitate a smooth transition to \"dependentSchemas\" and \"dependentRequired\"",
+ "type": "object",
+ "additionalProperties": {
+ "anyOf": [
+ { "$recursiveRef": "#" },
+ { "$ref": "meta/validation#/$defs/stringArray" }
+ ]
+ }
+ }
+ }
+ }`)
+ Draft2020.loadMeta("https://json-schema.org/draft/2020-12/schema", `{
+ "$schema": "https://json-schema.org/draft/2020-12/schema",
+ "$id": "https://json-schema.org/draft/2020-12/schema",
+ "$vocabulary": {
+ "https://json-schema.org/draft/2020-12/vocab/core": true,
+ "https://json-schema.org/draft/2020-12/vocab/applicator": true,
+ "https://json-schema.org/draft/2020-12/vocab/unevaluated": true,
+ "https://json-schema.org/draft/2020-12/vocab/validation": true,
+ "https://json-schema.org/draft/2020-12/vocab/meta-data": true,
+ "https://json-schema.org/draft/2020-12/vocab/format-annotation": true,
+ "https://json-schema.org/draft/2020-12/vocab/content": true
+ },
+ "$dynamicAnchor": "meta",
+
+ "title": "Core and Validation specifications meta-schema",
+ "allOf": [
+ {"$ref": "meta/core"},
+ {"$ref": "meta/applicator"},
+ {"$ref": "meta/unevaluated"},
+ {"$ref": "meta/validation"},
+ {"$ref": "meta/meta-data"},
+ {"$ref": "meta/format-annotation"},
+ {"$ref": "meta/content"}
+ ],
+ "type": ["object", "boolean"],
+ "$comment": "This meta-schema also defines keywords that have appeared in previous drafts in order to prevent incompatible extensions as they remain in common use.",
+ "properties": {
+ "definitions": {
+ "$comment": "\"definitions\" has been replaced by \"$defs\".",
+ "type": "object",
+ "additionalProperties": { "$dynamicRef": "#meta" },
+ "deprecated": true,
+ "default": {}
+ },
+ "dependencies": {
+ "$comment": "\"dependencies\" has been split and replaced by \"dependentSchemas\" and \"dependentRequired\" in order to serve their differing semantics.",
+ "type": "object",
+ "additionalProperties": {
+ "anyOf": [
+ { "$dynamicRef": "#meta" },
+ { "$ref": "meta/validation#/$defs/stringArray" }
+ ]
+ },
+ "deprecated": true,
+ "default": {}
+ },
+ "$recursiveAnchor": {
+ "$comment": "\"$recursiveAnchor\" has been replaced by \"$dynamicAnchor\".",
+ "$ref": "meta/core#/$defs/anchorString",
+ "deprecated": true
+ },
+ "$recursiveRef": {
+ "$comment": "\"$recursiveRef\" has been replaced by \"$dynamicRef\".",
+ "$ref": "meta/core#/$defs/uriReferenceString",
+ "deprecated": true
+ }
+ }
+ }`)
+}
+
+var vocabSchemas = map[string]string{
+ "https://json-schema.org/draft/2019-09/meta/core": `{
+ "$schema": "https://json-schema.org/draft/2019-09/schema",
+ "$id": "https://json-schema.org/draft/2019-09/meta/core",
+ "$vocabulary": {
+ "https://json-schema.org/draft/2019-09/vocab/core": true
+ },
+ "$recursiveAnchor": true,
+
+ "title": "Core vocabulary meta-schema",
+ "type": ["object", "boolean"],
+ "properties": {
+ "$id": {
+ "type": "string",
+ "format": "uri-reference",
+ "$comment": "Non-empty fragments not allowed.",
+ "pattern": "^[^#]*#?$"
+ },
+ "$schema": {
+ "type": "string",
+ "format": "uri"
+ },
+ "$anchor": {
+ "type": "string",
+ "pattern": "^[A-Za-z][-A-Za-z0-9.:_]*$"
+ },
+ "$ref": {
+ "type": "string",
+ "format": "uri-reference"
+ },
+ "$recursiveRef": {
+ "type": "string",
+ "format": "uri-reference"
+ },
+ "$recursiveAnchor": {
+ "type": "boolean",
+ "default": false
+ },
+ "$vocabulary": {
+ "type": "object",
+ "propertyNames": {
+ "type": "string",
+ "format": "uri"
+ },
+ "additionalProperties": {
+ "type": "boolean"
+ }
+ },
+ "$comment": {
+ "type": "string"
+ },
+ "$defs": {
+ "type": "object",
+ "additionalProperties": { "$recursiveRef": "#" },
+ "default": {}
+ }
+ }
+ }`,
+ "https://json-schema.org/draft/2019-09/meta/applicator": `{
+ "$schema": "https://json-schema.org/draft/2019-09/schema",
+ "$id": "https://json-schema.org/draft/2019-09/meta/applicator",
+ "$vocabulary": {
+ "https://json-schema.org/draft/2019-09/vocab/applicator": true
+ },
+ "$recursiveAnchor": true,
+
+ "title": "Applicator vocabulary meta-schema",
+ "type": ["object", "boolean"],
+ "properties": {
+ "additionalItems": { "$recursiveRef": "#" },
+ "unevaluatedItems": { "$recursiveRef": "#" },
+ "items": {
+ "anyOf": [
+ { "$recursiveRef": "#" },
+ { "$ref": "#/$defs/schemaArray" }
+ ]
+ },
+ "contains": { "$recursiveRef": "#" },
+ "additionalProperties": { "$recursiveRef": "#" },
+ "unevaluatedProperties": { "$recursiveRef": "#" },
+ "properties": {
+ "type": "object",
+ "additionalProperties": { "$recursiveRef": "#" },
+ "default": {}
+ },
+ "patternProperties": {
+ "type": "object",
+ "additionalProperties": { "$recursiveRef": "#" },
+ "propertyNames": { "format": "regex" },
+ "default": {}
+ },
+ "dependentSchemas": {
+ "type": "object",
+ "additionalProperties": {
+ "$recursiveRef": "#"
+ }
+ },
+ "propertyNames": { "$recursiveRef": "#" },
+ "if": { "$recursiveRef": "#" },
+ "then": { "$recursiveRef": "#" },
+ "else": { "$recursiveRef": "#" },
+ "allOf": { "$ref": "#/$defs/schemaArray" },
+ "anyOf": { "$ref": "#/$defs/schemaArray" },
+ "oneOf": { "$ref": "#/$defs/schemaArray" },
+ "not": { "$recursiveRef": "#" }
+ },
+ "$defs": {
+ "schemaArray": {
+ "type": "array",
+ "minItems": 1,
+ "items": { "$recursiveRef": "#" }
+ }
+ }
+ }`,
+ "https://json-schema.org/draft/2019-09/meta/validation": `{
+ "$schema": "https://json-schema.org/draft/2019-09/schema",
+ "$id": "https://json-schema.org/draft/2019-09/meta/validation",
+ "$vocabulary": {
+ "https://json-schema.org/draft/2019-09/vocab/validation": true
+ },
+ "$recursiveAnchor": true,
+
+ "title": "Validation vocabulary meta-schema",
+ "type": ["object", "boolean"],
+ "properties": {
+ "multipleOf": {
+ "type": "number",
+ "exclusiveMinimum": 0
+ },
+ "maximum": {
+ "type": "number"
+ },
+ "exclusiveMaximum": {
+ "type": "number"
+ },
+ "minimum": {
+ "type": "number"
+ },
+ "exclusiveMinimum": {
+ "type": "number"
+ },
+ "maxLength": { "$ref": "#/$defs/nonNegativeInteger" },
+ "minLength": { "$ref": "#/$defs/nonNegativeIntegerDefault0" },
+ "pattern": {
+ "type": "string",
+ "format": "regex"
+ },
+ "maxItems": { "$ref": "#/$defs/nonNegativeInteger" },
+ "minItems": { "$ref": "#/$defs/nonNegativeIntegerDefault0" },
+ "uniqueItems": {
+ "type": "boolean",
+ "default": false
+ },
+ "maxContains": { "$ref": "#/$defs/nonNegativeInteger" },
+ "minContains": {
+ "$ref": "#/$defs/nonNegativeInteger",
+ "default": 1
+ },
+ "maxProperties": { "$ref": "#/$defs/nonNegativeInteger" },
+ "minProperties": { "$ref": "#/$defs/nonNegativeIntegerDefault0" },
+ "required": { "$ref": "#/$defs/stringArray" },
+ "dependentRequired": {
+ "type": "object",
+ "additionalProperties": {
+ "$ref": "#/$defs/stringArray"
+ }
+ },
+ "const": true,
+ "enum": {
+ "type": "array",
+ "items": true
+ },
+ "type": {
+ "anyOf": [
+ { "$ref": "#/$defs/simpleTypes" },
+ {
+ "type": "array",
+ "items": { "$ref": "#/$defs/simpleTypes" },
+ "minItems": 1,
+ "uniqueItems": true
+ }
+ ]
+ }
+ },
+ "$defs": {
+ "nonNegativeInteger": {
+ "type": "integer",
+ "minimum": 0
+ },
+ "nonNegativeIntegerDefault0": {
+ "$ref": "#/$defs/nonNegativeInteger",
+ "default": 0
+ },
+ "simpleTypes": {
+ "enum": [
+ "array",
+ "boolean",
+ "integer",
+ "null",
+ "number",
+ "object",
+ "string"
+ ]
+ },
+ "stringArray": {
+ "type": "array",
+ "items": { "type": "string" },
+ "uniqueItems": true,
+ "default": []
+ }
+ }
+ }`,
+ "https://json-schema.org/draft/2019-09/meta/meta-data": `{
+ "$schema": "https://json-schema.org/draft/2019-09/schema",
+ "$id": "https://json-schema.org/draft/2019-09/meta/meta-data",
+ "$vocabulary": {
+ "https://json-schema.org/draft/2019-09/vocab/meta-data": true
+ },
+ "$recursiveAnchor": true,
+
+ "title": "Meta-data vocabulary meta-schema",
+
+ "type": ["object", "boolean"],
+ "properties": {
+ "title": {
+ "type": "string"
+ },
+ "description": {
+ "type": "string"
+ },
+ "default": true,
+ "deprecated": {
+ "type": "boolean",
+ "default": false
+ },
+ "readOnly": {
+ "type": "boolean",
+ "default": false
+ },
+ "writeOnly": {
+ "type": "boolean",
+ "default": false
+ },
+ "examples": {
+ "type": "array",
+ "items": true
+ }
+ }
+ }`,
+ "https://json-schema.org/draft/2019-09/meta/format": `{
+ "$schema": "https://json-schema.org/draft/2019-09/schema",
+ "$id": "https://json-schema.org/draft/2019-09/meta/format",
+ "$vocabulary": {
+ "https://json-schema.org/draft/2019-09/vocab/format": true
+ },
+ "$recursiveAnchor": true,
+
+ "title": "Format vocabulary meta-schema",
+ "type": ["object", "boolean"],
+ "properties": {
+ "format": { "type": "string" }
+ }
+ }`,
+ "https://json-schema.org/draft/2019-09/meta/content": `{
+ "$schema": "https://json-schema.org/draft/2019-09/schema",
+ "$id": "https://json-schema.org/draft/2019-09/meta/content",
+ "$vocabulary": {
+ "https://json-schema.org/draft/2019-09/vocab/content": true
+ },
+ "$recursiveAnchor": true,
+
+ "title": "Content vocabulary meta-schema",
+
+ "type": ["object", "boolean"],
+ "properties": {
+ "contentMediaType": { "type": "string" },
+ "contentEncoding": { "type": "string" },
+ "contentSchema": { "$recursiveRef": "#" }
+ }
+ }`,
+ "https://json-schema.org/draft/2020-12/meta/core": `{
+ "$schema": "https://json-schema.org/draft/2020-12/schema",
+ "$id": "https://json-schema.org/draft/2020-12/meta/core",
+ "$vocabulary": {
+ "https://json-schema.org/draft/2020-12/vocab/core": true
+ },
+ "$dynamicAnchor": "meta",
+
+ "title": "Core vocabulary meta-schema",
+ "type": ["object", "boolean"],
+ "properties": {
+ "$id": {
+ "$ref": "#/$defs/uriReferenceString",
+ "$comment": "Non-empty fragments not allowed.",
+ "pattern": "^[^#]*#?$"
+ },
+ "$schema": { "$ref": "#/$defs/uriString" },
+ "$ref": { "$ref": "#/$defs/uriReferenceString" },
+ "$anchor": { "$ref": "#/$defs/anchorString" },
+ "$dynamicRef": { "$ref": "#/$defs/uriReferenceString" },
+ "$dynamicAnchor": { "$ref": "#/$defs/anchorString" },
+ "$vocabulary": {
+ "type": "object",
+ "propertyNames": { "$ref": "#/$defs/uriString" },
+ "additionalProperties": {
+ "type": "boolean"
+ }
+ },
+ "$comment": {
+ "type": "string"
+ },
+ "$defs": {
+ "type": "object",
+ "additionalProperties": { "$dynamicRef": "#meta" }
+ }
+ },
+ "$defs": {
+ "anchorString": {
+ "type": "string",
+ "pattern": "^[A-Za-z_][-A-Za-z0-9._]*$"
+ },
+ "uriString": {
+ "type": "string",
+ "format": "uri"
+ },
+ "uriReferenceString": {
+ "type": "string",
+ "format": "uri-reference"
+ }
+ }
+ }`,
+ "https://json-schema.org/draft/2020-12/meta/applicator": `{
+ "$schema": "https://json-schema.org/draft/2020-12/schema",
+ "$id": "https://json-schema.org/draft/2020-12/meta/applicator",
+ "$vocabulary": {
+ "https://json-schema.org/draft/2020-12/vocab/applicator": true
+ },
+ "$dynamicAnchor": "meta",
+
+ "title": "Applicator vocabulary meta-schema",
+ "type": ["object", "boolean"],
+ "properties": {
+ "prefixItems": { "$ref": "#/$defs/schemaArray" },
+ "items": { "$dynamicRef": "#meta" },
+ "contains": { "$dynamicRef": "#meta" },
+ "additionalProperties": { "$dynamicRef": "#meta" },
+ "properties": {
+ "type": "object",
+ "additionalProperties": { "$dynamicRef": "#meta" },
+ "default": {}
+ },
+ "patternProperties": {
+ "type": "object",
+ "additionalProperties": { "$dynamicRef": "#meta" },
+ "propertyNames": { "format": "regex" },
+ "default": {}
+ },
+ "dependentSchemas": {
+ "type": "object",
+ "additionalProperties": { "$dynamicRef": "#meta" },
+ "default": {}
+ },
+ "propertyNames": { "$dynamicRef": "#meta" },
+ "if": { "$dynamicRef": "#meta" },
+ "then": { "$dynamicRef": "#meta" },
+ "else": { "$dynamicRef": "#meta" },
+ "allOf": { "$ref": "#/$defs/schemaArray" },
+ "anyOf": { "$ref": "#/$defs/schemaArray" },
+ "oneOf": { "$ref": "#/$defs/schemaArray" },
+ "not": { "$dynamicRef": "#meta" }
+ },
+ "$defs": {
+ "schemaArray": {
+ "type": "array",
+ "minItems": 1,
+ "items": { "$dynamicRef": "#meta" }
+ }
+ }
+ }`,
+ "https://json-schema.org/draft/2020-12/meta/unevaluated": `{
+ "$schema": "https://json-schema.org/draft/2020-12/schema",
+ "$id": "https://json-schema.org/draft/2020-12/meta/unevaluated",
+ "$vocabulary": {
+ "https://json-schema.org/draft/2020-12/vocab/unevaluated": true
+ },
+ "$dynamicAnchor": "meta",
+
+ "title": "Unevaluated applicator vocabulary meta-schema",
+ "type": ["object", "boolean"],
+ "properties": {
+ "unevaluatedItems": { "$dynamicRef": "#meta" },
+ "unevaluatedProperties": { "$dynamicRef": "#meta" }
+ }
+ }`,
+ "https://json-schema.org/draft/2020-12/meta/validation": `{
+ "$schema": "https://json-schema.org/draft/2020-12/schema",
+ "$id": "https://json-schema.org/draft/2020-12/meta/validation",
+ "$vocabulary": {
+ "https://json-schema.org/draft/2020-12/vocab/validation": true
+ },
+ "$dynamicAnchor": "meta",
+
+ "title": "Validation vocabulary meta-schema",
+ "type": ["object", "boolean"],
+ "properties": {
+ "type": {
+ "anyOf": [
+ { "$ref": "#/$defs/simpleTypes" },
+ {
+ "type": "array",
+ "items": { "$ref": "#/$defs/simpleTypes" },
+ "minItems": 1,
+ "uniqueItems": true
+ }
+ ]
+ },
+ "const": true,
+ "enum": {
+ "type": "array",
+ "items": true
+ },
+ "multipleOf": {
+ "type": "number",
+ "exclusiveMinimum": 0
+ },
+ "maximum": {
+ "type": "number"
+ },
+ "exclusiveMaximum": {
+ "type": "number"
+ },
+ "minimum": {
+ "type": "number"
+ },
+ "exclusiveMinimum": {
+ "type": "number"
+ },
+ "maxLength": { "$ref": "#/$defs/nonNegativeInteger" },
+ "minLength": { "$ref": "#/$defs/nonNegativeIntegerDefault0" },
+ "pattern": {
+ "type": "string",
+ "format": "regex"
+ },
+ "maxItems": { "$ref": "#/$defs/nonNegativeInteger" },
+ "minItems": { "$ref": "#/$defs/nonNegativeIntegerDefault0" },
+ "uniqueItems": {
+ "type": "boolean",
+ "default": false
+ },
+ "maxContains": { "$ref": "#/$defs/nonNegativeInteger" },
+ "minContains": {
+ "$ref": "#/$defs/nonNegativeInteger",
+ "default": 1
+ },
+ "maxProperties": { "$ref": "#/$defs/nonNegativeInteger" },
+ "minProperties": { "$ref": "#/$defs/nonNegativeIntegerDefault0" },
+ "required": { "$ref": "#/$defs/stringArray" },
+ "dependentRequired": {
+ "type": "object",
+ "additionalProperties": {
+ "$ref": "#/$defs/stringArray"
+ }
+ }
+ },
+ "$defs": {
+ "nonNegativeInteger": {
+ "type": "integer",
+ "minimum": 0
+ },
+ "nonNegativeIntegerDefault0": {
+ "$ref": "#/$defs/nonNegativeInteger",
+ "default": 0
+ },
+ "simpleTypes": {
+ "enum": [
+ "array",
+ "boolean",
+ "integer",
+ "null",
+ "number",
+ "object",
+ "string"
+ ]
+ },
+ "stringArray": {
+ "type": "array",
+ "items": { "type": "string" },
+ "uniqueItems": true,
+ "default": []
+ }
+ }
+ }`,
+ "https://json-schema.org/draft/2020-12/meta/meta-data": `{
+ "$schema": "https://json-schema.org/draft/2020-12/schema",
+ "$id": "https://json-schema.org/draft/2020-12/meta/meta-data",
+ "$vocabulary": {
+ "https://json-schema.org/draft/2020-12/vocab/meta-data": true
+ },
+ "$dynamicAnchor": "meta",
+
+ "title": "Meta-data vocabulary meta-schema",
+
+ "type": ["object", "boolean"],
+ "properties": {
+ "title": {
+ "type": "string"
+ },
+ "description": {
+ "type": "string"
+ },
+ "default": true,
+ "deprecated": {
+ "type": "boolean",
+ "default": false
+ },
+ "readOnly": {
+ "type": "boolean",
+ "default": false
+ },
+ "writeOnly": {
+ "type": "boolean",
+ "default": false
+ },
+ "examples": {
+ "type": "array",
+ "items": true
+ }
+ }
+ }`,
+ "https://json-schema.org/draft/2020-12/meta/format-annotation": `{
+ "$schema": "https://json-schema.org/draft/2020-12/schema",
+ "$id": "https://json-schema.org/draft/2020-12/meta/format-annotation",
+ "$vocabulary": {
+ "https://json-schema.org/draft/2020-12/vocab/format-annotation": true
+ },
+ "$dynamicAnchor": "meta",
+
+ "title": "Format vocabulary meta-schema for annotation results",
+ "type": ["object", "boolean"],
+ "properties": {
+ "format": { "type": "string" }
+ }
+ }`,
+ "https://json-schema.org/draft/2020-12/meta/format-assertion": `{
+ "$schema": "https://json-schema.org/draft/2020-12/schema",
+ "$id": "https://json-schema.org/draft/2020-12/meta/format-assertion",
+ "$vocabulary": {
+ "https://json-schema.org/draft/2020-12/vocab/format-assertion": true
+ },
+ "$dynamicAnchor": "meta",
+
+ "title": "Format vocabulary meta-schema for assertion results",
+ "type": ["object", "boolean"],
+ "properties": {
+ "format": { "type": "string" }
+ }
+ }`,
+ "https://json-schema.org/draft/2020-12/meta/content": `{
+ "$schema": "https://json-schema.org/draft/2020-12/schema",
+ "$id": "https://json-schema.org/draft/2020-12/meta/content",
+ "$vocabulary": {
+ "https://json-schema.org/draft/2020-12/vocab/content": true
+ },
+ "$dynamicAnchor": "meta",
+
+ "title": "Content vocabulary meta-schema",
+
+ "type": ["object", "boolean"],
+ "properties": {
+ "contentEncoding": { "type": "string" },
+ "contentMediaType": { "type": "string" },
+ "contentSchema": { "$dynamicRef": "#meta" }
+ }
+ }`,
+}
+
+func clone(m map[string]position) map[string]position {
+ mm := make(map[string]position)
+ for k, v := range m {
+ mm[k] = v
+ }
+ return mm
+}
diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v5/errors.go b/vendor/github.com/santhosh-tekuri/jsonschema/v5/errors.go
new file mode 100644
index 0000000000..deaded89f7
--- /dev/null
+++ b/vendor/github.com/santhosh-tekuri/jsonschema/v5/errors.go
@@ -0,0 +1,129 @@
+package jsonschema
+
+import (
+ "fmt"
+ "strings"
+)
+
+// InvalidJSONTypeError is the error type returned by ValidateInterface.
+// It reports that the specified Go object is not a valid jsonType.
+type InvalidJSONTypeError string
+
+func (e InvalidJSONTypeError) Error() string {
+ return fmt.Sprintf("jsonschema: invalid jsonType: %s", string(e))
+}
+
+// InfiniteLoopError is returned by Compile/Validate.
+// It gives the url#keywordLocation that leads to the infinite loop.
+type InfiniteLoopError string
+
+func (e InfiniteLoopError) Error() string {
+ return "jsonschema: infinite loop " + string(e)
+}
+
+func infiniteLoopError(stack []schemaRef, sref schemaRef) InfiniteLoopError {
+ var path string
+ for _, ref := range stack {
+ if path == "" {
+ path += ref.schema.Location
+ } else {
+ path += "/" + ref.path
+ }
+ }
+ return InfiniteLoopError(path + "/" + sref.path)
+}
+
+// SchemaError is the error type returned by Compile.
+type SchemaError struct {
+ // SchemaURL is the url to json-schema that failed to compile.
+ // This is helpful if your schema refers to external schemas.
+ SchemaURL string
+
+ // Err is the error that occurred during compilation.
+ // It could be ValidationError, because compilation validates
+ // given schema against the json meta-schema
+ Err error
+}
+
+func (se *SchemaError) Unwrap() error {
+ return se.Err
+}
+
+func (se *SchemaError) Error() string {
+ s := fmt.Sprintf("jsonschema %s compilation failed", se.SchemaURL)
+ if se.Err != nil {
+ return fmt.Sprintf("%s: %v", s, strings.TrimPrefix(se.Err.Error(), "jsonschema: "))
+ }
+ return s
+}
+
+func (se *SchemaError) GoString() string {
+ if _, ok := se.Err.(*ValidationError); ok {
+ return fmt.Sprintf("jsonschema %s compilation failed\n%#v", se.SchemaURL, se.Err)
+ }
+ return se.Error()
+}
+
+// ValidationError is the error type returned by Validate.
+type ValidationError struct {
+ KeywordLocation string // validation path of validating keyword or schema
+ AbsoluteKeywordLocation string // absolute location of validating keyword or schema
+ InstanceLocation string // location of the json value within the instance being validated
+ Message string // describes error
+ Causes []*ValidationError // nested validation errors
+}
+
+func (ve *ValidationError) add(causes ...error) error {
+ for _, cause := range causes {
+ ve.Causes = append(ve.Causes, cause.(*ValidationError))
+ }
+ return ve
+}
+
+func (ve *ValidationError) causes(err error) error {
+ if err := err.(*ValidationError); err.Message == "" {
+ ve.Causes = err.Causes
+ } else {
+ ve.add(err)
+ }
+ return ve
+}
+
+func (ve *ValidationError) Error() string {
+ leaf := ve
+ for len(leaf.Causes) > 0 {
+ leaf = leaf.Causes[0]
+ }
+ u, _ := split(ve.AbsoluteKeywordLocation)
+ return fmt.Sprintf("jsonschema: %s does not validate with %s: %s", quote(leaf.InstanceLocation), u+"#"+leaf.KeywordLocation, leaf.Message)
+}
+
+func (ve *ValidationError) GoString() string {
+ sloc := ve.AbsoluteKeywordLocation
+ sloc = sloc[strings.IndexByte(sloc, '#')+1:]
+ msg := fmt.Sprintf("[I#%s] [S#%s] %s", ve.InstanceLocation, sloc, ve.Message)
+ for _, c := range ve.Causes {
+ for _, line := range strings.Split(c.GoString(), "\n") {
+ msg += "\n " + line
+ }
+ }
+ return msg
+}
+
+func joinPtr(ptr1, ptr2 string) string {
+ if len(ptr1) == 0 {
+ return ptr2
+ }
+ if len(ptr2) == 0 {
+ return ptr1
+ }
+ return ptr1 + "/" + ptr2
+}
+
+// quote returns single-quoted string
+func quote(s string) string {
+ s = fmt.Sprintf("%q", s)
+ s = strings.ReplaceAll(s, `\"`, `"`)
+ s = strings.ReplaceAll(s, `'`, `\'`)
+ return "'" + s[1:len(s)-1] + "'"
+}
diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v5/extension.go b/vendor/github.com/santhosh-tekuri/jsonschema/v5/extension.go
new file mode 100644
index 0000000000..452ba118c5
--- /dev/null
+++ b/vendor/github.com/santhosh-tekuri/jsonschema/v5/extension.go
@@ -0,0 +1,116 @@
+package jsonschema
+
// ExtCompiler compiles custom keyword(s) into ExtSchema.
type ExtCompiler interface {
	// Compile compiles the custom keywords in schema m and returns its compiled representation.
	// if the schema m does not contain the keywords defined by this extension,
	// compiled representation nil should be returned.
	Compile(ctx CompilerContext, m map[string]interface{}) (ExtSchema, error)
}

// ExtSchema is schema representation of custom keyword(s)
type ExtSchema interface {
	// Validate validates the json value v with this ExtSchema.
	// Returned error must be *ValidationError.
	Validate(ctx ValidationContext, v interface{}) error
}

// extension pairs an ExtCompiler with the metaschema that is used to
// validate a schema before it is handed to the compiler.
type extension struct {
	meta     *Schema
	compiler ExtCompiler
}
+
+// RegisterExtension registers custom keyword(s) into this compiler.
+//
+// name is extension name, used only to avoid name collisions.
+// meta captures the metaschema for the new keywords.
+// This is used to validate the schema before calling ext.Compile.
+func (c *Compiler) RegisterExtension(name string, meta *Schema, ext ExtCompiler) {
+ c.extensions[name] = extension{meta, ext}
+}
+
// CompilerContext ---

// CompilerContext provides additional context required in compiling for extension.
type CompilerContext struct {
	c     *Compiler   // owning compiler
	r     *resource   // root resource being compiled
	stack []schemaRef // schemas applied to the same instance; used for cycle detection
	res   *resource   // resource of the schema currently being compiled
}

// Compile compiles given value at ptr into *Schema. This is useful in implementing
// keyword like allOf/not/patternProperties.
//
// schPath is the relative-json-pointer to the schema to be compiled from parent schema.
//
// applicableOnSameInstance tells whether current schema and the given schema
// are applied on same instance value. this is used to detect infinite loop in schema.
func (ctx CompilerContext) Compile(schPath string, applicableOnSameInstance bool) (*Schema, error) {
	// pass the current stack only when both schemas see the same instance,
	// otherwise recursion through different instances is legitimate.
	var stack []schemaRef
	if applicableOnSameInstance {
		stack = ctx.stack
	}
	return ctx.c.compileRef(ctx.r, stack, schPath, ctx.res, ctx.r.url+ctx.res.floc+"/"+schPath)
}

// CompileRef compiles the schema referenced by ref uri
//
// refPath is the relative-json-pointer to ref.
//
// applicableOnSameInstance tells whether current schema and the given schema
// are applied on same instance value. this is used to detect infinite loop in schema.
func (ctx CompilerContext) CompileRef(ref string, refPath string, applicableOnSameInstance bool) (*Schema, error) {
	var stack []schemaRef
	if applicableOnSameInstance {
		stack = ctx.stack
	}
	return ctx.c.compileRef(ctx.r, stack, refPath, ctx.res, ref)
}
+
// ValidationContext ---

// ValidationContext provides additional context required in validating for extension.
type ValidationContext struct {
	result          validationResult // tracks unevaluated properties/items
	validate        func(sch *Schema, schPath string, v interface{}, vpath string) error
	validateInplace func(sch *Schema, schPath string) error
	validationError func(keywordPath string, format string, a ...interface{}) *ValidationError
}

// EvaluatedProp marks given property of object as evaluated.
func (ctx ValidationContext) EvaluatedProp(prop string) {
	delete(ctx.result.unevalProps, prop)
}

// EvaluatedItem marks given index of array as evaluated.
func (ctx ValidationContext) EvaluatedItem(index int) {
	delete(ctx.result.unevalItems, index)
}

// Validate validates schema s with value v. Extension must use this method instead of
// *Schema.ValidateInterface method. This will be useful in implementing keywords like
// allOf/oneOf
//
// spath is relative-json-pointer to s
// vpath is relative-json-pointer to v.
func (ctx ValidationContext) Validate(s *Schema, spath string, v interface{}, vpath string) error {
	// empty vpath means s applies to the instance currently being
	// validated, which requires the in-place variant.
	if vpath == "" {
		return ctx.validateInplace(s, spath)
	}
	return ctx.validate(s, spath, v, vpath)
}

// Error used to construct validation error by extensions.
//
// keywordPath is relative-json-pointer to keyword.
func (ctx ValidationContext) Error(keywordPath string, format string, a ...interface{}) *ValidationError {
	return ctx.validationError(keywordPath, format, a...)
}

// Group is used by extensions to group multiple errors as causes to parent error.
// This is useful in implementing keywords like allOf where each schema specified
// in allOf can result a validationError.
//
// The receiver is unnamed and unused; the method exists on ValidationError
// purely so extensions can reach it from an error value.
func (ValidationError) Group(parent *ValidationError, causes ...error) error {
	return parent.add(causes...)
}
diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v5/format.go b/vendor/github.com/santhosh-tekuri/jsonschema/v5/format.go
new file mode 100644
index 0000000000..05686073f0
--- /dev/null
+++ b/vendor/github.com/santhosh-tekuri/jsonschema/v5/format.go
@@ -0,0 +1,567 @@
+package jsonschema
+
+import (
+ "errors"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+)
+
// Formats is a registry of functions, which know how to validate
// a specific format.
//
// New Formats can be registered by adding to this map. Key is format name,
// value is function that knows how to validate that format.
//
// Note: every validator returns true for non-string input, because the
// "format" keyword only constrains values of the matching type.
var Formats = map[string]func(interface{}) bool{
	"date-time":             isDateTime,
	"date":                  isDate,
	"time":                  isTime,
	"duration":              isDuration,
	"period":                isPeriod,
	"hostname":              isHostname,
	"email":                 isEmail,
	"ip-address":            isIPV4,
	"ipv4":                  isIPV4,
	"ipv6":                  isIPV6,
	"uri":                   isURI,
	"iri":                   isURI,
	"uri-reference":         isURIReference,
	"uriref":                isURIReference,
	"iri-reference":         isURIReference,
	"uri-template":          isURITemplate,
	"regex":                 isRegex,
	"json-pointer":          isJSONPointer,
	"relative-json-pointer": isRelativeJSONPointer,
	"uuid":                  isUUID,
}
+
+// isDateTime tells whether given string is a valid date representation
+// as defined by RFC 3339, section 5.6.
+//
+// see https://datatracker.ietf.org/doc/html/rfc3339#section-5.6, for details
+func isDateTime(v interface{}) bool {
+ s, ok := v.(string)
+ if !ok {
+ return true
+ }
+ if len(s) < 20 { // yyyy-mm-ddThh:mm:ssZ
+ return false
+ }
+ if s[10] != 'T' && s[10] != 't' {
+ return false
+ }
+ return isDate(s[:10]) && isTime(s[11:])
+}
+
// isDate tells whether given string is a valid full-date production
// as defined by RFC 3339, section 5.6.
//
// see https://datatracker.ietf.org/doc/html/rfc3339#section-5.6, for details
func isDate(v interface{}) bool {
	str, isStr := v.(string)
	if !isStr {
		// non-string values are not constrained by this format
		return true
	}
	// time.Parse enforces both the layout and calendar validity
	if _, err := time.Parse("2006-01-02", str); err != nil {
		return false
	}
	return true
}
+
// isTime tells whether given string is a valid full-time production
// as defined by RFC 3339, section 5.6.
//
// see https://datatracker.ietf.org/doc/html/rfc3339#section-5.6, for details
func isTime(v interface{}) bool {
	str, ok := v.(string)
	if !ok {
		// non-string values are not constrained by this format
		return true
	}

	// golang time package does not support leap seconds.
	// so we are parsing it manually here.

	// hh:mm:ss
	// 01234567
	if len(str) < 9 || str[2] != ':' || str[5] != ':' {
		return false
	}
	// isInRange parses str as an integer and checks min <= n <= max.
	isInRange := func(str string, min, max int) (int, bool) {
		n, err := strconv.Atoi(str)
		if err != nil {
			return 0, false
		}
		if n < min || n > max {
			return 0, false
		}
		return n, true
	}
	var h, m, s int
	if h, ok = isInRange(str[0:2], 0, 23); !ok {
		return false
	}
	if m, ok = isInRange(str[3:5], 0, 59); !ok {
		return false
	}
	// 60 is allowed here for a potential leap second; checked below
	if s, ok = isInRange(str[6:8], 0, 60); !ok {
		return false
	}
	str = str[8:]

	// parse secfrac if present
	if str[0] == '.' {
		// dot following more than one digit
		str = str[1:]
		var numDigits int
		for str != "" {
			if str[0] < '0' || str[0] > '9' {
				break
			}
			numDigits++
			str = str[1:]
		}
		if numDigits == 0 {
			return false
		}
	}

	// a timezone (Z or numeric offset) is mandatory in full-time
	if len(str) == 0 {
		return false
	}

	if str[0] == 'z' || str[0] == 'Z' {
		if len(str) != 1 {
			return false
		}
	} else {
		// time-numoffset
		// +hh:mm
		// 012345
		if len(str) != 6 || str[3] != ':' {
			return false
		}

		// sign is inverted because we normalize the local time to UTC:
		// a '+' offset means local time is ahead of UTC, so subtract.
		var sign int
		if str[0] == '+' {
			sign = -1
		} else if str[0] == '-' {
			sign = +1
		} else {
			return false
		}

		var zh, zm int
		if zh, ok = isInRange(str[1:3], 0, 23); !ok {
			return false
		}
		if zm, ok = isInRange(str[4:6], 0, 59); !ok {
			return false
		}

		// apply timezone offset
		hm := (h*60 + m) + sign*(zh*60+zm)
		if hm < 0 {
			hm += 24 * 60
		}
		h, m = hm/60, hm%60
	}

	// check leapsecond: only 23:59:60 UTC is a valid leap second
	if s == 60 { // leap second
		if h != 23 || m != 59 {
			return false
		}
	}

	return true
}
+
// isDuration tells whether given string is a valid duration format
// from the ISO 8601 ABNF as given in Appendix A of RFC 3339.
//
// see https://datatracker.ietf.org/doc/html/rfc3339#appendix-A, for details
func isDuration(v interface{}) bool {
	s, ok := v.(string)
	if !ok {
		// non-string values are not constrained by this format
		return true
	}
	if len(s) == 0 || s[0] != 'P' {
		return false
	}
	s = s[1:]
	// parseUnits consumes digit+unit pairs from the captured s until
	// "T" or end of input, returning the unit letters in order.
	// Note: it mutates the outer s.
	parseUnits := func() (units string, ok bool) {
		for len(s) > 0 && s[0] != 'T' {
			digits := false
			for {
				if len(s) == 0 {
					break
				}
				if s[0] < '0' || s[0] > '9' {
					break
				}
				digits = true
				s = s[1:]
			}
			if !digits || len(s) == 0 {
				return units, false
			}
			units += s[:1]
			s = s[1:]
		}
		return units, true
	}
	units, ok := parseUnits()
	if !ok {
		return false
	}
	if units == "W" {
		return len(s) == 0 // P_W
	}
	if len(units) > 0 {
		// the ABNF nests year>month>day, so the only valid unit sequences
		// are contiguous substrings of "YMD" (Y, M, D, YM, MD, YMD).
		if strings.Index("YMD", units) == -1 {
			return false
		}
		if len(s) == 0 {
			return true // "P" dur-date
		}
	}
	if len(s) == 0 || s[0] != 'T' {
		return false
	}
	s = s[1:]
	units, ok = parseUnits()
	// same contiguous-substring rule for the time part: H, M, S, HM, MS, HMS
	return ok && len(s) == 0 && len(units) > 0 && strings.Index("HMS", units) != -1
}
+
+// isPeriod tells whether given string is a valid period format
+// from the ISO 8601 ABNF as given in Appendix A of RFC 3339.
+//
+// see https://datatracker.ietf.org/doc/html/rfc3339#appendix-A, for details
+func isPeriod(v interface{}) bool {
+ s, ok := v.(string)
+ if !ok {
+ return true
+ }
+ slash := strings.IndexByte(s, '/')
+ if slash == -1 {
+ return false
+ }
+ start, end := s[:slash], s[slash+1:]
+ if isDateTime(start) {
+ return isDateTime(end) || isDuration(end)
+ }
+ return isDuration(start) && isDateTime(end)
+}
+
// isHostname tells whether given string is a valid representation
// for an Internet host name, as defined by RFC 1034 section 3.1 and
// RFC 1123 section 2.1.
//
// See https://en.wikipedia.org/wiki/Hostname#Restrictions_on_valid_host_names, for details.
func isHostname(v interface{}) bool {
	s, ok := v.(string)
	if !ok {
		// non-string values are not constrained by this format
		return true
	}
	// entire hostname (including the delimiting dots but not a trailing dot) has a maximum of 253 ASCII characters
	s = strings.TrimSuffix(s, ".")
	if len(s) > 253 {
		return false
	}

	// Hostnames are composed of series of labels concatenated with dots, as are all domain names
	for _, label := range strings.Split(s, ".") {
		// Each label must be from 1 to 63 characters long
		if labelLen := len(label); labelLen < 1 || labelLen > 63 {
			return false
		}

		// labels must not start with a hyphen.
		// RFC 1123 section 2.1: restriction on the first character
		// is relaxed to allow either a letter or a digit.
		// Fix: inspect the label's first byte, not the hostname's —
		// the original checked s[0], silently accepting e.g. "a.-b.com".
		if label[0] == '-' {
			return false
		}

		// must not end with a hyphen
		if label[len(label)-1] == '-' {
			return false
		}

		// labels may contain only the ASCII letters 'a' through 'z' (in a case-insensitive manner),
		// the digits '0' through '9', and the hyphen ('-')
		for _, c := range label {
			if valid := (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || (c >= '0' && c <= '9') || (c == '-'); !valid {
				return false
			}
		}
	}

	return true
}
+
// isEmail tells whether given string is a valid Internet email address
// as defined by RFC 5322, section 3.4.1.
//
// See https://en.wikipedia.org/wiki/Email_address, for details.
func isEmail(v interface{}) bool {
	s, ok := v.(string)
	if !ok {
		// non-string values are not constrained by this format
		return true
	}
	// entire email address to be no more than 254 characters long
	if len(s) > 254 {
		return false
	}

	// email address is generally recognized as having two parts joined with an at-sign;
	// LastIndexByte is used because the local part may itself contain '@' when quoted
	at := strings.LastIndexByte(s, '@')
	if at == -1 {
		return false
	}
	local := s[0:at]
	domain := s[at+1:]

	// local part may be up to 64 characters long
	if len(local) > 64 {
		return false
	}

	// domain if enclosed in brackets, must match an IP address
	if len(domain) >= 2 && domain[0] == '[' && domain[len(domain)-1] == ']' {
		ip := domain[1 : len(domain)-1]
		if strings.HasPrefix(ip, "IPv6:") {
			return isIPV6(strings.TrimPrefix(ip, "IPv6:"))
		}
		return isIPV4(ip)
	}

	// domain must match the requirements for a hostname
	if !isHostname(domain) {
		return false
	}

	// finally defer to the stdlib parser for the RFC 5322 grammar
	_, err := mail.ParseAddress(s)
	return err == nil
}
+
// isIPV4 tells whether given string is a valid representation of an IPv4 address
// according to the "dotted-quad" ABNF syntax as defined in RFC 2673, section 3.2.
func isIPV4(v interface{}) bool {
	s, ok := v.(string)
	if !ok {
		// non-string values are not constrained by this format
		return true
	}
	groups := strings.Split(s, ".")
	if len(groups) != 4 {
		return false
	}
	for _, group := range groups {
		// each octet is 1-3 bare decimal digits. Checking this up front
		// fixes the original's acceptance of Atoi-parsable forms like
		// "+1" and "-0", and of the multi-zero octet "00".
		if len(group) == 0 || len(group) > 3 {
			return false
		}
		for i := 0; i < len(group); i++ {
			if group[i] < '0' || group[i] > '9' {
				return false
			}
		}
		// leading zeroes should be rejected, as they are treated as octals
		if len(group) > 1 && group[0] == '0' {
			return false
		}
		n, err := strconv.Atoi(group)
		if err != nil {
			return false
		}
		if n > 255 {
			return false
		}
	}
	return true
}
+
// isIPV6 tells whether given string is a valid representation of an IPv6 address
// as defined in RFC 2373, section 2.2.
func isIPV6(v interface{}) bool {
	s, ok := v.(string)
	if !ok {
		// non-string values are not constrained by this format
		return true
	}
	// require at least one colon so that plain IPv4 dotted-quads,
	// which net.ParseIP also accepts, are rejected here
	return strings.Contains(s, ":") && net.ParseIP(s) != nil
}
+
// isURI tells whether given string is valid URI, according to RFC 3986.
func isURI(v interface{}) bool {
	s, ok := v.(string)
	if !ok {
		// non-string values are not constrained by this format
		return true
	}
	u, err := urlParse(s)
	return err == nil && u.IsAbs()
}

// urlParse wraps url.Parse with an extra check that an IPv6 host is
// bracketed and syntactically valid, which url.Parse alone does not enforce.
func urlParse(s string) (*url.URL, error) {
	u, err := url.Parse(s)
	if err != nil {
		return nil, err
	}

	// if hostname is ipv6, validate it
	hostname := u.Hostname()
	if strings.IndexByte(hostname, ':') != -1 {
		if strings.IndexByte(u.Host, '[') == -1 || strings.IndexByte(u.Host, ']') == -1 {
			return nil, errors.New("ipv6 address is not enclosed in brackets")
		}
		if !isIPV6(hostname) {
			return nil, errors.New("invalid ipv6 address")
		}
	}
	return u, nil
}

// isURIReference tells whether given string is a valid URI Reference
// (either a URI or a relative-reference), according to RFC 3986.
func isURIReference(v interface{}) bool {
	s, ok := v.(string)
	if !ok {
		// non-string values are not constrained by this format
		return true
	}
	_, err := urlParse(s)
	// backslash is never valid in a URI reference, but url.Parse allows it
	return err == nil && !strings.Contains(s, `\`)
}
+
// isURITemplate tells whether given string is a valid URI Template
// according to RFC6570.
//
// Current implementation does minimal validation: it only checks that
// braces in each path segment are balanced and not nested.
func isURITemplate(v interface{}) bool {
	s, ok := v.(string)
	if !ok {
		// non-string values are not constrained by this format
		return true
	}
	u, err := urlParse(s)
	if err != nil {
		return false
	}
	// NOTE(review): u.RawPath is only populated when it differs from the
	// decoded Path, so for most inputs this loop iterates over a single
	// empty string and checks nothing — confirm whether u.Path was intended.
	for _, item := range strings.Split(u.RawPath, "/") {
		depth := 0
		for _, ch := range item {
			switch ch {
			case '{':
				depth++
				if depth != 1 {
					return false // nested '{'
				}
			case '}':
				depth--
				if depth != 0 {
					return false // unmatched '}'
				}
			}
		}
		if depth != 0 {
			return false // unclosed '{'
		}
	}
	return true
}
+
// isRegex tells whether given string is a valid regular expression,
// according to the ECMA 262 regular expression dialect.
//
// The implementation uses go-lang regexp package.
func isRegex(v interface{}) bool {
	s, ok := v.(string)
	if !ok {
		// non-string values are not constrained by this format
		return true
	}
	if _, err := regexp.Compile(s); err != nil {
		return false
	}
	return true
}
+
// isJSONPointer tells whether given string is a valid JSON Pointer:
// empty, or '/'-prefixed tokens in which every '~' is followed by '0' or '1'.
//
// Note: It returns false for JSON Pointer URI fragments.
func isJSONPointer(v interface{}) bool {
	s, ok := v.(string)
	if !ok {
		// non-string values are not constrained by this format
		return true
	}
	if s != "" && s[0] != '/' {
		return false
	}
	for _, token := range strings.Split(s, "/") {
		for i := 0; i < len(token); i++ {
			if token[i] != '~' {
				continue
			}
			// '~' must begin the escape "~0" or "~1"
			if i+1 >= len(token) || (token[i+1] != '0' && token[i+1] != '1') {
				return false
			}
		}
	}
	return true
}
+
// isRelativeJSONPointer tells whether given string is a valid Relative JSON Pointer:
// a non-negative integer prefix (no leading zeros) followed by "#" or a JSON Pointer.
//
// see https://tools.ietf.org/html/draft-handrews-relative-json-pointer-01#section-3
func isRelativeJSONPointer(v interface{}) bool {
	s, ok := v.(string)
	if !ok {
		// non-string values are not constrained by this format
		return true
	}
	if s == "" {
		return false
	}
	if s[0] == '0' {
		// a leading '0' must stand alone; "01" leaves "1", which then
		// fails isJSONPointer below, rejecting leading zeros.
		s = s[1:]
	} else if s[0] >= '0' && s[0] <= '9' {
		for s != "" && s[0] >= '0' && s[0] <= '9' {
			s = s[1:]
		}
	} else {
		return false
	}
	return s == "#" || isJSONPointer(s)
}
+
// isUUID tells whether given string is a valid uuid format
// as specified in RFC4122: hex digit groups of 8-4-4-4-12, dash separated.
//
// see https://datatracker.ietf.org/doc/html/rfc4122#page-4, for details
func isUUID(v interface{}) bool {
	s, ok := v.(string)
	if !ok {
		// non-string values are not constrained by this format
		return true
	}
	hexDigit := func(b byte) bool {
		return (b >= '0' && b <= '9') || (b >= 'a' && b <= 'f') || (b >= 'A' && b <= 'F')
	}
	for i, want := range []int{8, 4, 4, 4, 12} {
		if i > 0 {
			// groups after the first are preceded by a dash
			if len(s) == 0 || s[0] != '-' {
				return false
			}
			s = s[1:]
		}
		if len(s) < want {
			return false
		}
		for j := 0; j < want; j++ {
			if !hexDigit(s[j]) {
				return false
			}
		}
		s = s[want:]
	}
	// nothing may follow the last group
	return len(s) == 0
}
diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v5/httploader/httploader.go b/vendor/github.com/santhosh-tekuri/jsonschema/v5/httploader/httploader.go
new file mode 100644
index 0000000000..4198cfe37c
--- /dev/null
+++ b/vendor/github.com/santhosh-tekuri/jsonschema/v5/httploader/httploader.go
@@ -0,0 +1,38 @@
+// Package httploader implements loader.Loader for http/https url.
+//
+// The package is typically only imported for the side effect of
+// registering its Loaders.
+//
+// To use httploader, link this package into your program:
+//
+// import _ "github.com/santhosh-tekuri/jsonschema/v5/httploader"
+package httploader
+
+import (
+ "fmt"
+ "io"
+ "net/http"
+
+ "github.com/santhosh-tekuri/jsonschema/v5"
+)
+
// Client is the default HTTP Client used to Get the resource.
var Client = http.DefaultClient

// Load loads resource from given http(s) url.
// The caller owns the returned body and must close it.
func Load(url string) (io.ReadCloser, error) {
	resp, err := Client.Get(url)
	if err != nil {
		return nil, err
	}
	if resp.StatusCode != http.StatusOK {
		// close the body here since it is not handed to the caller
		_ = resp.Body.Close()
		return nil, fmt.Errorf("%s returned status code %d", url, resp.StatusCode)
	}
	return resp.Body, nil
}

// init registers Load for both http and https schemes as a side effect
// of importing this package.
func init() {
	jsonschema.Loaders["http"] = Load
	jsonschema.Loaders["https"] = Load
}
diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v5/loader.go b/vendor/github.com/santhosh-tekuri/jsonschema/v5/loader.go
new file mode 100644
index 0000000000..c94195c335
--- /dev/null
+++ b/vendor/github.com/santhosh-tekuri/jsonschema/v5/loader.go
@@ -0,0 +1,60 @@
+package jsonschema
+
+import (
+ "fmt"
+ "io"
+ "net/url"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+)
+
// loadFileURL opens the local file named by a file:// url.
func loadFileURL(s string) (io.ReadCloser, error) {
	u, err := url.Parse(s)
	if err != nil {
		return nil, err
	}
	f := u.Path
	if runtime.GOOS == "windows" {
		// "/C:/dir/file" -> `C:\dir\file`
		f = strings.TrimPrefix(f, "/")
		f = filepath.FromSlash(f)
	}
	return os.Open(f)
}

// Loaders is a registry of functions, which know how to load
// absolute url of specific schema.
//
// New loaders can be registered by adding to this map. Key is schema,
// value is function that knows how to load url of that schema
var Loaders = map[string]func(url string) (io.ReadCloser, error){
	"file": loadFileURL,
}
+
// LoaderNotFoundError is the error type returned by Load function.
// It tells that no Loader is registered for that URL Scheme.
type LoaderNotFoundError string

// Error implements the error interface.
func (e LoaderNotFoundError) Error() string {
	return "jsonschema: no Loader found for " + string(e)
}
+
+// LoadURL loads document at given absolute URL. The default implementation
+// uses Loaders registry to lookup by schema and uses that loader.
+//
+// Users can change this variable, if they would like to take complete
+// responsibility of loading given URL. Used by Compiler if its LoadURL
+// field is nil.
+var LoadURL = func(s string) (io.ReadCloser, error) {
+ u, err := url.Parse(s)
+ if err != nil {
+ return nil, err
+ }
+ loader, ok := Loaders[u.Scheme]
+ if !ok {
+ return nil, LoaderNotFoundError(s)
+
+ }
+ return loader(s)
+}
diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v5/output.go b/vendor/github.com/santhosh-tekuri/jsonschema/v5/output.go
new file mode 100644
index 0000000000..d65ae2a929
--- /dev/null
+++ b/vendor/github.com/santhosh-tekuri/jsonschema/v5/output.go
@@ -0,0 +1,77 @@
+package jsonschema
+
// Flag is output format with simple boolean property valid.
type Flag struct {
	Valid bool `json:"valid"`
}

// FlagOutput returns output in flag format.
// The receiver is a validation error, so Valid is always false.
func (ve *ValidationError) FlagOutput() Flag {
	return Flag{}
}

// Basic ---

// Basic is output format with flat list of output units.
type Basic struct {
	Valid  bool         `json:"valid"`
	Errors []BasicError `json:"errors"`
}

// BasicError is output unit in basic format.
type BasicError struct {
	KeywordLocation         string `json:"keywordLocation"`
	AbsoluteKeywordLocation string `json:"absoluteKeywordLocation"`
	InstanceLocation        string `json:"instanceLocation"`
	Error                   string `json:"error"`
}
+
+// BasicOutput returns output in basic format
+func (ve *ValidationError) BasicOutput() Basic {
+ var errors []BasicError
+ var flatten func(*ValidationError)
+ flatten = func(ve *ValidationError) {
+ errors = append(errors, BasicError{
+ KeywordLocation: ve.KeywordLocation,
+ AbsoluteKeywordLocation: ve.AbsoluteKeywordLocation,
+ InstanceLocation: ve.InstanceLocation,
+ Error: ve.Message,
+ })
+ for _, cause := range ve.Causes {
+ flatten(cause)
+ }
+ }
+ flatten(ve)
+ return Basic{Errors: errors}
+}
+
// Detailed ---

// Detailed is output format based on structure of schema.
type Detailed struct {
	Valid                   bool       `json:"valid"`
	KeywordLocation         string     `json:"keywordLocation"`
	AbsoluteKeywordLocation string     `json:"absoluteKeywordLocation"`
	InstanceLocation        string     `json:"instanceLocation"`
	Error                   string     `json:"error,omitempty"`  // set only on leaf units
	Errors                  []Detailed `json:"errors,omitempty"` // set only on non-leaf units
}
+
+// DetailedOutput returns output in detailed format
+func (ve *ValidationError) DetailedOutput() Detailed {
+ var errors []Detailed
+ for _, cause := range ve.Causes {
+ errors = append(errors, cause.DetailedOutput())
+ }
+ var message = ve.Message
+ if len(ve.Causes) > 0 {
+ message = ""
+ }
+ return Detailed{
+ KeywordLocation: ve.KeywordLocation,
+ AbsoluteKeywordLocation: ve.AbsoluteKeywordLocation,
+ InstanceLocation: ve.InstanceLocation,
+ Error: message,
+ Errors: errors,
+ }
+}
diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v5/resource.go b/vendor/github.com/santhosh-tekuri/jsonschema/v5/resource.go
new file mode 100644
index 0000000000..18349daac7
--- /dev/null
+++ b/vendor/github.com/santhosh-tekuri/jsonschema/v5/resource.go
@@ -0,0 +1,280 @@
+package jsonschema
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/url"
+ "path/filepath"
+ "runtime"
+ "strconv"
+ "strings"
+)
+
// resource represents a schema document (or a subschema within one)
// addressable by url and/or json-pointer fragment.
type resource struct {
	url          string // base url of resource. can be empty
	floc         string // fragment with json-pointer from root resource
	doc          interface{}
	draft        *Draft
	subresources map[string]*resource // key is floc. only applicable for root resource
	schema       *Schema
}

// String returns the canonical identifier: base url plus fragment.
func (r *resource) String() string {
	return r.url + r.floc
}
+
// newResource reads and parses the document from r and returns a root
// resource anchored at the absolute form of url. The url must not
// contain a fragment; passing one is a programming error.
func newResource(url string, r io.Reader) (*resource, error) {
	if strings.IndexByte(url, '#') != -1 {
		panic(fmt.Sprintf("BUG: newResource(%q)", url))
	}
	doc, err := unmarshal(r)
	if err != nil {
		return nil, fmt.Errorf("jsonschema: invalid json %s: %v", url, err)
	}
	url, err = toAbs(url)
	if err != nil {
		return nil, err
	}
	return &resource{
		url:  url,
		floc: "#",
		doc:  doc,
	}, nil
}
+
// fillSubschemas fills subschemas in res into r.subresources.
// It first validates res.doc against the draft's metaschema, then
// enforces that no two subresources share a canonical uri.
func (r *resource) fillSubschemas(c *Compiler, res *resource) error {
	if err := c.validateSchema(r, res.doc, res.floc[1:]); err != nil {
		return err
	}

	if r.subresources == nil {
		r.subresources = make(map[string]*resource)
	}
	if err := r.draft.listSubschemas(res, r.baseURL(res.floc), r.subresources); err != nil {
		return err
	}

	// ensure subresource.url uniqueness
	url2floc := make(map[string]string)
	for _, sr := range r.subresources {
		if sr.url != "" {
			if floc, ok := url2floc[sr.url]; ok {
				return fmt.Errorf("jsonschema: %q and %q in %s have same canonical-uri", floc[1:], sr.floc[1:], r.url)
			}
			url2floc[sr.url] = sr.floc
		}
	}

	return nil
}
+
// listResources lists all subresources nested under res, identified by
// their fragment location having res.floc as a proper prefix.
func (r *resource) listResources(res *resource) []*resource {
	var result []*resource
	prefix := res.floc + "/"
	for _, sr := range r.subresources {
		if strings.HasPrefix(sr.floc, prefix) {
			result = append(result, sr)
		}
	}
	return result
}

// findResource returns the resource (root or subresource) whose base
// url equals the given url, or nil when none matches.
func (r *resource) findResource(url string) *resource {
	if r.url == url {
		return r
	}
	for _, res := range r.subresources {
		if res.url == url {
			return res
		}
	}
	return nil
}
+
// resolveFragment resolves fragment f with sr as base: first by anchor,
// then by json-pointer, falling back to walking the raw document for
// locations that are not registered subresources. Returns (nil, nil)
// when the fragment does not resolve.
func (r *resource) resolveFragment(c *Compiler, sr *resource, f string) (*resource, error) {
	if f == "#" || f == "#/" {
		return sr, nil
	}

	// resolve by anchor
	if !strings.HasPrefix(f, "#/") {
		// check in given resource
		for _, anchor := range r.draft.anchors(sr.doc) {
			if anchor == f[1:] {
				return sr, nil
			}
		}

		// check in subresources that has same base url
		prefix := sr.floc + "/"
		for _, res := range r.subresources {
			if strings.HasPrefix(res.floc, prefix) && r.baseURL(res.floc) == sr.url {
				for _, anchor := range r.draft.anchors(res.doc) {
					if anchor == f[1:] {
						return res, nil
					}
				}
			}
		}
		return nil, nil
	}

	// resolve by ptr
	floc := sr.floc + f[1:]
	if res, ok := r.subresources[floc]; ok {
		return res, nil
	}

	// non-standrad location: walk the document token by token
	doc := r.doc
	for _, item := range strings.Split(floc[2:], "/") {
		// NOTE(review): '~' escapes are undone before percent-decoding;
		// RFC 6901 fragment handling suggests the reverse order — confirm.
		item = strings.Replace(item, "~1", "/", -1)
		item = strings.Replace(item, "~0", "~", -1)
		item, err := url.PathUnescape(item)
		if err != nil {
			return nil, err
		}
		switch d := doc.(type) {
		case map[string]interface{}:
			if _, ok := d[item]; !ok {
				return nil, nil
			}
			doc = d[item]
		case []interface{}:
			index, err := strconv.Atoi(item)
			if err != nil {
				return nil, err
			}
			if index < 0 || index >= len(d) {
				return nil, nil
			}
			doc = d[index]
		default:
			return nil, nil
		}
	}

	// register the newly discovered location as a subresource
	id, err := r.draft.resolveID(r.baseURL(floc), doc)
	if err != nil {
		return nil, err
	}
	res := &resource{url: id, floc: floc, doc: doc}
	r.subresources[floc] = res
	if err := r.fillSubschemas(c, res); err != nil {
		return nil, err
	}
	return res, nil
}
+
// baseURL returns the base url in effect at fragment location floc:
// the url of the nearest enclosing subresource that declares one,
// falling back to the root resource's url.
func (r *resource) baseURL(floc string) string {
	for {
		if sr, ok := r.subresources[floc]; ok {
			if sr.url != "" {
				return sr.url
			}
		}
		// walk up one json-pointer level
		slash := strings.LastIndexByte(floc, '/')
		if slash == -1 {
			break
		}
		floc = floc[:slash]
	}
	return r.url
}
+
+// url helpers ---
+
// toAbs converts s to an absolute url: urls that are already absolute
// are returned as-is; anything else is treated as a filesystem path and
// converted to a file:// url.
func toAbs(s string) (string, error) {
	// if windows absolute file path, convert to file url
	// because: net/url parses driver name as scheme
	if runtime.GOOS == "windows" && len(s) >= 3 && s[1:3] == `:\` {
		s = "file:///" + filepath.ToSlash(s)
	}

	u, err := url.Parse(s)
	if err != nil {
		return "", err
	}
	if u.IsAbs() {
		return s, nil
	}

	// s is filepath
	if s, err = filepath.Abs(s); err != nil {
		return "", err
	}
	if runtime.GOOS == "windows" {
		s = "file:///" + filepath.ToSlash(s)
	} else {
		s = "file://" + s
	}
	u, err = url.Parse(s) // to fix spaces in filepath
	return u.String(), err
}
+
// resolveURL resolves ref against base. urn refs are absolute by
// definition; a urn base keeps only its non-fragment part before ref is
// appended. Everything else follows RFC 3986 reference resolution.
func resolveURL(base, ref string) (string, error) {
	if ref == "" {
		return base, nil
	}
	if strings.HasPrefix(ref, "urn:") {
		return ref, nil
	}

	refURL, err := url.Parse(ref)
	if err != nil {
		return "", err
	}
	if refURL.IsAbs() {
		return ref, nil
	}

	if strings.HasPrefix(base, "urn:") {
		// drop the base's fragment, then append the (fragment) ref
		base, _ = split(base)
		return base + ref, nil
	}

	baseURL, err := url.Parse(base)
	if err != nil {
		return "", err
	}
	return baseURL.ResolveReference(refURL).String(), nil
}
+
// split separates uri into its non-fragment part and its fragment
// (including the '#'). A missing fragment yields "#", and the root
// pointer "#/" is normalized to "#".
func split(uri string) (string, string) {
	if hash := strings.IndexByte(uri, '#'); hash != -1 {
		frag := uri[hash:]
		if frag == "#/" {
			frag = "#"
		}
		return uri[:hash], frag
	}
	return uri, "#"
}
+
// url returns the non-fragment part of the schema's absolute location.
func (s *Schema) url() string {
	u, _ := split(s.Location)
	return u
}

// loc returns the schema's fragment as a json-pointer (without the '#').
func (s *Schema) loc() string {
	_, f := split(s.Location)
	return f[1:]
}
+
// unmarshal decodes a single json document from r. Numbers are kept as
// json.Number to avoid float64 precision loss, and any trailing content
// after the top-level value is an error.
func unmarshal(r io.Reader) (interface{}, error) {
	decoder := json.NewDecoder(r)
	decoder.UseNumber()
	var doc interface{}
	if err := decoder.Decode(&doc); err != nil {
		return nil, err
	}
	// a non-nil second token means there is data after the document
	if t, _ := decoder.Token(); t != nil {
		return nil, fmt.Errorf("invalid character %v after top-level value", t)
	}
	return doc, nil
}
diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v5/schema.go b/vendor/github.com/santhosh-tekuri/jsonschema/v5/schema.go
new file mode 100644
index 0000000000..688f0a6fee
--- /dev/null
+++ b/vendor/github.com/santhosh-tekuri/jsonschema/v5/schema.go
@@ -0,0 +1,900 @@
+package jsonschema
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "hash/maphash"
+ "math/big"
+ "net/url"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+ "unicode/utf8"
+)
+
+// A Schema represents compiled version of json-schema.
+type Schema struct {
+ Location string // absolute location
+
+ Draft *Draft // draft used by schema.
+ meta *Schema
+ vocab []string
+ dynamicAnchors []*Schema
+
+ // type agnostic validations
+ Format string
+ format func(interface{}) bool
+ Always *bool // always pass/fail. used when booleans are used as schemas in draft-07.
+ Ref *Schema
+ RecursiveAnchor bool
+ RecursiveRef *Schema
+ DynamicAnchor string
+ DynamicRef *Schema
+ dynamicRefAnchor string
+ Types []string // allowed types.
+ Constant []interface{} // first element in slice is constant value. note: slice is used to capture nil constant.
+ Enum []interface{} // allowed values.
+ enumError string // error message for enum fail. captured here to avoid constructing error message every time.
+ Not *Schema
+ AllOf []*Schema
+ AnyOf []*Schema
+ OneOf []*Schema
+ If *Schema
+ Then *Schema // nil, when If is nil.
+ Else *Schema // nil, when If is nil.
+
+ // object validations
+ MinProperties int // -1 if not specified.
+ MaxProperties int // -1 if not specified.
+ Required []string // list of required properties.
+ Properties map[string]*Schema
+ PropertyNames *Schema
+ RegexProperties bool // property names must be valid regex. used only in draft4 as workaround in metaschema.
+ PatternProperties map[*regexp.Regexp]*Schema
+ AdditionalProperties interface{} // nil or bool or *Schema.
+ Dependencies map[string]interface{} // map value is *Schema or []string.
+ DependentRequired map[string][]string
+ DependentSchemas map[string]*Schema
+ UnevaluatedProperties *Schema
+
+ // array validations
+ MinItems int // -1 if not specified.
+ MaxItems int // -1 if not specified.
+ UniqueItems bool
+ Items interface{} // nil or *Schema or []*Schema
+ AdditionalItems interface{} // nil or bool or *Schema.
+ PrefixItems []*Schema
+ Items2020 *Schema // items keyword reintroduced in draft 2020-12
+ Contains *Schema
+ ContainsEval bool // whether any item in an array that passes validation of the contains schema is considered "evaluated"
+ MinContains int // 1 if not specified
+ MaxContains int // -1 if not specified
+ UnevaluatedItems *Schema
+
+ // string validations
+ MinLength int // -1 if not specified.
+ MaxLength int // -1 if not specified.
+ Pattern *regexp.Regexp
+ ContentEncoding string
+ decoder func(string) ([]byte, error)
+ ContentMediaType string
+ mediaType func([]byte) error
+ ContentSchema *Schema
+
+ // number validators
+ Minimum *big.Rat
+ ExclusiveMinimum *big.Rat
+ Maximum *big.Rat
+ ExclusiveMaximum *big.Rat
+ MultipleOf *big.Rat
+
+ // annotations. captured only when Compiler.ExtractAnnotations is true.
+ Title string
+ Description string
+ Default interface{}
+ Comment string
+ ReadOnly bool
+ WriteOnly bool
+ Examples []interface{}
+ Deprecated bool
+
+ // user defined extensions
+ Extensions map[string]ExtSchema
+}
+
+func (s *Schema) String() string {
+ return s.Location
+}
+
+func newSchema(url, floc string, draft *Draft, doc interface{}) *Schema {
+ // fill with default values
+ s := &Schema{
+ Location: url + floc,
+ Draft: draft,
+ MinProperties: -1,
+ MaxProperties: -1,
+ MinItems: -1,
+ MaxItems: -1,
+ MinContains: 1,
+ MaxContains: -1,
+ MinLength: -1,
+ MaxLength: -1,
+ }
+
+ if doc, ok := doc.(map[string]interface{}); ok {
+ if ra, ok := doc["$recursiveAnchor"]; ok {
+ if ra, ok := ra.(bool); ok {
+ s.RecursiveAnchor = ra
+ }
+ }
+ if da, ok := doc["$dynamicAnchor"]; ok {
+ if da, ok := da.(string); ok {
+ s.DynamicAnchor = da
+ }
+ }
+ }
+ return s
+}
+
+func (s *Schema) hasVocab(name string) bool {
+ if s == nil { // during bootstrap
+ return true
+ }
+ if name == "core" {
+ return true
+ }
+ for _, url := range s.vocab {
+ if url == "https://json-schema.org/draft/2019-09/vocab/"+name {
+ return true
+ }
+ if url == "https://json-schema.org/draft/2020-12/vocab/"+name {
+ return true
+ }
+ }
+ return false
+}
+
+// Validate validates given doc, against the json-schema s.
+//
+// the v must be the raw json value. for number precision
+// unmarshal with json.UseNumber().
+//
+// returns *ValidationError if v does not confirm with schema s.
+// returns InfiniteLoopError if it detects loop during validation.
+// returns InvalidJSONTypeError if it detects any non json value in v.
+func (s *Schema) Validate(v interface{}) (err error) {
+ return s.validateValue(v, "")
+}
+
+func (s *Schema) validateValue(v interface{}, vloc string) (err error) {
+ defer func() {
+ if r := recover(); r != nil {
+ switch r := r.(type) {
+ case InfiniteLoopError, InvalidJSONTypeError:
+ err = r.(error)
+ default:
+ panic(r)
+ }
+ }
+ }()
+ if _, err := s.validate(nil, 0, "", v, vloc); err != nil {
+ ve := ValidationError{
+ KeywordLocation: "",
+ AbsoluteKeywordLocation: s.Location,
+ InstanceLocation: vloc,
+ Message: fmt.Sprintf("doesn't validate with %s", s.Location),
+ }
+ return ve.causes(err)
+ }
+ return nil
+}
+
+// validate validates given value v with this schema.
+func (s *Schema) validate(scope []schemaRef, vscope int, spath string, v interface{}, vloc string) (result validationResult, err error) {
+ validationError := func(keywordPath string, format string, a ...interface{}) *ValidationError {
+ return &ValidationError{
+ KeywordLocation: keywordLocation(scope, keywordPath),
+ AbsoluteKeywordLocation: joinPtr(s.Location, keywordPath),
+ InstanceLocation: vloc,
+ Message: fmt.Sprintf(format, a...),
+ }
+ }
+
+ sref := schemaRef{spath, s, false}
+ if err := checkLoop(scope[len(scope)-vscope:], sref); err != nil {
+ panic(err)
+ }
+ scope = append(scope, sref)
+ vscope++
+
+ // populate result
+ switch v := v.(type) {
+ case map[string]interface{}:
+ result.unevalProps = make(map[string]struct{})
+ for pname := range v {
+ result.unevalProps[pname] = struct{}{}
+ }
+ case []interface{}:
+ result.unevalItems = make(map[int]struct{})
+ for i := range v {
+ result.unevalItems[i] = struct{}{}
+ }
+ }
+
+ validate := func(sch *Schema, schPath string, v interface{}, vpath string) error {
+ vloc := vloc
+ if vpath != "" {
+ vloc += "/" + vpath
+ }
+ _, err := sch.validate(scope, 0, schPath, v, vloc)
+ return err
+ }
+
+ validateInplace := func(sch *Schema, schPath string) error {
+ vr, err := sch.validate(scope, vscope, schPath, v, vloc)
+ if err == nil {
+ // update result
+ for pname := range result.unevalProps {
+ if _, ok := vr.unevalProps[pname]; !ok {
+ delete(result.unevalProps, pname)
+ }
+ }
+ for i := range result.unevalItems {
+ if _, ok := vr.unevalItems[i]; !ok {
+ delete(result.unevalItems, i)
+ }
+ }
+ }
+ return err
+ }
+
+ if s.Always != nil {
+ if !*s.Always {
+ return result, validationError("", "not allowed")
+ }
+ return result, nil
+ }
+
+ if len(s.Types) > 0 {
+ vType := jsonType(v)
+ matched := false
+ for _, t := range s.Types {
+ if vType == t {
+ matched = true
+ break
+ } else if t == "integer" && vType == "number" {
+ num, _ := new(big.Rat).SetString(fmt.Sprint(v))
+ if num.IsInt() {
+ matched = true
+ break
+ }
+ }
+ }
+ if !matched {
+ return result, validationError("type", "expected %s, but got %s", strings.Join(s.Types, " or "), vType)
+ }
+ }
+
+ var errors []error
+
+ if len(s.Constant) > 0 {
+ if !equals(v, s.Constant[0]) {
+ switch jsonType(s.Constant[0]) {
+ case "object", "array":
+ errors = append(errors, validationError("const", "const failed"))
+ default:
+ errors = append(errors, validationError("const", "value must be %#v", s.Constant[0]))
+ }
+ }
+ }
+
+ if len(s.Enum) > 0 {
+ matched := false
+ for _, item := range s.Enum {
+ if equals(v, item) {
+ matched = true
+ break
+ }
+ }
+ if !matched {
+ errors = append(errors, validationError("enum", s.enumError))
+ }
+ }
+
+ if s.format != nil && !s.format(v) {
+ var val = v
+ if v, ok := v.(string); ok {
+ val = quote(v)
+ }
+ errors = append(errors, validationError("format", "%v is not valid %s", val, quote(s.Format)))
+ }
+
+ switch v := v.(type) {
+ case map[string]interface{}:
+ if s.MinProperties != -1 && len(v) < s.MinProperties {
+ errors = append(errors, validationError("minProperties", "minimum %d properties allowed, but found %d properties", s.MinProperties, len(v)))
+ }
+ if s.MaxProperties != -1 && len(v) > s.MaxProperties {
+ errors = append(errors, validationError("maxProperties", "maximum %d properties allowed, but found %d properties", s.MaxProperties, len(v)))
+ }
+ if len(s.Required) > 0 {
+ var missing []string
+ for _, pname := range s.Required {
+ if _, ok := v[pname]; !ok {
+ missing = append(missing, quote(pname))
+ }
+ }
+ if len(missing) > 0 {
+ errors = append(errors, validationError("required", "missing properties: %s", strings.Join(missing, ", ")))
+ }
+ }
+
+ for pname, sch := range s.Properties {
+ if pvalue, ok := v[pname]; ok {
+ delete(result.unevalProps, pname)
+ if err := validate(sch, "properties/"+escape(pname), pvalue, escape(pname)); err != nil {
+ errors = append(errors, err)
+ }
+ }
+ }
+
+ if s.PropertyNames != nil {
+ for pname := range v {
+ if err := validate(s.PropertyNames, "propertyNames", pname, escape(pname)); err != nil {
+ errors = append(errors, err)
+ }
+ }
+ }
+
+ if s.RegexProperties {
+ for pname := range v {
+ if !isRegex(pname) {
+ errors = append(errors, validationError("", "patternProperty %s is not valid regex", quote(pname)))
+ }
+ }
+ }
+ for pattern, sch := range s.PatternProperties {
+ for pname, pvalue := range v {
+ if pattern.MatchString(pname) {
+ delete(result.unevalProps, pname)
+ if err := validate(sch, "patternProperties/"+escape(pattern.String()), pvalue, escape(pname)); err != nil {
+ errors = append(errors, err)
+ }
+ }
+ }
+ }
+ if s.AdditionalProperties != nil {
+ if allowed, ok := s.AdditionalProperties.(bool); ok {
+ if !allowed && len(result.unevalProps) > 0 {
+ errors = append(errors, validationError("additionalProperties", "additionalProperties %s not allowed", result.unevalPnames()))
+ }
+ } else {
+ schema := s.AdditionalProperties.(*Schema)
+ for pname := range result.unevalProps {
+ if pvalue, ok := v[pname]; ok {
+ if err := validate(schema, "additionalProperties", pvalue, escape(pname)); err != nil {
+ errors = append(errors, err)
+ }
+ }
+ }
+ }
+ result.unevalProps = nil
+ }
+ for dname, dvalue := range s.Dependencies {
+ if _, ok := v[dname]; ok {
+ switch dvalue := dvalue.(type) {
+ case *Schema:
+ if err := validateInplace(dvalue, "dependencies/"+escape(dname)); err != nil {
+ errors = append(errors, err)
+ }
+ case []string:
+ for i, pname := range dvalue {
+ if _, ok := v[pname]; !ok {
+ errors = append(errors, validationError("dependencies/"+escape(dname)+"/"+strconv.Itoa(i), "property %s is required, if %s property exists", quote(pname), quote(dname)))
+ }
+ }
+ }
+ }
+ }
+ for dname, dvalue := range s.DependentRequired {
+ if _, ok := v[dname]; ok {
+ for i, pname := range dvalue {
+ if _, ok := v[pname]; !ok {
+ errors = append(errors, validationError("dependentRequired/"+escape(dname)+"/"+strconv.Itoa(i), "property %s is required, if %s property exists", quote(pname), quote(dname)))
+ }
+ }
+ }
+ }
+ for dname, sch := range s.DependentSchemas {
+ if _, ok := v[dname]; ok {
+ if err := validateInplace(sch, "dependentSchemas/"+escape(dname)); err != nil {
+ errors = append(errors, err)
+ }
+ }
+ }
+
+ case []interface{}:
+ if s.MinItems != -1 && len(v) < s.MinItems {
+ errors = append(errors, validationError("minItems", "minimum %d items required, but found %d items", s.MinItems, len(v)))
+ }
+ if s.MaxItems != -1 && len(v) > s.MaxItems {
+ errors = append(errors, validationError("maxItems", "maximum %d items required, but found %d items", s.MaxItems, len(v)))
+ }
+ if s.UniqueItems {
+ if len(v) <= 20 {
+ outer1:
+ for i := 1; i < len(v); i++ {
+ for j := 0; j < i; j++ {
+ if equals(v[i], v[j]) {
+ errors = append(errors, validationError("uniqueItems", "items at index %d and %d are equal", j, i))
+ break outer1
+ }
+ }
+ }
+ } else {
+ m := make(map[uint64][]int)
+ var h maphash.Hash
+ outer2:
+ for i, item := range v {
+ h.Reset()
+ hash(item, &h)
+ k := h.Sum64()
+ if err != nil {
+ panic(err)
+ }
+ arr, ok := m[k]
+ if ok {
+ for _, j := range arr {
+ if equals(v[j], item) {
+ errors = append(errors, validationError("uniqueItems", "items at index %d and %d are equal", j, i))
+ break outer2
+ }
+ }
+ }
+ arr = append(arr, i)
+ m[k] = arr
+ }
+ }
+ }
+
+ // items + additionalItems
+ switch items := s.Items.(type) {
+ case *Schema:
+ for i, item := range v {
+ if err := validate(items, "items", item, strconv.Itoa(i)); err != nil {
+ errors = append(errors, err)
+ }
+ }
+ result.unevalItems = nil
+ case []*Schema:
+ for i, item := range v {
+ if i < len(items) {
+ delete(result.unevalItems, i)
+ if err := validate(items[i], "items/"+strconv.Itoa(i), item, strconv.Itoa(i)); err != nil {
+ errors = append(errors, err)
+ }
+ } else if sch, ok := s.AdditionalItems.(*Schema); ok {
+ delete(result.unevalItems, i)
+ if err := validate(sch, "additionalItems", item, strconv.Itoa(i)); err != nil {
+ errors = append(errors, err)
+ }
+ } else {
+ break
+ }
+ }
+ if additionalItems, ok := s.AdditionalItems.(bool); ok {
+ if additionalItems {
+ result.unevalItems = nil
+ } else if len(v) > len(items) {
+ errors = append(errors, validationError("additionalItems", "only %d items are allowed, but found %d items", len(items), len(v)))
+ }
+ }
+ }
+
+ // prefixItems + items
+ for i, item := range v {
+ if i < len(s.PrefixItems) {
+ delete(result.unevalItems, i)
+ if err := validate(s.PrefixItems[i], "prefixItems/"+strconv.Itoa(i), item, strconv.Itoa(i)); err != nil {
+ errors = append(errors, err)
+ }
+ } else if s.Items2020 != nil {
+ delete(result.unevalItems, i)
+ if err := validate(s.Items2020, "items", item, strconv.Itoa(i)); err != nil {
+ errors = append(errors, err)
+ }
+ } else {
+ break
+ }
+ }
+
+ // contains + minContains + maxContains
+ if s.Contains != nil && (s.MinContains != -1 || s.MaxContains != -1) {
+ matched := 0
+ var causes []error
+ for i, item := range v {
+ if err := validate(s.Contains, "contains", item, strconv.Itoa(i)); err != nil {
+ causes = append(causes, err)
+ } else {
+ matched++
+ if s.ContainsEval {
+ delete(result.unevalItems, i)
+ }
+ }
+ }
+ if s.MinContains != -1 && matched < s.MinContains {
+ errors = append(errors, validationError("minContains", "valid must be >= %d, but got %d", s.MinContains, matched).add(causes...))
+ }
+ if s.MaxContains != -1 && matched > s.MaxContains {
+ errors = append(errors, validationError("maxContains", "valid must be <= %d, but got %d", s.MaxContains, matched))
+ }
+ }
+
+ case string:
+ // minLength + maxLength
+ if s.MinLength != -1 || s.MaxLength != -1 {
+ length := utf8.RuneCount([]byte(v))
+ if s.MinLength != -1 && length < s.MinLength {
+ errors = append(errors, validationError("minLength", "length must be >= %d, but got %d", s.MinLength, length))
+ }
+ if s.MaxLength != -1 && length > s.MaxLength {
+ errors = append(errors, validationError("maxLength", "length must be <= %d, but got %d", s.MaxLength, length))
+ }
+ }
+
+ if s.Pattern != nil && !s.Pattern.MatchString(v) {
+ errors = append(errors, validationError("pattern", "does not match pattern %s", quote(s.Pattern.String())))
+ }
+
+ // contentEncoding + contentMediaType
+ if s.decoder != nil || s.mediaType != nil {
+ decoded := s.ContentEncoding == ""
+ var content []byte
+ if s.decoder != nil {
+ b, err := s.decoder(v)
+ if err != nil {
+ errors = append(errors, validationError("contentEncoding", "value is not %s encoded", s.ContentEncoding))
+ } else {
+ content, decoded = b, true
+ }
+ }
+ if decoded && s.mediaType != nil {
+ if s.decoder == nil {
+ content = []byte(v)
+ }
+ if err := s.mediaType(content); err != nil {
+ errors = append(errors, validationError("contentMediaType", "value is not of mediatype %s", quote(s.ContentMediaType)))
+ }
+ }
+ if decoded && s.ContentSchema != nil {
+ contentJSON, err := unmarshal(bytes.NewReader(content))
+ if err != nil {
+ errors = append(errors, validationError("contentSchema", "value is not valid json"))
+ } else {
+ err := validate(s.ContentSchema, "contentSchema", contentJSON, "")
+ if err != nil {
+ errors = append(errors, err)
+ }
+ }
+ }
+ }
+
+ case json.Number, float32, float64, int, int8, int32, int64, uint, uint8, uint32, uint64:
+ // lazy convert to *big.Rat to avoid allocation
+ var numVal *big.Rat
+ num := func() *big.Rat {
+ if numVal == nil {
+ numVal, _ = new(big.Rat).SetString(fmt.Sprint(v))
+ }
+ return numVal
+ }
+ f64 := func(r *big.Rat) float64 {
+ f, _ := r.Float64()
+ return f
+ }
+ if s.Minimum != nil && num().Cmp(s.Minimum) < 0 {
+ errors = append(errors, validationError("minimum", "must be >= %v but found %v", f64(s.Minimum), v))
+ }
+ if s.ExclusiveMinimum != nil && num().Cmp(s.ExclusiveMinimum) <= 0 {
+ errors = append(errors, validationError("exclusiveMinimum", "must be > %v but found %v", f64(s.ExclusiveMinimum), v))
+ }
+ if s.Maximum != nil && num().Cmp(s.Maximum) > 0 {
+ errors = append(errors, validationError("maximum", "must be <= %v but found %v", f64(s.Maximum), v))
+ }
+ if s.ExclusiveMaximum != nil && num().Cmp(s.ExclusiveMaximum) >= 0 {
+ errors = append(errors, validationError("exclusiveMaximum", "must be < %v but found %v", f64(s.ExclusiveMaximum), v))
+ }
+ if s.MultipleOf != nil {
+ if q := new(big.Rat).Quo(num(), s.MultipleOf); !q.IsInt() {
+ errors = append(errors, validationError("multipleOf", "%v not multipleOf %v", v, f64(s.MultipleOf)))
+ }
+ }
+ }
+
+ // $ref + $recursiveRef + $dynamicRef
+ validateRef := func(sch *Schema, refPath string) error {
+ if sch != nil {
+ if err := validateInplace(sch, refPath); err != nil {
+ var url = sch.Location
+ if s.url() == sch.url() {
+ url = sch.loc()
+ }
+ return validationError(refPath, "doesn't validate with %s", quote(url)).causes(err)
+ }
+ }
+ return nil
+ }
+ if err := validateRef(s.Ref, "$ref"); err != nil {
+ errors = append(errors, err)
+ }
+ if s.RecursiveRef != nil {
+ sch := s.RecursiveRef
+ if sch.RecursiveAnchor {
+ // recursiveRef based on scope
+ for _, e := range scope {
+ if e.schema.RecursiveAnchor {
+ sch = e.schema
+ break
+ }
+ }
+ }
+ if err := validateRef(sch, "$recursiveRef"); err != nil {
+ errors = append(errors, err)
+ }
+ }
+ if s.DynamicRef != nil {
+ sch := s.DynamicRef
+ if s.dynamicRefAnchor != "" && sch.DynamicAnchor == s.dynamicRefAnchor {
+ // dynamicRef based on scope
+ for i := len(scope) - 1; i >= 0; i-- {
+ sr := scope[i]
+ if sr.discard {
+ break
+ }
+ for _, da := range sr.schema.dynamicAnchors {
+ if da.DynamicAnchor == s.DynamicRef.DynamicAnchor && da != s.DynamicRef {
+ sch = da
+ break
+ }
+ }
+ }
+ }
+ if err := validateRef(sch, "$dynamicRef"); err != nil {
+ errors = append(errors, err)
+ }
+ }
+
+ if s.Not != nil && validateInplace(s.Not, "not") == nil {
+ errors = append(errors, validationError("not", "not failed"))
+ }
+
+ for i, sch := range s.AllOf {
+ schPath := "allOf/" + strconv.Itoa(i)
+ if err := validateInplace(sch, schPath); err != nil {
+ errors = append(errors, validationError(schPath, "allOf failed").add(err))
+ }
+ }
+
+ if len(s.AnyOf) > 0 {
+ matched := false
+ var causes []error
+ for i, sch := range s.AnyOf {
+ if err := validateInplace(sch, "anyOf/"+strconv.Itoa(i)); err == nil {
+ matched = true
+ } else {
+ causes = append(causes, err)
+ }
+ }
+ if !matched {
+ errors = append(errors, validationError("anyOf", "anyOf failed").add(causes...))
+ }
+ }
+
+ if len(s.OneOf) > 0 {
+ matched := -1
+ var causes []error
+ for i, sch := range s.OneOf {
+ if err := validateInplace(sch, "oneOf/"+strconv.Itoa(i)); err == nil {
+ if matched == -1 {
+ matched = i
+ } else {
+ errors = append(errors, validationError("oneOf", "valid against schemas at indexes %d and %d", matched, i))
+ break
+ }
+ } else {
+ causes = append(causes, err)
+ }
+ }
+ if matched == -1 {
+ errors = append(errors, validationError("oneOf", "oneOf failed").add(causes...))
+ }
+ }
+
+ // if + then + else
+ if s.If != nil {
+ err := validateInplace(s.If, "if")
+ // "if" leaves dynamic scope
+ scope[len(scope)-1].discard = true
+ if err == nil {
+ if s.Then != nil {
+ if err := validateInplace(s.Then, "then"); err != nil {
+ errors = append(errors, validationError("then", "if-then failed").add(err))
+ }
+ }
+ } else {
+ if s.Else != nil {
+ if err := validateInplace(s.Else, "else"); err != nil {
+ errors = append(errors, validationError("else", "if-else failed").add(err))
+ }
+ }
+ }
+ // restore dynamic scope
+ scope[len(scope)-1].discard = false
+ }
+
+ for _, ext := range s.Extensions {
+ if err := ext.Validate(ValidationContext{result, validate, validateInplace, validationError}, v); err != nil {
+ errors = append(errors, err)
+ }
+ }
+
+ // unevaluatedProperties + unevaluatedItems
+ switch v := v.(type) {
+ case map[string]interface{}:
+ if s.UnevaluatedProperties != nil {
+ for pname := range result.unevalProps {
+ if pvalue, ok := v[pname]; ok {
+ if err := validate(s.UnevaluatedProperties, "unevaluatedProperties", pvalue, escape(pname)); err != nil {
+ errors = append(errors, err)
+ }
+ }
+ }
+ result.unevalProps = nil
+ }
+ case []interface{}:
+ if s.UnevaluatedItems != nil {
+ for i := range result.unevalItems {
+ if err := validate(s.UnevaluatedItems, "unevaluatedItems", v[i], strconv.Itoa(i)); err != nil {
+ errors = append(errors, err)
+ }
+ }
+ result.unevalItems = nil
+ }
+ }
+
+ switch len(errors) {
+ case 0:
+ return result, nil
+ case 1:
+ return result, errors[0]
+ default:
+ return result, validationError("", "").add(errors...) // empty message, used just for wrapping
+ }
+}
+
+type validationResult struct {
+ unevalProps map[string]struct{}
+ unevalItems map[int]struct{}
+}
+
+func (vr validationResult) unevalPnames() string {
+ pnames := make([]string, 0, len(vr.unevalProps))
+ for pname := range vr.unevalProps {
+ pnames = append(pnames, quote(pname))
+ }
+ return strings.Join(pnames, ", ")
+}
+
+// jsonType returns the json type of given value v.
+//
+// It panics if the given value is not valid json value
+func jsonType(v interface{}) string {
+ switch v.(type) {
+ case nil:
+ return "null"
+ case bool:
+ return "boolean"
+ case json.Number, float32, float64, int, int8, int32, int64, uint, uint8, uint32, uint64:
+ return "number"
+ case string:
+ return "string"
+ case []interface{}:
+ return "array"
+ case map[string]interface{}:
+ return "object"
+ }
+ panic(InvalidJSONTypeError(fmt.Sprintf("%T", v)))
+}
+
+// equals tells if given two json values are equal or not.
+func equals(v1, v2 interface{}) bool {
+ v1Type := jsonType(v1)
+ if v1Type != jsonType(v2) {
+ return false
+ }
+ switch v1Type {
+ case "array":
+ arr1, arr2 := v1.([]interface{}), v2.([]interface{})
+ if len(arr1) != len(arr2) {
+ return false
+ }
+ for i := range arr1 {
+ if !equals(arr1[i], arr2[i]) {
+ return false
+ }
+ }
+ return true
+ case "object":
+ obj1, obj2 := v1.(map[string]interface{}), v2.(map[string]interface{})
+ if len(obj1) != len(obj2) {
+ return false
+ }
+ for k, v1 := range obj1 {
+ if v2, ok := obj2[k]; ok {
+ if !equals(v1, v2) {
+ return false
+ }
+ } else {
+ return false
+ }
+ }
+ return true
+ case "number":
+ num1, _ := new(big.Rat).SetString(fmt.Sprint(v1))
+ num2, _ := new(big.Rat).SetString(fmt.Sprint(v2))
+ return num1.Cmp(num2) == 0
+ default:
+ return v1 == v2
+ }
+}
+
+func hash(v interface{}, h *maphash.Hash) {
+ switch v := v.(type) {
+ case nil:
+ h.WriteByte(0)
+ case bool:
+ h.WriteByte(1)
+ if v {
+ h.WriteByte(1)
+ } else {
+ h.WriteByte(0)
+ }
+ case json.Number, float32, float64, int, int8, int32, int64, uint, uint8, uint32, uint64:
+ h.WriteByte(2)
+ num, _ := new(big.Rat).SetString(fmt.Sprint(v))
+ h.Write(num.Num().Bytes())
+ h.Write(num.Denom().Bytes())
+ case string:
+ h.WriteByte(3)
+ h.WriteString(v)
+ case []interface{}:
+ h.WriteByte(4)
+ for _, item := range v {
+ hash(item, h)
+ }
+ case map[string]interface{}:
+ h.WriteByte(5)
+ props := make([]string, 0, len(v))
+ for prop := range v {
+ props = append(props, prop)
+ }
+ sort.Slice(props, func(i, j int) bool {
+ return props[i] < props[j]
+ })
+ for _, prop := range props {
+ hash(prop, h)
+ hash(v[prop], h)
+ }
+ default:
+ panic(InvalidJSONTypeError(fmt.Sprintf("%T", v)))
+ }
+}
+
+// escape converts given token to valid json-pointer token
+func escape(token string) string {
+ token = strings.ReplaceAll(token, "~", "~0")
+ token = strings.ReplaceAll(token, "/", "~1")
+ return url.PathEscape(token)
+}
diff --git a/vendor/github.com/sivchari/nosnakecase/.gitignore b/vendor/github.com/sivchari/nosnakecase/.gitignore
deleted file mode 100644
index 66fd13c903..0000000000
--- a/vendor/github.com/sivchari/nosnakecase/.gitignore
+++ /dev/null
@@ -1,15 +0,0 @@
-# Binaries for programs and plugins
-*.exe
-*.exe~
-*.dll
-*.so
-*.dylib
-
-# Test binary, built with `go test -c`
-*.test
-
-# Output of the go coverage tool, specifically when used with LiteIDE
-*.out
-
-# Dependency directories (remove the comment below to include it)
-# vendor/
diff --git a/vendor/github.com/sivchari/nosnakecase/.golangci.yml b/vendor/github.com/sivchari/nosnakecase/.golangci.yml
deleted file mode 100644
index 31e05c4ee1..0000000000
--- a/vendor/github.com/sivchari/nosnakecase/.golangci.yml
+++ /dev/null
@@ -1,40 +0,0 @@
-run:
- timeout: 5m
- skip-files: []
- go: '1.17'
-
-linters-settings:
- govet:
- enable-all: true
- disable:
- - fieldalignment
- gocyclo:
- min-complexity: 18
- misspell:
- locale: US
- godox:
- keywords:
- - FIXME
- gofumpt:
- extra-rules: true
-
-linters:
- disable-all: true
- enable:
- - govet
- - revive
- - goimports
- - staticcheck
- - gosimple
- - unused
- - godox
- - gofumpt
- - misspell
- - gocyclo
-
-issues:
- exclude-use-default: true
- max-per-linter: 0
- max-same-issues: 0
- exclude: []
-
diff --git a/vendor/github.com/sivchari/nosnakecase/README.md b/vendor/github.com/sivchari/nosnakecase/README.md
deleted file mode 100644
index 69bb660462..0000000000
--- a/vendor/github.com/sivchari/nosnakecase/README.md
+++ /dev/null
@@ -1,224 +0,0 @@
-# nosnakecase
-nosnakecase is a linter that detects snake case of variable naming and function name.
-
-## Instruction
-
-```sh
-go install github.com/sivchari/nosnakecase/cmd/nosnakecase@latest
-```
-
-## Usage
-
-```go
-package sandbox
-
-// global variable name with underscore.
-var v_v = 0 // want "v_v is used under score. You should use mixedCap or MixedCap."
-
-// global constant name with underscore.
-const c_c = 0 // want "c_c is used under score. You should use mixedCap or MixedCap."
-
-// struct name with underscore.
-type S_a struct { // want "S_a is used under score. You should use mixedCap or MixedCap."
- fi int
-}
-
-// non-exported struct field name with underscore.
-type Sa struct {
- fi_a int // // want "fi_a is used under score. You should use mixedCap or MixedCap."
-}
-
-// function as struct field, with parameter name with underscore.
-type Sb struct {
- fib func(p_a int) // want "p_a is used under score. You should use mixedCap or MixedCap."
-}
-
-// exported struct field with underscore.
-type Sc struct {
- Fi_A int // want "Fi_A is used under score. You should use mixedCap or MixedCap."
-}
-
-// function as struct field, with return name with underscore.
-type Sd struct {
- fib func(p int) (r_a int) // want "r_a is used under score. You should use mixedCap or MixedCap."
-}
-
-// interface name with underscore.
-type I_a interface { // want "I_a is used under score. You should use mixedCap or MixedCap."
- fn(p int)
-}
-
-// interface with parameter name with underscore.
-type Ia interface {
- fn(p_a int) // want "p_a is used under score. You should use mixedCap or MixedCap."
-}
-
-// interface with parameter name with underscore.
-type Ib interface {
- Fn(p_a int) // want "p_a is used under score. You should use mixedCap or MixedCap."
-}
-
-// function as struct field, with return name with underscore.
-type Ic interface {
- Fn_a() // want "Fn_a is used under score. You should use mixedCap or MixedCap."
-}
-
-// interface with return name with underscore.
-type Id interface {
- Fn() (r_a int) // want "r_a is used under score. You should use mixedCap or MixedCap."
-}
-
-// function name with underscore.
-func f_a() {} // want "f_a is used under score. You should use mixedCap or MixedCap."
-
-// function's parameter name with underscore.
-func fb(p_a int) {} // want "p_a is used under score. You should use mixedCap or MixedCap."
-
-// named return with underscore.
-func fc() (r_b int) { // want "r_b is used under score. You should use mixedCap or MixedCap."
- return 0
-}
-
-// local variable (short declaration) with underscore.
-func fd(p int) int {
- v_b := p * 2 // want "v_b is used under score. You should use mixedCap or MixedCap."
-
- return v_b // want "v_b is used under score. You should use mixedCap or MixedCap."
-}
-
-// local constant with underscore.
-func fe(p int) int {
- const v_b = 2 // want "v_b is used under score. You should use mixedCap or MixedCap."
-
- return v_b * p // want "v_b is used under score. You should use mixedCap or MixedCap."
-}
-
-// local variable with underscore.
-func ff(p int) int {
- var v_b = 2 // want "v_b is used under score. You should use mixedCap or MixedCap."
-
- return v_b * p // want "v_b is used under score. You should use mixedCap or MixedCap."
-}
-
-// inner function, parameter name with underscore.
-func fg() {
- fgl := func(p_a int) {} // want "p_a is used under score. You should use mixedCap or MixedCap."
- fgl(1)
-}
-
-type Foo struct{}
-
-// method name with underscore.
-func (f Foo) f_a() {} // want "f_a is used under score. You should use mixedCap or MixedCap."
-
-// method's parameter name with underscore.
-func (f Foo) fb(p_a int) {} // want "p_a is used under score. You should use mixedCap or MixedCap."
-
-// named return with underscore.
-func (f Foo) fc() (r_b int) { return 0 } // want "r_b is used under score. You should use mixedCap or MixedCap."
-
-// local variable (short declaration) with underscore.
-func (f Foo) fd(p int) int {
- v_b := p * 2 // want "v_b is used under score. You should use mixedCap or MixedCap."
-
- return v_b // want "v_b is used under score. You should use mixedCap or MixedCap."
-}
-
-// local constant with underscore.
-func (f Foo) fe(p int) int {
- const v_b = 2 // want "v_b is used under score. You should use mixedCap or MixedCap."
-
- return v_b * p // want "v_b is used under score. You should use mixedCap or MixedCap."
-}
-
-// local variable with underscore.
-func (f Foo) ff(p int) int {
- var v_b = 2 // want "v_b is used under score. You should use mixedCap or MixedCap."
-
- return v_b * p // want "v_b is used under score. You should use mixedCap or MixedCap."
-}
-
-func fna(a, p_a int) {} // want "p_a is used under score. You should use mixedCap or MixedCap."
-
-func fna1(a string, p_a int) {} // want "p_a is used under score. You should use mixedCap or MixedCap."
-
-func fnb(a, b, p_a int) {} // want "p_a is used under score. You should use mixedCap or MixedCap."
-
-func fnb1(a, b string, p_a int) {} // want "p_a is used under score. You should use mixedCap or MixedCap."
-
-func fnd(
- p_a int, // want "p_a is used under score. You should use mixedCap or MixedCap."
- p_b int, // want "p_b is used under score. You should use mixedCap or MixedCap."
- p_c int, // want "p_c is used under score. You should use mixedCap or MixedCap."
-) {
-}
-```
-
-```console
-go vet -vettool=(which nosnakecase) ./...
-
-# command-line-arguments
-# a
-./a.go:4:5: v_v is used under score. You should use mixedCap or MixedCap.
-./a.go:7:7: c_c is used under score. You should use mixedCap or MixedCap.
-./a.go:10:6: S_a is used under score. You should use mixedCap or MixedCap.
-./a.go:16:2: fi_a is used under score. You should use mixedCap or MixedCap.
-./a.go:21:11: p_a is used under score. You should use mixedCap or MixedCap.
-./a.go:26:2: Fi_A is used under score. You should use mixedCap or MixedCap.
-./a.go:31:19: r_a is used under score. You should use mixedCap or MixedCap.
-./a.go:35:6: I_a is used under score. You should use mixedCap or MixedCap.
-./a.go:41:5: p_a is used under score. You should use mixedCap or MixedCap.
-./a.go:46:5: p_a is used under score. You should use mixedCap or MixedCap.
-./a.go:51:2: Fn_a is used under score. You should use mixedCap or MixedCap.
-./a.go:56:8: r_a is used under score. You should use mixedCap or MixedCap.
-./a.go:60:6: f_a is used under score. You should use mixedCap or MixedCap.
-./a.go:63:9: p_a is used under score. You should use mixedCap or MixedCap.
-./a.go:66:12: r_b is used under score. You should use mixedCap or MixedCap.
-./a.go:72:2: v_b is used under score. You should use mixedCap or MixedCap.
-./a.go:74:9: v_b is used under score. You should use mixedCap or MixedCap.
-./a.go:79:8: v_b is used under score. You should use mixedCap or MixedCap.
-./a.go:81:9: v_b is used under score. You should use mixedCap or MixedCap.
-./a.go:86:6: v_b is used under score. You should use mixedCap or MixedCap.
-./a.go:88:9: v_b is used under score. You should use mixedCap or MixedCap.
-./a.go:93:14: p_a is used under score. You should use mixedCap or MixedCap.
-./a.go:100:14: f_a is used under score. You should use mixedCap or MixedCap.
-./a.go:103:17: p_a is used under score. You should use mixedCap or MixedCap.
-./a.go:106:20: r_b is used under score. You should use mixedCap or MixedCap.
-./a.go:110:2: v_b is used under score. You should use mixedCap or MixedCap.
-./a.go:112:9: v_b is used under score. You should use mixedCap or MixedCap.
-./a.go:117:8: v_b is used under score. You should use mixedCap or MixedCap.
-./a.go:119:9: v_b is used under score. You should use mixedCap or MixedCap.
-./a.go:124:6: v_b is used under score. You should use mixedCap or MixedCap.
-./a.go:126:9: v_b is used under score. You should use mixedCap or MixedCap.
-./a.go:129:13: p_a is used under score. You should use mixedCap or MixedCap.
-./a.go:131:21: p_a is used under score. You should use mixedCap or MixedCap.
-./a.go:133:16: p_a is used under score. You should use mixedCap or MixedCap.
-./a.go:135:24: p_a is used under score. You should use mixedCap or MixedCap.
-./a.go:138:2: p_a is used under score. You should use mixedCap or MixedCap.
-./a.go:139:2: p_b is used under score. You should use mixedCap or MixedCap.
-./a.go:140:2: p_c is used under score. You should use mixedCap or MixedCap.
-```
-
-## CI
-
-### CircleCI
-
-```yaml
-- run:
- name: install nosnakecase
- command: go install github.com/sivchari/nosnakecase/cmd/nosnakecase@latest
-
-- run:
- name: run nosnakecase
- command: go vet -vettool=`which nosnakecase` ./...
-```
-
-### GitHub Actions
-
-```yaml
-- name: install nosnakecase
- run: go install github.com/sivchari/nosnakecase/cmd/nosnakecase@latest
-
-- name: run nosnakecase
- run: go vet -vettool=`which nosnakecase` ./...
-```
diff --git a/vendor/github.com/sivchari/nosnakecase/nosnakecase.go b/vendor/github.com/sivchari/nosnakecase/nosnakecase.go
deleted file mode 100644
index 88cf70e3f0..0000000000
--- a/vendor/github.com/sivchari/nosnakecase/nosnakecase.go
+++ /dev/null
@@ -1,63 +0,0 @@
-package nosnakecase
-
-import (
- "go/ast"
- "go/token"
- "strings"
-
- "golang.org/x/tools/go/analysis"
- "golang.org/x/tools/go/analysis/passes/inspect"
- "golang.org/x/tools/go/ast/inspector"
-)
-
-const doc = "nosnakecase is a linter that detects snake case of variable naming and function name."
-
-// Analyzer is a nosnakecase linter.
-var Analyzer = &analysis.Analyzer{
- Name: "nosnakecase",
- Doc: doc,
- Run: run,
- Requires: []*analysis.Analyzer{
- inspect.Analyzer,
- },
-}
-
-func run(pass *analysis.Pass) (interface{}, error) {
- result := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
-
- nodeFilter := []ast.Node{
- (*ast.Ident)(nil),
- }
-
- result.Preorder(nodeFilter, func(n ast.Node) {
- switch n := n.(type) {
- case *ast.Ident:
- report(pass, n.Pos(), n.Name)
- }
- })
-
- return nil, nil
-}
-
-func report(pass *analysis.Pass, pos token.Pos, name string) {
- // skip import _ "xxx"
- if name == "_" {
- return
- }
-
- // skip package xxx_test
- if strings.Contains(name, "_test") {
- return
- }
-
- // If prefix is Test or Benchmark, Fuzz, skip
- // FYI https://go.dev/blog/examples
- if strings.HasPrefix(name, "Test") || strings.HasPrefix(name, "Benchmark") || strings.HasPrefix(name, "Fuzz") {
- return
- }
-
- if strings.Contains(name, "_") {
- pass.Reportf(pos, "%s contains underscore. You should use mixedCap or MixedCap.", name)
- return
- }
-}
diff --git a/vendor/github.com/tomarrell/wrapcheck/v2/wrapcheck/wrapcheck.go b/vendor/github.com/tomarrell/wrapcheck/v2/wrapcheck/wrapcheck.go
index 6da17bd867..79e7bba863 100644
--- a/vendor/github.com/tomarrell/wrapcheck/v2/wrapcheck/wrapcheck.go
+++ b/vendor/github.com/tomarrell/wrapcheck/v2/wrapcheck/wrapcheck.go
@@ -121,7 +121,20 @@ func run(cfg WrapcheckConfig) func(*analysis.Pass) (interface{}, error) {
}
for _, file := range pass.Files {
+ // Keep track of parents so that can can traverse upwards to check for
+ // FuncDecls and FuncLits.
+ var parents []ast.Node
+
ast.Inspect(file, func(n ast.Node) bool {
+ if n == nil {
+ // Pop, since we're done with this node and its children.
+ parents = parents[:len(parents)-1]
+ } else {
+ // Push this node on the stack, since its children will be visited
+ // next.
+ parents = append(parents, n)
+ }
+
ret, ok := n.(*ast.ReturnStmt)
if !ok {
return true
@@ -137,6 +150,17 @@ func run(cfg WrapcheckConfig) func(*analysis.Pass) (interface{}, error) {
// to handle it by checking the return params of the function.
retFn, ok := expr.(*ast.CallExpr)
if ok {
+ // If you go up, and the parent is a FuncLit, then don't report an
+ // error as you are in an anonymous function. If you are inside a
+ // FuncDecl, then continue as normal.
+ for i := len(parents) - 1; i > 0; i-- {
+ if _, ok := parents[i].(*ast.FuncLit); ok {
+ return true
+ } else if _, ok := parents[i].(*ast.FuncDecl); ok {
+ break
+ }
+ }
+
// If the return type of the function is a single error. This will not
// match an error within multiple return values, for that, the below
// tuple check is required.
diff --git a/vendor/go-simpler.org/musttag/README.md b/vendor/go-simpler.org/musttag/README.md
index 54c2744ccf..3f3a253302 100644
--- a/vendor/go-simpler.org/musttag/README.md
+++ b/vendor/go-simpler.org/musttag/README.md
@@ -64,7 +64,7 @@ Alternatively, you can download a prebuilt binary from the [Releases][9] page to
Run `golangci-lint` with `musttag` enabled.
See the list of [available options][10] to configure the linter.
-When using `sloglint` standalone, pass the options as flags.
+When using `musttag` standalone, pass the options as flags.
### Custom packages
diff --git a/vendor/go-simpler.org/musttag/musttag.go b/vendor/go-simpler.org/musttag/musttag.go
index b7e41f3d36..d7911770fb 100644
--- a/vendor/go-simpler.org/musttag/musttag.go
+++ b/vendor/go-simpler.org/musttag/musttag.go
@@ -150,16 +150,19 @@ type checker struct {
}
func (c *checker) parseStruct(typ types.Type) (*types.Struct, bool) {
- for {
- // unwrap pointers (if any) first.
- ptr, ok := typ.(*types.Pointer)
- if !ok {
- break
- }
- typ = ptr.Elem()
- }
-
switch typ := typ.(type) {
+ case *types.Pointer:
+ return c.parseStruct(typ.Elem())
+
+ case *types.Array:
+ return c.parseStruct(typ.Elem())
+
+ case *types.Slice:
+ return c.parseStruct(typ.Elem())
+
+ case *types.Map:
+ return c.parseStruct(typ.Elem())
+
case *types.Named: // a struct of the named type.
pkg := typ.Obj().Pkg()
if pkg == nil {
diff --git a/vendor/go-simpler.org/sloglint/Makefile b/vendor/go-simpler.org/sloglint/Makefile
new file mode 100644
index 0000000000..6165b16f47
--- /dev/null
+++ b/vendor/go-simpler.org/sloglint/Makefile
@@ -0,0 +1,28 @@
+.POSIX:
+.SUFFIXES:
+
+all: test lint
+
+test:
+ go test -race -shuffle=on -cover ./...
+
+test/cover:
+ go test -race -shuffle=on -coverprofile=coverage.out ./...
+ go tool cover -html=coverage.out
+
+lint:
+ golangci-lint run
+
+tidy:
+ go mod tidy
+
+generate:
+ go generate ./...
+
+# run `make pre-commit` once to install the hook.
+pre-commit: .git/hooks/pre-commit test lint tidy generate
+ git diff --exit-code
+
+.git/hooks/pre-commit:
+ echo "make pre-commit" > .git/hooks/pre-commit
+ chmod +x .git/hooks/pre-commit
diff --git a/vendor/go-simpler.org/sloglint/README.md b/vendor/go-simpler.org/sloglint/README.md
index fc4d9debb9..7f6455c1c7 100644
--- a/vendor/go-simpler.org/sloglint/README.md
+++ b/vendor/go-simpler.org/sloglint/README.md
@@ -17,6 +17,7 @@ With `sloglint` you can enforce various rules for `log/slog` based on your prefe
* Enforce not mixing key-value pairs and attributes (default)
* Enforce using either key-value pairs only or attributes only (optional)
+* Enforce not using global loggers (optional)
* Enforce using methods that accept a context (optional)
* Enforce using static log messages (optional)
* Enforce using constants instead of raw keys (optional)
@@ -70,6 +71,17 @@ In contrast, the `attr-only` option causes `sloglint` to report any use of key-v
slog.Info("a user has logged in", "user_id", 42) // sloglint: key-value pairs should not be used
```
+### No global
+
+Some projects prefer to pass loggers as explicit dependencies.
+The `no-global` option causes `sloglint` to report the usage of global loggers.
+
+```go
+slog.Info("a user has logged in", "user_id", 42) // sloglint: global logger should not be used
+```
+
+Possible values are `all` (report all global loggers) and `default` (report only the default `slog` logger).
+
### Context only
Some `slog.Handler` implementations make use of the given `context.Context` (e.g. to access context values).
diff --git a/vendor/go-simpler.org/sloglint/sloglint.go b/vendor/go-simpler.org/sloglint/sloglint.go
index b7f72ce485..35cac14d13 100644
--- a/vendor/go-simpler.org/sloglint/sloglint.go
+++ b/vendor/go-simpler.org/sloglint/sloglint.go
@@ -9,6 +9,7 @@ import (
"go/token"
"go/types"
"strconv"
+ "strings"
"github.com/ettle/strcase"
"golang.org/x/tools/go/analysis"
@@ -22,6 +23,7 @@ type Options struct {
NoMixedArgs bool // Enforce not mixing key-value pairs and attributes (default true).
KVOnly bool // Enforce using key-value pairs only (overrides NoMixedArgs, incompatible with AttrOnly).
AttrOnly bool // Enforce using attributes only (overrides NoMixedArgs, incompatible with KVOnly).
+ NoGlobal string // Enforce not using global loggers ("all" or "default").
ContextOnly bool // Enforce using methods that accept a context.
StaticMsg bool // Enforce using static log messages.
NoRawKeys bool // Enforce using constants instead of raw keys.
@@ -43,11 +45,19 @@ func New(opts *Options) *analysis.Analyzer {
if opts.KVOnly && opts.AttrOnly {
return nil, fmt.Errorf("sloglint: Options.KVOnly and Options.AttrOnly: %w", errIncompatible)
}
+
+ switch opts.NoGlobal {
+ case "", "all", "default":
+ default:
+ return nil, fmt.Errorf("sloglint: Options.NoGlobal=%s: %w", opts.NoGlobal, errInvalidValue)
+ }
+
switch opts.KeyNamingCase {
case "", snakeCase, kebabCase, camelCase, pascalCase:
default:
return nil, fmt.Errorf("sloglint: Options.KeyNamingCase=%s: %w", opts.KeyNamingCase, errInvalidValue)
}
+
run(pass, opts)
return nil, nil
},
@@ -60,30 +70,34 @@ var (
)
func flags(opts *Options) flag.FlagSet {
- fs := flag.NewFlagSet("sloglint", flag.ContinueOnError)
+ fset := flag.NewFlagSet("sloglint", flag.ContinueOnError)
boolVar := func(value *bool, name, usage string) {
- fs.Func(name, usage, func(s string) error {
+ fset.Func(name, usage, func(s string) error {
v, err := strconv.ParseBool(s)
*value = v
return err
})
}
+ strVar := func(value *string, name, usage string) {
+ fset.Func(name, usage, func(s string) error {
+ *value = s
+ return nil
+ })
+ }
+
boolVar(&opts.NoMixedArgs, "no-mixed-args", "enforce not mixing key-value pairs and attributes (default true)")
boolVar(&opts.KVOnly, "kv-only", "enforce using key-value pairs only (overrides -no-mixed-args, incompatible with -attr-only)")
boolVar(&opts.AttrOnly, "attr-only", "enforce using attributes only (overrides -no-mixed-args, incompatible with -kv-only)")
+ strVar(&opts.NoGlobal, "no-global", "enforce not using global loggers (all|default)")
boolVar(&opts.ContextOnly, "context-only", "enforce using methods that accept a context")
boolVar(&opts.StaticMsg, "static-msg", "enforce using static log messages")
boolVar(&opts.NoRawKeys, "no-raw-keys", "enforce using constants instead of raw keys")
+ strVar(&opts.KeyNamingCase, "key-naming-case", "enforce a single key naming convention (snake|kebab|camel|pascal)")
boolVar(&opts.ArgsOnSepLines, "args-on-sep-lines", "enforce putting arguments on separate lines")
- fs.Func("key-naming-case", "enforce a single key naming convention (snake|kebab|camel|pascal)", func(s string) error {
- opts.KeyNamingCase = s
- return nil
- })
-
- return *fs
+ return *fset
}
var slogFuncs = map[string]int{ // funcName:argsPos
@@ -139,17 +153,30 @@ func run(pass *analysis.Pass, opts *Options) {
return
}
- argsPos, ok := slogFuncs[fn.FullName()]
+ name := fn.FullName()
+ argsPos, ok := slogFuncs[name]
if !ok {
return
}
+ switch opts.NoGlobal {
+ case "all":
+ if strings.HasPrefix(name, "log/slog.") || globalLoggerUsed(pass.TypesInfo, call.Fun) {
+ pass.Reportf(call.Pos(), "global logger should not be used")
+ }
+ case "default":
+ if strings.HasPrefix(name, "log/slog.") {
+ pass.Reportf(call.Pos(), "default logger should not be used")
+ }
+ }
+
if opts.ContextOnly {
typ := pass.TypesInfo.TypeOf(call.Args[0])
if typ != nil && typ.String() != "context.Context" {
pass.Reportf(call.Pos(), "methods without a context should not be used")
}
}
+
if opts.StaticMsg && !staticMsg(call.Args[argsPos-1]) {
pass.Reportf(call.Pos(), "message should be a string literal or a constant")
}
@@ -189,6 +216,7 @@ func run(pass *analysis.Pass, opts *Options) {
if opts.NoRawKeys && rawKeysUsed(pass.TypesInfo, keys, attrs) {
pass.Reportf(call.Pos(), "raw keys should not be used")
}
+
if opts.ArgsOnSepLines && argsOnSameLine(pass.Fset, call, keys, attrs) {
pass.Reportf(call.Pos(), "arguments should be put on separate lines")
}
@@ -206,6 +234,19 @@ func run(pass *analysis.Pass, opts *Options) {
})
}
+func globalLoggerUsed(info *types.Info, expr ast.Expr) bool {
+ selector, ok := expr.(*ast.SelectorExpr)
+ if !ok {
+ return false
+ }
+ ident, ok := selector.X.(*ast.Ident)
+ if !ok {
+ return false
+ }
+ obj := info.ObjectOf(ident)
+ return obj.Parent() == obj.Pkg().Scope()
+}
+
func staticMsg(expr ast.Expr) bool {
switch msg := expr.(type) {
case *ast.BasicLit: // e.g. slog.Info("msg")
diff --git a/vendor/github.com/mbilski/exhaustivestruct/LICENSE b/vendor/go.uber.org/automaxprocs/LICENSE
similarity index 87%
rename from vendor/github.com/mbilski/exhaustivestruct/LICENSE
rename to vendor/go.uber.org/automaxprocs/LICENSE
index 893eb73b9f..20dcf51d96 100644
--- a/vendor/github.com/mbilski/exhaustivestruct/LICENSE
+++ b/vendor/go.uber.org/automaxprocs/LICENSE
@@ -1,6 +1,4 @@
-MIT License
-
-Copyright (c) 2020 Mateusz Bilski
+Copyright (c) 2017 Uber Technologies, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
@@ -9,13 +7,13 @@ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
\ No newline at end of file
diff --git a/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroup.go b/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroup.go
new file mode 100644
index 0000000000..fe4ecf561e
--- /dev/null
+++ b/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroup.go
@@ -0,0 +1,79 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+//go:build linux
+// +build linux
+
+package cgroups
+
+import (
+ "bufio"
+ "io"
+ "os"
+ "path/filepath"
+ "strconv"
+)
+
+// CGroup represents the data structure for a Linux control group.
+type CGroup struct {
+ path string
+}
+
+// NewCGroup returns a new *CGroup from a given path.
+func NewCGroup(path string) *CGroup {
+ return &CGroup{path: path}
+}
+
+// Path returns the path of the CGroup*.
+func (cg *CGroup) Path() string {
+ return cg.path
+}
+
+// ParamPath returns the path of the given cgroup param under itself.
+func (cg *CGroup) ParamPath(param string) string {
+ return filepath.Join(cg.path, param)
+}
+
+// readFirstLine reads the first line from a cgroup param file.
+func (cg *CGroup) readFirstLine(param string) (string, error) {
+ paramFile, err := os.Open(cg.ParamPath(param))
+ if err != nil {
+ return "", err
+ }
+ defer paramFile.Close()
+
+ scanner := bufio.NewScanner(paramFile)
+ if scanner.Scan() {
+ return scanner.Text(), nil
+ }
+ if err := scanner.Err(); err != nil {
+ return "", err
+ }
+ return "", io.ErrUnexpectedEOF
+}
+
+// readInt parses the first line from a cgroup param file as int.
+func (cg *CGroup) readInt(param string) (int, error) {
+ text, err := cg.readFirstLine(param)
+ if err != nil {
+ return 0, err
+ }
+ return strconv.Atoi(text)
+}
diff --git a/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroups.go b/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroups.go
new file mode 100644
index 0000000000..e89f543602
--- /dev/null
+++ b/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroups.go
@@ -0,0 +1,118 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+//go:build linux
+// +build linux
+
+package cgroups
+
+const (
+ // _cgroupFSType is the Linux CGroup file system type used in
+ // `/proc/$PID/mountinfo`.
+ _cgroupFSType = "cgroup"
+ // _cgroupSubsysCPU is the CPU CGroup subsystem.
+ _cgroupSubsysCPU = "cpu"
+ // _cgroupSubsysCPUAcct is the CPU accounting CGroup subsystem.
+ _cgroupSubsysCPUAcct = "cpuacct"
+ // _cgroupSubsysCPUSet is the CPUSet CGroup subsystem.
+ _cgroupSubsysCPUSet = "cpuset"
+ // _cgroupSubsysMemory is the Memory CGroup subsystem.
+ _cgroupSubsysMemory = "memory"
+
+ // _cgroupCPUCFSQuotaUsParam is the file name for the CGroup CFS quota
+ // parameter.
+ _cgroupCPUCFSQuotaUsParam = "cpu.cfs_quota_us"
+ // _cgroupCPUCFSPeriodUsParam is the file name for the CGroup CFS period
+ // parameter.
+ _cgroupCPUCFSPeriodUsParam = "cpu.cfs_period_us"
+)
+
+const (
+ _procPathCGroup = "/proc/self/cgroup"
+ _procPathMountInfo = "/proc/self/mountinfo"
+)
+
+// CGroups is a map that associates each CGroup with its subsystem name.
+type CGroups map[string]*CGroup
+
+// NewCGroups returns a new *CGroups from given `mountinfo` and `cgroup` files
+// under for some process under `/proc` file system (see also proc(5) for more
+// information).
+func NewCGroups(procPathMountInfo, procPathCGroup string) (CGroups, error) {
+ cgroupSubsystems, err := parseCGroupSubsystems(procPathCGroup)
+ if err != nil {
+ return nil, err
+ }
+
+ cgroups := make(CGroups)
+ newMountPoint := func(mp *MountPoint) error {
+ if mp.FSType != _cgroupFSType {
+ return nil
+ }
+
+ for _, opt := range mp.SuperOptions {
+ subsys, exists := cgroupSubsystems[opt]
+ if !exists {
+ continue
+ }
+
+ cgroupPath, err := mp.Translate(subsys.Name)
+ if err != nil {
+ return err
+ }
+ cgroups[opt] = NewCGroup(cgroupPath)
+ }
+
+ return nil
+ }
+
+ if err := parseMountInfo(procPathMountInfo, newMountPoint); err != nil {
+ return nil, err
+ }
+ return cgroups, nil
+}
+
+// NewCGroupsForCurrentProcess returns a new *CGroups instance for the current
+// process.
+func NewCGroupsForCurrentProcess() (CGroups, error) {
+ return NewCGroups(_procPathMountInfo, _procPathCGroup)
+}
+
+// CPUQuota returns the CPU quota applied with the CPU cgroup controller.
+// It is a result of `cpu.cfs_quota_us / cpu.cfs_period_us`. If the value of
+// `cpu.cfs_quota_us` was not set (-1), the method returns `(-1, nil)`.
+func (cg CGroups) CPUQuota() (float64, bool, error) {
+ cpuCGroup, exists := cg[_cgroupSubsysCPU]
+ if !exists {
+ return -1, false, nil
+ }
+
+ cfsQuotaUs, err := cpuCGroup.readInt(_cgroupCPUCFSQuotaUsParam)
+ if defined := cfsQuotaUs > 0; err != nil || !defined {
+ return -1, defined, err
+ }
+
+ cfsPeriodUs, err := cpuCGroup.readInt(_cgroupCPUCFSPeriodUsParam)
+ if defined := cfsPeriodUs > 0; err != nil || !defined {
+ return -1, defined, err
+ }
+
+ return float64(cfsQuotaUs) / float64(cfsPeriodUs), true, nil
+}
diff --git a/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroups2.go b/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroups2.go
new file mode 100644
index 0000000000..78556062fe
--- /dev/null
+++ b/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroups2.go
@@ -0,0 +1,176 @@
+// Copyright (c) 2022 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+//go:build linux
+// +build linux
+
+package cgroups
+
+import (
+ "bufio"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path"
+ "strconv"
+ "strings"
+)
+
+const (
+ // _cgroupv2CPUMax is the file name for the CGroup-V2 CPU max and period
+ // parameter.
+ _cgroupv2CPUMax = "cpu.max"
+ // _cgroupFSType is the Linux CGroup-V2 file system type used in
+ // `/proc/$PID/mountinfo`.
+ _cgroupv2FSType = "cgroup2"
+
+ _cgroupv2MountPoint = "/sys/fs/cgroup"
+
+ _cgroupV2CPUMaxDefaultPeriod = 100000
+ _cgroupV2CPUMaxQuotaMax = "max"
+)
+
+const (
+ _cgroupv2CPUMaxQuotaIndex = iota
+ _cgroupv2CPUMaxPeriodIndex
+)
+
+// ErrNotV2 indicates that the system is not using cgroups2.
+var ErrNotV2 = errors.New("not using cgroups2")
+
+// CGroups2 provides access to cgroups data for systems using cgroups2.
+type CGroups2 struct {
+ mountPoint string
+ groupPath string
+ cpuMaxFile string
+}
+
+// NewCGroups2ForCurrentProcess builds a CGroups2 for the current process.
+//
+// This returns ErrNotV2 if the system is not using cgroups2.
+func NewCGroups2ForCurrentProcess() (*CGroups2, error) {
+ return newCGroups2From(_procPathMountInfo, _procPathCGroup)
+}
+
+func newCGroups2From(mountInfoPath, procPathCGroup string) (*CGroups2, error) {
+ isV2, err := isCGroupV2(mountInfoPath)
+ if err != nil {
+ return nil, err
+ }
+
+ if !isV2 {
+ return nil, ErrNotV2
+ }
+
+ subsystems, err := parseCGroupSubsystems(procPathCGroup)
+ if err != nil {
+ return nil, err
+ }
+
+ // Find v2 subsystem by looking for the `0` id
+ var v2subsys *CGroupSubsys
+ for _, subsys := range subsystems {
+ if subsys.ID == 0 {
+ v2subsys = subsys
+ break
+ }
+ }
+
+ if v2subsys == nil {
+ return nil, ErrNotV2
+ }
+
+ return &CGroups2{
+ mountPoint: _cgroupv2MountPoint,
+ groupPath: v2subsys.Name,
+ cpuMaxFile: _cgroupv2CPUMax,
+ }, nil
+}
+
+func isCGroupV2(procPathMountInfo string) (bool, error) {
+ var (
+ isV2 bool
+ newMountPoint = func(mp *MountPoint) error {
+ isV2 = isV2 || (mp.FSType == _cgroupv2FSType && mp.MountPoint == _cgroupv2MountPoint)
+ return nil
+ }
+ )
+
+ if err := parseMountInfo(procPathMountInfo, newMountPoint); err != nil {
+ return false, err
+ }
+
+ return isV2, nil
+}
+
+// CPUQuota returns the CPU quota applied with the CPU cgroup2 controller.
+// It is a result of reading cpu quota and period from cpu.max file.
+// It will return `cpu.max / cpu.period`. If cpu.max is set to max, it returns
+// (-1, false, nil)
+func (cg *CGroups2) CPUQuota() (float64, bool, error) {
+ cpuMaxParams, err := os.Open(path.Join(cg.mountPoint, cg.groupPath, cg.cpuMaxFile))
+ if err != nil {
+ if os.IsNotExist(err) {
+ return -1, false, nil
+ }
+ return -1, false, err
+ }
+ defer cpuMaxParams.Close()
+
+ scanner := bufio.NewScanner(cpuMaxParams)
+ if scanner.Scan() {
+ fields := strings.Fields(scanner.Text())
+ if len(fields) == 0 || len(fields) > 2 {
+ return -1, false, fmt.Errorf("invalid format")
+ }
+
+ if fields[_cgroupv2CPUMaxQuotaIndex] == _cgroupV2CPUMaxQuotaMax {
+ return -1, false, nil
+ }
+
+ max, err := strconv.Atoi(fields[_cgroupv2CPUMaxQuotaIndex])
+ if err != nil {
+ return -1, false, err
+ }
+
+ var period int
+ if len(fields) == 1 {
+ period = _cgroupV2CPUMaxDefaultPeriod
+ } else {
+ period, err = strconv.Atoi(fields[_cgroupv2CPUMaxPeriodIndex])
+ if err != nil {
+ return -1, false, err
+ }
+
+ if period == 0 {
+ return -1, false, errors.New("zero value for period is not allowed")
+ }
+ }
+
+ return float64(max) / float64(period), true, nil
+ }
+
+ if err := scanner.Err(); err != nil {
+ return -1, false, err
+ }
+
+ return 0, false, io.ErrUnexpectedEOF
+}
diff --git a/vendor/go.uber.org/automaxprocs/internal/cgroups/doc.go b/vendor/go.uber.org/automaxprocs/internal/cgroups/doc.go
new file mode 100644
index 0000000000..113555f63d
--- /dev/null
+++ b/vendor/go.uber.org/automaxprocs/internal/cgroups/doc.go
@@ -0,0 +1,23 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Package cgroups provides utilities to access Linux control group (CGroups)
+// parameters (CPU quota, for example) for a given process.
+package cgroups
diff --git a/vendor/go.uber.org/automaxprocs/internal/cgroups/errors.go b/vendor/go.uber.org/automaxprocs/internal/cgroups/errors.go
new file mode 100644
index 0000000000..94ac75a46e
--- /dev/null
+++ b/vendor/go.uber.org/automaxprocs/internal/cgroups/errors.go
@@ -0,0 +1,52 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+//go:build linux
+// +build linux
+
+package cgroups
+
+import "fmt"
+
+type cgroupSubsysFormatInvalidError struct {
+ line string
+}
+
+type mountPointFormatInvalidError struct {
+ line string
+}
+
+type pathNotExposedFromMountPointError struct {
+ mountPoint string
+ root string
+ path string
+}
+
+func (err cgroupSubsysFormatInvalidError) Error() string {
+ return fmt.Sprintf("invalid format for CGroupSubsys: %q", err.line)
+}
+
+func (err mountPointFormatInvalidError) Error() string {
+ return fmt.Sprintf("invalid format for MountPoint: %q", err.line)
+}
+
+func (err pathNotExposedFromMountPointError) Error() string {
+ return fmt.Sprintf("path %q is not a descendant of mount point root %q and cannot be exposed from %q", err.path, err.root, err.mountPoint)
+}
diff --git a/vendor/go.uber.org/automaxprocs/internal/cgroups/mountpoint.go b/vendor/go.uber.org/automaxprocs/internal/cgroups/mountpoint.go
new file mode 100644
index 0000000000..f3877f78aa
--- /dev/null
+++ b/vendor/go.uber.org/automaxprocs/internal/cgroups/mountpoint.go
@@ -0,0 +1,171 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+//go:build linux
+// +build linux
+
+package cgroups
+
+import (
+ "bufio"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+)
+
+const (
+ _mountInfoSep = " "
+ _mountInfoOptsSep = ","
+ _mountInfoOptionalFieldsSep = "-"
+)
+
+const (
+ _miFieldIDMountID = iota
+ _miFieldIDParentID
+ _miFieldIDDeviceID
+ _miFieldIDRoot
+ _miFieldIDMountPoint
+ _miFieldIDOptions
+ _miFieldIDOptionalFields
+
+ _miFieldCountFirstHalf
+)
+
+const (
+ _miFieldOffsetFSType = iota
+ _miFieldOffsetMountSource
+ _miFieldOffsetSuperOptions
+
+ _miFieldCountSecondHalf
+)
+
+const _miFieldCountMin = _miFieldCountFirstHalf + _miFieldCountSecondHalf
+
+// MountPoint is the data structure for the mount points in
+// `/proc/$PID/mountinfo`. See also proc(5) for more information.
+type MountPoint struct {
+ MountID int
+ ParentID int
+ DeviceID string
+ Root string
+ MountPoint string
+ Options []string
+ OptionalFields []string
+ FSType string
+ MountSource string
+ SuperOptions []string
+}
+
+// NewMountPointFromLine parses a line read from `/proc/$PID/mountinfo` and
+// returns a new *MountPoint.
+func NewMountPointFromLine(line string) (*MountPoint, error) {
+ fields := strings.Split(line, _mountInfoSep)
+
+ if len(fields) < _miFieldCountMin {
+ return nil, mountPointFormatInvalidError{line}
+ }
+
+ mountID, err := strconv.Atoi(fields[_miFieldIDMountID])
+ if err != nil {
+ return nil, err
+ }
+
+ parentID, err := strconv.Atoi(fields[_miFieldIDParentID])
+ if err != nil {
+ return nil, err
+ }
+
+ for i, field := range fields[_miFieldIDOptionalFields:] {
+ if field == _mountInfoOptionalFieldsSep {
+ // End of optional fields.
+ fsTypeStart := _miFieldIDOptionalFields + i + 1
+
+ // Now we know where the optional fields end, split the line again with a
+ // limit to avoid issues with spaces in super options as present on WSL.
+ fields = strings.SplitN(line, _mountInfoSep, fsTypeStart+_miFieldCountSecondHalf)
+ if len(fields) != fsTypeStart+_miFieldCountSecondHalf {
+ return nil, mountPointFormatInvalidError{line}
+ }
+
+ miFieldIDFSType := _miFieldOffsetFSType + fsTypeStart
+ miFieldIDMountSource := _miFieldOffsetMountSource + fsTypeStart
+ miFieldIDSuperOptions := _miFieldOffsetSuperOptions + fsTypeStart
+
+ return &MountPoint{
+ MountID: mountID,
+ ParentID: parentID,
+ DeviceID: fields[_miFieldIDDeviceID],
+ Root: fields[_miFieldIDRoot],
+ MountPoint: fields[_miFieldIDMountPoint],
+ Options: strings.Split(fields[_miFieldIDOptions], _mountInfoOptsSep),
+ OptionalFields: fields[_miFieldIDOptionalFields:(fsTypeStart - 1)],
+ FSType: fields[miFieldIDFSType],
+ MountSource: fields[miFieldIDMountSource],
+ SuperOptions: strings.Split(fields[miFieldIDSuperOptions], _mountInfoOptsSep),
+ }, nil
+ }
+ }
+
+ return nil, mountPointFormatInvalidError{line}
+}
+
+// Translate converts an absolute path inside the *MountPoint's file system to
+// the host file system path in the mount namespace the *MountPoint belongs to.
+func (mp *MountPoint) Translate(absPath string) (string, error) {
+ relPath, err := filepath.Rel(mp.Root, absPath)
+
+ if err != nil {
+ return "", err
+ }
+ if relPath == ".." || strings.HasPrefix(relPath, "../") {
+ return "", pathNotExposedFromMountPointError{
+ mountPoint: mp.MountPoint,
+ root: mp.Root,
+ path: absPath,
+ }
+ }
+
+ return filepath.Join(mp.MountPoint, relPath), nil
+}
+
+// parseMountInfo parses procPathMountInfo (usually at `/proc/$PID/mountinfo`)
+// and yields parsed *MountPoint into newMountPoint.
+func parseMountInfo(procPathMountInfo string, newMountPoint func(*MountPoint) error) error {
+ mountInfoFile, err := os.Open(procPathMountInfo)
+ if err != nil {
+ return err
+ }
+ defer mountInfoFile.Close()
+
+ scanner := bufio.NewScanner(mountInfoFile)
+
+ for scanner.Scan() {
+ mountPoint, err := NewMountPointFromLine(scanner.Text())
+ if err != nil {
+ return err
+ }
+ if err := newMountPoint(mountPoint); err != nil {
+ return err
+ }
+ }
+
+ return scanner.Err()
+}
diff --git a/vendor/go.uber.org/automaxprocs/internal/cgroups/subsys.go b/vendor/go.uber.org/automaxprocs/internal/cgroups/subsys.go
new file mode 100644
index 0000000000..cddc3eaec3
--- /dev/null
+++ b/vendor/go.uber.org/automaxprocs/internal/cgroups/subsys.go
@@ -0,0 +1,103 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+//go:build linux
+// +build linux
+
+package cgroups
+
+import (
+ "bufio"
+ "os"
+ "strconv"
+ "strings"
+)
+
const (
	// _cgroupSep separates the hierarchy-ID, subsystems, and name fields
	// of a `/proc/$PID/cgroup` line.
	_cgroupSep = ":"
	// _cgroupSubsysSep separates subsystem names within the subsystems field.
	_cgroupSubsysSep = ","
)

const (
	// Field indices of a `/proc/$PID/cgroup` line, in order of appearance.
	_csFieldIDID = iota
	_csFieldIDSubsystems
	_csFieldIDName
	// _csFieldCount is the expected number of fields per line.
	_csFieldCount
)
+
// CGroupSubsys represents the data structure for entities in
// `/proc/$PID/cgroup`. See also proc(5) for more information.
type CGroupSubsys struct {
	ID         int      // hierarchy ID, parsed from the first field
	Subsystems []string // controllers listed in the second field (e.g. "cpu")
	Name       string   // control-group path from the third field
}
+
+// NewCGroupSubsysFromLine returns a new *CGroupSubsys by parsing a string in
+// the format of `/proc/$PID/cgroup`
+func NewCGroupSubsysFromLine(line string) (*CGroupSubsys, error) {
+ fields := strings.SplitN(line, _cgroupSep, _csFieldCount)
+
+ if len(fields) != _csFieldCount {
+ return nil, cgroupSubsysFormatInvalidError{line}
+ }
+
+ id, err := strconv.Atoi(fields[_csFieldIDID])
+ if err != nil {
+ return nil, err
+ }
+
+ cgroup := &CGroupSubsys{
+ ID: id,
+ Subsystems: strings.Split(fields[_csFieldIDSubsystems], _cgroupSubsysSep),
+ Name: fields[_csFieldIDName],
+ }
+
+ return cgroup, nil
+}
+
+// parseCGroupSubsystems parses procPathCGroup (usually at `/proc/$PID/cgroup`)
+// and returns a new map[string]*CGroupSubsys.
+func parseCGroupSubsystems(procPathCGroup string) (map[string]*CGroupSubsys, error) {
+ cgroupFile, err := os.Open(procPathCGroup)
+ if err != nil {
+ return nil, err
+ }
+ defer cgroupFile.Close()
+
+ scanner := bufio.NewScanner(cgroupFile)
+ subsystems := make(map[string]*CGroupSubsys)
+
+ for scanner.Scan() {
+ cgroup, err := NewCGroupSubsysFromLine(scanner.Text())
+ if err != nil {
+ return nil, err
+ }
+ for _, subsys := range cgroup.Subsystems {
+ subsystems[subsys] = cgroup
+ }
+ }
+
+ if err := scanner.Err(); err != nil {
+ return nil, err
+ }
+
+ return subsystems, nil
+}
diff --git a/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_linux.go b/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_linux.go
new file mode 100644
index 0000000000..3b974754c3
--- /dev/null
+++ b/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_linux.go
@@ -0,0 +1,71 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+//go:build linux
+// +build linux
+
+package runtime
+
+import (
+ "errors"
+ "math"
+
+ cg "go.uber.org/automaxprocs/internal/cgroups"
+)
+
+// CPUQuotaToGOMAXPROCS converts the CPU quota applied to the calling process
+// to a valid GOMAXPROCS value.
+func CPUQuotaToGOMAXPROCS(minValue int) (int, CPUQuotaStatus, error) {
+ cgroups, err := newQueryer()
+ if err != nil {
+ return -1, CPUQuotaUndefined, err
+ }
+
+ quota, defined, err := cgroups.CPUQuota()
+ if !defined || err != nil {
+ return -1, CPUQuotaUndefined, err
+ }
+
+ maxProcs := int(math.Floor(quota))
+ if minValue > 0 && maxProcs < minValue {
+ return minValue, CPUQuotaMinUsed, nil
+ }
+ return maxProcs, CPUQuotaUsed, nil
+}
+
// queryer abstracts the subset of the cgroups API needed to read the CPU
// quota; both the v1 and v2 implementations satisfy it.
type queryer interface {
	CPUQuota() (float64, bool, error)
}

// The constructors are indirected through package variables so that tests
// can substitute stubs.
var (
	_newCgroups2 = cg.NewCGroups2ForCurrentProcess
	_newCgroups  = cg.NewCGroupsForCurrentProcess
)
+
+func newQueryer() (queryer, error) {
+ cgroups, err := _newCgroups2()
+ if err == nil {
+ return cgroups, nil
+ }
+ if errors.Is(err, cg.ErrNotV2) {
+ return _newCgroups()
+ }
+ return nil, err
+}
diff --git a/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_unsupported.go b/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_unsupported.go
new file mode 100644
index 0000000000..6922554484
--- /dev/null
+++ b/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_unsupported.go
@@ -0,0 +1,31 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+//go:build !linux
+// +build !linux
+
+package runtime
+
// CPUQuotaToGOMAXPROCS converts the CPU quota applied to the calling process
// to a valid GOMAXPROCS value. This is Linux-specific and not supported in the
// current OS.
func CPUQuotaToGOMAXPROCS(_ int) (int, CPUQuotaStatus, error) {
	// No quota concept on this platform: report "undefined" with no error,
	// which callers treat as "leave GOMAXPROCS alone".
	return -1, CPUQuotaUndefined, nil
}
diff --git a/vendor/go.uber.org/automaxprocs/internal/runtime/runtime.go b/vendor/go.uber.org/automaxprocs/internal/runtime/runtime.go
new file mode 100644
index 0000000000..df6eacf053
--- /dev/null
+++ b/vendor/go.uber.org/automaxprocs/internal/runtime/runtime.go
@@ -0,0 +1,33 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package runtime
+
// CPUQuotaStatus presents the status of how CPU quota is used
// (returned by CPUQuotaToGOMAXPROCS alongside the computed value).
type CPUQuotaStatus int

const (
	// CPUQuotaUndefined is returned when CPU quota is undefined
	CPUQuotaUndefined CPUQuotaStatus = iota
	// CPUQuotaUsed is returned when a valid CPU quota can be used
	CPUQuotaUsed
	// CPUQuotaMinUsed is returned when CPU quota is smaller than the min value
	CPUQuotaMinUsed
)
diff --git a/vendor/go.uber.org/automaxprocs/maxprocs/maxprocs.go b/vendor/go.uber.org/automaxprocs/maxprocs/maxprocs.go
new file mode 100644
index 0000000000..98176d6457
--- /dev/null
+++ b/vendor/go.uber.org/automaxprocs/maxprocs/maxprocs.go
@@ -0,0 +1,130 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Package maxprocs lets Go programs easily configure runtime.GOMAXPROCS to
+// match the configured Linux CPU quota. Unlike the top-level automaxprocs
+// package, it lets the caller configure logging and handle errors.
+package maxprocs // import "go.uber.org/automaxprocs/maxprocs"
+
+import (
+ "os"
+ "runtime"
+
+ iruntime "go.uber.org/automaxprocs/internal/runtime"
+)
+
// _maxProcsKey is the environment variable that, when set, takes precedence
// over any quota-derived value in Set.
const _maxProcsKey = "GOMAXPROCS"

// currentMaxProcs reports the current GOMAXPROCS setting without changing it.
func currentMaxProcs() int {
	return runtime.GOMAXPROCS(0)
}
+
// config carries the options accumulated by Set from its Option arguments.
type config struct {
	printf        func(string, ...interface{})                    // optional logger; nil disables logging
	procs         func(int) (int, iruntime.CPUQuotaStatus, error) // quota-to-GOMAXPROCS strategy
	minGOMAXPROCS int                                             // lower bound applied to the computed value
}
+
+func (c *config) log(fmt string, args ...interface{}) {
+ if c.printf != nil {
+ c.printf(fmt, args...)
+ }
+}
+
// An Option alters the behavior of Set.
type Option interface {
	apply(*config)
}

// Logger uses the supplied printf implementation for log output. By default,
// Set doesn't log anything.
func Logger(printf func(string, ...interface{})) Option {
	return optionFunc(func(cfg *config) {
		cfg.printf = printf
	})
}

// Min sets the minimum GOMAXPROCS value that will be used.
// Any value below 1 is ignored.
func Min(n int) Option {
	return optionFunc(func(cfg *config) {
		if n >= 1 {
			cfg.minGOMAXPROCS = n
		}
	})
}

// optionFunc adapts a plain function to the Option interface.
type optionFunc func(*config)

func (of optionFunc) apply(cfg *config) { of(cfg) }
+
+// Set GOMAXPROCS to match the Linux container CPU quota (if any), returning
+// any error encountered and an undo function.
+//
+// Set is a no-op on non-Linux systems and in Linux environments without a
+// configured CPU quota.
+func Set(opts ...Option) (func(), error) {
+ cfg := &config{
+ procs: iruntime.CPUQuotaToGOMAXPROCS,
+ minGOMAXPROCS: 1,
+ }
+ for _, o := range opts {
+ o.apply(cfg)
+ }
+
+ undoNoop := func() {
+ cfg.log("maxprocs: No GOMAXPROCS change to reset")
+ }
+
+ // Honor the GOMAXPROCS environment variable if present. Otherwise, amend
+ // `runtime.GOMAXPROCS()` with the current process' CPU quota if the OS is
+ // Linux, and guarantee a minimum value of 1. The minimum guaranteed value
+ // can be overridden using `maxprocs.Min()`.
+ if max, exists := os.LookupEnv(_maxProcsKey); exists {
+ cfg.log("maxprocs: Honoring GOMAXPROCS=%q as set in environment", max)
+ return undoNoop, nil
+ }
+
+ maxProcs, status, err := cfg.procs(cfg.minGOMAXPROCS)
+ if err != nil {
+ return undoNoop, err
+ }
+
+ if status == iruntime.CPUQuotaUndefined {
+ cfg.log("maxprocs: Leaving GOMAXPROCS=%v: CPU quota undefined", currentMaxProcs())
+ return undoNoop, nil
+ }
+
+ prev := currentMaxProcs()
+ undo := func() {
+ cfg.log("maxprocs: Resetting GOMAXPROCS to %v", prev)
+ runtime.GOMAXPROCS(prev)
+ }
+
+ switch status {
+ case iruntime.CPUQuotaMinUsed:
+ cfg.log("maxprocs: Updating GOMAXPROCS=%v: using minimum allowed GOMAXPROCS", maxProcs)
+ case iruntime.CPUQuotaUsed:
+ cfg.log("maxprocs: Updating GOMAXPROCS=%v: determined from CPU quota", maxProcs)
+ }
+
+ runtime.GOMAXPROCS(maxProcs)
+ return undo, nil
+}
diff --git a/vendor/go.uber.org/automaxprocs/maxprocs/version.go b/vendor/go.uber.org/automaxprocs/maxprocs/version.go
new file mode 100644
index 0000000000..108a95535e
--- /dev/null
+++ b/vendor/go.uber.org/automaxprocs/maxprocs/version.go
@@ -0,0 +1,24 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package maxprocs
+
// Version is the current package version.
const Version = "1.5.2"
diff --git a/vendor/golang.org/x/exp/constraints/constraints.go b/vendor/golang.org/x/exp/constraints/constraints.go
new file mode 100644
index 0000000000..2c033dff47
--- /dev/null
+++ b/vendor/golang.org/x/exp/constraints/constraints.go
@@ -0,0 +1,50 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package constraints defines a set of useful constraints to be used
+// with type parameters.
+package constraints
+
// Signed is a constraint that permits any signed integer type.
// If future releases of Go add new predeclared signed integer types,
// this constraint will be modified to include them.
// The ~ token matches by underlying type, so defined types such as
// `type MyInt int` also satisfy the constraint.
type Signed interface {
	~int | ~int8 | ~int16 | ~int32 | ~int64
}

// Unsigned is a constraint that permits any unsigned integer type.
// If future releases of Go add new predeclared unsigned integer types,
// this constraint will be modified to include them.
type Unsigned interface {
	~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr
}

// Integer is a constraint that permits any integer type.
// If future releases of Go add new predeclared integer types,
// this constraint will be modified to include them.
type Integer interface {
	Signed | Unsigned
}

// Float is a constraint that permits any floating-point type.
// If future releases of Go add new predeclared floating-point types,
// this constraint will be modified to include them.
type Float interface {
	~float32 | ~float64
}

// Complex is a constraint that permits any complex numeric type.
// If future releases of Go add new predeclared complex numeric types,
// this constraint will be modified to include them.
type Complex interface {
	~complex64 | ~complex128
}

// Ordered is a constraint that permits any ordered type: any type
// that supports the operators < <= >= >.
// If future releases of Go add new ordered types,
// this constraint will be modified to include them.
type Ordered interface {
	Integer | Float | ~string
}
diff --git a/vendor/golang.org/x/exp/slices/cmp.go b/vendor/golang.org/x/exp/slices/cmp.go
new file mode 100644
index 0000000000..fbf1934a06
--- /dev/null
+++ b/vendor/golang.org/x/exp/slices/cmp.go
@@ -0,0 +1,44 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package slices
+
+import "golang.org/x/exp/constraints"
+
// min is a version of the predeclared function from the Go 1.21 release.
// For floating-point types a NaN first argument is returned as-is, so NaNs
// propagate. isNaN is defined elsewhere in this package.
func min[T constraints.Ordered](a, b T) T {
	if a < b || isNaN(a) {
		return a
	}
	return b
}

// max is a version of the predeclared function from the Go 1.21 release.
// As with min, a NaN first argument is returned as-is.
func max[T constraints.Ordered](a, b T) T {
	if a > b || isNaN(a) {
		return a
	}
	return b
}

// cmpLess is a copy of cmp.Less from the Go 1.21 release.
// A NaN is considered less than any non-NaN value.
func cmpLess[T constraints.Ordered](x, y T) bool {
	return (isNaN(x) && !isNaN(y)) || x < y
}

// cmpCompare is a copy of cmp.Compare from the Go 1.21 release.
// It returns -1 if x < y, 0 if x == y, and +1 if x > y, ordering NaN
// before all other values and treating two NaNs as equal.
func cmpCompare[T constraints.Ordered](x, y T) int {
	xNaN := isNaN(x)
	yNaN := isNaN(y)
	if xNaN && yNaN {
		return 0
	}
	if xNaN || x < y {
		return -1
	}
	if yNaN || x > y {
		return +1
	}
	return 0
}
diff --git a/vendor/golang.org/x/exp/slices/slices.go b/vendor/golang.org/x/exp/slices/slices.go
new file mode 100644
index 0000000000..5e8158bba8
--- /dev/null
+++ b/vendor/golang.org/x/exp/slices/slices.go
@@ -0,0 +1,499 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package slices defines various functions useful with slices of any type.
+package slices
+
+import (
+ "unsafe"
+
+ "golang.org/x/exp/constraints"
+)
+
// Equal reports whether two slices are equal: the same length and all
// elements equal. If the lengths are different, Equal returns false.
// Otherwise, the elements are compared in increasing index order, and the
// comparison stops at the first unequal pair.
// Floating point NaNs are not considered equal.
func Equal[S ~[]E, E comparable](s1, s2 S) bool {
	if len(s1) != len(s2) {
		return false
	}
	for i, v := range s1 {
		if v != s2[i] {
			return false
		}
	}
	return true
}
+
// EqualFunc reports whether two slices are equal using an equality
// function on each pair of elements. If the lengths are different,
// EqualFunc returns false. Otherwise, the elements are compared in
// increasing index order, and the comparison stops at the first index
// for which eq returns false.
func EqualFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, eq func(E1, E2) bool) bool {
	if len(s1) != len(s2) {
		return false
	}
	for i := range s1 {
		if !eq(s1[i], s2[i]) {
			return false
		}
	}
	return true
}
+
+// Compare compares the elements of s1 and s2, using [cmp.Compare] on each pair
+// of elements. The elements are compared sequentially, starting at index 0,
+// until one element is not equal to the other.
+// The result of comparing the first non-matching elements is returned.
+// If both slices are equal until one of them ends, the shorter slice is
+// considered less than the longer one.
+// The result is 0 if s1 == s2, -1 if s1 < s2, and +1 if s1 > s2.
+func Compare[S ~[]E, E constraints.Ordered](s1, s2 S) int {
+ for i, v1 := range s1 {
+ if i >= len(s2) {
+ return +1
+ }
+ v2 := s2[i]
+ if c := cmpCompare(v1, v2); c != 0 {
+ return c
+ }
+ }
+ if len(s1) < len(s2) {
+ return -1
+ }
+ return 0
+}
+
// CompareFunc is like [Compare] but uses a custom comparison function on each
// pair of elements.
// The result is the first non-zero result of cmp; if cmp always
// returns 0 the result is 0 if len(s1) == len(s2), -1 if len(s1) < len(s2),
// and +1 if len(s1) > len(s2).
func CompareFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, cmp func(E1, E2) int) int {
	// Compare the common prefix element by element.
	n := len(s1)
	if len(s2) < n {
		n = len(s2)
	}
	for i := 0; i < n; i++ {
		if c := cmp(s1[i], s2[i]); c != 0 {
			return c
		}
	}
	// Prefixes match: shorter slice orders first.
	switch {
	case len(s1) < len(s2):
		return -1
	case len(s1) > len(s2):
		return +1
	default:
		return 0
	}
}
+
// Index returns the index of the first occurrence of v in s,
// or -1 if not present.
func Index[S ~[]E, E comparable](s S, v E) int {
	for i, x := range s {
		if x == v {
			return i
		}
	}
	return -1
}
+
// IndexFunc returns the first index i satisfying f(s[i]),
// or -1 if none do.
func IndexFunc[S ~[]E, E any](s S, f func(E) bool) int {
	for i, v := range s {
		if f(v) {
			return i
		}
	}
	return -1
}
+
// Contains reports whether v is present in s.
func Contains[S ~[]E, E comparable](s S, v E) bool {
	for _, x := range s {
		if x == v {
			return true
		}
	}
	return false
}
+
// ContainsFunc reports whether at least one
// element e of s satisfies f(e).
func ContainsFunc[S ~[]E, E any](s S, f func(E) bool) bool {
	for _, e := range s {
		if f(e) {
			return true
		}
	}
	return false
}
+
// Insert inserts the values v... into s at index i,
// returning the modified slice.
// The elements at s[i:] are shifted up to make room.
// In the returned slice r, r[i] == v[0],
// and r[i+len(v)] == value originally at r[i].
// Insert panics if i is out of range.
// This function is O(len(s) + len(v)).
func Insert[S ~[]E, E any](s S, i int, v ...E) S {
	m := len(v)
	if m == 0 {
		// Nothing to insert.
		return s
	}
	n := len(s)
	if i == n {
		// Appending at the end needs no shifting.
		return append(s, v...)
	}
	if n+m > cap(s) {
		// Use append rather than make so that we bump the size of
		// the slice up to the next storage class.
		// This is what Grow does but we don't call Grow because
		// that might copy the values twice.
		s2 := append(s[:i], make(S, n+m-i)...)
		copy(s2[i:], v)
		copy(s2[i+m:], s[i:])
		return s2
	}
	// Room exists in the backing array: extend in place and shift.
	s = s[:n+m]

	// before:
	// s: aaaaaaaabbbbccccccccdddd
	//            ^   ^       ^   ^
	//            i  i+m      n  n+m
	// after:
	// s: aaaaaaaavvvvbbbbcccccccc
	//            ^   ^       ^   ^
	//            i  i+m      n  n+m
	//
	// a are the values that don't move in s.
	// v are the values copied in from v.
	// b and c are the values from s that are shifted up in index.
	// d are the values that get overwritten, never to be seen again.

	if !overlaps(v, s[i+m:]) {
		// Easy case - v does not overlap either the c or d regions.
		// (It might be in some of a or b, or elsewhere entirely.)
		// The data we copy up doesn't write to v at all, so just do it.

		copy(s[i+m:], s[i:])

		// Now we have
		// s: aaaaaaaabbbbbbbbcccccccc
		//            ^   ^       ^   ^
		//            i  i+m      n  n+m
		// Note the b values are duplicated.

		copy(s[i:], v)

		// Now we have
		// s: aaaaaaaavvvvbbbbcccccccc
		//            ^   ^       ^   ^
		//            i  i+m      n  n+m
		// That's the result we want.
		return s
	}

	// The hard case - v overlaps c or d. We can't just shift up
	// the data because we'd move or clobber the values we're trying
	// to insert.
	// So instead, write v on top of d, then rotate.
	copy(s[n:], v)

	// Now we have
	// s: aaaaaaaabbbbccccccccvvvv
	//            ^   ^       ^   ^
	//            i  i+m      n  n+m

	rotateRight(s[i:], m)

	// Now we have
	// s: aaaaaaaavvvvbbbbcccccccc
	//            ^   ^       ^   ^
	//            i  i+m      n  n+m
	// That's the result we want.
	return s
}
+
// Delete removes the elements s[i:j] from s, returning the modified slice.
// Delete panics if s[i:j] is not a valid slice of s.
// Delete is O(len(s)-j), so if many items must be deleted, it is better to
// make a single call deleting them all together than to delete one at a time.
// Delete might not modify the elements s[len(s)-(j-i):len(s)]. If those
// elements contain pointers you might consider zeroing those elements so that
// objects they reference can be garbage collected.
func Delete[S ~[]E, E any](s S, i, j int) S {
	_ = s[i:j] // bounds check

	// Shift the tail down over the deleted span and shrink the length.
	kept := copy(s[i:], s[j:])
	return s[:i+kept]
}
+
// DeleteFunc removes any elements from s for which del returns true,
// returning the modified slice.
// When DeleteFunc removes m elements, it might not modify the elements
// s[len(s)-m:len(s)]. If those elements contain pointers you might consider
// zeroing those elements so that objects they reference can be garbage
// collected.
func DeleteFunc[S ~[]E, E any](s S, del func(E) bool) S {
	// Scan for the first element to delete; until then nothing moves.
	w := 0
	for w < len(s) && !del(s[w]) {
		w++
	}
	if w == len(s) {
		return s
	}
	// Compact the survivors down over the deleted elements.
	for r := w + 1; r < len(s); r++ {
		if v := s[r]; !del(v) {
			s[w] = v
			w++
		}
	}
	return s[:w]
}
+
// Replace replaces the elements s[i:j] by the given v, and returns the
// modified slice. Replace panics if s[i:j] is not a valid slice of s.
func Replace[S ~[]E, E any](s S, i, j int, v ...E) S {
	_ = s[i:j] // verify that i:j is a valid subslice

	if i == j {
		// Nothing removed: pure insertion.
		return Insert(s, i, v...)
	}
	if j == len(s) {
		// The replaced range extends to the end: truncate and append.
		return append(s[:i], v...)
	}

	tot := len(s[:i]) + len(v) + len(s[j:])
	if tot > cap(s) {
		// Too big to fit, allocate and copy over.
		s2 := append(s[:i], make(S, tot-i)...) // See Insert
		copy(s2[i:], v)
		copy(s2[i+len(v):], s[j:])
		return s2
	}

	// r reuses s's backing array; its length is the final size.
	r := s[:tot]

	if i+len(v) <= j {
		// Easy, as v fits in the deleted portion.
		copy(r[i:], v)
		if i+len(v) != j {
			copy(r[i+len(v):], s[j:])
		}
		return r
	}

	// We are expanding (v is bigger than j-i).
	// The situation is something like this:
	// (example has i=4,j=8,len(s)=16,len(v)=6)
	// s: aaaaxxxxbbbbbbbbyy
	//        ^   ^       ^ ^
	//        i   j  len(s) tot
	// a: prefix of s
	// x: deleted range
	// b: more of s
	// y: area to expand into

	if !overlaps(r[i+len(v):], v) {
		// Easy, as v is not clobbered by the first copy.
		copy(r[i+len(v):], s[j:])
		copy(r[i:], v)
		return r
	}

	// This is a situation where we don't have a single place to which
	// we can copy v. Parts of it need to go to two different places.
	// We want to copy the prefix of v into y and the suffix into x, then
	// rotate |y| spots to the right.
	//
	//        v[2:]         v[:2]
	//         |             |
	// s: aaaavvvvbbbbbbbbvv
	//        ^   ^       ^ ^
	//        i   j  len(s) tot
	//
	// If either of those two destinations don't alias v, then we're good.
	y := len(v) - (j - i) // length of y portion

	if !overlaps(r[i:j], v) {
		copy(r[i:j], v[y:])
		copy(r[len(s):], v[:y])
		rotateRight(r[i:], y)
		return r
	}
	if !overlaps(r[len(s):], v) {
		copy(r[len(s):], v[:y])
		copy(r[i:j], v[y:])
		rotateRight(r[i:], y)
		return r
	}

	// Now we know that v overlaps both x and y.
	// That means that the entirety of b is *inside* v.
	// So we don't need to preserve b at all; instead we
	// can copy v first, then copy the b part of v out of
	// v to the right destination.
	k := startIdx(v, s[j:])
	copy(r[i:], v)
	copy(r[i+len(v):], r[i+k:])
	return r
}
+
// Clone returns a copy of the slice.
// The elements are copied using assignment, so this is a shallow clone.
func Clone[S ~[]E, E any](s S) S {
	// Preserve nil in case it matters.
	if s == nil {
		return nil
	}
	c := make(S, len(s))
	copy(c, s)
	return c
}
+
// Compact replaces consecutive runs of equal elements with a single copy.
// This is like the uniq command found on Unix.
// Compact modifies the contents of the slice s and returns the modified slice,
// which may have a smaller length.
// When Compact discards m elements in total, it might not modify the elements
// s[len(s)-m:len(s)]. If those elements contain pointers you might consider
// zeroing those elements so that objects they reference can be garbage collected.
func Compact[S ~[]E, E comparable](s S) S {
	if len(s) < 2 {
		return s
	}
	w := 1 // write index: s[:w] holds the compacted prefix
	for r := 1; r < len(s); r++ {
		if s[r] == s[r-1] {
			continue // duplicate of its predecessor; drop it
		}
		if w != r {
			s[w] = s[r]
		}
		w++
	}
	return s[:w]
}
+
// CompactFunc is like [Compact] but uses an equality function to compare elements.
// For runs of elements that compare equal, CompactFunc keeps the first one.
func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S {
	if len(s) < 2 {
		return s
	}
	w := 1 // write index: s[:w] holds the compacted prefix
	for r := 1; r < len(s); r++ {
		if eq(s[r], s[r-1]) {
			continue // equal to its predecessor; drop it
		}
		if w != r {
			s[w] = s[r]
		}
		w++
	}
	return s[:w]
}
+
// Grow increases the slice's capacity, if necessary, to guarantee space for
// another n elements. After Grow(n), at least n elements can be appended
// to the slice without another allocation. If n is negative or too large to
// allocate the memory, Grow panics.
func Grow[S ~[]E, E any](s S, n int) S {
	if n < 0 {
		panic("cannot be negative")
	}
	if spare := cap(s) - len(s); spare < n {
		// TODO(https://go.dev/issue/53888): Make using []E instead of S
		// to workaround a compiler bug where the runtime.growslice optimization
		// does not take effect. Revert when the compiler is fixed.
		s = append([]E(s)[:cap(s)], make([]E, n-spare)...)[:len(s)]
	}
	return s
}
+
// Clip removes unused capacity from the slice, returning s[:len(s):len(s)].
func Clip[S ~[]E, E any](s S) S {
	n := len(s)
	return s[:n:n]
}
+
+// Rotation algorithm explanation:
+//
+// rotate left by 2
+// start with
+// 0123456789
+// split up like this
+// 01 234567 89
+// swap first 2 and last 2
+// 89 234567 01
+// join first parts
+// 89234567 01
+// recursively rotate first left part by 2
+// 23456789 01
+// join at the end
+// 2345678901
+//
+// rotate left by 8
+// start with
+// 0123456789
+// split up like this
+// 01 234567 89
+// swap first 2 and last 2
+// 89 234567 01
+// join last parts
+// 89 23456701
+// recursively rotate second part left by 6
+// 89 01234567
+// join at the end
+// 8901234567
+
+// TODO: There are other rotate algorithms.
+// This algorithm has the desirable property that it moves each element exactly twice.
+// The triple-reverse algorithm is simpler and more cache friendly, but takes more writes.
+// The follow-cycles algorithm can be 1-write but it is not very cache friendly.
+
// rotateLeft rotates b left by n spaces.
// s_final[i] = s_orig[i+r], wrapping around.
func rotateLeft[E any](s []E, r int) {
	// Each pass swaps the smaller of the two parts into its final place,
	// then continues on the remaining sub-slice (see the algorithm
	// explanation above); every element moves at most twice.
	for r != 0 && r != len(s) {
		if r*2 <= len(s) {
			swap(s[:r], s[len(s)-r:])
			s = s[:len(s)-r]
		} else {
			swap(s[:len(s)-r], s[r:])
			s, r = s[len(s)-r:], r*2-len(s)
		}
	}
}

// rotateRight rotates s right by r spaces, implemented as a left rotation
// by the complementary amount.
func rotateRight[E any](s []E, r int) {
	rotateLeft(s, len(s)-r)
}
+
// swap swaps the contents of x and y. x and y must be equal length and disjoint.
func swap[E any](x, y []E) {
	for i := range x {
		x[i], y[i] = y[i], x[i]
	}
}
+
// overlaps reports whether the memory ranges a[0:len(a)] and b[0:len(b)] overlap.
func overlaps[E any](a, b []E) bool {
	if len(a) == 0 || len(b) == 0 {
		return false
	}
	elemSize := unsafe.Sizeof(a[0])
	if elemSize == 0 {
		// Zero-size elements occupy no bytes, so they cannot overlap.
		return false
	}
	// TODO: use a runtime/unsafe facility once one becomes available. See issue 12445.
	// Also see crypto/internal/alias/alias.go:AnyOverlap
	// The ranges overlap iff each start address is no later than the other
	// range's final byte; (elemSize-1) turns the address of the last element
	// into the address of its last byte.
	return uintptr(unsafe.Pointer(&a[0])) <= uintptr(unsafe.Pointer(&b[len(b)-1]))+(elemSize-1) &&
		uintptr(unsafe.Pointer(&b[0])) <= uintptr(unsafe.Pointer(&a[len(a)-1]))+(elemSize-1)
}
+}
+
// startIdx returns the index in haystack where the needle starts.
// prerequisite: the needle must be aliased entirely inside the haystack.
func startIdx[E any](haystack, needle []E) int {
	first := &needle[0]
	for i := range haystack {
		if &haystack[i] == first {
			return i
		}
	}
	// TODO: what if the overlap is by a non-integral number of Es?
	panic("needle not found")
}
+
// Reverse reverses the elements of the slice in place.
func Reverse[S ~[]E, E any](s S) {
	lo, hi := 0, len(s)-1
	for lo < hi {
		s[lo], s[hi] = s[hi], s[lo]
		lo++
		hi--
	}
}
diff --git a/vendor/golang.org/x/exp/slices/sort.go b/vendor/golang.org/x/exp/slices/sort.go
new file mode 100644
index 0000000000..b67897f76b
--- /dev/null
+++ b/vendor/golang.org/x/exp/slices/sort.go
@@ -0,0 +1,195 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:generate go run $GOROOT/src/sort/gen_sort_variants.go -exp
+
+package slices
+
+import (
+ "math/bits"
+
+ "golang.org/x/exp/constraints"
+)
+
+// Sort sorts a slice of any ordered type in ascending order.
+// When sorting floating-point numbers, NaNs are ordered before other values.
+func Sort[S ~[]E, E constraints.Ordered](x S) {
+ n := len(x)
+ pdqsortOrdered(x, 0, n, bits.Len(uint(n)))
+}
+
+// SortFunc sorts the slice x in ascending order as determined by the cmp
+// function. This sort is not guaranteed to be stable.
+// cmp(a, b) should return a negative number when a < b, a positive number when
+// a > b and zero when a == b.
+//
+// SortFunc requires that cmp is a strict weak ordering.
+// See https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings.
+func SortFunc[S ~[]E, E any](x S, cmp func(a, b E) int) {
+ n := len(x)
+ pdqsortCmpFunc(x, 0, n, bits.Len(uint(n)), cmp)
+}
+
+// SortStableFunc sorts the slice x while keeping the original order of equal
+// elements, using cmp to compare elements in the same way as [SortFunc].
+func SortStableFunc[S ~[]E, E any](x S, cmp func(a, b E) int) {
+ stableCmpFunc(x, len(x), cmp)
+}
+
+// IsSorted reports whether x is sorted in ascending order.
+func IsSorted[S ~[]E, E constraints.Ordered](x S) bool {
+ for i := len(x) - 1; i > 0; i-- {
+ if cmpLess(x[i], x[i-1]) {
+ return false
+ }
+ }
+ return true
+}
+
+// IsSortedFunc reports whether x is sorted in ascending order, with cmp as the
+// comparison function as defined by [SortFunc].
+func IsSortedFunc[S ~[]E, E any](x S, cmp func(a, b E) int) bool {
+ for i := len(x) - 1; i > 0; i-- {
+ if cmp(x[i], x[i-1]) < 0 {
+ return false
+ }
+ }
+ return true
+}
+
+// Min returns the minimal value in x. It panics if x is empty.
+// For floating-point numbers, Min propagates NaNs (any NaN value in x
+// forces the output to be NaN).
+func Min[S ~[]E, E constraints.Ordered](x S) E {
+ if len(x) < 1 {
+ panic("slices.Min: empty list")
+ }
+ m := x[0]
+ for i := 1; i < len(x); i++ {
+ m = min(m, x[i])
+ }
+ return m
+}
+
+// MinFunc returns the minimal value in x, using cmp to compare elements.
+// It panics if x is empty. If there is more than one minimal element
+// according to the cmp function, MinFunc returns the first one.
+func MinFunc[S ~[]E, E any](x S, cmp func(a, b E) int) E {
+ if len(x) < 1 {
+ panic("slices.MinFunc: empty list")
+ }
+ m := x[0]
+ for i := 1; i < len(x); i++ {
+ if cmp(x[i], m) < 0 {
+ m = x[i]
+ }
+ }
+ return m
+}
+
+// Max returns the maximal value in x. It panics if x is empty.
+// For floating-point E, Max propagates NaNs (any NaN value in x
+// forces the output to be NaN).
+func Max[S ~[]E, E constraints.Ordered](x S) E {
+ if len(x) < 1 {
+ panic("slices.Max: empty list")
+ }
+ m := x[0]
+ for i := 1; i < len(x); i++ {
+ m = max(m, x[i])
+ }
+ return m
+}
+
+// MaxFunc returns the maximal value in x, using cmp to compare elements.
+// It panics if x is empty. If there is more than one maximal element
+// according to the cmp function, MaxFunc returns the first one.
+func MaxFunc[S ~[]E, E any](x S, cmp func(a, b E) int) E {
+ if len(x) < 1 {
+ panic("slices.MaxFunc: empty list")
+ }
+ m := x[0]
+ for i := 1; i < len(x); i++ {
+ if cmp(x[i], m) > 0 {
+ m = x[i]
+ }
+ }
+ return m
+}
+
+// BinarySearch searches for target in a sorted slice and returns the position
+// where target is found, or the position where target would appear in the
+// sort order; it also returns a bool saying whether the target is really found
+// in the slice. The slice must be sorted in increasing order.
+func BinarySearch[S ~[]E, E constraints.Ordered](x S, target E) (int, bool) {
+ // Inlining is faster than calling BinarySearchFunc with a lambda.
+ n := len(x)
+ // Define x[-1] < target and x[n] >= target.
+ // Invariant: x[i-1] < target, x[j] >= target.
+ i, j := 0, n
+ for i < j {
+ h := int(uint(i+j) >> 1) // avoid overflow when computing h
+ // i ≤ h < j
+ if cmpLess(x[h], target) {
+ i = h + 1 // preserves x[i-1] < target
+ } else {
+ j = h // preserves x[j] >= target
+ }
+ }
+ // i == j, x[i-1] < target, and x[j] (= x[i]) >= target => answer is i.
+ return i, i < n && (x[i] == target || (isNaN(x[i]) && isNaN(target)))
+}
+
+// BinarySearchFunc works like [BinarySearch], but uses a custom comparison
+// function. The slice must be sorted in increasing order, where "increasing"
+// is defined by cmp. cmp should return 0 if the slice element matches
+// the target, a negative number if the slice element precedes the target,
+// or a positive number if the slice element follows the target.
+// cmp must implement the same ordering as the slice, such that if
+// cmp(a, t) < 0 and cmp(b, t) >= 0, then a must precede b in the slice.
+func BinarySearchFunc[S ~[]E, E, T any](x S, target T, cmp func(E, T) int) (int, bool) {
+ n := len(x)
+ // Define cmp(x[-1], target) < 0 and cmp(x[n], target) >= 0 .
+ // Invariant: cmp(x[i - 1], target) < 0, cmp(x[j], target) >= 0.
+ i, j := 0, n
+ for i < j {
+ h := int(uint(i+j) >> 1) // avoid overflow when computing h
+ // i ≤ h < j
+ if cmp(x[h], target) < 0 {
+ i = h + 1 // preserves cmp(x[i - 1], target) < 0
+ } else {
+ j = h // preserves cmp(x[j], target) >= 0
+ }
+ }
+ // i == j, cmp(x[i-1], target) < 0, and cmp(x[j], target) (= cmp(x[i], target)) >= 0 => answer is i.
+ return i, i < n && cmp(x[i], target) == 0
+}
+
+type sortedHint int // hint for pdqsort when choosing the pivot
+
+const (
+ unknownHint sortedHint = iota
+ increasingHint
+ decreasingHint
+)
+
+// xorshift paper: https://www.jstatsoft.org/article/view/v008i14/xorshift.pdf
+type xorshift uint64
+
+func (r *xorshift) Next() uint64 {
+ *r ^= *r << 13
+ *r ^= *r >> 17
+ *r ^= *r << 5
+ return uint64(*r)
+}
+
+func nextPowerOfTwo(length int) uint {
+ return 1 << bits.Len(uint(length))
+}
+
+// isNaN reports whether x is a NaN without requiring the math package.
+// This will always return false if T is not floating-point.
+func isNaN[T constraints.Ordered](x T) bool {
+ return x != x
+}
diff --git a/vendor/golang.org/x/exp/slices/zsortanyfunc.go b/vendor/golang.org/x/exp/slices/zsortanyfunc.go
new file mode 100644
index 0000000000..06f2c7a248
--- /dev/null
+++ b/vendor/golang.org/x/exp/slices/zsortanyfunc.go
@@ -0,0 +1,479 @@
+// Code generated by gen_sort_variants.go; DO NOT EDIT.
+
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package slices
+
+// insertionSortCmpFunc sorts data[a:b] using insertion sort.
+func insertionSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) {
+ for i := a + 1; i < b; i++ {
+ for j := i; j > a && (cmp(data[j], data[j-1]) < 0); j-- {
+ data[j], data[j-1] = data[j-1], data[j]
+ }
+ }
+}
+
+// siftDownCmpFunc implements the heap property on data[lo:hi].
+// first is an offset into the array where the root of the heap lies.
+func siftDownCmpFunc[E any](data []E, lo, hi, first int, cmp func(a, b E) int) {
+ root := lo
+ for {
+ child := 2*root + 1
+ if child >= hi {
+ break
+ }
+ if child+1 < hi && (cmp(data[first+child], data[first+child+1]) < 0) {
+ child++
+ }
+ if !(cmp(data[first+root], data[first+child]) < 0) {
+ return
+ }
+ data[first+root], data[first+child] = data[first+child], data[first+root]
+ root = child
+ }
+}
+
+func heapSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) {
+ first := a
+ lo := 0
+ hi := b - a
+
+ // Build heap with greatest element at top.
+ for i := (hi - 1) / 2; i >= 0; i-- {
+ siftDownCmpFunc(data, i, hi, first, cmp)
+ }
+
+ // Pop elements, largest first, into end of data.
+ for i := hi - 1; i >= 0; i-- {
+ data[first], data[first+i] = data[first+i], data[first]
+ siftDownCmpFunc(data, lo, i, first, cmp)
+ }
+}
+
+// pdqsortCmpFunc sorts data[a:b].
+// The algorithm based on pattern-defeating quicksort(pdqsort), but without the optimizations from BlockQuicksort.
+// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf
+// C++ implementation: https://github.com/orlp/pdqsort
+// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/
+// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort.
+func pdqsortCmpFunc[E any](data []E, a, b, limit int, cmp func(a, b E) int) {
+ const maxInsertion = 12
+
+ var (
+ wasBalanced = true // whether the last partitioning was reasonably balanced
+ wasPartitioned = true // whether the slice was already partitioned
+ )
+
+ for {
+ length := b - a
+
+ if length <= maxInsertion {
+ insertionSortCmpFunc(data, a, b, cmp)
+ return
+ }
+
+ // Fall back to heapsort if too many bad choices were made.
+ if limit == 0 {
+ heapSortCmpFunc(data, a, b, cmp)
+ return
+ }
+
+ // If the last partitioning was imbalanced, we need to break patterns.
+ if !wasBalanced {
+ breakPatternsCmpFunc(data, a, b, cmp)
+ limit--
+ }
+
+ pivot, hint := choosePivotCmpFunc(data, a, b, cmp)
+ if hint == decreasingHint {
+ reverseRangeCmpFunc(data, a, b, cmp)
+ // The chosen pivot was pivot-a elements after the start of the array.
+ // After reversing it is pivot-a elements before the end of the array.
+ // The idea came from Rust's implementation.
+ pivot = (b - 1) - (pivot - a)
+ hint = increasingHint
+ }
+
+ // The slice is likely already sorted.
+ if wasBalanced && wasPartitioned && hint == increasingHint {
+ if partialInsertionSortCmpFunc(data, a, b, cmp) {
+ return
+ }
+ }
+
+ // Probably the slice contains many duplicate elements, partition the slice into
+ // elements equal to and elements greater than the pivot.
+ if a > 0 && !(cmp(data[a-1], data[pivot]) < 0) {
+ mid := partitionEqualCmpFunc(data, a, b, pivot, cmp)
+ a = mid
+ continue
+ }
+
+ mid, alreadyPartitioned := partitionCmpFunc(data, a, b, pivot, cmp)
+ wasPartitioned = alreadyPartitioned
+
+ leftLen, rightLen := mid-a, b-mid
+ balanceThreshold := length / 8
+ if leftLen < rightLen {
+ wasBalanced = leftLen >= balanceThreshold
+ pdqsortCmpFunc(data, a, mid, limit, cmp)
+ a = mid + 1
+ } else {
+ wasBalanced = rightLen >= balanceThreshold
+ pdqsortCmpFunc(data, mid+1, b, limit, cmp)
+ b = mid
+ }
+ }
+}
+
+// partitionCmpFunc does one quicksort partition.
+// Let p = data[pivot]
+// Moves elements in data[a:b] around, so that data[i]<p and data[j]>=p for i<newpivot and j>newpivot.
+// On return, data[newpivot] = p
+func partitionCmpFunc[E any](data []E, a, b, pivot int, cmp func(a, b E) int) (newpivot int, alreadyPartitioned bool) {
+ data[a], data[pivot] = data[pivot], data[a]
+ i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
+
+ for i <= j && (cmp(data[i], data[a]) < 0) {
+ i++
+ }
+ for i <= j && !(cmp(data[j], data[a]) < 0) {
+ j--
+ }
+ if i > j {
+ data[j], data[a] = data[a], data[j]
+ return j, true
+ }
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+
+ for {
+ for i <= j && (cmp(data[i], data[a]) < 0) {
+ i++
+ }
+ for i <= j && !(cmp(data[j], data[a]) < 0) {
+ j--
+ }
+ if i > j {
+ break
+ }
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+ }
+ data[j], data[a] = data[a], data[j]
+ return j, false
+}
+
+// partitionEqualCmpFunc partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot].
+// It assumed that data[a:b] does not contain elements smaller than the data[pivot].
+func partitionEqualCmpFunc[E any](data []E, a, b, pivot int, cmp func(a, b E) int) (newpivot int) {
+ data[a], data[pivot] = data[pivot], data[a]
+ i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
+
+ for {
+ for i <= j && !(cmp(data[a], data[i]) < 0) {
+ i++
+ }
+ for i <= j && (cmp(data[a], data[j]) < 0) {
+ j--
+ }
+ if i > j {
+ break
+ }
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+ }
+ return i
+}
+
+// partialInsertionSortCmpFunc partially sorts a slice, returns true if the slice is sorted at the end.
+func partialInsertionSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) bool {
+ const (
+ maxSteps = 5 // maximum number of adjacent out-of-order pairs that will get shifted
+ shortestShifting = 50 // don't shift any elements on short arrays
+ )
+ i := a + 1
+ for j := 0; j < maxSteps; j++ {
+ for i < b && !(cmp(data[i], data[i-1]) < 0) {
+ i++
+ }
+
+ if i == b {
+ return true
+ }
+
+ if b-a < shortestShifting {
+ return false
+ }
+
+ data[i], data[i-1] = data[i-1], data[i]
+
+ // Shift the smaller one to the left.
+ if i-a >= 2 {
+ for j := i - 1; j >= 1; j-- {
+ if !(cmp(data[j], data[j-1]) < 0) {
+ break
+ }
+ data[j], data[j-1] = data[j-1], data[j]
+ }
+ }
+ // Shift the greater one to the right.
+ if b-i >= 2 {
+ for j := i + 1; j < b; j++ {
+ if !(cmp(data[j], data[j-1]) < 0) {
+ break
+ }
+ data[j], data[j-1] = data[j-1], data[j]
+ }
+ }
+ }
+ return false
+}
+
+// breakPatternsCmpFunc scatters some elements around in an attempt to break some patterns
+// that might cause imbalanced partitions in quicksort.
+func breakPatternsCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) {
+ length := b - a
+ if length >= 8 {
+ random := xorshift(length)
+ modulus := nextPowerOfTwo(length)
+
+ for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ {
+ other := int(uint(random.Next()) & (modulus - 1))
+ if other >= length {
+ other -= length
+ }
+ data[idx], data[a+other] = data[a+other], data[idx]
+ }
+ }
+}
+
+// choosePivotCmpFunc chooses a pivot in data[a:b].
+//
+// [0,8): chooses a static pivot.
+// [8,shortestNinther): uses the simple median-of-three method.
+// [shortestNinther,∞): uses the Tukey ninther method.
+func choosePivotCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) (pivot int, hint sortedHint) {
+ const (
+ shortestNinther = 50
+ maxSwaps = 4 * 3
+ )
+
+ l := b - a
+
+ var (
+ swaps int
+ i = a + l/4*1
+ j = a + l/4*2
+ k = a + l/4*3
+ )
+
+ if l >= 8 {
+ if l >= shortestNinther {
+ // Tukey ninther method, the idea came from Rust's implementation.
+ i = medianAdjacentCmpFunc(data, i, &swaps, cmp)
+ j = medianAdjacentCmpFunc(data, j, &swaps, cmp)
+ k = medianAdjacentCmpFunc(data, k, &swaps, cmp)
+ }
+ // Find the median among i, j, k and stores it into j.
+ j = medianCmpFunc(data, i, j, k, &swaps, cmp)
+ }
+
+ switch swaps {
+ case 0:
+ return j, increasingHint
+ case maxSwaps:
+ return j, decreasingHint
+ default:
+ return j, unknownHint
+ }
+}
+
+// order2CmpFunc returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a.
+func order2CmpFunc[E any](data []E, a, b int, swaps *int, cmp func(a, b E) int) (int, int) {
+ if cmp(data[b], data[a]) < 0 {
+ *swaps++
+ return b, a
+ }
+ return a, b
+}
+
+// medianCmpFunc returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c.
+func medianCmpFunc[E any](data []E, a, b, c int, swaps *int, cmp func(a, b E) int) int {
+ a, b = order2CmpFunc(data, a, b, swaps, cmp)
+ b, c = order2CmpFunc(data, b, c, swaps, cmp)
+ a, b = order2CmpFunc(data, a, b, swaps, cmp)
+ return b
+}
+
+// medianAdjacentCmpFunc finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a.
+func medianAdjacentCmpFunc[E any](data []E, a int, swaps *int, cmp func(a, b E) int) int {
+ return medianCmpFunc(data, a-1, a, a+1, swaps, cmp)
+}
+
+func reverseRangeCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) {
+ i := a
+ j := b - 1
+ for i < j {
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+ }
+}
+
+func swapRangeCmpFunc[E any](data []E, a, b, n int, cmp func(a, b E) int) {
+ for i := 0; i < n; i++ {
+ data[a+i], data[b+i] = data[b+i], data[a+i]
+ }
+}
+
+func stableCmpFunc[E any](data []E, n int, cmp func(a, b E) int) {
+ blockSize := 20 // must be > 0
+ a, b := 0, blockSize
+ for b <= n {
+ insertionSortCmpFunc(data, a, b, cmp)
+ a = b
+ b += blockSize
+ }
+ insertionSortCmpFunc(data, a, n, cmp)
+
+ for blockSize < n {
+ a, b = 0, 2*blockSize
+ for b <= n {
+ symMergeCmpFunc(data, a, a+blockSize, b, cmp)
+ a = b
+ b += 2 * blockSize
+ }
+ if m := a + blockSize; m < n {
+ symMergeCmpFunc(data, a, m, n, cmp)
+ }
+ blockSize *= 2
+ }
+}
+
+// symMergeCmpFunc merges the two sorted subsequences data[a:m] and data[m:b] using
+// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum
+// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz
+// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in
+// Computer Science, pages 714-723. Springer, 2004.
+//
+// Let M = m-a and N = b-m. Wolog M < N.
+// The recursion depth is bound by ceil(log(N+M)).
+// The algorithm needs O(M*log(N/M + 1)) calls to data.Less.
+// The algorithm needs O((M+N)*log(M)) calls to data.Swap.
+//
+// The paper gives O((M+N)*log(M)) as the number of assignments assuming a
+// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation
+// in the paper carries through for Swap operations, especially as the block
+// swapping rotate uses only O(M+N) Swaps.
+//
+// symMerge assumes non-degenerate arguments: a < m && m < b.
+// Having the caller check this condition eliminates many leaf recursion calls,
+// which improves performance.
+func symMergeCmpFunc[E any](data []E, a, m, b int, cmp func(a, b E) int) {
+ // Avoid unnecessary recursions of symMerge
+ // by direct insertion of data[a] into data[m:b]
+ // if data[a:m] only contains one element.
+ if m-a == 1 {
+ // Use binary search to find the lowest index i
+ // such that data[i] >= data[a] for m <= i < b.
+ // Exit the search loop with i == b in case no such index exists.
+ i := m
+ j := b
+ for i < j {
+ h := int(uint(i+j) >> 1)
+ if cmp(data[h], data[a]) < 0 {
+ i = h + 1
+ } else {
+ j = h
+ }
+ }
+ // Swap values until data[a] reaches the position before i.
+ for k := a; k < i-1; k++ {
+ data[k], data[k+1] = data[k+1], data[k]
+ }
+ return
+ }
+
+ // Avoid unnecessary recursions of symMerge
+ // by direct insertion of data[m] into data[a:m]
+ // if data[m:b] only contains one element.
+ if b-m == 1 {
+ // Use binary search to find the lowest index i
+ // such that data[i] > data[m] for a <= i < m.
+ // Exit the search loop with i == m in case no such index exists.
+ i := a
+ j := m
+ for i < j {
+ h := int(uint(i+j) >> 1)
+ if !(cmp(data[m], data[h]) < 0) {
+ i = h + 1
+ } else {
+ j = h
+ }
+ }
+ // Swap values until data[m] reaches the position i.
+ for k := m; k > i; k-- {
+ data[k], data[k-1] = data[k-1], data[k]
+ }
+ return
+ }
+
+ mid := int(uint(a+b) >> 1)
+ n := mid + m
+ var start, r int
+ if m > mid {
+ start = n - b
+ r = mid
+ } else {
+ start = a
+ r = m
+ }
+ p := n - 1
+
+ for start < r {
+ c := int(uint(start+r) >> 1)
+ if !(cmp(data[p-c], data[c]) < 0) {
+ start = c + 1
+ } else {
+ r = c
+ }
+ }
+
+ end := n - start
+ if start < m && m < end {
+ rotateCmpFunc(data, start, m, end, cmp)
+ }
+ if a < start && start < mid {
+ symMergeCmpFunc(data, a, start, mid, cmp)
+ }
+ if mid < end && end < b {
+ symMergeCmpFunc(data, mid, end, b, cmp)
+ }
+}
+
+// rotateCmpFunc rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data:
+// Data of the form 'x u v y' is changed to 'x v u y'.
+// rotate performs at most b-a many calls to data.Swap,
+// and it assumes non-degenerate arguments: a < m && m < b.
+func rotateCmpFunc[E any](data []E, a, m, b int, cmp func(a, b E) int) {
+ i := m - a
+ j := b - m
+
+ for i != j {
+ if i > j {
+ swapRangeCmpFunc(data, m-i, m, j, cmp)
+ i -= j
+ } else {
+ swapRangeCmpFunc(data, m-i, m+j-i, i, cmp)
+ j -= i
+ }
+ }
+ // i == j
+ swapRangeCmpFunc(data, m-i, m, i, cmp)
+}
diff --git a/vendor/golang.org/x/exp/slices/zsortordered.go b/vendor/golang.org/x/exp/slices/zsortordered.go
new file mode 100644
index 0000000000..99b47c3986
--- /dev/null
+++ b/vendor/golang.org/x/exp/slices/zsortordered.go
@@ -0,0 +1,481 @@
+// Code generated by gen_sort_variants.go; DO NOT EDIT.
+
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package slices
+
+import "golang.org/x/exp/constraints"
+
+// insertionSortOrdered sorts data[a:b] using insertion sort.
+func insertionSortOrdered[E constraints.Ordered](data []E, a, b int) {
+ for i := a + 1; i < b; i++ {
+ for j := i; j > a && cmpLess(data[j], data[j-1]); j-- {
+ data[j], data[j-1] = data[j-1], data[j]
+ }
+ }
+}
+
+// siftDownOrdered implements the heap property on data[lo:hi].
+// first is an offset into the array where the root of the heap lies.
+func siftDownOrdered[E constraints.Ordered](data []E, lo, hi, first int) {
+ root := lo
+ for {
+ child := 2*root + 1
+ if child >= hi {
+ break
+ }
+ if child+1 < hi && cmpLess(data[first+child], data[first+child+1]) {
+ child++
+ }
+ if !cmpLess(data[first+root], data[first+child]) {
+ return
+ }
+ data[first+root], data[first+child] = data[first+child], data[first+root]
+ root = child
+ }
+}
+
+func heapSortOrdered[E constraints.Ordered](data []E, a, b int) {
+ first := a
+ lo := 0
+ hi := b - a
+
+ // Build heap with greatest element at top.
+ for i := (hi - 1) / 2; i >= 0; i-- {
+ siftDownOrdered(data, i, hi, first)
+ }
+
+ // Pop elements, largest first, into end of data.
+ for i := hi - 1; i >= 0; i-- {
+ data[first], data[first+i] = data[first+i], data[first]
+ siftDownOrdered(data, lo, i, first)
+ }
+}
+
+// pdqsortOrdered sorts data[a:b].
+// The algorithm based on pattern-defeating quicksort(pdqsort), but without the optimizations from BlockQuicksort.
+// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf
+// C++ implementation: https://github.com/orlp/pdqsort
+// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/
+// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort.
+func pdqsortOrdered[E constraints.Ordered](data []E, a, b, limit int) {
+ const maxInsertion = 12
+
+ var (
+ wasBalanced = true // whether the last partitioning was reasonably balanced
+ wasPartitioned = true // whether the slice was already partitioned
+ )
+
+ for {
+ length := b - a
+
+ if length <= maxInsertion {
+ insertionSortOrdered(data, a, b)
+ return
+ }
+
+ // Fall back to heapsort if too many bad choices were made.
+ if limit == 0 {
+ heapSortOrdered(data, a, b)
+ return
+ }
+
+ // If the last partitioning was imbalanced, we need to break patterns.
+ if !wasBalanced {
+ breakPatternsOrdered(data, a, b)
+ limit--
+ }
+
+ pivot, hint := choosePivotOrdered(data, a, b)
+ if hint == decreasingHint {
+ reverseRangeOrdered(data, a, b)
+ // The chosen pivot was pivot-a elements after the start of the array.
+ // After reversing it is pivot-a elements before the end of the array.
+ // The idea came from Rust's implementation.
+ pivot = (b - 1) - (pivot - a)
+ hint = increasingHint
+ }
+
+ // The slice is likely already sorted.
+ if wasBalanced && wasPartitioned && hint == increasingHint {
+ if partialInsertionSortOrdered(data, a, b) {
+ return
+ }
+ }
+
+ // Probably the slice contains many duplicate elements, partition the slice into
+ // elements equal to and elements greater than the pivot.
+ if a > 0 && !cmpLess(data[a-1], data[pivot]) {
+ mid := partitionEqualOrdered(data, a, b, pivot)
+ a = mid
+ continue
+ }
+
+ mid, alreadyPartitioned := partitionOrdered(data, a, b, pivot)
+ wasPartitioned = alreadyPartitioned
+
+ leftLen, rightLen := mid-a, b-mid
+ balanceThreshold := length / 8
+ if leftLen < rightLen {
+ wasBalanced = leftLen >= balanceThreshold
+ pdqsortOrdered(data, a, mid, limit)
+ a = mid + 1
+ } else {
+ wasBalanced = rightLen >= balanceThreshold
+ pdqsortOrdered(data, mid+1, b, limit)
+ b = mid
+ }
+ }
+}
+
+// partitionOrdered does one quicksort partition.
+// Let p = data[pivot]
+// Moves elements in data[a:b] around, so that data[i]<p and data[j]>=p for i<newpivot and j>newpivot.
+// On return, data[newpivot] = p
+func partitionOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int, alreadyPartitioned bool) {
+ data[a], data[pivot] = data[pivot], data[a]
+ i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
+
+ for i <= j && cmpLess(data[i], data[a]) {
+ i++
+ }
+ for i <= j && !cmpLess(data[j], data[a]) {
+ j--
+ }
+ if i > j {
+ data[j], data[a] = data[a], data[j]
+ return j, true
+ }
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+
+ for {
+ for i <= j && cmpLess(data[i], data[a]) {
+ i++
+ }
+ for i <= j && !cmpLess(data[j], data[a]) {
+ j--
+ }
+ if i > j {
+ break
+ }
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+ }
+ data[j], data[a] = data[a], data[j]
+ return j, false
+}
+
+// partitionEqualOrdered partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot].
+// It assumed that data[a:b] does not contain elements smaller than the data[pivot].
+func partitionEqualOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int) {
+ data[a], data[pivot] = data[pivot], data[a]
+ i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
+
+ for {
+ for i <= j && !cmpLess(data[a], data[i]) {
+ i++
+ }
+ for i <= j && cmpLess(data[a], data[j]) {
+ j--
+ }
+ if i > j {
+ break
+ }
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+ }
+ return i
+}
+
+// partialInsertionSortOrdered partially sorts a slice, returns true if the slice is sorted at the end.
+func partialInsertionSortOrdered[E constraints.Ordered](data []E, a, b int) bool {
+ const (
+ maxSteps = 5 // maximum number of adjacent out-of-order pairs that will get shifted
+ shortestShifting = 50 // don't shift any elements on short arrays
+ )
+ i := a + 1
+ for j := 0; j < maxSteps; j++ {
+ for i < b && !cmpLess(data[i], data[i-1]) {
+ i++
+ }
+
+ if i == b {
+ return true
+ }
+
+ if b-a < shortestShifting {
+ return false
+ }
+
+ data[i], data[i-1] = data[i-1], data[i]
+
+ // Shift the smaller one to the left.
+ if i-a >= 2 {
+ for j := i - 1; j >= 1; j-- {
+ if !cmpLess(data[j], data[j-1]) {
+ break
+ }
+ data[j], data[j-1] = data[j-1], data[j]
+ }
+ }
+ // Shift the greater one to the right.
+ if b-i >= 2 {
+ for j := i + 1; j < b; j++ {
+ if !cmpLess(data[j], data[j-1]) {
+ break
+ }
+ data[j], data[j-1] = data[j-1], data[j]
+ }
+ }
+ }
+ return false
+}
+
+// breakPatternsOrdered scatters some elements around in an attempt to break some patterns
+// that might cause imbalanced partitions in quicksort.
+func breakPatternsOrdered[E constraints.Ordered](data []E, a, b int) {
+ length := b - a
+ if length >= 8 {
+ random := xorshift(length)
+ modulus := nextPowerOfTwo(length)
+
+ for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ {
+ other := int(uint(random.Next()) & (modulus - 1))
+ if other >= length {
+ other -= length
+ }
+ data[idx], data[a+other] = data[a+other], data[idx]
+ }
+ }
+}
+
+// choosePivotOrdered chooses a pivot in data[a:b].
+//
+// [0,8): chooses a static pivot.
+// [8,shortestNinther): uses the simple median-of-three method.
+// [shortestNinther,∞): uses the Tukey ninther method.
+func choosePivotOrdered[E constraints.Ordered](data []E, a, b int) (pivot int, hint sortedHint) {
+ const (
+ shortestNinther = 50
+ maxSwaps = 4 * 3
+ )
+
+ l := b - a
+
+ var (
+ swaps int
+ i = a + l/4*1
+ j = a + l/4*2
+ k = a + l/4*3
+ )
+
+ if l >= 8 {
+ if l >= shortestNinther {
+ // Tukey ninther method, the idea came from Rust's implementation.
+ i = medianAdjacentOrdered(data, i, &swaps)
+ j = medianAdjacentOrdered(data, j, &swaps)
+ k = medianAdjacentOrdered(data, k, &swaps)
+ }
+ // Find the median among i, j, k and stores it into j.
+ j = medianOrdered(data, i, j, k, &swaps)
+ }
+
+ switch swaps {
+ case 0:
+ return j, increasingHint
+ case maxSwaps:
+ return j, decreasingHint
+ default:
+ return j, unknownHint
+ }
+}
+
+// order2Ordered returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a.
+func order2Ordered[E constraints.Ordered](data []E, a, b int, swaps *int) (int, int) {
+ if cmpLess(data[b], data[a]) {
+ *swaps++
+ return b, a
+ }
+ return a, b
+}
+
+// medianOrdered returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c.
+func medianOrdered[E constraints.Ordered](data []E, a, b, c int, swaps *int) int {
+ a, b = order2Ordered(data, a, b, swaps)
+ b, c = order2Ordered(data, b, c, swaps)
+ a, b = order2Ordered(data, a, b, swaps)
+ return b
+}
+
+// medianAdjacentOrdered finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a.
+func medianAdjacentOrdered[E constraints.Ordered](data []E, a int, swaps *int) int {
+ return medianOrdered(data, a-1, a, a+1, swaps)
+}
+
+func reverseRangeOrdered[E constraints.Ordered](data []E, a, b int) {
+ i := a
+ j := b - 1
+ for i < j {
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+ }
+}
+
+func swapRangeOrdered[E constraints.Ordered](data []E, a, b, n int) {
+ for i := 0; i < n; i++ {
+ data[a+i], data[b+i] = data[b+i], data[a+i]
+ }
+}
+
+func stableOrdered[E constraints.Ordered](data []E, n int) {
+ blockSize := 20 // must be > 0
+ a, b := 0, blockSize
+ for b <= n {
+ insertionSortOrdered(data, a, b)
+ a = b
+ b += blockSize
+ }
+ insertionSortOrdered(data, a, n)
+
+ for blockSize < n {
+ a, b = 0, 2*blockSize
+ for b <= n {
+ symMergeOrdered(data, a, a+blockSize, b)
+ a = b
+ b += 2 * blockSize
+ }
+ if m := a + blockSize; m < n {
+ symMergeOrdered(data, a, m, n)
+ }
+ blockSize *= 2
+ }
+}
+
+// symMergeOrdered merges the two sorted subsequences data[a:m] and data[m:b] using
+// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum
+// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz
+// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in
+// Computer Science, pages 714-723. Springer, 2004.
+//
+// Let M = m-a and N = b-m. Wolog M < N.
+// The recursion depth is bound by ceil(log(N+M)).
+// The algorithm needs O(M*log(N/M + 1)) calls to data.Less.
+// The algorithm needs O((M+N)*log(M)) calls to data.Swap.
+//
+// The paper gives O((M+N)*log(M)) as the number of assignments assuming a
+// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation
+// in the paper carries through for Swap operations, especially as the block
+// swapping rotate uses only O(M+N) Swaps.
+//
+// symMerge assumes non-degenerate arguments: a < m && m < b.
+// Having the caller check this condition eliminates many leaf recursion calls,
+// which improves performance.
+func symMergeOrdered[E constraints.Ordered](data []E, a, m, b int) {
+ // Avoid unnecessary recursions of symMerge
+ // by direct insertion of data[a] into data[m:b]
+ // if data[a:m] only contains one element.
+ if m-a == 1 {
+ // Use binary search to find the lowest index i
+ // such that data[i] >= data[a] for m <= i < b.
+ // Exit the search loop with i == b in case no such index exists.
+ i := m
+ j := b
+ for i < j {
+ h := int(uint(i+j) >> 1)
+ if cmpLess(data[h], data[a]) {
+ i = h + 1
+ } else {
+ j = h
+ }
+ }
+ // Swap values until data[a] reaches the position before i.
+ for k := a; k < i-1; k++ {
+ data[k], data[k+1] = data[k+1], data[k]
+ }
+ return
+ }
+
+ // Avoid unnecessary recursions of symMerge
+ // by direct insertion of data[m] into data[a:m]
+ // if data[m:b] only contains one element.
+ if b-m == 1 {
+ // Use binary search to find the lowest index i
+ // such that data[i] > data[m] for a <= i < m.
+ // Exit the search loop with i == m in case no such index exists.
+ i := a
+ j := m
+ for i < j {
+ h := int(uint(i+j) >> 1)
+ if !cmpLess(data[m], data[h]) {
+ i = h + 1
+ } else {
+ j = h
+ }
+ }
+ // Swap values until data[m] reaches the position i.
+ for k := m; k > i; k-- {
+ data[k], data[k-1] = data[k-1], data[k]
+ }
+ return
+ }
+
+ mid := int(uint(a+b) >> 1)
+ n := mid + m
+ var start, r int
+ if m > mid {
+ start = n - b
+ r = mid
+ } else {
+ start = a
+ r = m
+ }
+ p := n - 1
+
+ for start < r {
+ c := int(uint(start+r) >> 1)
+ if !cmpLess(data[p-c], data[c]) {
+ start = c + 1
+ } else {
+ r = c
+ }
+ }
+
+ end := n - start
+ if start < m && m < end {
+ rotateOrdered(data, start, m, end)
+ }
+ if a < start && start < mid {
+ symMergeOrdered(data, a, start, mid)
+ }
+ if mid < end && end < b {
+ symMergeOrdered(data, mid, end, b)
+ }
+}
+
+// rotateOrdered rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data:
+// Data of the form 'x u v y' is changed to 'x v u y'.
+// rotate performs at most b-a many calls to data.Swap,
+// and it assumes non-degenerate arguments: a < m && m < b.
+func rotateOrdered[E constraints.Ordered](data []E, a, m, b int) {
+ i := m - a
+ j := b - m
+
+ for i != j {
+ if i > j {
+ swapRangeOrdered(data, m-i, m, j)
+ i -= j
+ } else {
+ swapRangeOrdered(data, m-i, m+j-i, i)
+ j -= i
+ }
+ }
+ // i == j
+ swapRangeOrdered(data, m-i, m, i)
+}
diff --git a/vendor/golang.org/x/mod/modfile/rule.go b/vendor/golang.org/x/mod/modfile/rule.go
index 35fd1f534c..26acaa5f7c 100644
--- a/vendor/golang.org/x/mod/modfile/rule.go
+++ b/vendor/golang.org/x/mod/modfile/rule.go
@@ -308,6 +308,7 @@ var laxGoVersionRE = lazyregexp.New(`^v?(([1-9][0-9]*)\.(0|[1-9][0-9]*))([^0-9].
// Toolchains must be named beginning with `go1`,
// like "go1.20.3" or "go1.20.3-gccgo". As a special case, "default" is also permitted.
+// TODO(samthanawalla): Replace regex with https://pkg.go.dev/go/version#IsValid in 1.23+
var ToolchainRE = lazyregexp.New(`^default$|^go1($|\.)`)
func (f *File) add(errs *ErrorList, block *LineBlock, line *Line, verb string, args []string, fix VersionFixer, strict bool) {
@@ -384,7 +385,7 @@ func (f *File) add(errs *ErrorList, block *LineBlock, line *Line, verb string, a
errorf("toolchain directive expects exactly one argument")
return
} else if strict && !ToolchainRE.MatchString(args[0]) {
- errorf("invalid toolchain version '%s': must match format go1.23.0 or local", args[0])
+ errorf("invalid toolchain version '%s': must match format go1.23.0 or default", args[0])
return
}
f.Toolchain = &Toolchain{Syntax: line}
@@ -630,7 +631,7 @@ func (f *WorkFile) add(errs *ErrorList, line *Line, verb string, args []string,
errorf("go directive expects exactly one argument")
return
} else if !GoVersionRE.MatchString(args[0]) {
- errorf("invalid go version '%s': must match format 1.23", args[0])
+ errorf("invalid go version '%s': must match format 1.23.0", args[0])
return
}
@@ -646,7 +647,7 @@ func (f *WorkFile) add(errs *ErrorList, line *Line, verb string, args []string,
errorf("toolchain directive expects exactly one argument")
return
} else if !ToolchainRE.MatchString(args[0]) {
- errorf("invalid toolchain version '%s': must match format go1.23 or local", args[0])
+ errorf("invalid toolchain version '%s': must match format go1.23.0 or default", args[0])
return
}
diff --git a/vendor/golang.org/x/tools/go/analysis/passes/asmdecl/arches_go118.go b/vendor/golang.org/x/tools/go/analysis/passes/asmdecl/arches_go118.go
deleted file mode 100644
index d8211afdc8..0000000000
--- a/vendor/golang.org/x/tools/go/analysis/passes/asmdecl/arches_go118.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !go1.19
-// +build !go1.19
-
-package asmdecl
-
-func additionalArches() []*asmArch {
- return nil
-}
diff --git a/vendor/golang.org/x/tools/go/analysis/passes/asmdecl/arches_go119.go b/vendor/golang.org/x/tools/go/analysis/passes/asmdecl/arches_go119.go
deleted file mode 100644
index 3018383e7f..0000000000
--- a/vendor/golang.org/x/tools/go/analysis/passes/asmdecl/arches_go119.go
+++ /dev/null
@@ -1,14 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.19
-// +build go1.19
-
-package asmdecl
-
-var asmArchLoong64 = asmArch{name: "loong64", bigEndian: false, stack: "R3", lr: true}
-
-func additionalArches() []*asmArch {
- return []*asmArch{&asmArchLoong64}
-}
diff --git a/vendor/golang.org/x/tools/go/analysis/passes/asmdecl/asmdecl.go b/vendor/golang.org/x/tools/go/analysis/passes/asmdecl/asmdecl.go
index e24dac9865..f2ca95aa9e 100644
--- a/vendor/golang.org/x/tools/go/analysis/passes/asmdecl/asmdecl.go
+++ b/vendor/golang.org/x/tools/go/analysis/passes/asmdecl/asmdecl.go
@@ -96,6 +96,7 @@ var (
asmArchRISCV64 = asmArch{name: "riscv64", bigEndian: false, stack: "SP", lr: true, retRegs: []string{"X10", "F10"}}
asmArchS390X = asmArch{name: "s390x", bigEndian: true, stack: "R15", lr: true}
asmArchWasm = asmArch{name: "wasm", bigEndian: false, stack: "SP", lr: false}
+ asmArchLoong64 = asmArch{name: "loong64", bigEndian: false, stack: "R3", lr: true}
arches = []*asmArch{
&asmArch386,
@@ -111,11 +112,11 @@ var (
&asmArchRISCV64,
&asmArchS390X,
&asmArchWasm,
+ &asmArchLoong64,
}
)
func init() {
- arches = append(arches, additionalArches()...)
for _, arch := range arches {
arch.sizes = types.SizesFor("gc", arch.name)
if arch.sizes == nil {
diff --git a/vendor/golang.org/x/tools/go/analysis/passes/composite/composite.go b/vendor/golang.org/x/tools/go/analysis/passes/composite/composite.go
index 847063bb32..6b126f897d 100644
--- a/vendor/golang.org/x/tools/go/analysis/passes/composite/composite.go
+++ b/vendor/golang.org/x/tools/go/analysis/passes/composite/composite.go
@@ -15,6 +15,7 @@ import (
"golang.org/x/tools/go/analysis"
"golang.org/x/tools/go/analysis/passes/inspect"
"golang.org/x/tools/go/ast/inspector"
+ "golang.org/x/tools/internal/aliases"
"golang.org/x/tools/internal/typeparams"
)
@@ -71,7 +72,7 @@ func run(pass *analysis.Pass) (interface{}, error) {
return
}
var structuralTypes []types.Type
- switch typ := typ.(type) {
+ switch typ := aliases.Unalias(typ).(type) {
case *types.TypeParam:
terms, err := typeparams.StructuralTerms(typ)
if err != nil {
@@ -84,7 +85,8 @@ func run(pass *analysis.Pass) (interface{}, error) {
structuralTypes = append(structuralTypes, typ)
}
for _, typ := range structuralTypes {
- under := deref(typ.Underlying())
+ // TODO(adonovan): this operation is questionable.
+ under := aliases.Unalias(deref(typ.Underlying()))
strct, ok := under.(*types.Struct)
if !ok {
// skip non-struct composite literals
@@ -142,9 +144,11 @@ func run(pass *analysis.Pass) (interface{}, error) {
return nil, nil
}
+// Note: this is not the usual deref operator!
+// It strips off all Pointer constructors (and their Aliases).
func deref(typ types.Type) types.Type {
for {
- ptr, ok := typ.(*types.Pointer)
+ ptr, ok := aliases.Unalias(typ).(*types.Pointer)
if !ok {
break
}
@@ -153,18 +157,18 @@ func deref(typ types.Type) types.Type {
return typ
}
+// isLocalType reports whether typ belongs to the same package as pass.
+// TODO(adonovan): local means "internal to a function"; rename to isSamePackageType.
func isLocalType(pass *analysis.Pass, typ types.Type) bool {
- switch x := typ.(type) {
+ switch x := aliases.Unalias(typ).(type) {
case *types.Struct:
// struct literals are local types
return true
case *types.Pointer:
return isLocalType(pass, x.Elem())
- case *types.Named:
+ case interface{ Obj() *types.TypeName }: // *Named or *TypeParam (aliases were removed already)
// names in package foo are local to foo_test too
return strings.TrimSuffix(x.Obj().Pkg().Path(), "_test") == strings.TrimSuffix(pass.Pkg.Path(), "_test")
- case *types.TypeParam:
- return strings.TrimSuffix(x.Obj().Pkg().Path(), "_test") == strings.TrimSuffix(pass.Pkg.Path(), "_test")
}
return false
}
diff --git a/vendor/golang.org/x/tools/go/analysis/passes/copylock/copylock.go b/vendor/golang.org/x/tools/go/analysis/passes/copylock/copylock.go
index 6cbbc7e814..8f39159c0f 100644
--- a/vendor/golang.org/x/tools/go/analysis/passes/copylock/copylock.go
+++ b/vendor/golang.org/x/tools/go/analysis/passes/copylock/copylock.go
@@ -18,6 +18,7 @@ import (
"golang.org/x/tools/go/analysis/passes/internal/analysisutil"
"golang.org/x/tools/go/ast/astutil"
"golang.org/x/tools/go/ast/inspector"
+ "golang.org/x/tools/internal/aliases"
"golang.org/x/tools/internal/typeparams"
)
@@ -255,7 +256,7 @@ func lockPath(tpkg *types.Package, typ types.Type, seen map[types.Type]bool) typ
}
seen[typ] = true
- if tpar, ok := typ.(*types.TypeParam); ok {
+ if tpar, ok := aliases.Unalias(typ).(*types.TypeParam); ok {
terms, err := typeparams.StructuralTerms(tpar)
if err != nil {
return nil // invalid type
diff --git a/vendor/golang.org/x/tools/go/analysis/passes/deepequalerrors/deepequalerrors.go b/vendor/golang.org/x/tools/go/analysis/passes/deepequalerrors/deepequalerrors.go
index 5e17bd1ab9..95cd9a061e 100644
--- a/vendor/golang.org/x/tools/go/analysis/passes/deepequalerrors/deepequalerrors.go
+++ b/vendor/golang.org/x/tools/go/analysis/passes/deepequalerrors/deepequalerrors.go
@@ -102,8 +102,7 @@ func containsError(typ types.Type) bool {
return true
}
}
- case *types.Named,
- *aliases.Alias:
+ case *types.Named, *aliases.Alias:
return check(t.Underlying())
// We list the remaining valid type kinds for completeness.
diff --git a/vendor/golang.org/x/tools/go/analysis/passes/httpresponse/httpresponse.go b/vendor/golang.org/x/tools/go/analysis/passes/httpresponse/httpresponse.go
index c6b6c81b42..047ae07cca 100644
--- a/vendor/golang.org/x/tools/go/analysis/passes/httpresponse/httpresponse.go
+++ b/vendor/golang.org/x/tools/go/analysis/passes/httpresponse/httpresponse.go
@@ -14,6 +14,8 @@ import (
"golang.org/x/tools/go/analysis/passes/inspect"
"golang.org/x/tools/go/analysis/passes/internal/analysisutil"
"golang.org/x/tools/go/ast/inspector"
+ "golang.org/x/tools/internal/aliases"
+ "golang.org/x/tools/internal/typesinternal"
)
const Doc = `check for mistakes using HTTP responses
@@ -116,7 +118,8 @@ func isHTTPFuncOrMethodOnClient(info *types.Info, expr *ast.CallExpr) bool {
if res.Len() != 2 {
return false // the function called does not return two values.
}
- if ptr, ok := res.At(0).Type().(*types.Pointer); !ok || !analysisutil.IsNamedType(ptr.Elem(), "net/http", "Response") {
+ isPtr, named := typesinternal.ReceiverNamed(res.At(0))
+ if !isPtr || !analysisutil.IsNamedType(named, "net/http", "Response") {
return false // the first return type is not *http.Response.
}
@@ -134,7 +137,7 @@ func isHTTPFuncOrMethodOnClient(info *types.Info, expr *ast.CallExpr) bool {
if analysisutil.IsNamedType(typ, "net/http", "Client") {
return true // method on http.Client.
}
- ptr, ok := typ.(*types.Pointer)
+ ptr, ok := aliases.Unalias(typ).(*types.Pointer)
return ok && analysisutil.IsNamedType(ptr.Elem(), "net/http", "Client") // method on *http.Client.
}
diff --git a/vendor/golang.org/x/tools/go/analysis/passes/ifaceassert/parameterized.go b/vendor/golang.org/x/tools/go/analysis/passes/ifaceassert/parameterized.go
index 12507f9967..a077d44024 100644
--- a/vendor/golang.org/x/tools/go/analysis/passes/ifaceassert/parameterized.go
+++ b/vendor/golang.org/x/tools/go/analysis/passes/ifaceassert/parameterized.go
@@ -7,6 +7,7 @@ package ifaceassert
import (
"go/types"
+ "golang.org/x/tools/internal/aliases"
"golang.org/x/tools/internal/typeparams"
)
@@ -94,6 +95,10 @@ func (w *tpWalker) isParameterized(typ types.Type) (res bool) {
case *types.Chan:
return w.isParameterized(t.Elem())
+ case *aliases.Alias:
+ // TODO(adonovan): think about generic aliases.
+ return w.isParameterized(aliases.Unalias(t))
+
case *types.Named:
list := t.TypeArgs()
for i, n := 0, list.Len(); i < n; i++ {
diff --git a/vendor/golang.org/x/tools/go/analysis/passes/internal/analysisutil/util.go b/vendor/golang.org/x/tools/go/analysis/passes/internal/analysisutil/util.go
index 3f01b3b55d..89291602a5 100644
--- a/vendor/golang.org/x/tools/go/analysis/passes/internal/analysisutil/util.go
+++ b/vendor/golang.org/x/tools/go/analysis/passes/internal/analysisutil/util.go
@@ -14,6 +14,7 @@ import (
"go/types"
"os"
+ "golang.org/x/tools/internal/aliases"
"golang.org/x/tools/internal/analysisinternal"
)
@@ -115,7 +116,7 @@ func Imports(pkg *types.Package, path string) bool {
// This function avoids allocating the concatenation of "pkg.Name",
// which is important for the performance of syntax matching.
func IsNamedType(t types.Type, pkgPath string, names ...string) bool {
- n, ok := t.(*types.Named)
+ n, ok := aliases.Unalias(t).(*types.Named)
if !ok {
return false
}
diff --git a/vendor/golang.org/x/tools/go/analysis/passes/loopclosure/loopclosure.go b/vendor/golang.org/x/tools/go/analysis/passes/loopclosure/loopclosure.go
index 4724c9f3b1..fe05eda44e 100644
--- a/vendor/golang.org/x/tools/go/analysis/passes/loopclosure/loopclosure.go
+++ b/vendor/golang.org/x/tools/go/analysis/passes/loopclosure/loopclosure.go
@@ -14,6 +14,7 @@ import (
"golang.org/x/tools/go/analysis/passes/internal/analysisutil"
"golang.org/x/tools/go/ast/inspector"
"golang.org/x/tools/go/types/typeutil"
+ "golang.org/x/tools/internal/typesinternal"
"golang.org/x/tools/internal/versions"
)
@@ -54,9 +55,8 @@ func run(pass *analysis.Pass) (interface{}, error) {
switch n := n.(type) {
case *ast.File:
// Only traverse the file if its goversion is strictly before go1.22.
- goversion := versions.Lang(versions.FileVersions(pass.TypesInfo, n))
- // goversion is empty for older go versions (or the version is invalid).
- return goversion == "" || versions.Compare(goversion, "go1.22") < 0
+ goversion := versions.FileVersion(pass.TypesInfo, n)
+ return versions.Before(goversion, versions.Go1_22)
case *ast.RangeStmt:
body = n.Body
addVar(n.Key)
@@ -367,9 +367,6 @@ func isMethodCall(info *types.Info, expr ast.Expr, pkgPath, typeName, method str
// Check that the receiver is a . or
// *..
- rtype := recv.Type()
- if ptr, ok := recv.Type().(*types.Pointer); ok {
- rtype = ptr.Elem()
- }
- return analysisutil.IsNamedType(rtype, pkgPath, typeName)
+ _, named := typesinternal.ReceiverNamed(recv)
+ return analysisutil.IsNamedType(named, pkgPath, typeName)
}
diff --git a/vendor/golang.org/x/tools/go/analysis/passes/nilness/nilness.go b/vendor/golang.org/x/tools/go/analysis/passes/nilness/nilness.go
index 5e14c096ab..774f04c94a 100644
--- a/vendor/golang.org/x/tools/go/analysis/passes/nilness/nilness.go
+++ b/vendor/golang.org/x/tools/go/analysis/passes/nilness/nilness.go
@@ -52,7 +52,7 @@ func runFunc(pass *analysis.Pass, fn *ssa.Function) {
// notNil reports an error if v is provably nil.
notNil := func(stack []fact, instr ssa.Instruction, v ssa.Value, descr string) {
if nilnessOf(stack, v) == isnil {
- reportf("nilderef", instr.Pos(), "nil dereference in "+descr)
+ reportf("nilderef", instr.Pos(), descr)
}
}
@@ -77,29 +77,50 @@ func runFunc(pass *analysis.Pass, fn *ssa.Function) {
// A nil receiver may be okay for type params.
cc := instr.Common()
if !(cc.IsInvoke() && typeparams.IsTypeParam(cc.Value.Type())) {
- notNil(stack, instr, cc.Value, cc.Description())
+ notNil(stack, instr, cc.Value, "nil dereference in "+cc.Description())
}
case *ssa.FieldAddr:
- notNil(stack, instr, instr.X, "field selection")
+ notNil(stack, instr, instr.X, "nil dereference in field selection")
case *ssa.IndexAddr:
- notNil(stack, instr, instr.X, "index operation")
+ switch typeparams.CoreType(instr.X.Type()).(type) {
+ case *types.Pointer: // *array
+ notNil(stack, instr, instr.X, "nil dereference in array index operation")
+ case *types.Slice:
+ // This is not necessarily a runtime error, because
+ // it is usually dominated by a bounds check.
+ if isRangeIndex(instr) {
+ notNil(stack, instr, instr.X, "range of nil slice")
+ } else {
+ notNil(stack, instr, instr.X, "index of nil slice")
+ }
+ }
case *ssa.MapUpdate:
- notNil(stack, instr, instr.Map, "map update")
+ notNil(stack, instr, instr.Map, "nil dereference in map update")
+ case *ssa.Range:
+ // (Not a runtime error, but a likely mistake.)
+ notNil(stack, instr, instr.X, "range over nil map")
case *ssa.Slice:
// A nilcheck occurs in ptr[:] iff ptr is a pointer to an array.
- if _, ok := instr.X.Type().Underlying().(*types.Pointer); ok {
- notNil(stack, instr, instr.X, "slice operation")
+ if is[*types.Pointer](instr.X.Type().Underlying()) {
+ notNil(stack, instr, instr.X, "nil dereference in slice operation")
}
case *ssa.Store:
- notNil(stack, instr, instr.Addr, "store")
+ notNil(stack, instr, instr.Addr, "nil dereference in store")
case *ssa.TypeAssert:
if !instr.CommaOk {
- notNil(stack, instr, instr.X, "type assertion")
+ notNil(stack, instr, instr.X, "nil dereference in type assertion")
}
case *ssa.UnOp:
- if instr.Op == token.MUL { // *X
- notNil(stack, instr, instr.X, "load")
+ switch instr.Op {
+ case token.MUL: // *X
+ notNil(stack, instr, instr.X, "nil dereference in load")
+ case token.ARROW: // <-ch
+ // (Not a runtime error, but a likely mistake.)
+ notNil(stack, instr, instr.X, "receive from nil channel")
}
+ case *ssa.Send:
+ // (Not a runtime error, but a likely mistake.)
+ notNil(stack, instr, instr.Chan, "send to nil channel")
}
}
@@ -416,3 +437,39 @@ func isNillable(t types.Type) bool {
}
return false
}
+
+// isRangeIndex reports whether the instruction is a slice indexing
+// operation slice[i] within a "for range slice" loop. The operation
+// could be explicit, such as slice[i] within (or even after) the
+// loop, or it could be implicit, such as "for i, v := range slice {}".
+// (These cannot be reliably distinguished.)
+func isRangeIndex(instr *ssa.IndexAddr) bool {
+ // Here we reverse-engineer the go/ssa lowering of range-over-slice:
+ //
+ // n = len(x)
+ // jump loop
+ // loop: "rangeindex.loop"
+ // phi = φ(-1, incr) #rangeindex
+ // incr = phi + 1
+ // cond = incr < n
+ // if cond goto body else done
+ // body: "rangeindex.body"
+ // instr = &x[incr]
+ // ...
+ // done:
+ if incr, ok := instr.Index.(*ssa.BinOp); ok && incr.Op == token.ADD {
+ if b := incr.Block(); b.Comment == "rangeindex.loop" {
+ if If, ok := b.Instrs[len(b.Instrs)-1].(*ssa.If); ok {
+ if cond := If.Cond.(*ssa.BinOp); cond.X == incr && cond.Op == token.LSS {
+ if call, ok := cond.Y.(*ssa.Call); ok {
+ common := call.Common()
+ if blt, ok := common.Value.(*ssa.Builtin); ok && blt.Name() == "len" {
+ return common.Args[0] == instr.X
+ }
+ }
+ }
+ }
+ }
+ }
+ return false
+}
diff --git a/vendor/golang.org/x/tools/go/analysis/passes/printf/printf.go b/vendor/golang.org/x/tools/go/analysis/passes/printf/printf.go
index 070654f012..3235019258 100644
--- a/vendor/golang.org/x/tools/go/analysis/passes/printf/printf.go
+++ b/vendor/golang.org/x/tools/go/analysis/passes/printf/printf.go
@@ -24,6 +24,7 @@ import (
"golang.org/x/tools/go/analysis/passes/internal/analysisutil"
"golang.org/x/tools/go/ast/inspector"
"golang.org/x/tools/go/types/typeutil"
+ "golang.org/x/tools/internal/aliases"
"golang.org/x/tools/internal/typeparams"
)
@@ -959,6 +960,8 @@ func isStringer(sig *types.Signature) bool {
// It is almost always a mistake to print a function value.
func isFunctionValue(pass *analysis.Pass, e ast.Expr) bool {
if typ := pass.TypesInfo.Types[e].Type; typ != nil {
+ // Don't call Underlying: a named func type with a String method is ok.
+ // TODO(adonovan): it would be more precise to check isStringer.
_, ok := typ.(*types.Signature)
return ok
}
@@ -1010,7 +1013,7 @@ func checkPrint(pass *analysis.Pass, call *ast.CallExpr, fn *types.Func) {
// Skip checking functions with unknown type.
return
}
- if sig, ok := typ.(*types.Signature); ok {
+ if sig, ok := typ.Underlying().(*types.Signature); ok {
if !sig.Variadic() {
// Skip checking non-variadic functions.
return
@@ -1020,7 +1023,7 @@ func checkPrint(pass *analysis.Pass, call *ast.CallExpr, fn *types.Func) {
typ := params.At(firstArg).Type()
typ = typ.(*types.Slice).Elem()
- it, ok := typ.(*types.Interface)
+ it, ok := aliases.Unalias(typ).(*types.Interface)
if !ok || !it.Empty() {
// Skip variadic functions accepting non-interface{} args.
return
diff --git a/vendor/golang.org/x/tools/go/analysis/passes/printf/types.go b/vendor/golang.org/x/tools/go/analysis/passes/printf/types.go
index ab98e56998..017c8a247e 100644
--- a/vendor/golang.org/x/tools/go/analysis/passes/printf/types.go
+++ b/vendor/golang.org/x/tools/go/analysis/passes/printf/types.go
@@ -10,6 +10,7 @@ import (
"go/types"
"golang.org/x/tools/go/analysis"
+ "golang.org/x/tools/internal/aliases"
"golang.org/x/tools/internal/typeparams"
)
@@ -72,7 +73,7 @@ func (m *argMatcher) match(typ types.Type, topLevel bool) bool {
return true
}
- if typ, _ := typ.(*types.TypeParam); typ != nil {
+ if typ, _ := aliases.Unalias(typ).(*types.TypeParam); typ != nil {
// Avoid infinite recursion through type parameters.
if m.seen[typ] {
return true
@@ -275,7 +276,7 @@ func (m *argMatcher) match(typ types.Type, topLevel bool) bool {
}
func isConvertibleToString(typ types.Type) bool {
- if bt, ok := typ.(*types.Basic); ok && bt.Kind() == types.UntypedNil {
+ if bt, ok := aliases.Unalias(typ).(*types.Basic); ok && bt.Kind() == types.UntypedNil {
// We explicitly don't want untyped nil, which is
// convertible to both of the interfaces below, as it
// would just panic anyway.
diff --git a/vendor/golang.org/x/tools/go/analysis/passes/shift/shift.go b/vendor/golang.org/x/tools/go/analysis/passes/shift/shift.go
index e272df709f..d01eb1eebe 100644
--- a/vendor/golang.org/x/tools/go/analysis/passes/shift/shift.go
+++ b/vendor/golang.org/x/tools/go/analysis/passes/shift/shift.go
@@ -21,6 +21,7 @@ import (
"golang.org/x/tools/go/analysis/passes/inspect"
"golang.org/x/tools/go/analysis/passes/internal/analysisutil"
"golang.org/x/tools/go/ast/inspector"
+ "golang.org/x/tools/internal/aliases"
"golang.org/x/tools/internal/typeparams"
)
@@ -89,7 +90,8 @@ func checkLongShift(pass *analysis.Pass, node ast.Node, x, y ast.Expr) {
if v == nil {
return
}
- amt, ok := constant.Int64Val(v)
+ u := constant.ToInt(v) // either an Int or Unknown
+ amt, ok := constant.Int64Val(u)
if !ok {
return
}
@@ -98,7 +100,7 @@ func checkLongShift(pass *analysis.Pass, node ast.Node, x, y ast.Expr) {
return
}
var structuralTypes []types.Type
- switch t := t.(type) {
+ switch t := aliases.Unalias(t).(type) {
case *types.TypeParam:
terms, err := typeparams.StructuralTerms(t)
if err != nil {
diff --git a/vendor/golang.org/x/tools/go/analysis/passes/slog/slog.go b/vendor/golang.org/x/tools/go/analysis/passes/slog/slog.go
index a1323c3e66..b3c683b61c 100644
--- a/vendor/golang.org/x/tools/go/analysis/passes/slog/slog.go
+++ b/vendor/golang.org/x/tools/go/analysis/passes/slog/slog.go
@@ -20,6 +20,7 @@ import (
"golang.org/x/tools/go/analysis/passes/internal/analysisutil"
"golang.org/x/tools/go/ast/inspector"
"golang.org/x/tools/go/types/typeutil"
+ "golang.org/x/tools/internal/typesinternal"
)
//go:embed doc.go
@@ -150,14 +151,10 @@ func isAttr(t types.Type) bool {
func shortName(fn *types.Func) string {
var r string
if recv := fn.Type().(*types.Signature).Recv(); recv != nil {
- t := recv.Type()
- if pt, ok := t.(*types.Pointer); ok {
- t = pt.Elem()
- }
- if nt, ok := t.(*types.Named); ok {
- r = nt.Obj().Name()
+ if _, named := typesinternal.ReceiverNamed(recv); named != nil {
+ r = named.Obj().Name()
} else {
- r = recv.Type().String()
+ r = recv.Type().String() // anon struct/interface
}
r += "."
}
@@ -173,17 +170,12 @@ func kvFuncSkipArgs(fn *types.Func) (int, bool) {
return 0, false
}
var recvName string // by default a slog package function
- recv := fn.Type().(*types.Signature).Recv()
- if recv != nil {
- t := recv.Type()
- if pt, ok := t.(*types.Pointer); ok {
- t = pt.Elem()
- }
- if nt, ok := t.(*types.Named); !ok {
- return 0, false
- } else {
- recvName = nt.Obj().Name()
+ if recv := fn.Type().(*types.Signature).Recv(); recv != nil {
+ _, named := typesinternal.ReceiverNamed(recv)
+ if named == nil {
+ return 0, false // anon struct/interface
}
+ recvName = named.Obj().Name()
}
skip, ok := kvFuncs[recvName][fn.Name()]
return skip, ok
diff --git a/vendor/golang.org/x/tools/go/analysis/passes/stringintconv/string.go b/vendor/golang.org/x/tools/go/analysis/passes/stringintconv/string.go
index 005e2e54b7..16a4b3e551 100644
--- a/vendor/golang.org/x/tools/go/analysis/passes/stringintconv/string.go
+++ b/vendor/golang.org/x/tools/go/analysis/passes/stringintconv/string.go
@@ -60,10 +60,12 @@ func describe(typ, inType types.Type, inName string) string {
}
func typeName(typ types.Type) string {
- if v, _ := typ.(interface{ Name() string }); v != nil {
+ typ = aliases.Unalias(typ)
+ // TODO(adonovan): don't discard alias type, return its name.
+ if v, _ := typ.(*types.Basic); v != nil {
return v.Name()
}
- if v, _ := typ.(interface{ Obj() *types.TypeName }); v != nil {
+ if v, _ := typ.(interface{ Obj() *types.TypeName }); v != nil { // Named, TypeParam
return v.Obj().Name()
}
return ""
diff --git a/vendor/golang.org/x/tools/go/analysis/passes/testinggoroutine/testinggoroutine.go b/vendor/golang.org/x/tools/go/analysis/passes/testinggoroutine/testinggoroutine.go
index dc5307a15d..828f95bc86 100644
--- a/vendor/golang.org/x/tools/go/analysis/passes/testinggoroutine/testinggoroutine.go
+++ b/vendor/golang.org/x/tools/go/analysis/passes/testinggoroutine/testinggoroutine.go
@@ -17,6 +17,7 @@ import (
"golang.org/x/tools/go/ast/astutil"
"golang.org/x/tools/go/ast/inspector"
"golang.org/x/tools/go/types/typeutil"
+ "golang.org/x/tools/internal/aliases"
)
//go:embed doc.go
@@ -270,7 +271,7 @@ func forbiddenMethod(info *types.Info, call *ast.CallExpr) (*types.Var, *types.S
func formatMethod(sel *types.Selection, fn *types.Func) string {
var ptr string
rtype := sel.Recv()
- if p, ok := rtype.(*types.Pointer); ok {
+ if p, ok := aliases.Unalias(rtype).(*types.Pointer); ok {
ptr = "*"
rtype = p.Elem()
}
diff --git a/vendor/golang.org/x/tools/go/analysis/passes/testinggoroutine/util.go b/vendor/golang.org/x/tools/go/analysis/passes/testinggoroutine/util.go
index d156851db1..ad815f1901 100644
--- a/vendor/golang.org/x/tools/go/analysis/passes/testinggoroutine/util.go
+++ b/vendor/golang.org/x/tools/go/analysis/passes/testinggoroutine/util.go
@@ -30,7 +30,7 @@ func localFunctionDecls(info *types.Info, files []*ast.File) func(*types.Func) *
}
}
}
- // TODO: once we only support go1.19+, set f = f.Origin() here.
+ // TODO: set f = f.Origin() here.
return fnDecls[f]
}
}
diff --git a/vendor/golang.org/x/tools/go/analysis/passes/tests/tests.go b/vendor/golang.org/x/tools/go/analysis/passes/tests/tests.go
index 6db12f3cb9..39d0d9e429 100644
--- a/vendor/golang.org/x/tools/go/analysis/passes/tests/tests.go
+++ b/vendor/golang.org/x/tools/go/analysis/passes/tests/tests.go
@@ -252,6 +252,8 @@ func validateFuzzArgs(pass *analysis.Pass, params *types.Tuple, expr ast.Expr) b
}
func isTestingType(typ types.Type, testingType string) bool {
+ // No Unalias here: I doubt "go test" recognizes
+ // "type A = *testing.T; func Test(A) {}" as a test.
ptr, ok := typ.(*types.Pointer)
if !ok {
return false
diff --git a/vendor/golang.org/x/tools/go/analysis/passes/timeformat/timeformat.go b/vendor/golang.org/x/tools/go/analysis/passes/timeformat/timeformat.go
index eb84502bd9..4a6c6b8bc6 100644
--- a/vendor/golang.org/x/tools/go/analysis/passes/timeformat/timeformat.go
+++ b/vendor/golang.org/x/tools/go/analysis/passes/timeformat/timeformat.go
@@ -107,7 +107,7 @@ func badFormatAt(info *types.Info, e ast.Expr) int {
return -1
}
- t, ok := tv.Type.(*types.Basic)
+ t, ok := tv.Type.(*types.Basic) // sic, no unalias
if !ok || t.Info()&types.IsString == 0 {
return -1
}
diff --git a/vendor/golang.org/x/tools/go/analysis/passes/unmarshal/unmarshal.go b/vendor/golang.org/x/tools/go/analysis/passes/unmarshal/unmarshal.go
index f4e73528b4..a7889fa459 100644
--- a/vendor/golang.org/x/tools/go/analysis/passes/unmarshal/unmarshal.go
+++ b/vendor/golang.org/x/tools/go/analysis/passes/unmarshal/unmarshal.go
@@ -14,6 +14,7 @@ import (
"golang.org/x/tools/go/analysis/passes/internal/analysisutil"
"golang.org/x/tools/go/ast/inspector"
"golang.org/x/tools/go/types/typeutil"
+ "golang.org/x/tools/internal/typesinternal"
)
//go:embed doc.go
@@ -69,12 +70,8 @@ func run(pass *analysis.Pass) (interface{}, error) {
// (*"encoding/json".Decoder).Decode
// (* "encoding/gob".Decoder).Decode
// (* "encoding/xml".Decoder).Decode
- t := recv.Type()
- if ptr, ok := t.(*types.Pointer); ok {
- t = ptr.Elem()
- }
- tname := t.(*types.Named).Obj()
- if tname.Name() == "Decoder" {
+ _, named := typesinternal.ReceiverNamed(recv)
+ if tname := named.Obj(); tname.Name() == "Decoder" {
switch tname.Pkg().Path() {
case "encoding/json", "encoding/xml", "encoding/gob":
argidx = 0 // func(interface{})
diff --git a/vendor/golang.org/x/tools/go/analysis/passes/unsafeptr/unsafeptr.go b/vendor/golang.org/x/tools/go/analysis/passes/unsafeptr/unsafeptr.go
index 32e71ef979..14e4a6c1e4 100644
--- a/vendor/golang.org/x/tools/go/analysis/passes/unsafeptr/unsafeptr.go
+++ b/vendor/golang.org/x/tools/go/analysis/passes/unsafeptr/unsafeptr.go
@@ -17,6 +17,7 @@ import (
"golang.org/x/tools/go/analysis/passes/internal/analysisutil"
"golang.org/x/tools/go/ast/astutil"
"golang.org/x/tools/go/ast/inspector"
+ "golang.org/x/tools/internal/aliases"
)
//go:embed doc.go
@@ -88,7 +89,7 @@ func isSafeUintptr(info *types.Info, x ast.Expr) bool {
// by the time we get to the conversion at the end.
// For now approximate by saying that *Header is okay
// but Header is not.
- pt, ok := info.Types[x.X].Type.(*types.Pointer)
+ pt, ok := aliases.Unalias(info.Types[x.X].Type).(*types.Pointer)
if ok && isReflectHeader(pt.Elem()) {
return true
}
diff --git a/vendor/golang.org/x/tools/go/analysis/passes/unusedwrite/unusedwrite.go b/vendor/golang.org/x/tools/go/analysis/passes/unusedwrite/unusedwrite.go
index f5d0f116ca..a01cbb8f83 100644
--- a/vendor/golang.org/x/tools/go/analysis/passes/unusedwrite/unusedwrite.go
+++ b/vendor/golang.org/x/tools/go/analysis/passes/unusedwrite/unusedwrite.go
@@ -13,6 +13,7 @@ import (
"golang.org/x/tools/go/analysis/passes/buildssa"
"golang.org/x/tools/go/analysis/passes/internal/analysisutil"
"golang.org/x/tools/go/ssa"
+ "golang.org/x/tools/internal/aliases"
)
//go:embed doc.go
@@ -124,10 +125,7 @@ func isDeadStore(store *ssa.Store, obj ssa.Value, addr ssa.Instruction) bool {
// isStructOrArray returns whether the underlying type is struct or array.
func isStructOrArray(tp types.Type) bool {
- if named, ok := tp.(*types.Named); ok {
- tp = named.Underlying()
- }
- switch tp.(type) {
+ switch tp.Underlying().(type) {
case *types.Array:
return true
case *types.Struct:
@@ -145,7 +143,7 @@ func hasStructOrArrayType(v ssa.Value) bool {
// func (t T) f() { ...}
// the receiver object is of type *T:
// t0 = local T (t) *T
- if tp, ok := alloc.Type().(*types.Pointer); ok {
+ if tp, ok := aliases.Unalias(alloc.Type()).(*types.Pointer); ok {
return isStructOrArray(tp.Elem())
}
return false
@@ -159,13 +157,14 @@ func hasStructOrArrayType(v ssa.Value) bool {
//
// For example, for struct T {x int, y int), getFieldName(*T, 1) returns "y".
func getFieldName(tp types.Type, index int) string {
- if pt, ok := tp.(*types.Pointer); ok {
+ // TODO(adonovan): use
+ // stp, ok := typeparams.Deref(tp).Underlying().(*types.Struct); ok {
+ // when Deref is defined. But see CL 565456 for a better fix.
+
+ if pt, ok := aliases.Unalias(tp).(*types.Pointer); ok {
tp = pt.Elem()
}
- if named, ok := tp.(*types.Named); ok {
- tp = named.Underlying()
- }
- if stp, ok := tp.(*types.Struct); ok {
+ if stp, ok := tp.Underlying().(*types.Struct); ok {
return stp.Field(index).Name()
}
return fmt.Sprintf("%d", index)
diff --git a/vendor/golang.org/x/tools/go/cfg/builder.go b/vendor/golang.org/x/tools/go/cfg/builder.go
index dad6a444d8..ac4d63c400 100644
--- a/vendor/golang.org/x/tools/go/cfg/builder.go
+++ b/vendor/golang.org/x/tools/go/cfg/builder.go
@@ -16,8 +16,8 @@ type builder struct {
cfg *CFG
mayReturn func(*ast.CallExpr) bool
current *Block
- lblocks map[*ast.Object]*lblock // labeled blocks
- targets *targets // linked stack of branch targets
+ lblocks map[string]*lblock // labeled blocks
+ targets *targets // linked stack of branch targets
}
func (b *builder) stmt(_s ast.Stmt) {
@@ -42,7 +42,7 @@ start:
b.add(s)
if call, ok := s.X.(*ast.CallExpr); ok && !b.mayReturn(call) {
// Calls to panic, os.Exit, etc, never return.
- b.current = b.newBlock("unreachable.call")
+ b.current = b.newBlock(KindUnreachable, s)
}
case *ast.DeclStmt:
@@ -57,7 +57,7 @@ start:
}
case *ast.LabeledStmt:
- label = b.labeledBlock(s.Label)
+ label = b.labeledBlock(s.Label, s)
b.jump(label._goto)
b.current = label._goto
_s = s.Stmt
@@ -65,7 +65,7 @@ start:
case *ast.ReturnStmt:
b.add(s)
- b.current = b.newBlock("unreachable.return")
+ b.current = b.newBlock(KindUnreachable, s)
case *ast.BranchStmt:
b.branchStmt(s)
@@ -77,11 +77,11 @@ start:
if s.Init != nil {
b.stmt(s.Init)
}
- then := b.newBlock("if.then")
- done := b.newBlock("if.done")
+ then := b.newBlock(KindIfThen, s)
+ done := b.newBlock(KindIfDone, s)
_else := done
if s.Else != nil {
- _else = b.newBlock("if.else")
+ _else = b.newBlock(KindIfElse, s)
}
b.add(s.Cond)
b.ifelse(then, _else)
@@ -128,7 +128,7 @@ func (b *builder) branchStmt(s *ast.BranchStmt) {
switch s.Tok {
case token.BREAK:
if s.Label != nil {
- if lb := b.labeledBlock(s.Label); lb != nil {
+ if lb := b.labeledBlock(s.Label, nil); lb != nil {
block = lb._break
}
} else {
@@ -139,7 +139,7 @@ func (b *builder) branchStmt(s *ast.BranchStmt) {
case token.CONTINUE:
if s.Label != nil {
- if lb := b.labeledBlock(s.Label); lb != nil {
+ if lb := b.labeledBlock(s.Label, nil); lb != nil {
block = lb._continue
}
} else {
@@ -155,14 +155,14 @@ func (b *builder) branchStmt(s *ast.BranchStmt) {
case token.GOTO:
if s.Label != nil {
- block = b.labeledBlock(s.Label)._goto
+ block = b.labeledBlock(s.Label, nil)._goto
}
}
- if block == nil {
- block = b.newBlock("undefined.branch")
+ if block == nil { // ill-typed (e.g. undefined label)
+ block = b.newBlock(KindUnreachable, s)
}
b.jump(block)
- b.current = b.newBlock("unreachable.branch")
+ b.current = b.newBlock(KindUnreachable, s)
}
func (b *builder) switchStmt(s *ast.SwitchStmt, label *lblock) {
@@ -172,7 +172,7 @@ func (b *builder) switchStmt(s *ast.SwitchStmt, label *lblock) {
if s.Tag != nil {
b.add(s.Tag)
}
- done := b.newBlock("switch.done")
+ done := b.newBlock(KindSwitchDone, s)
if label != nil {
label._break = done
}
@@ -188,13 +188,13 @@ func (b *builder) switchStmt(s *ast.SwitchStmt, label *lblock) {
for i, clause := range s.Body.List {
body := fallthru
if body == nil {
- body = b.newBlock("switch.body") // first case only
+ body = b.newBlock(KindSwitchCaseBody, clause) // first case only
}
// Preallocate body block for the next case.
fallthru = done
if i+1 < ncases {
- fallthru = b.newBlock("switch.body")
+ fallthru = b.newBlock(KindSwitchCaseBody, s.Body.List[i+1])
}
cc := clause.(*ast.CaseClause)
@@ -208,7 +208,7 @@ func (b *builder) switchStmt(s *ast.SwitchStmt, label *lblock) {
var nextCond *Block
for _, cond := range cc.List {
- nextCond = b.newBlock("switch.next")
+ nextCond = b.newBlock(KindSwitchNextCase, cc)
b.add(cond) // one half of the tag==cond condition
b.ifelse(body, nextCond)
b.current = nextCond
@@ -247,7 +247,7 @@ func (b *builder) typeSwitchStmt(s *ast.TypeSwitchStmt, label *lblock) {
b.add(s.Assign)
}
- done := b.newBlock("typeswitch.done")
+ done := b.newBlock(KindSwitchDone, s)
if label != nil {
label._break = done
}
@@ -258,10 +258,10 @@ func (b *builder) typeSwitchStmt(s *ast.TypeSwitchStmt, label *lblock) {
default_ = cc
continue
}
- body := b.newBlock("typeswitch.body")
+ body := b.newBlock(KindSwitchCaseBody, cc)
var next *Block
for _, casetype := range cc.List {
- next = b.newBlock("typeswitch.next")
+ next = b.newBlock(KindSwitchNextCase, cc)
// casetype is a type, so don't call b.add(casetype).
// This block logically contains a type assertion,
// x.(casetype), but it's unclear how to represent x.
@@ -300,7 +300,7 @@ func (b *builder) selectStmt(s *ast.SelectStmt, label *lblock) {
}
}
- done := b.newBlock("select.done")
+ done := b.newBlock(KindSelectDone, s)
if label != nil {
label._break = done
}
@@ -312,8 +312,8 @@ func (b *builder) selectStmt(s *ast.SelectStmt, label *lblock) {
defaultBody = &clause.Body
continue
}
- body := b.newBlock("select.body")
- next := b.newBlock("select.next")
+ body := b.newBlock(KindSelectCaseBody, clause)
+ next := b.newBlock(KindSelectAfterCase, clause)
b.ifelse(body, next)
b.current = body
b.targets = &targets{
@@ -358,15 +358,15 @@ func (b *builder) forStmt(s *ast.ForStmt, label *lblock) {
if s.Init != nil {
b.stmt(s.Init)
}
- body := b.newBlock("for.body")
- done := b.newBlock("for.done") // target of 'break'
- loop := body // target of back-edge
+ body := b.newBlock(KindForBody, s)
+ done := b.newBlock(KindForDone, s) // target of 'break'
+ loop := body // target of back-edge
if s.Cond != nil {
- loop = b.newBlock("for.loop")
+ loop = b.newBlock(KindForLoop, s)
}
cont := loop // target of 'continue'
if s.Post != nil {
- cont = b.newBlock("for.post")
+ cont = b.newBlock(KindForPost, s)
}
if label != nil {
label._break = done
@@ -414,12 +414,12 @@ func (b *builder) rangeStmt(s *ast.RangeStmt, label *lblock) {
// jump loop
// done: (target of break)
- loop := b.newBlock("range.loop")
+ loop := b.newBlock(KindRangeLoop, s)
b.jump(loop)
b.current = loop
- body := b.newBlock("range.body")
- done := b.newBlock("range.done")
+ body := b.newBlock(KindRangeBody, s)
+ done := b.newBlock(KindRangeDone, s)
b.ifelse(body, done)
b.current = body
@@ -461,14 +461,19 @@ type lblock struct {
// labeledBlock returns the branch target associated with the
// specified label, creating it if needed.
-func (b *builder) labeledBlock(label *ast.Ident) *lblock {
- lb := b.lblocks[label.Obj]
+func (b *builder) labeledBlock(label *ast.Ident, stmt *ast.LabeledStmt) *lblock {
+ lb := b.lblocks[label.Name]
if lb == nil {
- lb = &lblock{_goto: b.newBlock(label.Name)}
+ lb = &lblock{_goto: b.newBlock(KindLabel, nil)}
if b.lblocks == nil {
- b.lblocks = make(map[*ast.Object]*lblock)
+ b.lblocks = make(map[string]*lblock)
}
- b.lblocks[label.Obj] = lb
+ b.lblocks[label.Name] = lb
+ }
+ // Fill in the label later (in case of forward goto).
+ // Stmt may be set already if labels are duplicated (ill-typed).
+ if stmt != nil && lb._goto.Stmt == nil {
+ lb._goto.Stmt = stmt
}
return lb
}
@@ -477,11 +482,12 @@ func (b *builder) labeledBlock(label *ast.Ident) *lblock {
// slice and returns it.
// It does not automatically become the current block.
// comment is an optional string for more readable debugging output.
-func (b *builder) newBlock(comment string) *Block {
+func (b *builder) newBlock(kind BlockKind, stmt ast.Stmt) *Block {
g := b.cfg
block := &Block{
- Index: int32(len(g.Blocks)),
- comment: comment,
+ Index: int32(len(g.Blocks)),
+ Kind: kind,
+ Stmt: stmt,
}
block.Succs = block.succs2[:0]
g.Blocks = append(g.Blocks, block)
diff --git a/vendor/golang.org/x/tools/go/cfg/cfg.go b/vendor/golang.org/x/tools/go/cfg/cfg.go
index e9c48d51da..01668359af 100644
--- a/vendor/golang.org/x/tools/go/cfg/cfg.go
+++ b/vendor/golang.org/x/tools/go/cfg/cfg.go
@@ -9,7 +9,10 @@
//
// The blocks of the CFG contain all the function's non-control
// statements. The CFG does not contain control statements such as If,
-// Switch, Select, and Branch, but does contain their subexpressions.
+// Switch, Select, and Branch, but does contain their subexpressions;
+// also, each block records the control statement (Block.Stmt) that
+// gave rise to it and its relationship (Block.Kind) to that statement.
+//
// For example, this source code:
//
// if x := f(); x != nil {
@@ -20,14 +23,14 @@
//
// produces this CFG:
//
-// 1: x := f()
+// 1: x := f() Body
// x != nil
// succs: 2, 3
-// 2: T()
+// 2: T() IfThen
// succs: 4
-// 3: F()
+// 3: F() IfElse
// succs: 4
-// 4:
+// 4: IfDone
//
// The CFG does contain Return statements; even implicit returns are
// materialized (at the position of the function's closing brace).
@@ -50,6 +53,7 @@ import (
//
// The entry point is Blocks[0]; there may be multiple return blocks.
type CFG struct {
+ fset *token.FileSet
Blocks []*Block // block[0] is entry; order otherwise undefined
}
@@ -64,9 +68,63 @@ type Block struct {
Succs []*Block // successor nodes in the graph
Index int32 // index within CFG.Blocks
Live bool // block is reachable from entry
+ Kind BlockKind // block kind
+ Stmt ast.Stmt // statement that gave rise to this block (see BlockKind for details)
- comment string // for debugging
- succs2 [2]*Block // underlying array for Succs
+ succs2 [2]*Block // underlying array for Succs
+}
+
+// A BlockKind identifies the purpose of a block.
+// It also determines the possible types of its Stmt field.
+type BlockKind uint8
+
+const (
+ KindInvalid BlockKind = iota // Stmt=nil
+
+ KindUnreachable // unreachable block after {Branch,Return}Stmt / no-return call ExprStmt
+ KindBody // function body BlockStmt
+ KindForBody // body of ForStmt
+ KindForDone // block after ForStmt
+ KindForLoop // head of ForStmt
+ KindForPost // post condition of ForStmt
+ KindIfDone // block after IfStmt
+ KindIfElse // else block of IfStmt
+ KindIfThen // then block of IfStmt
+ KindLabel // labeled block of BranchStmt (Stmt may be nil for dangling label)
+ KindRangeBody // body of RangeStmt
+ KindRangeDone // block after RangeStmt
+ KindRangeLoop // head of RangeStmt
+ KindSelectCaseBody // body of SelectStmt
+ KindSelectDone // block after SelectStmt
+ KindSelectAfterCase // block after a CommClause
+ KindSwitchCaseBody // body of CaseClause
+ KindSwitchDone // block after {Type.}SwitchStmt
+ KindSwitchNextCase // secondary expression of a multi-expression CaseClause
+)
+
+func (kind BlockKind) String() string {
+ return [...]string{
+ KindInvalid: "Invalid",
+ KindUnreachable: "Unreachable",
+ KindBody: "Body",
+ KindForBody: "ForBody",
+ KindForDone: "ForDone",
+ KindForLoop: "ForLoop",
+ KindForPost: "ForPost",
+ KindIfDone: "IfDone",
+ KindIfElse: "IfElse",
+ KindIfThen: "IfThen",
+ KindLabel: "Label",
+ KindRangeBody: "RangeBody",
+ KindRangeDone: "RangeDone",
+ KindRangeLoop: "RangeLoop",
+ KindSelectCaseBody: "SelectCaseBody",
+ KindSelectDone: "SelectDone",
+ KindSelectAfterCase: "SelectAfterCase",
+ KindSwitchCaseBody: "SwitchCaseBody",
+ KindSwitchDone: "SwitchDone",
+ KindSwitchNextCase: "SwitchNextCase",
+ }[kind]
}
// New returns a new control-flow graph for the specified function body,
@@ -82,7 +140,7 @@ func New(body *ast.BlockStmt, mayReturn func(*ast.CallExpr) bool) *CFG {
mayReturn: mayReturn,
cfg: new(CFG),
}
- b.current = b.newBlock("entry")
+ b.current = b.newBlock(KindBody, body)
b.stmt(body)
// Compute liveness (reachability from entry point), breadth-first.
@@ -110,7 +168,15 @@ func New(body *ast.BlockStmt, mayReturn func(*ast.CallExpr) bool) *CFG {
}
func (b *Block) String() string {
- return fmt.Sprintf("block %d (%s)", b.Index, b.comment)
+ return fmt.Sprintf("block %d (%s)", b.Index, b.comment(nil))
+}
+
+func (b *Block) comment(fset *token.FileSet) string {
+ s := b.Kind.String()
+ if fset != nil && b.Stmt != nil {
+ s = fmt.Sprintf("%s@L%d", s, fset.Position(b.Stmt.Pos()).Line)
+ }
+ return s
}
// Return returns the return statement at the end of this block if present, nil
@@ -129,7 +195,7 @@ func (b *Block) Return() (ret *ast.ReturnStmt) {
func (g *CFG) Format(fset *token.FileSet) string {
var buf bytes.Buffer
for _, b := range g.Blocks {
- fmt.Fprintf(&buf, ".%d: # %s\n", b.Index, b.comment)
+ fmt.Fprintf(&buf, ".%d: # %s\n", b.Index, b.comment(fset))
for _, n := range b.Nodes {
fmt.Fprintf(&buf, "\t%s\n", formatNode(fset, n))
}
@@ -145,6 +211,35 @@ func (g *CFG) Format(fset *token.FileSet) string {
return buf.String()
}
+// digraph emits AT&T GraphViz (dot) syntax for the CFG.
+// TODO(adonovan): publish; needs a proposal.
+func (g *CFG) digraph(fset *token.FileSet) string {
+ var buf bytes.Buffer
+ buf.WriteString("digraph CFG {\n")
+ buf.WriteString(" node [shape=box];\n")
+ for _, b := range g.Blocks {
+ // node label
+ var text bytes.Buffer
+ text.WriteString(b.comment(fset))
+ for _, n := range b.Nodes {
+ fmt.Fprintf(&text, "\n%s", formatNode(fset, n))
+ }
+
+ // node and edges
+ fmt.Fprintf(&buf, " n%d [label=%q];\n", b.Index, &text)
+ for _, succ := range b.Succs {
+ fmt.Fprintf(&buf, " n%d -> n%d;\n", b.Index, succ.Index)
+ }
+ }
+ buf.WriteString("}\n")
+ return buf.String()
+}
+
+// exposed to main.go
+func digraph(g *CFG, fset *token.FileSet) string {
+ return g.digraph(fset)
+}
+
func formatNode(fset *token.FileSet, n ast.Node) string {
var buf bytes.Buffer
format.Node(&buf, fset, n)
diff --git a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go
index 03543bd4bb..137cc8df1d 100644
--- a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go
+++ b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go
@@ -47,7 +47,7 @@ import (
func Find(importPath, srcDir string) (filename, path string) {
cmd := exec.Command("go", "list", "-json", "-export", "--", importPath)
cmd.Dir = srcDir
- out, err := cmd.CombinedOutput()
+ out, err := cmd.Output()
if err != nil {
return "", ""
}
diff --git a/vendor/golang.org/x/tools/go/internal/cgo/cgo_pkgconfig.go b/vendor/golang.org/x/tools/go/internal/cgo/cgo_pkgconfig.go
index b5bb95a63e..2455be54f6 100644
--- a/vendor/golang.org/x/tools/go/internal/cgo/cgo_pkgconfig.go
+++ b/vendor/golang.org/x/tools/go/internal/cgo/cgo_pkgconfig.go
@@ -15,12 +15,15 @@ import (
// pkgConfig runs pkg-config with the specified arguments and returns the flags it prints.
func pkgConfig(mode string, pkgs []string) (flags []string, err error) {
cmd := exec.Command("pkg-config", append([]string{mode}, pkgs...)...)
- out, err := cmd.CombinedOutput()
+ out, err := cmd.Output()
if err != nil {
s := fmt.Sprintf("%s failed: %v", strings.Join(cmd.Args, " "), err)
if len(out) > 0 {
s = fmt.Sprintf("%s: %s", s, out)
}
+ if err, ok := err.(*exec.ExitError); ok && len(err.Stderr) > 0 {
+ s = fmt.Sprintf("%s\nstderr:\n%s", s, err.Stderr)
+ }
return nil, errors.New(s)
}
if len(out) > 0 {
diff --git a/vendor/golang.org/x/tools/go/ssa/builder.go b/vendor/golang.org/x/tools/go/ssa/builder.go
index 8622dfc53a..72e906c385 100644
--- a/vendor/golang.org/x/tools/go/ssa/builder.go
+++ b/vendor/golang.org/x/tools/go/ssa/builder.go
@@ -81,16 +81,15 @@ import (
"os"
"sync"
+ "golang.org/x/tools/internal/aliases"
"golang.org/x/tools/internal/typeparams"
"golang.org/x/tools/internal/versions"
)
-type opaqueType struct {
- types.Type
- name string
-}
+type opaqueType struct{ name string }
-func (t *opaqueType) String() string { return t.name }
+func (t *opaqueType) String() string { return t.name }
+func (t *opaqueType) Underlying() types.Type { return t }
var (
varOk = newVar("ok", tBool)
@@ -103,7 +102,7 @@ var (
tInvalid = types.Typ[types.Invalid]
tString = types.Typ[types.String]
tUntypedNil = types.Typ[types.UntypedNil]
- tRangeIter = &opaqueType{nil, "iter"} // the type of all "range" iterators
+ tRangeIter = &opaqueType{"iter"} // the type of all "range" iterators
tEface = types.NewInterfaceType(nil, nil).Complete()
// SSA Value constants.
@@ -328,7 +327,7 @@ func (b *builder) builtin(fn *Function, obj *types.Builtin, args []ast.Expr, typ
}
case "new":
- return emitNew(fn, mustDeref(typ), pos, "new")
+ return emitNew(fn, typeparams.MustDeref(typ), pos, "new")
case "len", "cap":
// Special case: len or cap of an array or *array is
@@ -419,7 +418,7 @@ func (b *builder) addr(fn *Function, e ast.Expr, escaping bool) lvalue {
wantAddr := true
v := b.receiver(fn, e.X, wantAddr, escaping, sel)
index := sel.index[len(sel.index)-1]
- fld := fieldOf(mustDeref(v.Type()), index) // v is an addr.
+ fld := fieldOf(typeparams.MustDeref(v.Type()), index) // v is an addr.
// Due to the two phases of resolving AssignStmt, a panic from x.f = p()
// when x is nil is required to come after the side-effects of
@@ -468,7 +467,7 @@ func (b *builder) addr(fn *Function, e ast.Expr, escaping bool) lvalue {
v.setType(et)
return fn.emit(v)
}
- return &lazyAddress{addr: emit, t: mustDeref(et), pos: e.Lbrack, expr: e}
+ return &lazyAddress{addr: emit, t: typeparams.MustDeref(et), pos: e.Lbrack, expr: e}
case *ast.StarExpr:
return &address{addr: b.expr(fn, e.X), pos: e.Star, expr: e}
@@ -802,7 +801,7 @@ func (b *builder) expr0(fn *Function, e ast.Expr, tv types.TypeAndValue) Value {
if types.IsInterface(rt) {
// If v may be an interface type I (after instantiating),
// we must emit a check that v is non-nil.
- if recv, ok := sel.recv.(*types.TypeParam); ok {
+ if recv, ok := aliases.Unalias(sel.recv).(*types.TypeParam); ok {
// Emit a nil check if any possible instantiation of the
// type parameter is an interface type.
if typeSetOf(recv).Len() > 0 {
@@ -1253,7 +1252,7 @@ func (b *builder) compLit(fn *Function, addr Value, e *ast.CompositeLit, isZero
case *types.Array, *types.Slice:
var at *types.Array
var array Value
- switch t := t.(type) {
+ switch t := aliases.Unalias(t).(type) {
case *types.Slice:
at = types.NewArray(t.Elem(), b.arrayLen(fn, e.Elts))
array = emitNew(fn, at, e.Lbrace, "slicelit")
@@ -1748,8 +1747,7 @@ func (b *builder) forStmt(fn *Function, s *ast.ForStmt, label *lblock) {
// Use forStmtGo122 instead if it applies.
if s.Init != nil {
if assign, ok := s.Init.(*ast.AssignStmt); ok && assign.Tok == token.DEFINE {
- afterGo122 := versions.Compare(fn.goversion, "go1.21") > 0
- if afterGo122 {
+ if versions.AtLeast(fn.goversion, versions.Go1_22) {
b.forStmtGo122(fn, s, label)
return
}
@@ -2244,7 +2242,7 @@ func (b *builder) rangeStmt(fn *Function, s *ast.RangeStmt, label *lblock) {
}
}
- afterGo122 := versions.Compare(fn.goversion, "go1.21") > 0
+ afterGo122 := versions.AtLeast(fn.goversion, versions.Go1_22)
if s.Tok == token.DEFINE && !afterGo122 {
// pre-go1.22: If iteration variables are defined (:=), this
// occurs once outside the loop.
diff --git a/vendor/golang.org/x/tools/go/ssa/const.go b/vendor/golang.org/x/tools/go/ssa/const.go
index 2a6ac5882a..e0d79f5ef7 100644
--- a/vendor/golang.org/x/tools/go/ssa/const.go
+++ b/vendor/golang.org/x/tools/go/ssa/const.go
@@ -14,6 +14,7 @@ import (
"strconv"
"strings"
+ "golang.org/x/tools/internal/aliases"
"golang.org/x/tools/internal/typeparams"
)
@@ -47,7 +48,7 @@ func soleTypeKind(typ types.Type) types.BasicInfo {
state := types.IsBoolean | types.IsInteger | types.IsString
underIs(typeSetOf(typ), func(t types.Type) bool {
var c types.BasicInfo
- if t, ok := t.(*types.Basic); ok {
+ if t, ok := aliases.Unalias(t).(*types.Basic); ok {
c = t.Info()
}
if c&types.IsNumeric != 0 { // int/float/complex
@@ -113,7 +114,7 @@ func zeroString(t types.Type, from *types.Package) string {
}
case *types.Pointer, *types.Slice, *types.Interface, *types.Chan, *types.Map, *types.Signature:
return "nil"
- case *types.Named:
+ case *types.Named, *aliases.Alias:
return zeroString(t.Underlying(), from)
case *types.Array, *types.Struct:
return relType(t, from) + "{}"
diff --git a/vendor/golang.org/x/tools/go/ssa/coretype.go b/vendor/golang.org/x/tools/go/ssa/coretype.go
index 88136b4384..3a512830b1 100644
--- a/vendor/golang.org/x/tools/go/ssa/coretype.go
+++ b/vendor/golang.org/x/tools/go/ssa/coretype.go
@@ -7,6 +7,7 @@ package ssa
import (
"go/types"
+ "golang.org/x/tools/internal/aliases"
"golang.org/x/tools/internal/typeparams"
)
@@ -49,7 +50,7 @@ func typeSetOf(typ types.Type) termList {
// This is a adaptation of x/exp/typeparams.NormalTerms which x/tools cannot depend on.
var terms []*types.Term
var err error
- switch typ := typ.(type) {
+ switch typ := aliases.Unalias(typ).(type) {
case *types.TypeParam:
terms, err = typeparams.StructuralTerms(typ)
case *types.Union:
diff --git a/vendor/golang.org/x/tools/go/ssa/create.go b/vendor/golang.org/x/tools/go/ssa/create.go
index c4da35d0b0..f8f584a1a5 100644
--- a/vendor/golang.org/x/tools/go/ssa/create.go
+++ b/vendor/golang.org/x/tools/go/ssa/create.go
@@ -245,7 +245,7 @@ func (prog *Program) CreatePackage(pkg *types.Package, files []*ast.File, info *
if len(files) > 0 {
// Go source package.
for _, file := range files {
- goversion := versions.Lang(versions.FileVersions(p.info, file))
+ goversion := versions.Lang(versions.FileVersion(p.info, file))
for _, decl := range file.Decls {
membersFromDecl(p, decl, goversion)
}
@@ -259,6 +259,7 @@ func (prog *Program) CreatePackage(pkg *types.Package, files []*ast.File, info *
obj := scope.Lookup(name)
memberFromObject(p, obj, nil, "")
if obj, ok := obj.(*types.TypeName); ok {
+ // No Unalias: aliases should not duplicate methods.
if named, ok := obj.Type().(*types.Named); ok {
for i, n := 0, named.NumMethods(); i < n; i++ {
memberFromObject(p, named.Method(i), nil, "")
diff --git a/vendor/golang.org/x/tools/go/ssa/emit.go b/vendor/golang.org/x/tools/go/ssa/emit.go
index d77b4407a8..549c9114d4 100644
--- a/vendor/golang.org/x/tools/go/ssa/emit.go
+++ b/vendor/golang.org/x/tools/go/ssa/emit.go
@@ -11,6 +11,9 @@ import (
"go/ast"
"go/token"
"go/types"
+
+ "golang.org/x/tools/internal/aliases"
+ "golang.org/x/tools/internal/typeparams"
)
// emitAlloc emits to f a new Alloc instruction allocating a variable
@@ -64,7 +67,7 @@ func emitLocalVar(f *Function, v *types.Var) *Alloc {
// new temporary, and returns the value so defined.
func emitLoad(f *Function, addr Value) *UnOp {
v := &UnOp{Op: token.MUL, X: addr}
- v.setType(mustDeref(addr.Type()))
+ v.setType(typeparams.MustDeref(addr.Type()))
f.emit(v)
return v
}
@@ -182,7 +185,7 @@ func emitCompare(f *Function, op token.Token, x, y Value, pos token.Pos) Value {
// isValuePreserving returns true if a conversion from ut_src to
// ut_dst is value-preserving, i.e. just a change of type.
-// Precondition: neither argument is a named type.
+// Precondition: neither argument is a named or alias type.
func isValuePreserving(ut_src, ut_dst types.Type) bool {
// Identical underlying types?
if types.IdenticalIgnoreTags(ut_dst, ut_src) {
@@ -281,11 +284,11 @@ func emitConv(f *Function, val Value, typ types.Type) Value {
}
// Conversion from slice to array or slice to array pointer?
- if slice, ok := s.(*types.Slice); ok {
+ if slice, ok := aliases.Unalias(s).(*types.Slice); ok {
var arr *types.Array
var ptr bool
// Conversion from slice to array pointer?
- switch d := d.(type) {
+ switch d := aliases.Unalias(d).(type) {
case *types.Array:
arr = d
case *types.Pointer:
@@ -414,7 +417,7 @@ func emitTypeCoercion(f *Function, v Value, typ types.Type) Value {
// emitStore emits to f an instruction to store value val at location
// addr, applying implicit conversions as required by assignability rules.
func emitStore(f *Function, addr, val Value, pos token.Pos) *Store {
- typ := mustDeref(addr.Type())
+ typ := typeparams.MustDeref(addr.Type())
s := &Store{
Addr: addr,
Val: emitConv(f, val, typ),
diff --git a/vendor/golang.org/x/tools/go/ssa/func.go b/vendor/golang.org/x/tools/go/ssa/func.go
index 22f878d4ed..4d3e39129c 100644
--- a/vendor/golang.org/x/tools/go/ssa/func.go
+++ b/vendor/golang.org/x/tools/go/ssa/func.go
@@ -14,6 +14,8 @@ import (
"io"
"os"
"strings"
+
+ "golang.org/x/tools/internal/typeparams"
)
// Like ObjectOf, but panics instead of returning nil.
@@ -531,7 +533,7 @@ func WriteFunction(buf *bytes.Buffer, f *Function) {
if len(f.Locals) > 0 {
buf.WriteString("# Locals:\n")
for i, l := range f.Locals {
- fmt.Fprintf(buf, "# % 3d:\t%s %s\n", i, l.Name(), relType(mustDeref(l.Type()), from))
+ fmt.Fprintf(buf, "# % 3d:\t%s %s\n", i, l.Name(), relType(typeparams.MustDeref(l.Type()), from))
}
}
writeSignature(buf, from, f.Name(), f.Signature)
@@ -586,6 +588,12 @@ func WriteFunction(buf *bytes.Buffer, f *Function) {
default:
buf.WriteString(instr.String())
}
+ // -mode=S: show line numbers
+ if f.Prog.mode&LogSource != 0 {
+ if pos := instr.Pos(); pos.IsValid() {
+ fmt.Fprintf(buf, " L%d", f.Prog.Fset.Position(pos).Line)
+ }
+ }
buf.WriteString("\n")
}
}
diff --git a/vendor/golang.org/x/tools/go/ssa/lift.go b/vendor/golang.org/x/tools/go/ssa/lift.go
index da49fe9f17..8bb1949449 100644
--- a/vendor/golang.org/x/tools/go/ssa/lift.go
+++ b/vendor/golang.org/x/tools/go/ssa/lift.go
@@ -43,6 +43,8 @@ import (
"go/token"
"math/big"
"os"
+
+ "golang.org/x/tools/internal/typeparams"
)
// If true, show diagnostic information at each step of lifting.
@@ -465,7 +467,7 @@ func liftAlloc(df domFrontier, alloc *Alloc, newPhis newPhiMap, fresh *int) bool
*fresh++
phi.pos = alloc.Pos()
- phi.setType(mustDeref(alloc.Type()))
+ phi.setType(typeparams.MustDeref(alloc.Type()))
phi.block = v
if debugLifting {
fmt.Fprintf(os.Stderr, "\tplace %s = %s at block %s\n", phi.Name(), phi, v)
@@ -510,7 +512,7 @@ func replaceAll(x, y Value) {
func renamed(renaming []Value, alloc *Alloc) Value {
v := renaming[alloc.index]
if v == nil {
- v = zeroConst(mustDeref(alloc.Type()))
+ v = zeroConst(typeparams.MustDeref(alloc.Type()))
renaming[alloc.index] = v
}
return v
diff --git a/vendor/golang.org/x/tools/go/ssa/lvalue.go b/vendor/golang.org/x/tools/go/ssa/lvalue.go
index 186cfcae70..eede307eab 100644
--- a/vendor/golang.org/x/tools/go/ssa/lvalue.go
+++ b/vendor/golang.org/x/tools/go/ssa/lvalue.go
@@ -11,6 +11,8 @@ import (
"go/ast"
"go/token"
"go/types"
+
+ "golang.org/x/tools/internal/typeparams"
)
// An lvalue represents an assignable location that may appear on the
@@ -52,7 +54,7 @@ func (a *address) address(fn *Function) Value {
}
func (a *address) typ() types.Type {
- return mustDeref(a.addr.Type())
+ return typeparams.MustDeref(a.addr.Type())
}
// An element is an lvalue represented by m[k], the location of an
diff --git a/vendor/golang.org/x/tools/go/ssa/methods.go b/vendor/golang.org/x/tools/go/ssa/methods.go
index 4797b39286..5f46a18484 100644
--- a/vendor/golang.org/x/tools/go/ssa/methods.go
+++ b/vendor/golang.org/x/tools/go/ssa/methods.go
@@ -11,6 +11,7 @@ import (
"go/types"
"golang.org/x/tools/go/types/typeutil"
+ "golang.org/x/tools/internal/aliases"
"golang.org/x/tools/internal/typeparams"
)
@@ -209,6 +210,9 @@ func forEachReachable(msets *typeutil.MethodSetCache, T types.Type, f func(types
}
switch T := T.(type) {
+ case *aliases.Alias:
+ visit(aliases.Unalias(T), false)
+
case *types.Basic:
// nop
diff --git a/vendor/golang.org/x/tools/go/ssa/parameterized.go b/vendor/golang.org/x/tools/go/ssa/parameterized.go
index 84db49d392..74c541107e 100644
--- a/vendor/golang.org/x/tools/go/ssa/parameterized.go
+++ b/vendor/golang.org/x/tools/go/ssa/parameterized.go
@@ -8,6 +8,7 @@ import (
"go/types"
"sync"
+ "golang.org/x/tools/internal/aliases"
"golang.org/x/tools/internal/typeparams"
)
@@ -48,6 +49,9 @@ func (w *tpWalker) isParameterizedLocked(typ types.Type) (res bool) {
case nil, *types.Basic: // TODO(gri) should nil be handled here?
break
+ case *aliases.Alias:
+ return w.isParameterizedLocked(aliases.Unalias(t))
+
case *types.Array:
return w.isParameterizedLocked(t.Elem())
diff --git a/vendor/golang.org/x/tools/go/ssa/print.go b/vendor/golang.org/x/tools/go/ssa/print.go
index 727a735026..38d8404fdc 100644
--- a/vendor/golang.org/x/tools/go/ssa/print.go
+++ b/vendor/golang.org/x/tools/go/ssa/print.go
@@ -17,6 +17,7 @@ import (
"strings"
"golang.org/x/tools/go/types/typeutil"
+ "golang.org/x/tools/internal/typeparams"
)
// relName returns the name of v relative to i.
@@ -94,7 +95,7 @@ func (v *Alloc) String() string {
op = "new"
}
from := v.Parent().relPkg()
- return fmt.Sprintf("%s %s (%s)", op, relType(mustDeref(v.Type()), from), v.Comment)
+ return fmt.Sprintf("%s %s (%s)", op, relType(typeparams.MustDeref(v.Type()), from), v.Comment)
}
func (v *Phi) String() string {
@@ -260,7 +261,7 @@ func (v *MakeChan) String() string {
func (v *FieldAddr) String() string {
// Be robust against a bad index.
name := "?"
- if fld := fieldOf(mustDeref(v.X.Type()), v.Field); fld != nil {
+ if fld := fieldOf(typeparams.MustDeref(v.X.Type()), v.Field); fld != nil {
name = fld.Name()
}
return fmt.Sprintf("&%s.%s [#%d]", relName(v.X, v), name, v.Field)
@@ -449,7 +450,7 @@ func WritePackage(buf *bytes.Buffer, p *Package) {
case *Global:
fmt.Fprintf(buf, " var %-*s %s\n",
- maxname, name, relType(mustDeref(mem.Type()), from))
+ maxname, name, relType(typeparams.MustDeref(mem.Type()), from))
}
}
diff --git a/vendor/golang.org/x/tools/go/ssa/sanity.go b/vendor/golang.org/x/tools/go/ssa/sanity.go
index 22a3c6bc3d..13bd39fe86 100644
--- a/vendor/golang.org/x/tools/go/ssa/sanity.go
+++ b/vendor/golang.org/x/tools/go/ssa/sanity.go
@@ -349,7 +349,7 @@ func (s *sanity) checkBlock(b *BasicBlock, index int) {
// Check that "untyped" types only appear on constant operands.
if _, ok := (*op).(*Const); !ok {
- if basic, ok := (*op).Type().(*types.Basic); ok {
+ if basic, ok := (*op).Type().Underlying().(*types.Basic); ok {
if basic.Info()&types.IsUntyped != 0 {
s.errorf("operand #%d of %s is untyped: %s", i, instr, basic)
}
diff --git a/vendor/golang.org/x/tools/go/ssa/subst.go b/vendor/golang.org/x/tools/go/ssa/subst.go
index a9a6d41e81..9f2f2f3000 100644
--- a/vendor/golang.org/x/tools/go/ssa/subst.go
+++ b/vendor/golang.org/x/tools/go/ssa/subst.go
@@ -6,6 +6,8 @@ package ssa
import (
"go/types"
+
+ "golang.org/x/tools/internal/aliases"
)
// Type substituter for a fixed set of replacement types.
@@ -80,6 +82,9 @@ func (subst *subster) typ(t types.Type) (res types.Type) {
// fall through if result r will be identical to t, types.Identical(r, t).
switch t := t.(type) {
+ case *aliases.Alias:
+ return subst.typ(aliases.Unalias(t))
+
case *types.TypeParam:
r := subst.replacements[t]
assert(r != nil, "type param without replacement encountered")
@@ -466,7 +471,7 @@ func reaches(t types.Type, c map[types.Type]bool) (res bool) {
return true
}
}
- case *types.Named:
+ case *types.Named, *aliases.Alias:
return reaches(t.Underlying(), c)
default:
panic("unreachable")
diff --git a/vendor/golang.org/x/tools/go/ssa/util.go b/vendor/golang.org/x/tools/go/ssa/util.go
index 6e9f1282b1..4d65259ed9 100644
--- a/vendor/golang.org/x/tools/go/ssa/util.go
+++ b/vendor/golang.org/x/tools/go/ssa/util.go
@@ -17,7 +17,9 @@ import (
"golang.org/x/tools/go/ast/astutil"
"golang.org/x/tools/go/types/typeutil"
+ "golang.org/x/tools/internal/aliases"
"golang.org/x/tools/internal/typeparams"
+ "golang.org/x/tools/internal/typesinternal"
)
//// Sanity checking utilities
@@ -50,16 +52,19 @@ func isNonTypeParamInterface(t types.Type) bool {
// isBasic reports whether t is a basic type.
func isBasic(t types.Type) bool {
- _, ok := t.(*types.Basic)
+ _, ok := aliases.Unalias(t).(*types.Basic)
return ok
}
// isString reports whether t is exactly a string type.
+// t is assumed to be an Underlying type (not Named or Alias).
func isString(t types.Type) bool {
- return isBasic(t) && t.(*types.Basic).Info()&types.IsString != 0
+ basic, ok := t.(*types.Basic)
+ return ok && basic.Info()&types.IsString != 0
}
// isByteSlice reports whether t is of the form []~bytes.
+// t is assumed to be an Underlying type (not Named or Alias).
func isByteSlice(t types.Type) bool {
if b, ok := t.(*types.Slice); ok {
e, _ := b.Elem().Underlying().(*types.Basic)
@@ -69,6 +74,7 @@ func isByteSlice(t types.Type) bool {
}
// isRuneSlice reports whether t is of the form []~runes.
+// t is assumed to be an Underlying type (not Named or Alias).
func isRuneSlice(t types.Type) bool {
if b, ok := t.(*types.Slice); ok {
e, _ := b.Elem().Underlying().(*types.Basic)
@@ -114,15 +120,6 @@ func deref(typ types.Type) (types.Type, bool) {
return typ, false
}
-// mustDeref returns the element type of a type with a pointer core type.
-// Panics on failure.
-func mustDeref(typ types.Type) types.Type {
- if et, ok := deref(typ); ok {
- return et
- }
- panic("cannot dereference type " + typ.String())
-}
-
// recvType returns the receiver type of method obj.
func recvType(obj *types.Func) types.Type {
return obj.Type().(*types.Signature).Recv().Type()
@@ -139,8 +136,9 @@ func fieldOf(typ types.Type, index int) *types.Var {
return nil
}
-// isUntyped returns true for types that are untyped.
+// isUntyped reports whether typ is the type of an untyped constant.
func isUntyped(typ types.Type) bool {
+ // No Underlying/Unalias: untyped constant types cannot be Named or Alias.
b, ok := typ.(*types.Basic)
return ok && b.Info()&types.IsUntyped != 0
}
@@ -180,17 +178,13 @@ func makeLen(T types.Type) *Builtin {
}
}
-// receiverTypeArgs returns the type arguments to a function's receiver.
-// Returns an empty list if obj does not have a receiver or its receiver does not have type arguments.
-func receiverTypeArgs(obj *types.Func) []types.Type {
- rtype := recvType(obj)
- if rtype == nil {
- return nil
- }
- rtype, _ = deptr(rtype)
- named, ok := rtype.(*types.Named)
- if !ok {
- return nil
+// receiverTypeArgs returns the type arguments to a method's receiver.
+// Returns an empty list if the receiver does not have type arguments.
+func receiverTypeArgs(method *types.Func) []types.Type {
+ recv := method.Type().(*types.Signature).Recv()
+ _, named := typesinternal.ReceiverNamed(recv)
+ if named == nil {
+ return nil // recv is anonymous struct/interface
}
ts := named.TypeArgs()
if ts.Len() == 0 {
@@ -354,10 +348,10 @@ func (m *typeListMap) hash(ts []types.Type) uint32 {
// instantiateMethod instantiates m with targs and returns a canonical representative for this method.
func (canon *canonizer) instantiateMethod(m *types.Func, targs []types.Type, ctxt *types.Context) *types.Func {
recv := recvType(m)
- if p, ok := recv.(*types.Pointer); ok {
+ if p, ok := aliases.Unalias(recv).(*types.Pointer); ok {
recv = p.Elem()
}
- named := recv.(*types.Named)
+ named := aliases.Unalias(recv).(*types.Named)
inst, err := types.Instantiate(ctxt, named.Origin(), targs, false)
if err != nil {
panic(err)
diff --git a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go
index 11d5c8c3ad..6a57ce3b13 100644
--- a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go
+++ b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go
@@ -29,9 +29,13 @@ import (
"strconv"
"strings"
+ "golang.org/x/tools/internal/aliases"
"golang.org/x/tools/internal/typeparams"
+ "golang.org/x/tools/internal/typesinternal"
)
+// TODO(adonovan): think about generic aliases.
+
// A Path is an opaque name that identifies a types.Object
// relative to its package. Conceptually, the name consists of a
// sequence of destructuring operations applied to the package scope
@@ -223,7 +227,7 @@ func (enc *Encoder) For(obj types.Object) (Path, error) {
// Reject obviously non-viable cases.
switch obj := obj.(type) {
case *types.TypeName:
- if _, ok := obj.Type().(*types.TypeParam); !ok {
+ if _, ok := aliases.Unalias(obj.Type()).(*types.TypeParam); !ok {
// With the exception of type parameters, only package-level type names
// have a path.
return "", fmt.Errorf("no path for %v", obj)
@@ -310,7 +314,7 @@ func (enc *Encoder) For(obj types.Object) (Path, error) {
}
// Inspect declared methods of defined types.
- if T, ok := o.Type().(*types.Named); ok {
+ if T, ok := aliases.Unalias(o.Type()).(*types.Named); ok {
path = append(path, opType)
// The method index here is always with respect
// to the underlying go/types data structures,
@@ -395,13 +399,8 @@ func (enc *Encoder) concreteMethod(meth *types.Func) (Path, bool) {
return "", false
}
- recvT := meth.Type().(*types.Signature).Recv().Type()
- if ptr, ok := recvT.(*types.Pointer); ok {
- recvT = ptr.Elem()
- }
-
- named, ok := recvT.(*types.Named)
- if !ok {
+ _, named := typesinternal.ReceiverNamed(meth.Type().(*types.Signature).Recv())
+ if named == nil {
return "", false
}
@@ -444,6 +443,8 @@ func (enc *Encoder) concreteMethod(meth *types.Func) (Path, bool) {
// nil, it will be allocated as necessary.
func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName]bool) []byte {
switch T := T.(type) {
+ case *aliases.Alias:
+ return find(obj, aliases.Unalias(T), path, seen)
case *types.Basic, *types.Named:
// Named types belonging to pkg were handled already,
// so T must belong to another package. No path.
@@ -616,6 +617,7 @@ func Object(pkg *types.Package, p Path) (types.Object, error) {
// Inv: t != nil, obj == nil
+ t = aliases.Unalias(t)
switch code {
case opElem:
hasElem, ok := t.(hasElem) // Pointer, Slice, Array, Chan, Map
diff --git a/vendor/golang.org/x/tools/go/types/typeutil/ui.go b/vendor/golang.org/x/tools/go/types/typeutil/ui.go
index fa55b0a1e6..a0c1a60ac0 100644
--- a/vendor/golang.org/x/tools/go/types/typeutil/ui.go
+++ b/vendor/golang.org/x/tools/go/types/typeutil/ui.go
@@ -6,7 +6,11 @@ package typeutil
// This file defines utilities for user interfaces that display types.
-import "go/types"
+import (
+ "go/types"
+
+ "golang.org/x/tools/internal/aliases"
+)
// IntuitiveMethodSet returns the intuitive method set of a type T,
// which is the set of methods you can call on an addressable value of
@@ -24,7 +28,7 @@ import "go/types"
// The order of the result is as for types.MethodSet(T).
func IntuitiveMethodSet(T types.Type, msets *MethodSetCache) []*types.Selection {
isPointerToConcrete := func(T types.Type) bool {
- ptr, ok := T.(*types.Pointer)
+ ptr, ok := aliases.Unalias(T).(*types.Pointer)
return ok && !types.IsInterface(ptr.Elem())
}
diff --git a/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go b/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go
index b24a0fba9e..c3022a2862 100644
--- a/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go
+++ b/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go
@@ -13,6 +13,8 @@ import (
"go/token"
"go/types"
"strconv"
+
+ "golang.org/x/tools/internal/aliases"
)
func TypeErrorEndPos(fset *token.FileSet, src []byte, start token.Pos) token.Pos {
@@ -28,7 +30,10 @@ func TypeErrorEndPos(fset *token.FileSet, src []byte, start token.Pos) token.Pos
}
func ZeroValue(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr {
- under := typ
+ // TODO(adonovan): think about generics, and also generic aliases.
+ under := aliases.Unalias(typ)
+ // Don't call Underlying unconditionally: although it removed
+ // Named and Alias, it also removes TypeParam.
if n, ok := typ.(*types.Named); ok {
under = n.Underlying()
}
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go
index 2d078ccb19..39df91124a 100644
--- a/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go
+++ b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go
@@ -259,13 +259,6 @@ func Import(packages map[string]*types.Package, path, srcDir string, lookup func
return
}
-func deref(typ types.Type) types.Type {
- if p, _ := typ.(*types.Pointer); p != nil {
- return p.Elem()
- }
- return typ
-}
-
type byPath []*types.Package
func (a byPath) Len() int { return len(a) }
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go
index 2ee8c70164..638fc1d3b8 100644
--- a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go
+++ b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go
@@ -23,6 +23,7 @@ import (
"strings"
"golang.org/x/tools/go/types/objectpath"
+ "golang.org/x/tools/internal/aliases"
"golang.org/x/tools/internal/tokeninternal"
)
@@ -506,13 +507,13 @@ func (p *iexporter) doDecl(obj types.Object) {
case *types.TypeName:
t := obj.Type()
- if tparam, ok := t.(*types.TypeParam); ok {
+ if tparam, ok := aliases.Unalias(t).(*types.TypeParam); ok {
w.tag('P')
w.pos(obj.Pos())
constraint := tparam.Constraint()
if p.version >= iexportVersionGo1_18 {
implicit := false
- if iface, _ := constraint.(*types.Interface); iface != nil {
+ if iface, _ := aliases.Unalias(constraint).(*types.Interface); iface != nil {
implicit = iface.IsImplicit()
}
w.bool(implicit)
@@ -738,6 +739,8 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) {
}()
}
switch t := t.(type) {
+ // TODO(adonovan): support types.Alias.
+
case *types.Named:
if targs := t.TypeArgs(); targs.Len() > 0 {
w.startType(instanceType)
@@ -843,7 +846,7 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) {
for i := 0; i < n; i++ {
ft := t.EmbeddedType(i)
tPkg := pkg
- if named, _ := ft.(*types.Named); named != nil {
+ if named, _ := aliases.Unalias(ft).(*types.Named); named != nil {
w.pos(named.Obj().Pos())
} else {
w.pos(token.NoPos)
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go
index 9fffa9ad05..4d50eb8e58 100644
--- a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go
+++ b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go
@@ -22,6 +22,8 @@ import (
"strings"
"golang.org/x/tools/go/types/objectpath"
+ "golang.org/x/tools/internal/aliases"
+ "golang.org/x/tools/internal/typesinternal"
)
type intReader struct {
@@ -522,7 +524,7 @@ func canReuse(def *types.Named, rhs types.Type) bool {
if def == nil {
return true
}
- iface, _ := rhs.(*types.Interface)
+ iface, _ := aliases.Unalias(rhs).(*types.Interface)
if iface == nil {
return true
}
@@ -587,14 +589,13 @@ func (r *importReader) obj(name string) {
// If the receiver has any targs, set those as the
// rparams of the method (since those are the
// typeparams being used in the method sig/body).
- base := baseType(recv.Type())
- assert(base != nil)
- targs := base.TypeArgs()
+ _, recvNamed := typesinternal.ReceiverNamed(recv)
+ targs := recvNamed.TypeArgs()
var rparams []*types.TypeParam
if targs.Len() > 0 {
rparams = make([]*types.TypeParam, targs.Len())
for i := range rparams {
- rparams[i] = targs.At(i).(*types.TypeParam)
+ rparams[i] = aliases.Unalias(targs.At(i)).(*types.TypeParam)
}
}
msig := r.signature(recv, rparams, nil)
@@ -624,7 +625,7 @@ func (r *importReader) obj(name string) {
}
constraint := r.typ()
if implicit {
- iface, _ := constraint.(*types.Interface)
+ iface, _ := aliases.Unalias(constraint).(*types.Interface)
if iface == nil {
errorf("non-interface constraint marked implicit")
}
@@ -831,7 +832,7 @@ func (r *importReader) typ() types.Type {
}
func isInterface(t types.Type) bool {
- _, ok := t.(*types.Interface)
+ _, ok := aliases.Unalias(t).(*types.Interface)
return ok
}
@@ -1030,7 +1031,7 @@ func (r *importReader) tparamList() []*types.TypeParam {
for i := range xs {
// Note: the standard library importer is tolerant of nil types here,
// though would panic in SetTypeParams.
- xs[i] = r.typ().(*types.TypeParam)
+ xs[i] = aliases.Unalias(r.typ()).(*types.TypeParam)
}
return xs
}
@@ -1077,13 +1078,3 @@ func (r *importReader) byte() byte {
}
return x
}
-
-func baseType(typ types.Type) *types.Named {
- // pointer receivers are never types.Named types
- if p, _ := typ.(*types.Pointer); p != nil {
- typ = p.Elem()
- }
- // receiver base types are always (possibly generic) types.Named types
- n, _ := typ.(*types.Named)
- return n
-}
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/support_go117.go b/vendor/golang.org/x/tools/internal/gcimporter/support_go117.go
deleted file mode 100644
index d892273efb..0000000000
--- a/vendor/golang.org/x/tools/internal/gcimporter/support_go117.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !go1.18
-// +build !go1.18
-
-package gcimporter
-
-import "go/types"
-
-const iexportVersion = iexportVersionGo1_11
-
-func additionalPredeclared() []types.Type {
- return nil
-}
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/support_go118.go b/vendor/golang.org/x/tools/internal/gcimporter/support_go118.go
index edbe6ea704..0cd3b91b65 100644
--- a/vendor/golang.org/x/tools/internal/gcimporter/support_go118.go
+++ b/vendor/golang.org/x/tools/internal/gcimporter/support_go118.go
@@ -2,9 +2,6 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build go1.18
-// +build go1.18
-
package gcimporter
import "go/types"
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/unified_no.go b/vendor/golang.org/x/tools/internal/gcimporter/unified_no.go
index 286bf44548..38b624cada 100644
--- a/vendor/golang.org/x/tools/internal/gcimporter/unified_no.go
+++ b/vendor/golang.org/x/tools/internal/gcimporter/unified_no.go
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build !(go1.18 && goexperiment.unified)
-// +build !go1.18 !goexperiment.unified
+//go:build !goexperiment.unified
+// +build !goexperiment.unified
package gcimporter
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go b/vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go
index b5d69ffbe6..b5118d0b3a 100644
--- a/vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go
+++ b/vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build go1.18 && goexperiment.unified
-// +build go1.18,goexperiment.unified
+//go:build goexperiment.unified
+// +build goexperiment.unified
package gcimporter
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/ureader_no.go b/vendor/golang.org/x/tools/internal/gcimporter/ureader_no.go
deleted file mode 100644
index 8eb20729c2..0000000000
--- a/vendor/golang.org/x/tools/internal/gcimporter/ureader_no.go
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !go1.18
-// +build !go1.18
-
-package gcimporter
-
-import (
- "fmt"
- "go/token"
- "go/types"
-)
-
-func UImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) {
- err = fmt.Errorf("go/tools compiled with a Go version earlier than 1.18 cannot read unified IR export data")
- return
-}
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go
index b977435f62..f4edc46ab7 100644
--- a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go
+++ b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go
@@ -4,9 +4,6 @@
// Derived from go/internal/gcimporter/ureader.go
-//go:build go1.18
-// +build go1.18
-
package gcimporter
import (
@@ -16,6 +13,7 @@ import (
"sort"
"strings"
+ "golang.org/x/tools/internal/aliases"
"golang.org/x/tools/internal/pkgbits"
)
@@ -553,7 +551,7 @@ func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) {
// If the underlying type is an interface, we need to
// duplicate its methods so we can replace the receiver
// parameter's type (#49906).
- if iface, ok := underlying.(*types.Interface); ok && iface.NumExplicitMethods() != 0 {
+ if iface, ok := aliases.Unalias(underlying).(*types.Interface); ok && iface.NumExplicitMethods() != 0 {
methods := make([]*types.Func, iface.NumExplicitMethods())
for i := range methods {
fn := iface.ExplicitMethod(i)
diff --git a/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go b/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go
index 7e638ec24f..ff9437a36c 100644
--- a/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go
+++ b/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go
@@ -34,30 +34,16 @@ func GetLines(file *token.File) []int {
lines []int
_ []struct{}
}
- type tokenFile118 struct {
- _ *token.FileSet // deleted in go1.19
- tokenFile119
- }
-
- type uP = unsafe.Pointer
- switch unsafe.Sizeof(*file) {
- case unsafe.Sizeof(tokenFile118{}):
- var ptr *tokenFile118
- *(*uP)(uP(&ptr)) = uP(file)
- ptr.mu.Lock()
- defer ptr.mu.Unlock()
- return ptr.lines
- case unsafe.Sizeof(tokenFile119{}):
- var ptr *tokenFile119
- *(*uP)(uP(&ptr)) = uP(file)
- ptr.mu.Lock()
- defer ptr.mu.Unlock()
- return ptr.lines
-
- default:
+ if unsafe.Sizeof(*file) != unsafe.Sizeof(tokenFile119{}) {
panic("unexpected token.File size")
}
+ var ptr *tokenFile119
+ type uP = unsafe.Pointer
+ *(*uP)(uP(&ptr)) = uP(file)
+ ptr.mu.Lock()
+ defer ptr.mu.Unlock()
+ return ptr.lines
}
// AddExistingFiles adds the specified files to the FileSet if they
diff --git a/vendor/golang.org/x/tools/internal/typeparams/common.go b/vendor/golang.org/x/tools/internal/typeparams/common.go
index cdab988531..8c3a42dc31 100644
--- a/vendor/golang.org/x/tools/internal/typeparams/common.go
+++ b/vendor/golang.org/x/tools/internal/typeparams/common.go
@@ -2,20 +2,10 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// Package typeparams contains common utilities for writing tools that interact
-// with generic Go code, as introduced with Go 1.18.
-//
-// Many of the types and functions in this package are proxies for the new APIs
-// introduced in the standard library with Go 1.18. For example, the
-// typeparams.Union type is an alias for go/types.Union, and the ForTypeSpec
-// function returns the value of the go/ast.TypeSpec.TypeParams field. At Go
-// versions older than 1.18 these helpers are implemented as stubs, allowing
-// users of this package to write code that handles generic constructs inline,
-// even if the Go version being used to compile does not support generics.
-//
-// Additionally, this package contains common utilities for working with the
-// new generic constructs, to supplement the standard library APIs. Notably,
-// the StructuralTerms API computes a minimal representation of the structural
+// Package typeparams contains common utilities for writing tools that
+// interact with generic Go code, as introduced with Go 1.18. It
+// supplements the standard library APIs. Notably, the StructuralTerms
+// API computes a minimal representation of the structural
// restrictions on a type parameter.
//
// An external version of these APIs is available in the
@@ -27,6 +17,9 @@ import (
"go/ast"
"go/token"
"go/types"
+
+ "golang.org/x/tools/internal/aliases"
+ "golang.org/x/tools/internal/typesinternal"
)
// UnpackIndexExpr extracts data from AST nodes that represent index
@@ -72,9 +65,9 @@ func PackIndexExpr(x ast.Expr, lbrack token.Pos, indices []ast.Expr, rbrack toke
}
}
-// IsTypeParam reports whether t is a type parameter.
+// IsTypeParam reports whether t is a type parameter (or an alias of one).
func IsTypeParam(t types.Type) bool {
- _, ok := t.(*types.TypeParam)
+ _, ok := aliases.Unalias(t).(*types.TypeParam)
return ok
}
@@ -90,13 +83,8 @@ func OriginMethod(fn *types.Func) *types.Func {
if recv == nil {
return fn
}
- base := recv.Type()
- p, isPtr := base.(*types.Pointer)
- if isPtr {
- base = p.Elem()
- }
- named, isNamed := base.(*types.Named)
- if !isNamed {
+ _, named := typesinternal.ReceiverNamed(recv)
+ if named == nil {
// Receiver is a *types.Interface.
return fn
}
@@ -158,6 +146,9 @@ func OriginMethod(fn *types.Func) *types.Func {
// In this case, GenericAssignableTo reports that instantiations of Container
// are assignable to the corresponding instantiation of Interface.
func GenericAssignableTo(ctxt *types.Context, V, T types.Type) bool {
+ V = aliases.Unalias(V)
+ T = aliases.Unalias(T)
+
// If V and T are not both named, or do not have matching non-empty type
// parameter lists, fall back on types.AssignableTo.
diff --git a/vendor/golang.org/x/tools/internal/typeparams/coretype.go b/vendor/golang.org/x/tools/internal/typeparams/coretype.go
index 7ea8840eab..e66e9d0f48 100644
--- a/vendor/golang.org/x/tools/internal/typeparams/coretype.go
+++ b/vendor/golang.org/x/tools/internal/typeparams/coretype.go
@@ -5,7 +5,10 @@
package typeparams
import (
+ "fmt"
"go/types"
+
+ "golang.org/x/tools/internal/aliases"
)
// CoreType returns the core type of T or nil if T does not have a core type.
@@ -109,7 +112,7 @@ func CoreType(T types.Type) types.Type {
// _NormalTerms makes no guarantees about the order of terms, except that it
// is deterministic.
func _NormalTerms(typ types.Type) ([]*types.Term, error) {
- switch typ := typ.(type) {
+ switch typ := aliases.Unalias(typ).(type) {
case *types.TypeParam:
return StructuralTerms(typ)
case *types.Union:
@@ -120,3 +123,15 @@ func _NormalTerms(typ types.Type) ([]*types.Term, error) {
return []*types.Term{types.NewTerm(false, typ)}, nil
}
}
+
+// MustDeref returns the type of the variable pointed to by t.
+// It panics if t's core type is not a pointer.
+//
+// TODO(adonovan): ideally this would live in typesinternal, but that
+// creates an import cycle. Move there when we melt this package down.
+func MustDeref(t types.Type) types.Type {
+ if ptr, ok := CoreType(t).(*types.Pointer); ok {
+ return ptr.Elem()
+ }
+ panic(fmt.Sprintf("%v is not a pointer", t))
+}
diff --git a/vendor/golang.org/x/tools/internal/typesinternal/recv.go b/vendor/golang.org/x/tools/internal/typesinternal/recv.go
new file mode 100644
index 0000000000..fea7c8b75e
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/typesinternal/recv.go
@@ -0,0 +1,43 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typesinternal
+
+import (
+ "go/types"
+
+ "golang.org/x/tools/internal/aliases"
+)
+
+// ReceiverNamed returns the named type (if any) associated with the
+// type of recv, which may be of the form N or *N, or aliases thereof.
+// It also reports whether a Pointer was present.
+func ReceiverNamed(recv *types.Var) (isPtr bool, named *types.Named) {
+ t := recv.Type()
+ if ptr, ok := aliases.Unalias(t).(*types.Pointer); ok {
+ isPtr = true
+ t = ptr.Elem()
+ }
+ named, _ = aliases.Unalias(t).(*types.Named)
+ return
+}
+
+// Unpointer returns T given *T or an alias thereof.
+// For all other types it is the identity function.
+// It does not look at underlying types.
+// The result may be an alias.
+//
+// Use this function to strip off the optional pointer on a receiver
+// in a field or method selection, without losing the named type
+// (which is needed to compute the method set).
+//
+// See also [typeparams.MustDeref], which removes one level of
+// indirection from the type, regardless of named types (analogous to
+// a LOAD instruction).
+func Unpointer(t types.Type) types.Type {
+ if ptr, ok := aliases.Unalias(t).(*types.Pointer); ok {
+ return ptr.Elem()
+ }
+ return t
+}
diff --git a/vendor/golang.org/x/tools/internal/typesinternal/types_118.go b/vendor/golang.org/x/tools/internal/typesinternal/types_118.go
index a42b072a67..ef7ea290c0 100644
--- a/vendor/golang.org/x/tools/internal/typesinternal/types_118.go
+++ b/vendor/golang.org/x/tools/internal/typesinternal/types_118.go
@@ -2,9 +2,6 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build go1.18
-// +build go1.18
-
package typesinternal
import (
diff --git a/vendor/golang.org/x/tools/internal/versions/features.go b/vendor/golang.org/x/tools/internal/versions/features.go
new file mode 100644
index 0000000000..b53f178616
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/versions/features.go
@@ -0,0 +1,43 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package versions
+
+// This file contains predicates for working with file versions to
+// decide when a tool should consider a language feature enabled.
+
+// GoVersions that features in x/tools can be gated to.
+const (
+ Go1_18 = "go1.18"
+ Go1_19 = "go1.19"
+ Go1_20 = "go1.20"
+ Go1_21 = "go1.21"
+ Go1_22 = "go1.22"
+)
+
+// Future is an invalid unknown Go version sometime in the future.
+// Do not use directly with Compare.
+const Future = ""
+
+// AtLeast reports whether the file version v comes after a Go release.
+//
+// Use this predicate to enable a behavior once a certain Go release
+// has happened (and stays enabled in the future).
+func AtLeast(v, release string) bool {
+ if v == Future {
+ return true // an unknown future version is always after y.
+ }
+ return Compare(Lang(v), Lang(release)) >= 0
+}
+
+// Before reports whether the file version v is strictly before a Go release.
+//
+// Use this predicate to disable a behavior once a certain Go release
+// has happened (and stays enabled in the future).
+func Before(v, release string) bool {
+ if v == Future {
+ return false // an unknown future version happens after y.
+ }
+ return Compare(Lang(v), Lang(release)) < 0
+}
diff --git a/vendor/golang.org/x/tools/internal/versions/toolchain.go b/vendor/golang.org/x/tools/internal/versions/toolchain.go
new file mode 100644
index 0000000000..377bf7a53b
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/versions/toolchain.go
@@ -0,0 +1,14 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package versions
+
+// toolchain is maximum version (<1.22) that the go toolchain used
+// to build the current tool is known to support.
+//
+// When a tool is built with >=1.22, the value of toolchain is unused.
+//
+// x/tools does not support building with go <1.18. So we take this
+// as the minimum possible maximum.
+var toolchain string = Go1_18
diff --git a/vendor/golang.org/x/tools/internal/versions/toolchain_go119.go b/vendor/golang.org/x/tools/internal/versions/toolchain_go119.go
new file mode 100644
index 0000000000..f65beed9d8
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/versions/toolchain_go119.go
@@ -0,0 +1,14 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.19
+// +build go1.19
+
+package versions
+
+func init() {
+ if Compare(toolchain, Go1_19) < 0 {
+ toolchain = Go1_19
+ }
+}
diff --git a/vendor/golang.org/x/tools/internal/versions/toolchain_go120.go b/vendor/golang.org/x/tools/internal/versions/toolchain_go120.go
new file mode 100644
index 0000000000..1a9efa126c
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/versions/toolchain_go120.go
@@ -0,0 +1,14 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.20
+// +build go1.20
+
+package versions
+
+func init() {
+ if Compare(toolchain, Go1_20) < 0 {
+ toolchain = Go1_20
+ }
+}
diff --git a/vendor/golang.org/x/tools/internal/versions/toolchain_go121.go b/vendor/golang.org/x/tools/internal/versions/toolchain_go121.go
new file mode 100644
index 0000000000..b7ef216dfe
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/versions/toolchain_go121.go
@@ -0,0 +1,14 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.21
+// +build go1.21
+
+package versions
+
+func init() {
+ if Compare(toolchain, Go1_21) < 0 {
+ toolchain = Go1_21
+ }
+}
diff --git a/vendor/golang.org/x/tools/internal/versions/types_go121.go b/vendor/golang.org/x/tools/internal/versions/types_go121.go
index a7b79207ae..b4345d3349 100644
--- a/vendor/golang.org/x/tools/internal/versions/types_go121.go
+++ b/vendor/golang.org/x/tools/internal/versions/types_go121.go
@@ -12,9 +12,19 @@ import (
"go/types"
)
-// FileVersions always reports the a file's Go version as the
-// zero version at this Go version.
-func FileVersions(info *types.Info, file *ast.File) string { return "" }
+// FileVersion returns a language version (<=1.21) derived from runtime.Version()
+// or an unknown future version.
+func FileVersion(info *types.Info, file *ast.File) string {
+ // In x/tools built with Go <= 1.21, we do not have Info.FileVersions
+ // available. We use a go version derived from the toolchain used to
+ // compile the tool by default.
+ // This will be <= go1.21. We take this as the maximum version that
+ // this tool can support.
+ //
+ // There are no features currently in x/tools that need to tell fine grained
+ // differences for versions <1.22.
+ return toolchain
+}
-// InitFileVersions is a noop at this Go version.
+// InitFileVersions is a noop when compiled with this Go version.
func InitFileVersions(*types.Info) {}
diff --git a/vendor/golang.org/x/tools/internal/versions/types_go122.go b/vendor/golang.org/x/tools/internal/versions/types_go122.go
index 7b9ba89a82..e8180632a5 100644
--- a/vendor/golang.org/x/tools/internal/versions/types_go122.go
+++ b/vendor/golang.org/x/tools/internal/versions/types_go122.go
@@ -12,10 +12,27 @@ import (
"go/types"
)
-// FileVersions maps a file to the file's semantic Go version.
-// The reported version is the zero version if a version cannot be determined.
-func FileVersions(info *types.Info, file *ast.File) string {
- return info.FileVersions[file]
+// FileVersions returns a file's Go version.
+// The reported version is an unknown Future version if a
+// version cannot be determined.
+func FileVersion(info *types.Info, file *ast.File) string {
+ // In tools built with Go >= 1.22, the Go version of a file
+ // follow a cascades of sources:
+ // 1) types.Info.FileVersion, which follows the cascade:
+ // 1.a) file version (ast.File.GoVersion),
+ // 1.b) the package version (types.Config.GoVersion), or
+ // 2) is some unknown Future version.
+ //
+ // File versions require a valid package version to be provided to types
+ // in Config.GoVersion. Config.GoVersion is either from the package's module
+ // or the toolchain (go run). This value should be provided by go/packages
+ // or unitchecker.Config.GoVersion.
+ if v := info.FileVersions[file]; IsValid(v) {
+ return v
+ }
+ // Note: we could instead return runtime.Version() [if valid].
+ // This would act as a max version on what a tool can support.
+ return Future
}
// InitFileVersions initializes info to record Go versions for Go files.
diff --git a/vendor/golang.org/x/tools/internal/versions/versions.go b/vendor/golang.org/x/tools/internal/versions/versions.go
index e16f6c33a5..8d1f7453db 100644
--- a/vendor/golang.org/x/tools/internal/versions/versions.go
+++ b/vendor/golang.org/x/tools/internal/versions/versions.go
@@ -4,6 +4,10 @@
package versions
+import (
+ "strings"
+)
+
// Note: If we use build tags to use go/versions when go >=1.22,
// we run into go.dev/issue/53737. Under some operations users would see an
// import of "go/versions" even if they would not compile the file.
@@ -45,6 +49,7 @@ func IsValid(x string) bool { return isValid(stripGo(x)) }
// stripGo converts from a "go1.21" version to a "1.21" version.
// If v does not start with "go", stripGo returns the empty string (a known invalid version).
func stripGo(v string) string {
+ v, _, _ = strings.Cut(v, "-") // strip -bigcorp suffix.
if len(v) < 2 || v[:2] != "go" {
return ""
}
diff --git a/vendor/honnef.co/go/tools/go/ir/UPSTREAM b/vendor/honnef.co/go/tools/go/ir/UPSTREAM
index 757ebfd599..e92b016b39 100644
--- a/vendor/honnef.co/go/tools/go/ir/UPSTREAM
+++ b/vendor/honnef.co/go/tools/go/ir/UPSTREAM
@@ -5,5 +5,5 @@ The changes are too many to list here, and it is best to consider this package i
Upstream changes still get applied when they address bugs in portions of code we have inherited.
The last upstream commit we've looked at was:
-915f6209478fe61eb90dbe155a8a1c58655b931f
+e854e0228e2ef1cc6e42bbfde1951925096a1272
diff --git a/vendor/honnef.co/go/tools/go/ir/builder.go b/vendor/honnef.co/go/tools/go/ir/builder.go
index 1a77ed0429..82ca94ba14 100644
--- a/vendor/honnef.co/go/tools/go/ir/builder.go
+++ b/vendor/honnef.co/go/tools/go/ir/builder.go
@@ -353,11 +353,16 @@ func (b *builder) addr(fn *Function, e ast.Expr, escaping bool) (RET lvalue) {
}
wantAddr := true
v := b.receiver(fn, e.X, wantAddr, escaping, sel, e)
- last := len(sel.Index()) - 1
- return &address{
- addr: emitFieldSelection(fn, v, sel.Index()[last], true, e.Sel),
- expr: e.Sel,
+ index := sel.Index()[len(sel.Index())-1]
+ vut := typeutil.CoreType(deref(v.Type())).Underlying().(*types.Struct)
+ fld := vut.Field(index)
+ // Due to the two phases of resolving AssignStmt, a panic from x.f = p()
+ // when x is nil is required to come after the side-effects of
+ // evaluating x and p().
+ emit := func(fn *Function) Value {
+ return emitFieldSelection(fn, v, index, true, e.Sel)
}
+ return &lazyAddress{addr: emit, t: fld.Type(), expr: e.Sel}
case *ast.IndexExpr:
var x Value
@@ -411,12 +416,19 @@ func (b *builder) addr(fn *Function, e ast.Expr, escaping bool) (RET lvalue) {
panic("unexpected container type in IndexExpr: " + t.String())
}
- v := &IndexAddr{
- X: x,
- Index: b.expr(fn, e.Index),
+ // Due to the two phases of resolving AssignStmt, a panic from x[i] = p()
+ // when x is nil or i is out-of-bounds is required to come after the
+ // side-effects of evaluating x, i and p().
+ index := b.expr(fn, e.Index)
+ emit := func(fn *Function) Value {
+ v := &IndexAddr{
+ X: x,
+ Index: index,
+ }
+ v.setType(et)
+ return fn.emit(v, e)
}
- v.setType(et)
- return &address{addr: fn.emit(v, e), expr: e}
+ return &lazyAddress{addr: emit, t: deref(et), expr: e}
case *ast.StarExpr:
return &address{addr: b.expr(fn, e.X), expr: e}
@@ -680,12 +692,12 @@ func (b *builder) expr0(fn *Function, e ast.Expr, tv types.TypeAndValue) Value {
}
var low, high, max Value
- if e.High != nil {
- high = b.expr(fn, e.High)
- }
if e.Low != nil {
low = b.expr(fn, e.Low)
}
+ if e.High != nil {
+ high = b.expr(fn, e.High)
+ }
if e.Slice3 {
max = b.expr(fn, e.Max)
}
@@ -1027,8 +1039,7 @@ func (b *builder) setCall(fn *Function, e *ast.CallExpr, c *CallCommon) {
// assignOp emits to fn code to perform loc = val.
func (b *builder) assignOp(fn *Function, loc lvalue, val Value, op token.Token, source ast.Node) {
- oldv := loc.load(fn, source)
- loc.store(fn, emitArith(fn, op, oldv, emitConv(fn, val, oldv.Type(), source), loc.typ(), source), source)
+ loc.store(fn, emitArith(fn, op, loc.load(fn, source), val, loc.typ(), source), source)
}
// localValueSpec emits to fn code to define all of the vars in the
diff --git a/vendor/honnef.co/go/tools/go/ir/lift.go b/vendor/honnef.co/go/tools/go/ir/lift.go
index 8ac8330dc2..1a4cd3026d 100644
--- a/vendor/honnef.co/go/tools/go/ir/lift.go
+++ b/vendor/honnef.co/go/tools/go/ir/lift.go
@@ -970,7 +970,24 @@ func liftable(alloc *Alloc, instructions BlockMap[liftInstructions]) bool {
for i := range blocks {
// Update firstUnliftable to be one after lastLiftable. We do this to include the unliftable's preceding
// DebugRefs in the renaming.
- blocks[i].firstUnliftable = blocks[i].lastLiftable + 1
+ if blocks[i].lastLiftable == -1 && !blocks[i].storeInPreds {
+ // There are no liftable instructions (for this alloc) in this block. Set firstUnliftable to the
+ // first non-head instruction to avoid inserting the store before phi instructions, which would
+ // fail validation.
+ first := -1
+ instrLoop:
+ for i, instr := range fn.Blocks[i].Instrs {
+ switch instr.(type) {
+ case *Phi, *Sigma:
+ default:
+ first = i
+ break instrLoop
+ }
+ }
+ blocks[i].firstUnliftable = first
+ } else {
+ blocks[i].firstUnliftable = blocks[i].lastLiftable + 1
+ }
}
// If a block is reachable by a (partially) unliftable block, then the entirety of the block is unliftable. In that
diff --git a/vendor/honnef.co/go/tools/go/ir/lvalue.go b/vendor/honnef.co/go/tools/go/ir/lvalue.go
index 119eed6c3b..86eb4a5d12 100644
--- a/vendor/honnef.co/go/tools/go/ir/lvalue.go
+++ b/vendor/honnef.co/go/tools/go/ir/lvalue.go
@@ -114,6 +114,40 @@ func (e *element) typ() types.Type {
return e.t
}
+// A lazyAddress is an lvalue whose address is the result of an instruction.
+// These work like an *address except a new address.address() Value
+// is created on each load, store and address call.
+// A lazyAddress can be used to control when a side effect (nil pointer
+// dereference, index out of bounds) of using a location happens.
+type lazyAddress struct {
+ addr func(fn *Function) Value // emit to fn the computation of the address
+ t types.Type // type of the location
+ expr ast.Expr // source syntax of the value (not address) [debug mode]
+}
+
+func (l *lazyAddress) load(fn *Function, source ast.Node) Value {
+ load := emitLoad(fn, l.addr(fn), source)
+ return load
+}
+
+func (l *lazyAddress) store(fn *Function, v Value, source ast.Node) {
+ store := emitStore(fn, l.addr(fn), v, source)
+ if l.expr != nil {
+ // store.Val is v, converted for assignability.
+ emitDebugRef(fn, l.expr, store.Val, false)
+ }
+}
+
+func (l *lazyAddress) address(fn *Function) Value {
+ addr := l.addr(fn)
+ if l.expr != nil {
+ emitDebugRef(fn, l.expr, addr, true)
+ }
+ return addr
+}
+
+func (l *lazyAddress) typ() types.Type { return l.t }
+
// A blank is a dummy variable whose name is "_".
// It is not reified: loads are illegal and stores are ignored.
type blank struct{}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 37c9db8296..e9c213f615 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -7,7 +7,7 @@
# github.com/4meepo/tagalign v1.3.3
## explicit; go 1.19
github.com/4meepo/tagalign
-# github.com/Abirdcfly/dupword v0.0.13
+# github.com/Abirdcfly/dupword v0.0.14
## explicit; go 1.20
github.com/Abirdcfly/dupword
# github.com/Antonboom/errname v0.1.12
@@ -16,7 +16,7 @@ github.com/Antonboom/errname/pkg/analyzer
# github.com/Antonboom/nilnil v0.1.7
## explicit; go 1.20
github.com/Antonboom/nilnil/pkg/analyzer
-# github.com/Antonboom/testifylint v1.1.2
+# github.com/Antonboom/testifylint v1.2.0
## explicit; go 1.20
github.com/Antonboom/testifylint/analyzer
github.com/Antonboom/testifylint/internal/analysisutil
@@ -73,8 +73,8 @@ github.com/agext/levenshtein
# github.com/alecthomas/go-check-sumtype v0.1.4
## explicit; go 1.18
github.com/alecthomas/go-check-sumtype
-# github.com/alexkohler/nakedret/v2 v2.0.2
-## explicit; go 1.18
+# github.com/alexkohler/nakedret/v2 v2.0.4
+## explicit; go 1.21
github.com/alexkohler/nakedret/v2
# github.com/alexkohler/prealloc v1.0.0
## explicit; go 1.15
@@ -118,7 +118,7 @@ github.com/butuzov/ireturn/analyzer/internal/types
## explicit; go 1.19
github.com/butuzov/mirror
github.com/butuzov/mirror/internal/checker
-# github.com/catenacyber/perfsprint v0.6.0
+# github.com/catenacyber/perfsprint v0.7.1
## explicit; go 1.20
github.com/catenacyber/perfsprint/analyzer
# github.com/ccojocar/zxcvbn-go v1.0.2
@@ -141,6 +141,9 @@ github.com/charithe/durationcheck
# github.com/chavacava/garif v0.1.0
## explicit; go 1.16
github.com/chavacava/garif
+# github.com/ckaznocha/intrange v0.1.0
+## explicit; go 1.21
+github.com/ckaznocha/intrange
# github.com/client9/misspell v0.3.4
## explicit
github.com/client9/misspell
@@ -163,7 +166,7 @@ github.com/cloudflare/circl/sign/ed448
## explicit; go 1.18
github.com/curioswitch/go-reassign
github.com/curioswitch/go-reassign/internal/analyzer
-# github.com/daixiang0/gci v0.12.1
+# github.com/daixiang0/gci v0.12.3
## explicit; go 1.18
github.com/daixiang0/gci/pkg/config
github.com/daixiang0/gci/pkg/format
@@ -177,12 +180,9 @@ github.com/daixiang0/gci/pkg/utils
# github.com/davecgh/go-spew v1.1.1
## explicit
github.com/davecgh/go-spew/spew
-# github.com/denis-tingaikin/go-header v0.4.3
-## explicit; go 1.17
+# github.com/denis-tingaikin/go-header v0.5.0
+## explicit; go 1.21
github.com/denis-tingaikin/go-header
-# github.com/esimonov/ifshort v1.0.4
-## explicit; go 1.17
-github.com/esimonov/ifshort/pkg/analyzer
# github.com/ettle/strcase v0.2.0
## explicit; go 1.12
github.com/ettle/strcase
@@ -201,10 +201,10 @@ github.com/fsnotify/fsnotify
# github.com/fzipp/gocyclo v0.6.0
## explicit; go 1.18
github.com/fzipp/gocyclo
-# github.com/ghostiam/protogetter v0.3.4
+# github.com/ghostiam/protogetter v0.3.5
## explicit; go 1.19
github.com/ghostiam/protogetter
-# github.com/go-critic/go-critic v0.11.1
+# github.com/go-critic/go-critic v0.11.2
## explicit; go 1.18
github.com/go-critic/go-critic/checkers
github.com/go-critic/go-critic/checkers/internal/astwalk
@@ -260,10 +260,6 @@ github.com/golang/protobuf/ptypes/any
github.com/golang/protobuf/ptypes/duration
github.com/golang/protobuf/ptypes/empty
github.com/golang/protobuf/ptypes/timestamp
-# github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2
-## explicit
-github.com/golangci/check/cmd/structcheck
-github.com/golangci/check/cmd/varcheck
# github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a
## explicit
github.com/golangci/dupl
@@ -272,15 +268,12 @@ github.com/golangci/dupl/printer
github.com/golangci/dupl/suffixtree
github.com/golangci/dupl/syntax
github.com/golangci/dupl/syntax/golang
-# github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe
-## explicit; go 1.17
-github.com/golangci/go-misc/deadcode
# github.com/golangci/gofmt v0.0.0-20231018234816-f50ced29576e
## explicit; go 1.20
github.com/golangci/gofmt/gofmt
github.com/golangci/gofmt/gofmt/internal/diff
github.com/golangci/gofmt/goimports
-# github.com/golangci/golangci-lint v1.56.2
+# github.com/golangci/golangci-lint v1.57.1
## explicit; go 1.21
github.com/golangci/golangci-lint/cmd/golangci-lint
github.com/golangci/golangci-lint/internal/cache
@@ -289,6 +282,7 @@ github.com/golangci/golangci-lint/internal/pkgcache
github.com/golangci/golangci-lint/internal/renameio
github.com/golangci/golangci-lint/internal/robustio
github.com/golangci/golangci-lint/pkg/commands
+github.com/golangci/golangci-lint/pkg/commands/internal
github.com/golangci/golangci-lint/pkg/config
github.com/golangci/golangci-lint/pkg/exitcodes
github.com/golangci/golangci-lint/pkg/fsutils
@@ -307,20 +301,17 @@ github.com/golangci/golangci-lint/pkg/report
github.com/golangci/golangci-lint/pkg/result
github.com/golangci/golangci-lint/pkg/result/processors
github.com/golangci/golangci-lint/pkg/timeutils
-# github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0
-## explicit
-github.com/golangci/lint-1
-# github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca
-## explicit
-github.com/golangci/maligned
# github.com/golangci/misspell v0.4.1
## explicit; go 1.19
github.com/golangci/misspell
+# github.com/golangci/plugin-module-register v0.1.1
+## explicit; go 1.21
+github.com/golangci/plugin-module-register/register
# github.com/golangci/revgrep v0.5.2
## explicit; go 1.19
github.com/golangci/revgrep
-# github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4
-## explicit
+# github.com/golangci/unconvert v0.0.0-20240309020433-c5143eacb3ed
+## explicit; go 1.20
github.com/golangci/unconvert
# github.com/google/go-cmp v0.6.0
## explicit; go 1.13
@@ -510,26 +501,25 @@ github.com/jingyugao/rowserrcheck/passes/rowserr
# github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af
## explicit; go 1.13
github.com/jirfag/go-printf-func-name/pkg/analyzer
-# github.com/jjti/go-spancheck v0.5.2
+# github.com/jjti/go-spancheck v0.5.3
## explicit; go 1.20
github.com/jjti/go-spancheck
# github.com/julz/importas v0.1.0
## explicit; go 1.15
github.com/julz/importas
+# github.com/karamaru-alpha/copyloopvar v1.0.8
+## explicit; go 1.21
+github.com/karamaru-alpha/copyloopvar
# github.com/kisielk/errcheck v1.7.0
## explicit; go 1.18
github.com/kisielk/errcheck/errcheck
-# github.com/kisielk/gotool v1.0.0
-## explicit
-github.com/kisielk/gotool
-github.com/kisielk/gotool/internal/load
# github.com/kkHAIKE/contextcheck v1.1.4
## explicit; go 1.20
github.com/kkHAIKE/contextcheck
# github.com/kulti/thelper v0.6.3
## explicit; go 1.18
github.com/kulti/thelper/pkg/analyzer
-# github.com/kunwardeep/paralleltest v1.0.9
+# github.com/kunwardeep/paralleltest v1.0.10
## explicit; go 1.17
github.com/kunwardeep/paralleltest/pkg/paralleltest
# github.com/kyoh86/exportloopref v0.1.11
@@ -579,9 +569,6 @@ github.com/mattn/go-runewidth
# github.com/matttproud/golang_protobuf_extensions v1.0.1
## explicit
github.com/matttproud/golang_protobuf_extensions/pbutil
-# github.com/mbilski/exhaustivestruct v1.2.0
-## explicit; go 1.15
-github.com/mbilski/exhaustivestruct/pkg/analyzer
# github.com/mgechev/revive v1.3.7
## explicit; go 1.20
github.com/mgechev/revive/config
@@ -624,13 +611,16 @@ github.com/nishanths/exhaustive
# github.com/nishanths/predeclared v0.2.2
## explicit; go 1.14
github.com/nishanths/predeclared/passes/predeclared
-# github.com/nunnatsa/ginkgolinter v0.15.2
-## explicit; go 1.20
+# github.com/nunnatsa/ginkgolinter v0.16.1
+## explicit; go 1.21
github.com/nunnatsa/ginkgolinter
-github.com/nunnatsa/ginkgolinter/ginkgohandler
-github.com/nunnatsa/ginkgolinter/gomegahandler
-github.com/nunnatsa/ginkgolinter/interfaces
-github.com/nunnatsa/ginkgolinter/reverseassertion
+github.com/nunnatsa/ginkgolinter/internal/ginkgohandler
+github.com/nunnatsa/ginkgolinter/internal/gomegahandler
+github.com/nunnatsa/ginkgolinter/internal/interfaces
+github.com/nunnatsa/ginkgolinter/internal/intervals
+github.com/nunnatsa/ginkgolinter/internal/reports
+github.com/nunnatsa/ginkgolinter/internal/reverseassertion
+github.com/nunnatsa/ginkgolinter/linter
github.com/nunnatsa/ginkgolinter/types
github.com/nunnatsa/ginkgolinter/version
# github.com/oklog/run v1.0.0
@@ -642,12 +632,13 @@ github.com/olekukonko/tablewriter
# github.com/pelletier/go-toml v1.9.5
## explicit; go 1.12
github.com/pelletier/go-toml
-# github.com/pelletier/go-toml/v2 v2.0.5
+# github.com/pelletier/go-toml/v2 v2.2.0
## explicit; go 1.16
github.com/pelletier/go-toml/v2
-github.com/pelletier/go-toml/v2/internal/ast
+github.com/pelletier/go-toml/v2/internal/characters
github.com/pelletier/go-toml/v2/internal/danger
github.com/pelletier/go-toml/v2/internal/tracker
+github.com/pelletier/go-toml/v2/unstable
# github.com/pmezard/go-difflib v1.0.0
## explicit
github.com/pmezard/go-difflib/difflib
@@ -672,7 +663,7 @@ github.com/prometheus/common/model
github.com/prometheus/procfs
github.com/prometheus/procfs/internal/fs
github.com/prometheus/procfs/internal/util
-# github.com/quasilyte/go-ruleguard v0.4.0
+# github.com/quasilyte/go-ruleguard v0.4.2
## explicit; go 1.19
github.com/quasilyte/go-ruleguard/internal/goenv
github.com/quasilyte/go-ruleguard/internal/golist
@@ -700,8 +691,8 @@ github.com/quasilyte/regex/syntax
# github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567
## explicit; go 1.17
github.com/quasilyte/stdinfo
-# github.com/ryancurrah/gomodguard v1.3.0
-## explicit; go 1.19
+# github.com/ryancurrah/gomodguard v1.3.1
+## explicit; go 1.21
github.com/ryancurrah/gomodguard
# github.com/ryanrolds/sqlclosecheck v0.5.1
## explicit; go 1.20
@@ -709,6 +700,10 @@ github.com/ryanrolds/sqlclosecheck/pkg/analyzer
# github.com/sanposhiho/wastedassign/v2 v2.0.7
## explicit; go 1.14
github.com/sanposhiho/wastedassign/v2
+# github.com/santhosh-tekuri/jsonschema/v5 v5.3.1
+## explicit; go 1.19
+github.com/santhosh-tekuri/jsonschema/v5
+github.com/santhosh-tekuri/jsonschema/v5/httploader
# github.com/sashamelentyev/interfacebloat v1.1.0
## explicit; go 1.18
github.com/sashamelentyev/interfacebloat/pkg/analyzer
@@ -740,9 +735,6 @@ github.com/sirupsen/logrus
# github.com/sivchari/containedctx v1.0.3
## explicit; go 1.17
github.com/sivchari/containedctx
-# github.com/sivchari/nosnakecase v1.7.0
-## explicit; go 1.18
-github.com/sivchari/nosnakecase
# github.com/sivchari/tenv v1.7.1
## explicit; go 1.18
github.com/sivchari/tenv
@@ -819,8 +811,8 @@ github.com/timonwong/loggercheck/internal/checkers/printf
github.com/timonwong/loggercheck/internal/rules
github.com/timonwong/loggercheck/internal/sets
github.com/timonwong/loggercheck/internal/stringutil
-# github.com/tomarrell/wrapcheck/v2 v2.8.1
-## explicit; go 1.18
+# github.com/tomarrell/wrapcheck/v2 v2.8.3
+## explicit; go 1.21
github.com/tomarrell/wrapcheck/v2/wrapcheck
# github.com/tommy-muehle/go-mnd/v2 v2.5.1
## explicit; go 1.12
@@ -876,15 +868,20 @@ github.com/zclconf/go-cty/cty/set
# gitlab.com/bosi/decorder v0.4.1
## explicit; go 1.20
gitlab.com/bosi/decorder
-# go-simpler.org/musttag v0.8.0
+# go-simpler.org/musttag v0.9.0
## explicit; go 1.20
go-simpler.org/musttag
-# go-simpler.org/sloglint v0.4.0
+# go-simpler.org/sloglint v0.5.0
## explicit; go 1.20
go-simpler.org/sloglint
# go.uber.org/atomic v1.7.0
## explicit; go 1.13
go.uber.org/atomic
+# go.uber.org/automaxprocs v1.5.3
+## explicit; go 1.18
+go.uber.org/automaxprocs/internal/cgroups
+go.uber.org/automaxprocs/internal/runtime
+go.uber.org/automaxprocs/maxprocs
# go.uber.org/multierr v1.6.0
## explicit; go 1.12
go.uber.org/multierr
@@ -919,11 +916,13 @@ golang.org/x/crypto/ssh
golang.org/x/crypto/ssh/internal/bcrypt_pbkdf
# golang.org/x/exp v0.0.0-20240103183307-be819d1f06fc
## explicit; go 1.20
+golang.org/x/exp/constraints
golang.org/x/exp/maps
-# golang.org/x/exp/typeparams v0.0.0-20231219180239-dc181d75b848
+golang.org/x/exp/slices
+# golang.org/x/exp/typeparams v0.0.0-20240314144324-c7f7c6466f7f
## explicit; go 1.18
golang.org/x/exp/typeparams
-# golang.org/x/mod v0.15.0
+# golang.org/x/mod v0.16.0
## explicit; go 1.18
golang.org/x/mod/internal/lazyregexp
golang.org/x/mod/modfile
@@ -959,8 +958,8 @@ golang.org/x/text/transform
golang.org/x/text/unicode/bidi
golang.org/x/text/unicode/norm
golang.org/x/text/width
-# golang.org/x/tools v0.18.0
-## explicit; go 1.18
+# golang.org/x/tools v0.19.0
+## explicit; go 1.19
golang.org/x/tools/go/analysis
golang.org/x/tools/go/analysis/passes/appends
golang.org/x/tools/go/analysis/passes/asmdecl
@@ -1169,7 +1168,7 @@ gopkg.in/yaml.v2
# gopkg.in/yaml.v3 v3.0.1
## explicit
gopkg.in/yaml.v3
-# honnef.co/go/tools v0.4.6
+# honnef.co/go/tools v0.4.7
## explicit; go 1.19
honnef.co/go/tools/analysis/code
honnef.co/go/tools/analysis/edit
@@ -1206,12 +1205,6 @@ mvdan.cc/gofumpt/internal/govendor/go/doc/comment
mvdan.cc/gofumpt/internal/govendor/go/format
mvdan.cc/gofumpt/internal/govendor/go/printer
mvdan.cc/gofumpt/internal/version
-# mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed
-## explicit
-mvdan.cc/interfacer/check
-# mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b
-## explicit
-mvdan.cc/lint
# mvdan.cc/unparam v0.0.0-20240104100049-c549a3470d14
## explicit; go 1.20
mvdan.cc/unparam/check
diff --git a/vendor/mvdan.cc/interfacer/LICENSE b/vendor/mvdan.cc/interfacer/LICENSE
deleted file mode 100644
index 7d71d51a5e..0000000000
--- a/vendor/mvdan.cc/interfacer/LICENSE
+++ /dev/null
@@ -1,27 +0,0 @@
-Copyright (c) 2015, Daniel Martí. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
- * Neither the name of the copyright holder nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/mvdan.cc/interfacer/check/cache.go b/vendor/mvdan.cc/interfacer/check/cache.go
deleted file mode 100644
index 757eca55e1..0000000000
--- a/vendor/mvdan.cc/interfacer/check/cache.go
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright (c) 2015, Daniel Martí
-// See LICENSE for licensing information
-
-package check
-
-import (
- "go/ast"
- "go/types"
-)
-
-type pkgTypes struct {
- ifaces map[string]string
- funcSigns map[string]bool
-}
-
-func (p *pkgTypes) getTypes(pkg *types.Package) {
- p.ifaces = make(map[string]string)
- p.funcSigns = make(map[string]bool)
- done := make(map[*types.Package]bool)
- addTypes := func(pkg *types.Package, top bool) {
- if done[pkg] {
- return
- }
- done[pkg] = true
- ifs, funs := fromScope(pkg.Scope())
- fullName := func(name string) string {
- if !top {
- return pkg.Path() + "." + name
- }
- return name
- }
- for iftype, name := range ifs {
- // only suggest exported interfaces
- if ast.IsExported(name) {
- p.ifaces[iftype] = fullName(name)
- }
- }
- for ftype := range funs {
- // ignore non-exported func signatures too
- p.funcSigns[ftype] = true
- }
- }
- for _, imp := range pkg.Imports() {
- addTypes(imp, false)
- for _, imp2 := range imp.Imports() {
- addTypes(imp2, false)
- }
- }
- addTypes(pkg, true)
-}
diff --git a/vendor/mvdan.cc/interfacer/check/check.go b/vendor/mvdan.cc/interfacer/check/check.go
deleted file mode 100644
index f4d3b4037b..0000000000
--- a/vendor/mvdan.cc/interfacer/check/check.go
+++ /dev/null
@@ -1,462 +0,0 @@
-// Copyright (c) 2015, Daniel Martí
-// See LICENSE for licensing information
-
-package check // import "mvdan.cc/interfacer/check"
-
-import (
- "fmt"
- "go/ast"
- "go/token"
- "go/types"
- "os"
- "strings"
-
- "golang.org/x/tools/go/loader"
- "golang.org/x/tools/go/ssa"
- "golang.org/x/tools/go/ssa/ssautil"
-
- "github.com/kisielk/gotool"
- "mvdan.cc/lint"
-)
-
-func toDiscard(usage *varUsage) bool {
- if usage.discard {
- return true
- }
- for to := range usage.assigned {
- if toDiscard(to) {
- return true
- }
- }
- return false
-}
-
-func allCalls(usage *varUsage, all, ftypes map[string]string) {
- for fname := range usage.calls {
- all[fname] = ftypes[fname]
- }
- for to := range usage.assigned {
- allCalls(to, all, ftypes)
- }
-}
-
-func (c *Checker) interfaceMatching(param *types.Var, usage *varUsage) (string, string) {
- if toDiscard(usage) {
- return "", ""
- }
- ftypes := typeFuncMap(param.Type())
- called := make(map[string]string, len(usage.calls))
- allCalls(usage, called, ftypes)
- s := funcMapString(called)
- return c.ifaces[s], s
-}
-
-type varUsage struct {
- calls map[string]struct{}
- discard bool
-
- assigned map[*varUsage]struct{}
-}
-
-type funcDecl struct {
- astDecl *ast.FuncDecl
- ssaFn *ssa.Function
-}
-
-// CheckArgs checks the packages specified by their import paths in
-// args.
-func CheckArgs(args []string) ([]string, error) {
- paths := gotool.ImportPaths(args)
- conf := loader.Config{}
- conf.AllowErrors = true
- rest, err := conf.FromArgs(paths, false)
- if err != nil {
- return nil, err
- }
- if len(rest) > 0 {
- return nil, fmt.Errorf("unwanted extra args: %v", rest)
- }
- lprog, err := conf.Load()
- if err != nil {
- return nil, err
- }
- prog := ssautil.CreateProgram(lprog, 0)
- prog.Build()
- c := new(Checker)
- c.Program(lprog)
- c.ProgramSSA(prog)
- issues, err := c.Check()
- if err != nil {
- return nil, err
- }
- wd, err := os.Getwd()
- if err != nil {
- return nil, err
- }
- lines := make([]string, len(issues))
- for i, issue := range issues {
- fpos := prog.Fset.Position(issue.Pos()).String()
- if strings.HasPrefix(fpos, wd) {
- fpos = fpos[len(wd)+1:]
- }
- lines[i] = fmt.Sprintf("%s: %s", fpos, issue.Message())
- }
- return lines, nil
-}
-
-type Checker struct {
- lprog *loader.Program
- prog *ssa.Program
-
- pkgTypes
- *loader.PackageInfo
-
- funcs []*funcDecl
-
- ssaByPos map[token.Pos]*ssa.Function
-
- discardFuncs map[*types.Signature]struct{}
-
- vars map[*types.Var]*varUsage
-}
-
-var (
- _ lint.Checker = (*Checker)(nil)
- _ lint.WithSSA = (*Checker)(nil)
-)
-
-func (c *Checker) Program(lprog *loader.Program) {
- c.lprog = lprog
-}
-
-func (c *Checker) ProgramSSA(prog *ssa.Program) {
- c.prog = prog
-}
-
-func (c *Checker) Check() ([]lint.Issue, error) {
- var total []lint.Issue
- c.ssaByPos = make(map[token.Pos]*ssa.Function)
- wantPkg := make(map[*types.Package]bool)
- for _, pinfo := range c.lprog.InitialPackages() {
- wantPkg[pinfo.Pkg] = true
- }
- for fn := range ssautil.AllFunctions(c.prog) {
- if fn.Pkg == nil { // builtin?
- continue
- }
- if len(fn.Blocks) == 0 { // stub
- continue
- }
- if !wantPkg[fn.Pkg.Pkg] { // not part of given pkgs
- continue
- }
- c.ssaByPos[fn.Pos()] = fn
- }
- for _, pinfo := range c.lprog.InitialPackages() {
- pkg := pinfo.Pkg
- c.getTypes(pkg)
- c.PackageInfo = c.lprog.AllPackages[pkg]
- total = append(total, c.checkPkg()...)
- }
- return total, nil
-}
-
-func (c *Checker) checkPkg() []lint.Issue {
- c.discardFuncs = make(map[*types.Signature]struct{})
- c.vars = make(map[*types.Var]*varUsage)
- c.funcs = c.funcs[:0]
- findFuncs := func(node ast.Node) bool {
- decl, ok := node.(*ast.FuncDecl)
- if !ok {
- return true
- }
- ssaFn := c.ssaByPos[decl.Name.Pos()]
- if ssaFn == nil {
- return true
- }
- fd := &funcDecl{
- astDecl: decl,
- ssaFn: ssaFn,
- }
- if c.funcSigns[signString(fd.ssaFn.Signature)] {
- // implements interface
- return true
- }
- c.funcs = append(c.funcs, fd)
- ast.Walk(c, decl.Body)
- return true
- }
- for _, f := range c.Files {
- ast.Inspect(f, findFuncs)
- }
- return c.packageIssues()
-}
-
-func paramVarAndType(sign *types.Signature, i int) (*types.Var, types.Type) {
- params := sign.Params()
- extra := sign.Variadic() && i >= params.Len()-1
- if !extra {
- if i >= params.Len() {
- // builtins with multiple signatures
- return nil, nil
- }
- vr := params.At(i)
- return vr, vr.Type()
- }
- last := params.At(params.Len() - 1)
- switch x := last.Type().(type) {
- case *types.Slice:
- return nil, x.Elem()
- default:
- return nil, x
- }
-}
-
-func (c *Checker) varUsage(e ast.Expr) *varUsage {
- id, ok := e.(*ast.Ident)
- if !ok {
- return nil
- }
- param, ok := c.ObjectOf(id).(*types.Var)
- if !ok {
- // not a variable
- return nil
- }
- if usage, e := c.vars[param]; e {
- return usage
- }
- if !interesting(param.Type()) {
- return nil
- }
- usage := &varUsage{
- calls: make(map[string]struct{}),
- assigned: make(map[*varUsage]struct{}),
- }
- c.vars[param] = usage
- return usage
-}
-
-func (c *Checker) addUsed(e ast.Expr, as types.Type) {
- if as == nil {
- return
- }
- if usage := c.varUsage(e); usage != nil {
- // using variable
- iface, ok := as.Underlying().(*types.Interface)
- if !ok {
- usage.discard = true
- return
- }
- for i := 0; i < iface.NumMethods(); i++ {
- m := iface.Method(i)
- usage.calls[m.Name()] = struct{}{}
- }
- } else if t, ok := c.TypeOf(e).(*types.Signature); ok {
- // using func
- c.discardFuncs[t] = struct{}{}
- }
-}
-
-func (c *Checker) addAssign(to, from ast.Expr) {
- pto := c.varUsage(to)
- pfrom := c.varUsage(from)
- if pto == nil || pfrom == nil {
- // either isn't interesting
- return
- }
- pfrom.assigned[pto] = struct{}{}
-}
-
-func (c *Checker) discard(e ast.Expr) {
- if usage := c.varUsage(e); usage != nil {
- usage.discard = true
- }
-}
-
-func (c *Checker) comparedWith(e, with ast.Expr) {
- if _, ok := with.(*ast.BasicLit); ok {
- c.discard(e)
- }
-}
-
-func (c *Checker) Visit(node ast.Node) ast.Visitor {
- switch x := node.(type) {
- case *ast.SelectorExpr:
- if _, ok := c.TypeOf(x.Sel).(*types.Signature); !ok {
- c.discard(x.X)
- }
- case *ast.StarExpr:
- c.discard(x.X)
- case *ast.UnaryExpr:
- c.discard(x.X)
- case *ast.IndexExpr:
- c.discard(x.X)
- case *ast.IncDecStmt:
- c.discard(x.X)
- case *ast.BinaryExpr:
- switch x.Op {
- case token.EQL, token.NEQ:
- c.comparedWith(x.X, x.Y)
- c.comparedWith(x.Y, x.X)
- default:
- c.discard(x.X)
- c.discard(x.Y)
- }
- case *ast.ValueSpec:
- for _, val := range x.Values {
- c.addUsed(val, c.TypeOf(x.Type))
- }
- case *ast.AssignStmt:
- for i, val := range x.Rhs {
- left := x.Lhs[i]
- if x.Tok == token.ASSIGN {
- c.addUsed(val, c.TypeOf(left))
- }
- c.addAssign(left, val)
- }
- case *ast.CompositeLit:
- for i, e := range x.Elts {
- switch y := e.(type) {
- case *ast.KeyValueExpr:
- c.addUsed(y.Key, c.TypeOf(y.Value))
- c.addUsed(y.Value, c.TypeOf(y.Key))
- case *ast.Ident:
- c.addUsed(y, compositeIdentType(c.TypeOf(x), i))
- }
- }
- case *ast.CallExpr:
- switch y := c.TypeOf(x.Fun).Underlying().(type) {
- case *types.Signature:
- c.onMethodCall(x, y)
- default:
- // type conversion
- if len(x.Args) == 1 {
- c.addUsed(x.Args[0], y)
- }
- }
- }
- return c
-}
-
-func compositeIdentType(t types.Type, i int) types.Type {
- switch x := t.(type) {
- case *types.Named:
- return compositeIdentType(x.Underlying(), i)
- case *types.Struct:
- return x.Field(i).Type()
- case *types.Array:
- return x.Elem()
- case *types.Slice:
- return x.Elem()
- }
- return nil
-}
-
-func (c *Checker) onMethodCall(ce *ast.CallExpr, sign *types.Signature) {
- for i, e := range ce.Args {
- paramObj, t := paramVarAndType(sign, i)
- // Don't if this is a parameter being re-used as itself
- // in a recursive call
- if id, ok := e.(*ast.Ident); ok {
- if paramObj == c.ObjectOf(id) {
- continue
- }
- }
- c.addUsed(e, t)
- }
- sel, ok := ce.Fun.(*ast.SelectorExpr)
- if !ok {
- return
- }
- // receiver func call on the left side
- if usage := c.varUsage(sel.X); usage != nil {
- usage.calls[sel.Sel.Name] = struct{}{}
- }
-}
-
-func (fd *funcDecl) paramGroups() [][]*types.Var {
- astList := fd.astDecl.Type.Params.List
- groups := make([][]*types.Var, len(astList))
- signIndex := 0
- for i, field := range astList {
- group := make([]*types.Var, len(field.Names))
- for j := range field.Names {
- group[j] = fd.ssaFn.Signature.Params().At(signIndex)
- signIndex++
- }
- groups[i] = group
- }
- return groups
-}
-
-func (c *Checker) packageIssues() []lint.Issue {
- var issues []lint.Issue
- for _, fd := range c.funcs {
- if _, e := c.discardFuncs[fd.ssaFn.Signature]; e {
- continue
- }
- for _, group := range fd.paramGroups() {
- issues = append(issues, c.groupIssues(fd, group)...)
- }
- }
- return issues
-}
-
-type Issue struct {
- pos token.Pos
- msg string
-}
-
-func (i Issue) Pos() token.Pos { return i.pos }
-func (i Issue) Message() string { return i.msg }
-
-func (c *Checker) groupIssues(fd *funcDecl, group []*types.Var) []lint.Issue {
- var issues []lint.Issue
- for _, param := range group {
- usage := c.vars[param]
- if usage == nil {
- return nil
- }
- newType := c.paramNewType(fd.astDecl.Name.Name, param, usage)
- if newType == "" {
- return nil
- }
- issues = append(issues, Issue{
- pos: param.Pos(),
- msg: fmt.Sprintf("%s can be %s", param.Name(), newType),
- })
- }
- return issues
-}
-
-func willAddAllocation(t types.Type) bool {
- switch t.Underlying().(type) {
- case *types.Pointer, *types.Interface:
- return false
- }
- return true
-}
-
-func (c *Checker) paramNewType(funcName string, param *types.Var, usage *varUsage) string {
- t := param.Type()
- if !ast.IsExported(funcName) && willAddAllocation(t) {
- return ""
- }
- if named := typeNamed(t); named != nil {
- tname := named.Obj().Name()
- vname := param.Name()
- if mentionsName(funcName, tname) || mentionsName(funcName, vname) {
- return ""
- }
- }
- ifname, iftype := c.interfaceMatching(param, usage)
- if ifname == "" {
- return ""
- }
- if types.IsInterface(t.Underlying()) {
- if have := funcMapString(typeFuncMap(t)); have == iftype {
- return ""
- }
- }
- return ifname
-}
diff --git a/vendor/mvdan.cc/interfacer/check/types.go b/vendor/mvdan.cc/interfacer/check/types.go
deleted file mode 100644
index 393bb0b9fa..0000000000
--- a/vendor/mvdan.cc/interfacer/check/types.go
+++ /dev/null
@@ -1,170 +0,0 @@
-// Copyright (c) 2015, Daniel Martí
-// See LICENSE for licensing information
-
-package check
-
-import (
- "bytes"
- "fmt"
- "go/types"
- "sort"
- "strings"
-)
-
-type methoder interface {
- NumMethods() int
- Method(int) *types.Func
-}
-
-func methoderFuncMap(m methoder, skip bool) map[string]string {
- ifuncs := make(map[string]string, m.NumMethods())
- for i := 0; i < m.NumMethods(); i++ {
- f := m.Method(i)
- if !f.Exported() {
- if skip {
- continue
- }
- return nil
- }
- sign := f.Type().(*types.Signature)
- ifuncs[f.Name()] = signString(sign)
- }
- return ifuncs
-}
-
-func typeFuncMap(t types.Type) map[string]string {
- switch x := t.(type) {
- case *types.Pointer:
- return typeFuncMap(x.Elem())
- case *types.Named:
- u := x.Underlying()
- if types.IsInterface(u) {
- return typeFuncMap(u)
- }
- return methoderFuncMap(x, true)
- case *types.Interface:
- return methoderFuncMap(x, false)
- default:
- return nil
- }
-}
-
-func funcMapString(iface map[string]string) string {
- fnames := make([]string, 0, len(iface))
- for fname := range iface {
- fnames = append(fnames, fname)
- }
- sort.Strings(fnames)
- var b bytes.Buffer
- for i, fname := range fnames {
- if i > 0 {
- fmt.Fprint(&b, "; ")
- }
- fmt.Fprint(&b, fname, iface[fname])
- }
- return b.String()
-}
-
-func tupleJoin(buf *bytes.Buffer, t *types.Tuple) {
- buf.WriteByte('(')
- for i := 0; i < t.Len(); i++ {
- if i > 0 {
- buf.WriteString(", ")
- }
- buf.WriteString(t.At(i).Type().String())
- }
- buf.WriteByte(')')
-}
-
-// signString is similar to Signature.String(), but it ignores
-// param/result names.
-func signString(sign *types.Signature) string {
- var buf bytes.Buffer
- tupleJoin(&buf, sign.Params())
- tupleJoin(&buf, sign.Results())
- return buf.String()
-}
-
-func interesting(t types.Type) bool {
- switch x := t.(type) {
- case *types.Interface:
- return x.NumMethods() > 1
- case *types.Named:
- if u := x.Underlying(); types.IsInterface(u) {
- return interesting(u)
- }
- return x.NumMethods() >= 1
- case *types.Pointer:
- return interesting(x.Elem())
- default:
- return false
- }
-}
-
-func anyInteresting(params *types.Tuple) bool {
- for i := 0; i < params.Len(); i++ {
- t := params.At(i).Type()
- if interesting(t) {
- return true
- }
- }
- return false
-}
-
-func fromScope(scope *types.Scope) (ifaces map[string]string, funcs map[string]bool) {
- ifaces = make(map[string]string)
- funcs = make(map[string]bool)
- for _, name := range scope.Names() {
- tn, ok := scope.Lookup(name).(*types.TypeName)
- if !ok {
- continue
- }
- switch x := tn.Type().Underlying().(type) {
- case *types.Interface:
- iface := methoderFuncMap(x, false)
- if len(iface) == 0 {
- continue
- }
- for i := 0; i < x.NumMethods(); i++ {
- f := x.Method(i)
- sign := f.Type().(*types.Signature)
- if !anyInteresting(sign.Params()) {
- continue
- }
- funcs[signString(sign)] = true
- }
- s := funcMapString(iface)
- if _, e := ifaces[s]; !e {
- ifaces[s] = tn.Name()
- }
- case *types.Signature:
- if !anyInteresting(x.Params()) {
- continue
- }
- funcs[signString(x)] = true
- }
- }
- return ifaces, funcs
-}
-
-func mentionsName(fname, name string) bool {
- if len(name) < 2 {
- return false
- }
- capit := strings.ToUpper(name[:1]) + name[1:]
- lower := strings.ToLower(name)
- return strings.Contains(fname, capit) || strings.HasPrefix(fname, lower)
-}
-
-func typeNamed(t types.Type) *types.Named {
- for {
- switch x := t.(type) {
- case *types.Named:
- return x
- case *types.Pointer:
- t = x.Elem()
- default:
- return nil
- }
- }
-}
diff --git a/vendor/mvdan.cc/lint/.travis.yml b/vendor/mvdan.cc/lint/.travis.yml
deleted file mode 100644
index 2ccdeab9ad..0000000000
--- a/vendor/mvdan.cc/lint/.travis.yml
+++ /dev/null
@@ -1,7 +0,0 @@
-language: go
-
-go:
- - 1.8.x
- - 1.9.x
-
-go_import_path: mvdan.cc/lint
diff --git a/vendor/mvdan.cc/lint/LICENSE b/vendor/mvdan.cc/lint/LICENSE
deleted file mode 100644
index a06c5ebfc8..0000000000
--- a/vendor/mvdan.cc/lint/LICENSE
+++ /dev/null
@@ -1,27 +0,0 @@
-Copyright (c) 2017, Daniel Martí. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
- * Neither the name of the copyright holder nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/mvdan.cc/lint/README.md b/vendor/mvdan.cc/lint/README.md
deleted file mode 100644
index 8a9c8b51c3..0000000000
--- a/vendor/mvdan.cc/lint/README.md
+++ /dev/null
@@ -1,27 +0,0 @@
-# lint
-
-[![GoDoc](https://godoc.org/mvdan.cc/lint?status.svg)](https://godoc.org/mvdan.cc/lint)
-[![Build Status](https://travis-ci.org/mvdan/lint.svg?branch=master)](https://travis-ci.org/mvdan/lint)
-
-Work in progress. Its API might change before the 1.0 release.
-
-This package intends to define simple interfaces that Go code checkers
-can implement. This would simplify calling them from Go code, as well as
-running multiple linters while sharing initial loading work.
-
-### metalint
-
- go get -u mvdan.cc/lint/cmd/metalint
-
-The start of a linter that runs many linters leveraging the common
-interface. Not stable yet.
-
-Linters included:
-
-* [unparam](https://mvdan.cc/unparam)
-* [interfacer](https://github.com/mvdan/interfacer)
-
-### Related projects
-
-* [golinters](https://github.com/thomasheller/golinters) - Report on
- linter support
diff --git a/vendor/mvdan.cc/lint/lint.go b/vendor/mvdan.cc/lint/lint.go
deleted file mode 100644
index a16789fad5..0000000000
--- a/vendor/mvdan.cc/lint/lint.go
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright (c) 2017, Daniel Martí
-// See LICENSE for licensing information
-
-// Package lint defines common interfaces for Go code checkers.
-package lint // import "mvdan.cc/lint"
-
-import (
- "go/token"
-
- "golang.org/x/tools/go/loader"
- "golang.org/x/tools/go/ssa"
-)
-
-// A Checker points out issues in a program.
-type Checker interface {
- Program(*loader.Program)
- Check() ([]Issue, error)
-}
-
-type WithSSA interface {
- ProgramSSA(*ssa.Program)
-}
-
-// Issue represents an issue somewhere in a source code file.
-type Issue interface {
- Pos() token.Pos
- Message() string
-}