From 358814f6948b46245153627ba4e8b74e06e1c91a Mon Sep 17 00:00:00 2001
From: Giuseppe Scrivano
Date: Thu, 11 Apr 2024 10:37:50 +0200
Subject: [PATCH 1/5] vendor: update containers/(common|storage)

Signed-off-by: Giuseppe Scrivano
---
 go.mod | 28 +-
 go.sum | 56 +--
 run_linux.go | 5 +-
 tests/authenticate.bats | 4 +-
 tests/bud.bats | 2 +-
 tests/from.bats | 2 +-
 .../containers/common/libimage/load.go | 41 +-
 .../common/libimage/manifest_list.go | 55 +--
 .../common/libimage/manifests/manifests.go | 19 +-
 .../containers/common/libimage/pull.go | 9 +
 .../common/libnetwork/cni/cni_types.go | 4 +-
 .../common/libnetwork/cni/network.go | 3 +-
 .../internal/rootlessnetns/netns_linux.go | 110 ++++--
 .../common/libnetwork/netavark/config.go | 5 +
 .../common/libnetwork/netavark/ipam.go | 21 +
 .../common/libnetwork/netavark/network.go | 8 +-
 .../common/libnetwork/resolvconf/resolv.go | 3 +-
 .../common/pkg/apparmor/apparmor_linux.go | 3 +-
 .../containers/common/pkg/auth/auth.go | 3 +-
 .../common/pkg/cgroups/cgroups_linux.go | 5 +-
 .../common/pkg/cgroups/utils_linux.go | 3 +-
 .../containers/common/pkg/config/config.go | 14 +-
 .../common/pkg/config/config_local.go | 3 +-
 .../common/pkg/config/containers.conf | 6 +
 .../common/pkg/config/containers.conf-freebsd | 6 +
 .../containers/common/pkg/config/default.go | 8 +-
 .../containers/common/pkg/config/modules.go | 6 +-
 .../containers/common/pkg/config/new.go | 5 +-
 .../common/pkg/config/pull_policy.go | 11 +-
 .../containers/common/pkg/hooks/1.0.0/hook.go | 4 +-
 .../common/pkg/manifests/manifests.go | 20 +-
 .../containers/common/pkg/parse/parse.go | 5 +-
 .../common/pkg/subscriptions/subscriptions.go | 13 +-
 .../containers/common/pkg/umask/umask.go | 4 +-
 .../containers/common/pkg/util/util.go | 5 +-
 .../containers/common/pkg/version/version.go | 63 ++-
 .../github.com/containers/storage/.cirrus.yml | 14 +-
 vendor/github.com/containers/storage/VERSION | 2 +-
 .../containers/storage/drivers/aufs/aufs.go | 5 +-
 .../containers/storage/drivers/btrfs/btrfs.go | 7 +-
 .../storage/drivers/devmapper/deviceset.go | 5 +-
 .../storage/drivers/devmapper/driver.go | 3 +-
 .../containers/storage/drivers/driver.go | 3 +-
 .../storage/drivers/overlay/overlay.go | 48 +--
 .../containers/storage/drivers/vfs/driver.go | 5 +-
 .../storage/drivers/windows/windows.go | 5 +-
 .../containers/storage/pkg/archive/archive.go | 2 +-
 .../containers/storage/pkg/archive/changes.go | 3 +-
 .../containers/storage/pkg/archive/copy.go | 3 +-
 .../containers/storage/pkg/archive/diff.go | 5 +-
 .../storage/pkg/chrootarchive/archive.go | 3 +-
 .../storage/pkg/chunked/cache_linux.go | 362 +++++++++++-------
 .../pkg/chunked/internal/compression.go | 6 -
 .../storage/pkg/chunked/storage_linux.go | 103 +++--
 .../storage/pkg/fileutils/exists_unix.go | 34 ++
 .../storage/pkg/fileutils/exists_windows.go | 18 +
 .../storage/pkg/fileutils/fileutils.go | 4 +-
 .../storage/pkg/idtools/idtools_unix.go | 3 +-
 vendor/github.com/containers/storage/store.go | 36 +-
 .../containers/storage/types/options.go | 7 +-
 .../containers/storage/types/utils.go | 3 +-
 .../github.com/klauspost/compress/README.md | 7 +
 .../compress/internal/snapref/encode_other.go | 2 +-
 .../klauspost/compress/zstd/blockdec.go | 3 +
 .../klauspost/compress/zstd/blockenc.go | 20 +
 .../klauspost/compress/zstd/decoder.go | 2 +-
 .../klauspost/compress/zstd/enc_best.go | 12 +
 .../klauspost/compress/zstd/enc_better.go | 13 +-
 vendor/github.com/onsi/gomega/CHANGELOG.md | 13 +
 vendor/github.com/onsi/gomega/gomega_dsl.go | 2 +-
 vendor/github.com/ulikunitz/xz/README.md | 11 +
 vendor/github.com/ulikunitz/xz/SECURITY.md | 13 +-
 vendor/github.com/ulikunitz/xz/TODO.md | 7 +-
 .../x/crypto/chacha20/chacha_ppc64le.s | 110 +++---
 vendor/golang.org/x/crypto/ssh/server.go | 170 +++---
 .../golang.org/x/sync/semaphore/semaphore.go | 42 +-
 vendor/golang.org/x/sys/unix/mmap_nomremap.go | 2 +-
 .../x/sys/unix/syscall_zos_s390x.go | 8 +
 .../x/sys/windows/syscall_windows.go | 82 ++++
 .../golang.org/x/sys/windows/types_windows.go | 24 ++
 .../x/sys/windows/zsyscall_windows.go | 126 +++++-
 .../x/tools/go/gcexportdata/gcexportdata.go | 2 +-
 .../x/tools/go/types/objectpath/objectpath.go | 20 +-
 .../x/tools/internal/aliases/aliases.go | 28 ++
 .../x/tools/internal/aliases/aliases_go121.go | 30 ++
 .../x/tools/internal/aliases/aliases_go122.go | 72 ++++
 .../x/tools/internal/gcimporter/gcimporter.go | 7 -
 .../x/tools/internal/gcimporter/iexport.go | 9 +-
 .../x/tools/internal/gcimporter/iimport.go | 27 +-
 .../internal/gcimporter/support_go117.go | 16 -
 .../internal/gcimporter/support_go118.go | 3 -
 .../x/tools/internal/gcimporter/unified_no.go | 4 +-
 .../tools/internal/gcimporter/unified_yes.go | 4 +-
 .../x/tools/internal/gcimporter/ureader_no.go | 19 -
 .../tools/internal/gcimporter/ureader_yes.go | 6 +-
 .../internal/tokeninternal/tokeninternal.go | 28 +-
 .../x/tools/internal/typeparams/common.go | 37 +-
 .../x/tools/internal/typeparams/coretype.go | 17 +-
 .../x/tools/internal/typesinternal/recv.go | 43 +++
 .../tools/internal/typesinternal/types_118.go | 3 -
 .../x/tools/internal/versions/features.go | 43 +++
 .../x/tools/internal/versions/toolchain.go | 14 +
 .../internal/versions/toolchain_go119.go | 14 +
 .../internal/versions/toolchain_go120.go | 14 +
 .../internal/versions/toolchain_go121.go | 14 +
 .../x/tools/internal/versions/types_go121.go | 18 +-
 .../x/tools/internal/versions/types_go122.go | 25 +-
 .../x/tools/internal/versions/versions.go | 5 +
 vendor/modules.txt | 34 +-
 .../internal/multierror/multierror.go | 82 ----
 .../internal/validation/k8s/objectmeta.go | 11 +-
 .../pkg/cdi/cache.go | 51 ++-
 .../pkg/cdi/container-edits.go | 52 ++-
 .../pkg/cdi/default-cache.go | 70 ++++
 .../container-device-interface/pkg/cdi/doc.go | 14 +
 .../pkg/cdi/registry.go | 50 ++-
 .../pkg/cdi/spec-dirs.go | 3 +-
 .../pkg/cdi/spec.go | 3 +-
 .../pkg/cdi/version.go | 25 ++
 .../specs-go/config.go | 21 +-
 .../specs-go/oci.go | 11 +
 121 files changed, 2009 insertions(+), 860 deletions(-)
 create mode 100644 vendor/github.com/containers/storage/pkg/fileutils/exists_unix.go
 create mode 100644 vendor/github.com/containers/storage/pkg/fileutils/exists_windows.go
 create mode 100644 vendor/golang.org/x/tools/internal/aliases/aliases.go
 create mode 100644 vendor/golang.org/x/tools/internal/aliases/aliases_go121.go
 create mode 100644 vendor/golang.org/x/tools/internal/aliases/aliases_go122.go
 delete mode 100644 vendor/golang.org/x/tools/internal/gcimporter/support_go117.go
 delete mode 100644 vendor/golang.org/x/tools/internal/gcimporter/ureader_no.go
 create mode 100644 vendor/golang.org/x/tools/internal/typesinternal/recv.go
 create mode 100644 vendor/golang.org/x/tools/internal/versions/features.go
 create mode 100644 vendor/golang.org/x/tools/internal/versions/toolchain.go
 create mode 100644 vendor/golang.org/x/tools/internal/versions/toolchain_go119.go
 create mode 100644 vendor/golang.org/x/tools/internal/versions/toolchain_go120.go
 create mode 100644 vendor/golang.org/x/tools/internal/versions/toolchain_go121.go
 delete mode 100644 vendor/tags.cncf.io/container-device-interface/internal/multierror/multierror.go
 create mode 100644 vendor/tags.cncf.io/container-device-interface/pkg/cdi/default-cache.go

diff --git a/go.mod b/go.mod
index 8be3862de8f..9e501629808 100644
--- a/go.mod
+++ b/go.mod
@@ -5,11 +5,11 @@ go 1.20
 require (
 	github.com/containerd/containerd v1.7.13
 	github.com/containernetworking/cni v1.1.2
-	github.com/containers/common v0.58.1-0.20240318131753-6f1c96f53a78
+	github.com/containers/common v0.58.1-0.20240410144442-8db59bf2fcce
 	github.com/containers/image/v5 v5.30.0
 	github.com/containers/luksy v0.0.0-20240312134643-3d2cf0e19c84
 	github.com/containers/ocicrypt v1.1.10
-	github.com/containers/storage v1.53.0
+	github.com/containers/storage v1.53.1-0.20240411065836-1fd0dc1d20e5
 	github.com/cyphar/filepath-securejoin v0.2.4
 	github.com/docker/distribution v2.8.3+incompatible
 	github.com/docker/docker v25.0.5+incompatible
@@ -19,7 +19,7 @@ require (
 	github.com/mattn/go-shellwords v1.0.12
 	github.com/moby/buildkit v0.12.5
 	github.com/onsi/ginkgo/v2 v2.17.1
-	github.com/onsi/gomega v1.31.1
+	github.com/onsi/gomega v1.32.0
 	github.com/opencontainers/go-digest v1.0.0
 	github.com/opencontainers/image-spec v1.1.0
 	github.com/opencontainers/runc v1.1.12
@@ -34,13 +34,13 @@ require (
 	github.com/stretchr/testify v1.9.0
 	github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635
 	go.etcd.io/bbolt v1.3.9
-	golang.org/x/crypto v0.21.0
-	golang.org/x/exp v0.0.0-20240222234643-814bf88cf225
-	golang.org/x/sync v0.6.0
-	golang.org/x/sys v0.18.0
-	golang.org/x/term v0.18.0
+	golang.org/x/crypto v0.22.0
+	golang.org/x/exp v0.0.0-20240325151524-a685a6edb6d8
+	golang.org/x/sync v0.7.0
+	golang.org/x/sys v0.19.0
+	golang.org/x/term v0.19.0
 	sigs.k8s.io/yaml v1.4.0
-	tags.cncf.io/container-device-interface v0.6.2
+	tags.cncf.io/container-device-interface v0.7.1
 )
 
 require (
@@ -100,7 +100,7 @@ require (
 	github.com/jinzhu/copier v0.4.0 // indirect
 	github.com/josharian/intern v1.0.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
-	github.com/klauspost/compress v1.17.7 // indirect
+	github.com/klauspost/compress v1.17.8 // indirect
 	github.com/klauspost/pgzip v1.2.6 // indirect
 	github.com/letsencrypt/boulder v0.0.0-20230907030200-6d76a0f91e1e // indirect
 	github.com/mailru/easyjson v0.7.7 // indirect
@@ -132,7 +132,7 @@ require (
 	github.com/sylabs/sif/v2 v2.15.1 // indirect
 	github.com/tchap/go-patricia/v2 v2.3.1 // indirect
 	github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect
-	github.com/ulikunitz/xz v0.5.11 // indirect
+	github.com/ulikunitz/xz v0.5.12 // indirect
 	github.com/vbatts/tar-split v0.11.5 // indirect
 	github.com/vbauerster/mpb/v8 v8.7.2 // indirect
 	github.com/vishvananda/netlink v1.2.1-beta.2 // indirect
@@ -144,17 +144,17 @@ require (
 	go.opentelemetry.io/otel v1.22.0 // indirect
 	go.opentelemetry.io/otel/metric v1.22.0 // indirect
 	go.opentelemetry.io/otel/trace v1.22.0 // indirect
-	golang.org/x/mod v0.15.0 // indirect
+	golang.org/x/mod v0.16.0 // indirect
 	golang.org/x/net v0.22.0 // indirect
 	golang.org/x/text v0.14.0 // indirect
-	golang.org/x/tools v0.18.0 // indirect
+	golang.org/x/tools v0.19.0 // indirect
 	google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80 // indirect
 	google.golang.org/grpc v1.62.0 // indirect
 	google.golang.org/protobuf v1.33.0 // indirect
 	gopkg.in/go-jose/go-jose.v2 v2.6.3 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
 	k8s.io/klog v1.0.0 // indirect
-	tags.cncf.io/container-device-interface/specs-go v0.6.0 // indirect
+	tags.cncf.io/container-device-interface/specs-go v0.7.0 // indirect
 )
 
 replace
github.com/opencontainers/runtime-spec => github.com/opencontainers/runtime-spec v1.1.0 diff --git a/go.sum b/go.sum index 2d8a3f92fd0..c9f64a63088 100644 --- a/go.sum +++ b/go.sum @@ -58,8 +58,8 @@ github.com/containernetworking/cni v1.1.2 h1:wtRGZVv7olUHMOqouPpn3cXJWpJgM6+EUl3 github.com/containernetworking/cni v1.1.2/go.mod h1:sDpYKmGVENF3s6uvMvGgldDWeG8dMxakj/u+i9ht9vw= github.com/containernetworking/plugins v1.4.0 h1:+w22VPYgk7nQHw7KT92lsRmuToHvb7wwSv9iTbXzzic= github.com/containernetworking/plugins v1.4.0/go.mod h1:UYhcOyjefnrQvKvmmyEKsUA+M9Nfn7tqULPpH0Pkcj0= -github.com/containers/common v0.58.1-0.20240318131753-6f1c96f53a78 h1:EObLO2IA9zAuRp2Qtf6/siVjxlxE2J8KvtS4H2Q47bc= -github.com/containers/common v0.58.1-0.20240318131753-6f1c96f53a78/go.mod h1:axvr7QuM6sjs/nwukYpNvSgjE5/T2i6l+UhsKLgOvuI= +github.com/containers/common v0.58.1-0.20240410144442-8db59bf2fcce h1:mt7/jkY4a+q8SHLE85v7D4XoWX0KGC3tAfBZ7Mfpqos= +github.com/containers/common v0.58.1-0.20240410144442-8db59bf2fcce/go.mod h1:wxQdMk9Wuu178UQLJonrQlBCw940zof77Xm60NmDmlI= github.com/containers/image/v5 v5.30.0 h1:CmHeSwI6W2kTRWnUsxATDFY5TEX4b58gPkaQcEyrLIA= github.com/containers/image/v5 v5.30.0/go.mod h1:gSD8MVOyqBspc0ynLsuiMR9qmt8UQ4jpVImjmK0uXfk= github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA= @@ -68,8 +68,8 @@ github.com/containers/luksy v0.0.0-20240312134643-3d2cf0e19c84 h1:ZyHhVKzwWxHEph github.com/containers/luksy v0.0.0-20240312134643-3d2cf0e19c84/go.mod h1:+idJYfuH2iLhmk8wZEavPACGMRTnZ/NK/5D74U67Um4= github.com/containers/ocicrypt v1.1.10 h1:r7UR6o8+lyhkEywetubUUgcKFjOWOaWz8cEBrCPX0ic= github.com/containers/ocicrypt v1.1.10/go.mod h1:YfzSSr06PTHQwSTUKqDSjish9BeW1E4HUmreluQcMd8= -github.com/containers/storage v1.53.0 h1:VSES3C/u1pxjTJIXvLrSmyP7OBtDky04oGu07UvdTEA= -github.com/containers/storage v1.53.0/go.mod h1:pujcoOSc+upx15Jirdkebhtd8uJiLwbSd/mYT6zDJK8= +github.com/containers/storage v1.53.1-0.20240411065836-1fd0dc1d20e5 h1:owLaLUu/RKf0x62tFm5ZQjU21oRUUIWTRMpZ0zkIt3E= +github.com/containers/storage v1.53.1-0.20240411065836-1fd0dc1d20e5/go.mod h1:P4tgJNR/o42wmg+9WZtoJtOJvmZKu2dwzFQggcH9aQw= github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= @@ -261,8 +261,8 @@ github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0Lh github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/compress v1.17.7 h1:ehO88t2UGzQK66LMdE8tibEd1ErmzZjNEqWkjLAKQQg= -github.com/klauspost/compress v1.17.7/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= +github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU= +github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU= github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -334,8 +334,8 @@ github.com/onsi/ginkgo/v2 v2.17.1/go.mod 
h1:llBI3WDLL9Z6taip6f33H76YcWtJv+7R3Hig github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= -github.com/onsi/gomega v1.31.1 h1:KYppCUK+bUgAZwHOu7EXVBKyQA6ILvOESHkn/tgoqvo= -github.com/onsi/gomega v1.31.1/go.mod h1:y40C95dwAD1Nz36SsEnxvfFe8FFfNxzI5eJ0EYGyAy0= +github.com/onsi/gomega v1.32.0 h1:JRYU78fJ1LPxlckP6Txi/EYqJvjtMrDC04/MM5XRHPk= +github.com/onsi/gomega v1.32.0/go.mod h1:a4x4gW6Pz2yK1MAmvluYme5lvYTn61afQ2ETw/8n4Lg= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= @@ -423,8 +423,8 @@ github.com/tchap/go-patricia/v2 v2.3.1/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwD github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0= github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHTNJT99C5fTAzDz0ud328OgXz+gierycbcIx2fRs= -github.com/ulikunitz/xz v0.5.11 h1:kpFauv27b6ynzBNT/Xy+1k+fK4WswhN/6PN5WhFAGw8= -github.com/ulikunitz/xz v0.5.11/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= +github.com/ulikunitz/xz v0.5.12 h1:37Nm15o69RwBkXM0J6A5OlE67RZTfzUxTj8fB3dfcsc= +github.com/ulikunitz/xz v0.5.12/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/vbatts/tar-split v0.11.5 h1:3bHCTIheBm1qFTcgh9oPu+nNBtX+XJIupG/vacinCts= github.com/vbatts/tar-split v0.11.5/go.mod h1:yZbwRsSeGjusneWgA781EKej9HF8vme8okylkAeNKLk= github.com/vbauerster/mpb/v8 v8.7.2 h1:SMJtxhNho1MV3OuFgS1DAzhANN1Ejc5Ct+0iSaIkB14= @@ -478,11 +478,11 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= -golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= -golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= +golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30= +golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 h1:LfspQV/FYTatPTr/3HzIcmiUFH7PGP+OQ6mgDYo3yuQ= -golang.org/x/exp v0.0.0-20240222234643-814bf88cf225/go.mod h1:CxmFvTBINI24O/j8iY7H1xHzx2i4OsyguNBmN/uPtqc= +golang.org/x/exp v0.0.0-20240325151524-a685a6edb6d8 h1:aAcj0Da7eBAtrTp03QXWvm88pSyOt+UgdZw2BFZ+lEw= +golang.org/x/exp v0.0.0-20240325151524-a685a6edb6d8/go.mod h1:CQ1k9gNrJ50XIzaKCRR2hssIjF07kZFEiieALBM/ARQ= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -490,8 +490,8 @@ golang.org/x/mod 
v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8= -golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.16.0 h1:QX4fJ0Rr5cPQCF7O9lh9Se4pmwfwskqZfq5moyldzic= +golang.org/x/mod v0.16.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -523,8 +523,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -556,15 +556,15 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= -golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= +golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= -golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= -golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= +golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q= +golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod 
h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -590,8 +590,8 @@ golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.18.0 h1:k8NLag8AGHnn+PHbl7g43CtqZAwG60vZkLqgyZgIHgQ= -golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg= +golang.org/x/tools v0.19.0 h1:tfGCXNR1OsFG+sVdLAitlpjAvD/I6dHDKnYrpEZUHkw= +golang.org/x/tools v0.19.0/go.mod h1:qoJWxmGSIBmAeriMx19ogtrEPrGtDbPK634QFIcLAhc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -656,7 +656,7 @@ k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= -tags.cncf.io/container-device-interface v0.6.2 h1:dThE6dtp/93ZDGhqaED2Pu374SOeUkBfuvkLuiTdwzg= -tags.cncf.io/container-device-interface v0.6.2/go.mod h1:Shusyhjs1A5Na/kqPVLL0KqnHQHuunol9LFeUNkuGVE= -tags.cncf.io/container-device-interface/specs-go v0.6.0 h1:V+tJJN6dqu8Vym6p+Ru+K5mJ49WL6Aoc5SJFSY0RLsQ= -tags.cncf.io/container-device-interface/specs-go v0.6.0/go.mod h1:hMAwAbMZyBLdmYqWgYcKH0F/yctNpV3P35f+/088A80= +tags.cncf.io/container-device-interface v0.7.1 h1:MATNCbAD1su9U6zwQe5BrQ2vGGp1GBayD70bYaxYCNE= +tags.cncf.io/container-device-interface v0.7.1/go.mod h1:h1JVuOqTQVORp8DziaWKUCDNzAmN+zeCbqbqD30D0ZQ= +tags.cncf.io/container-device-interface/specs-go v0.7.0 h1:w/maMGVeLP6TIQJVYT5pbqTi8SCw/iHZ+n4ignuGHqg= +tags.cncf.io/container-device-interface/specs-go v0.7.0/go.mod h1:hMAwAbMZyBLdmYqWgYcKH0F/yctNpV3P35f+/088A80= diff --git a/run_linux.go b/run_linux.go index b222be707db..0054d7e0cc9 100644 --- a/run_linux.go +++ b/run_linux.go @@ -69,7 +69,10 @@ func setChildProcess() error { func (b *Builder) cdiSetupDevicesInSpec(deviceSpecs []string, configDir string, spec *specs.Spec) ([]string, error) { leftoverDevices := deviceSpecs - registry := cdi.GetRegistry() + registry, err := cdi.NewCache() + if err != nil { + return nil, fmt.Errorf("creating CDI registry: %w", err) + } var configDirs []string if b.CDIConfigDir != "" { configDirs = append(configDirs, b.CDIConfigDir) diff --git a/tests/authenticate.bats b/tests/authenticate.bats index fe276eca165..a86cf312956 100644 --- a/tests/authenticate.bats +++ b/tests/authenticate.bats @@ -32,10 +32,10 @@ load helpers run_buildah 0 login --cert-dir $REGISTRY_DIR --username testuserfoo --password testpassword localhost:$REGISTRY_PORT run_buildah 125 logout --authfile /tmp/nonexistent localhost:$REGISTRY_PORT - expect_output "Error: credential file is not accessible: stat /tmp/nonexistent: no such file or directory" + assert "$output" =~ "Error: credential file is not accessible: (faccessat|stat) /tmp/nonexistent: no such file or directory" run_buildah 125 logout --compat-auth-file /tmp/nonexistent 
localhost:$REGISTRY_PORT - expect_output "Error: credential file is not accessible: stat /tmp/nonexistent: no such file or directory" + assert "$output" =~ "Error: credential file is not accessible: (faccessat|stat) /tmp/nonexistent: no such file or directory" run_buildah 0 logout localhost:$REGISTRY_PORT } diff --git a/tests/bud.bats b/tests/bud.bats index 8d0981698b8..ec0837c50d7 100644 --- a/tests/bud.bats +++ b/tests/bud.bats @@ -4213,7 +4213,7 @@ _EOF @test "bud with Containerfile should fail with nonexistent authfile" { target=alpine-image run_buildah 125 build --authfile /tmp/nonexistent $WITH_POLICY_JSON -t ${target} $BUDFILES/containerfile - expect_output "Error: credential file is not accessible: stat /tmp/nonexistent: no such file or directory" + assert "$output" =~ "Error: credential file is not accessible: (faccessat|stat) /tmp/nonexistent: no such file or directory" } diff --git a/tests/from.bats b/tests/from.bats index 79654631e92..dfd4486c603 100644 --- a/tests/from.bats +++ b/tests/from.bats @@ -394,7 +394,7 @@ load helpers @test "from with nonexistent authfile: fails" { run_buildah 125 from --authfile /no/such/file --pull $WITH_POLICY_JSON alpine - expect_output "Error: credential file is not accessible: stat /no/such/file: no such file or directory" + assert "$output" =~ "Error: credential file is not accessible: (faccessat|stat) /no/such/file: no such file or directory" } @test "from --pull-always: emits 'Getting' even if image is cached" { diff --git a/vendor/github.com/containers/common/libimage/load.go b/vendor/github.com/containers/common/libimage/load.go index 2be6a1b64d0..0327cbca38b 100644 --- a/vendor/github.com/containers/common/libimage/load.go +++ b/vendor/github.com/containers/common/libimage/load.go @@ -6,14 +6,15 @@ import ( "context" "errors" "fmt" - "os" "time" dirTransport "github.com/containers/image/v5/directory" dockerArchiveTransport "github.com/containers/image/v5/docker/archive" ociArchiveTransport "github.com/containers/image/v5/oci/archive" ociTransport "github.com/containers/image/v5/oci/layout" + "github.com/containers/image/v5/transports" "github.com/containers/image/v5/types" + "github.com/containers/storage/pkg/fileutils" "github.com/sirupsen/logrus" ) @@ -21,6 +22,30 @@ type LoadOptions struct { CopyOptions } +// doLoadReference does the heavy lifting for LoadReference() and Load(), +// without adding debug messages or handling defaults. +func (r *Runtime) doLoadReference(ctx context.Context, ref types.ImageReference, options *LoadOptions) (images []string, transportName string, err error) { + transportName = ref.Transport().Name() + switch transportName { + case dockerArchiveTransport.Transport.Name(): + images, err = r.loadMultiImageDockerArchive(ctx, ref, &options.CopyOptions) + default: + images, err = r.copyFromDefault(ctx, ref, &options.CopyOptions) + } + return images, ref.Transport().Name(), err +} + +// LoadReference loads one or more images from the specified location. +func (r *Runtime) LoadReference(ctx context.Context, ref types.ImageReference, options *LoadOptions) ([]string, error) { + logrus.Debugf("Loading image from %q", transports.ImageName(ref)) + + if options == nil { + options = &LoadOptions{} + } + images, _, err := r.doLoadReference(ctx, ref, options) + return images, err +} + // Load loads one or more images (depending on the transport) from the // specified path. The path may point to an image the following transports: // oci, oci-archive, dir, docker-archive. 
@@ -41,8 +66,7 @@ func (r *Runtime) Load(ctx context.Context, path string, options *LoadOptions) ( if err != nil { return nil, ociTransport.Transport.Name(), err } - images, err := r.copyFromDefault(ctx, ref, &options.CopyOptions) - return images, ociTransport.Transport.Name(), err + return r.doLoadReference(ctx, ref, options) }, // OCI-ARCHIVE @@ -52,8 +76,7 @@ func (r *Runtime) Load(ctx context.Context, path string, options *LoadOptions) ( if err != nil { return nil, ociArchiveTransport.Transport.Name(), err } - images, err := r.copyFromDefault(ctx, ref, &options.CopyOptions) - return images, ociArchiveTransport.Transport.Name(), err + return r.doLoadReference(ctx, ref, options) }, // DOCKER-ARCHIVE @@ -63,8 +86,7 @@ func (r *Runtime) Load(ctx context.Context, path string, options *LoadOptions) ( if err != nil { return nil, dockerArchiveTransport.Transport.Name(), err } - images, err := r.loadMultiImageDockerArchive(ctx, ref, &options.CopyOptions) - return images, dockerArchiveTransport.Transport.Name(), err + return r.doLoadReference(ctx, ref, options) }, // DIR @@ -74,8 +96,7 @@ func (r *Runtime) Load(ctx context.Context, path string, options *LoadOptions) ( if err != nil { return nil, dirTransport.Transport.Name(), err } - images, err := r.copyFromDefault(ctx, ref, &options.CopyOptions) - return images, dirTransport.Transport.Name(), err + return r.doLoadReference(ctx, ref, options) }, } { loadedImages, transportName, err := f() @@ -120,7 +141,7 @@ func (r *Runtime) loadMultiImageDockerArchive(ctx context.Context, ref types.Ima // syntax to reference an image within the archive was used, so we // should. path := ref.StringWithinTransport() - if _, err := os.Stat(path); err != nil { + if err := fileutils.Exists(path); err != nil { return r.copyFromDockerArchive(ctx, ref, options) } diff --git a/vendor/github.com/containers/common/libimage/manifest_list.go b/vendor/github.com/containers/common/libimage/manifest_list.go index d7ee5e6b67e..14d00685cba 100644 --- a/vendor/github.com/containers/common/libimage/manifest_list.go +++ b/vendor/github.com/containers/common/libimage/manifest_list.go @@ -314,6 +314,29 @@ type ManifestListAddOptions struct { Password string } +func (m *ManifestList) parseNameToExtantReference(ctx context.Context, name string, manifestList bool, what string) (types.ImageReference, error) { + ref, err := alltransports.ParseImageName(name) + if err != nil { + withDocker := fmt.Sprintf("%s://%s", docker.Transport.Name(), name) + ref, err = alltransports.ParseImageName(withDocker) + if err == nil { + var src types.ImageSource + src, err = ref.NewImageSource(ctx, nil) + if err == nil { + src.Close() + } + } + if err != nil { + image, _, lookupErr := m.image.runtime.LookupImage(name, &LookupImageOptions{ManifestList: manifestList}) + if lookupErr != nil { + return nil, fmt.Errorf("locating %s: %q: %w; %q: %w", what, withDocker, err, name, lookupErr) + } + ref, err = image.storageReference, nil + } + } + return ref, err +} + // Add adds one or more manifests to the manifest list and returns the digest // of the added instance. 
func (m *ManifestList) Add(ctx context.Context, name string, options *ManifestListAddOptions) (digest.Digest, error) { @@ -321,13 +344,9 @@ func (m *ManifestList) Add(ctx context.Context, name string, options *ManifestLi options = &ManifestListAddOptions{} } - ref, err := alltransports.ParseImageName(name) + ref, err := m.parseNameToExtantReference(ctx, name, false, "image to add to manifest list") if err != nil { - withDocker := fmt.Sprintf("%s://%s", docker.Transport.Name(), name) - ref, err = alltransports.ParseImageName(withDocker) - if err != nil { - return "", err - } + return "", err } // Now massage in the copy-related options into the system context. @@ -428,17 +447,9 @@ func (m *ManifestList) AddArtifact(ctx context.Context, options *ManifestListAdd opts.LayerMediaType = &options.LayerType } if options.Subject != "" { - ref, err := alltransports.ParseImageName(options.Subject) + ref, err := m.parseNameToExtantReference(ctx, options.Subject, true, "subject for artifact manifest") if err != nil { - withDocker := fmt.Sprintf("%s://%s", docker.Transport.Name(), options.Subject) - ref, err = alltransports.ParseImageName(withDocker) - if err != nil { - image, _, err := m.image.runtime.LookupImage(options.Subject, &LookupImageOptions{ManifestList: true}) - if err != nil { - return "", fmt.Errorf("locating subject for artifact manifest: %w", err) - } - ref = image.storageReference - } + return "", err } opts.SubjectReference = ref } @@ -541,17 +552,9 @@ func (m *ManifestList) AnnotateInstance(d digest.Digest, options *ManifestListAn } } if options.Subject != "" { - ref, err := alltransports.ParseImageName(options.Subject) + ref, err := m.parseNameToExtantReference(ctx, options.Subject, true, "subject for image index") if err != nil { - withDocker := fmt.Sprintf("%s://%s", docker.Transport.Name(), options.Subject) - ref, err = alltransports.ParseImageName(withDocker) - if err != nil { - image, _, err := m.image.runtime.LookupImage(options.Subject, &LookupImageOptions{ManifestList: true}) - if err != nil { - return fmt.Errorf("locating subject for image index: %w", err) - } - ref = image.storageReference - } + return err } src, err := ref.NewImageSource(ctx, &m.image.runtime.systemContext) if err != nil { diff --git a/vendor/github.com/containers/common/libimage/manifests/manifests.go b/vendor/github.com/containers/common/libimage/manifests/manifests.go index 78349b50340..70410508efa 100644 --- a/vendor/github.com/containers/common/libimage/manifests/manifests.go +++ b/vendor/github.com/containers/common/libimage/manifests/manifests.go @@ -32,6 +32,7 @@ import ( "github.com/containers/image/v5/transports/alltransports" "github.com/containers/image/v5/types" "github.com/containers/storage" + "github.com/containers/storage/pkg/fileutils" "github.com/containers/storage/pkg/ioutils" "github.com/containers/storage/pkg/lockfile" digest "github.com/opencontainers/go-digest" @@ -330,7 +331,7 @@ func (l *list) Reference(store storage.Store, multiple cp.ImageListSelection, in return nil, fmt.Errorf(`internal error: no file or blob with artifact "config" or "layer" digest %q recorded`, referencedBlobDigest) } expectedLayerBlobPath := filepath.Join(blobsDir, referencedBlobDigest.Encoded()) - if _, err := os.Lstat(expectedLayerBlobPath); err == nil { + if err := fileutils.Lexists(expectedLayerBlobPath); err == nil { // did this one already continue } else if knownFile { @@ -495,6 +496,14 @@ func prepareAddWithCompression(variants []string) ([]cp.OptionCompressionVariant return res, nil } +func 
mapToSlice(m map[string]string) []string { + slice := make([]string, 0, len(m)) + for key, value := range m { + slice = append(slice, key+"="+value) + } + return slice +} + // Add adds information about the specified image to the list, computing the // image's manifest's digest, retrieving OS and architecture information from // the image's configuration, and recording the image's reference so that it @@ -516,6 +525,7 @@ func (l *list) Add(ctx context.Context, sys *types.SystemContext, ref types.Imag Size int64 ConfigInfo types.BlobInfo ArtifactType string + URLs []string } var instanceInfos []instanceInfo var manifestDigest digest.Digest @@ -547,6 +557,8 @@ func (l *list) Add(ctx context.Context, sys *types.SystemContext, ref types.Imag OSFeatures: append([]string{}, platform.OSFeatures...), Size: instance.Size, ArtifactType: instance.ArtifactType, + Annotations: mapToSlice(instance.Annotations), + URLs: instance.URLs, } instanceInfos = append(instanceInfos, instanceInfo) } @@ -578,6 +590,8 @@ func (l *list) Add(ctx context.Context, sys *types.SystemContext, ref types.Imag OSFeatures: append([]string{}, platform.OSFeatures...), Size: instance.Size, ArtifactType: instance.ArtifactType, + Annotations: mapToSlice(instance.Annotations), + URLs: instance.URLs, } instanceInfos = append(instanceInfos, instanceInfo) added = true @@ -649,6 +663,9 @@ func (l *list) Add(ctx context.Context, sys *types.SystemContext, ref types.Imag if err != nil { return "", fmt.Errorf("adding instance with digest %q: %w", *instanceInfo.instanceDigest, err) } + if err = l.List.SetURLs(*instanceInfo.instanceDigest, instanceInfo.URLs); err != nil { + return "", fmt.Errorf("setting URLs for instance with digest %q: %w", *instanceInfo.instanceDigest, err) + } if _, ok := l.instances[*instanceInfo.instanceDigest]; !ok { l.instances[*instanceInfo.instanceDigest] = transports.ImageName(ref) } diff --git a/vendor/github.com/containers/common/libimage/pull.go b/vendor/github.com/containers/common/libimage/pull.go index 6c8d87c57eb..e872b1b80fd 100644 --- a/vendor/github.com/containers/common/libimage/pull.go +++ b/vendor/github.com/containers/common/libimage/pull.go @@ -240,6 +240,15 @@ func (r *Runtime) copyFromDefault(ctx context.Context, ref types.ImageReference, // Figure out a name for the storage destination. var storageName, imageName string switch ref.Transport().Name() { + case registryTransport.Transport.Name(): + // Normalize to docker.io if needed (see containers/podman/issues/10998). + named, err := reference.ParseNormalizedNamed(strings.TrimLeft(ref.StringWithinTransport(), ":/")) + if err != nil { + return nil, err + } + imageName = named.String() + storageName = imageName + case dockerDaemonTransport.Transport.Name(): // Normalize to docker.io if needed (see containers/podman/issues/10998). 
named, err := reference.ParseNormalizedNamed(ref.StringWithinTransport()) diff --git a/vendor/github.com/containers/common/libnetwork/cni/cni_types.go b/vendor/github.com/containers/common/libnetwork/cni/cni_types.go index 711535ced8d..81206acf756 100644 --- a/vendor/github.com/containers/common/libnetwork/cni/cni_types.go +++ b/vendor/github.com/containers/common/libnetwork/cni/cni_types.go @@ -4,10 +4,10 @@ package cni import ( "net" - "os" "path/filepath" "github.com/containers/common/libnetwork/types" + "github.com/containers/storage/pkg/fileutils" ) const ( @@ -250,7 +250,7 @@ func newDNSNamePlugin(domainName string) dnsNameConfig { // hasDNSNamePlugin looks to see if the dnsname cni plugin is present func hasDNSNamePlugin(paths []string) bool { for _, p := range paths { - if _, err := os.Stat(filepath.Join(p, "dnsname")); err == nil { + if err := fileutils.Exists(filepath.Join(p, "dnsname")); err == nil { return true } } diff --git a/vendor/github.com/containers/common/libnetwork/cni/network.go b/vendor/github.com/containers/common/libnetwork/cni/network.go index 06b78f67512..94d13f7a0ff 100644 --- a/vendor/github.com/containers/common/libnetwork/cni/network.go +++ b/vendor/github.com/containers/common/libnetwork/cni/network.go @@ -19,6 +19,7 @@ import ( "github.com/containers/common/libnetwork/types" "github.com/containers/common/pkg/config" "github.com/containers/common/pkg/version" + "github.com/containers/storage/pkg/fileutils" "github.com/containers/storage/pkg/lockfile" "github.com/containers/storage/pkg/unshare" "github.com/sirupsen/logrus" @@ -331,7 +332,7 @@ func (n *cniNetwork) NetworkInfo() types.NetworkInfo { if err != nil { logrus.Infof("Failed to get the dnsname plugin version: %v", err) } - if _, err := os.Stat(dnsPath); err == nil { + if err := fileutils.Exists(dnsPath); err == nil { info.DNS = types.DNSNetworkInfo{ Path: dnsPath, Package: dnsPackage, diff --git a/vendor/github.com/containers/common/libnetwork/internal/rootlessnetns/netns_linux.go b/vendor/github.com/containers/common/libnetwork/internal/rootlessnetns/netns_linux.go index 98961935dac..78fe8e32507 100644 --- a/vendor/github.com/containers/common/libnetwork/internal/rootlessnetns/netns_linux.go +++ b/vendor/github.com/containers/common/libnetwork/internal/rootlessnetns/netns_linux.go @@ -16,6 +16,7 @@ import ( "github.com/containers/common/pkg/config" "github.com/containers/common/pkg/netns" "github.com/containers/common/pkg/systemd" + "github.com/containers/storage/pkg/fileutils" "github.com/containers/storage/pkg/homedir" "github.com/containers/storage/pkg/lockfile" "github.com/hashicorp/go-multierror" @@ -100,18 +101,37 @@ func (n *Netns) getOrCreateNetns() (ns.NetNS, bool, error) { nsPath := n.getPath(rootlessNetnsDir) nsRef, err := ns.GetNS(nsPath) if err == nil { - // TODO check if slirp4netns is alive - return nsRef, false, nil - } - logrus.Debugf("Creating rootless network namespace at %q", nsPath) - // We have to create the netns dir again here because it is possible - // that cleanup() removed it. - if err := os.MkdirAll(n.dir, 0o700); err != nil { - return nil, false, wrapError("", err) - } - netns, err := netns.NewNSAtPath(nsPath) - if err != nil { - return nil, false, wrapError("create netns", err) + pidPath := n.getPath(rootlessNetNsConnPidFile) + pid, err := readPidFile(pidPath) + if err == nil { + // quick check if pasta/slirp4netns are still running + err := unix.Kill(pid, 0) + if err == nil { + // All good, return the netns. 
+ return nsRef, false, nil + } + // Print warnings in case things went wrong, we might be able to recover + // but maybe not so make sure to leave some hints so we can figure out what went wrong. + if errors.Is(err, unix.ESRCH) { + logrus.Warn("rootless netns program no longer running, trying to start it again") + } else { + logrus.Warnf("failed to check if rootless netns program is running: %v, trying to start it again", err) + } + } else { + logrus.Warnf("failed to read rootless netns program pid: %v", err) + } + // In case of errors continue and setup the network cmd again. + } else { + logrus.Debugf("Creating rootless network namespace at %q", nsPath) + // We have to create the netns dir again here because it is possible + // that cleanup() removed it. + if err := os.MkdirAll(n.dir, 0o700); err != nil { + return nil, false, wrapError("", err) + } + nsRef, err = netns.NewNSAtPath(nsPath) + if err != nil { + return nil, false, wrapError("create netns", err) + } } switch strings.ToLower(n.config.Network.DefaultRootlessNetworkCmd) { case "", slirp4netns.BinaryName: @@ -121,11 +141,21 @@ func (n *Netns) getOrCreateNetns() (ns.NetNS, bool, error) { default: err = fmt.Errorf("invalid rootless network command %q", n.config.Network.DefaultRootlessNetworkCmd) } - return netns, true, err + // If pasta or slirp4netns fail here we need to get rid of the netns again to not leak it, + // otherwise the next command thinks the netns was successfully setup. + if err != nil { + if nerr := netns.UnmountNS(nsPath); nerr != nil { + logrus.Error(nerr) + } + _ = nsRef.Close() + return nil, false, err + } + + return nsRef, true, nil } func (n *Netns) cleanup() error { - if _, err := os.Stat(n.dir); err != nil { + if err := fileutils.Exists(n.dir); err != nil { if errors.Is(err, fs.ErrNotExist) { // dir does not exists no need for cleanup return nil @@ -165,11 +195,7 @@ func (n *Netns) setupPasta(nsPath string) error { if systemd.RunsOnSystemd() { // Treat these as fatal - if pasta failed to write a PID file something is probably wrong. - pidfile, err := os.ReadFile(pidPath) - if err != nil { - return fmt.Errorf("unable to open pasta PID file: %w", err) - } - pid, err := strconv.Atoi(strings.TrimSpace(string(pidfile))) + pid, err := readPidFile(pidPath) if err != nil { return fmt.Errorf("unable to decode pasta PID: %w", err) } @@ -245,16 +271,12 @@ func (n *Netns) setupSlirp4netns(nsPath string) error { func (n *Netns) cleanupRootlessNetns() error { pidFile := n.getPath(rootlessNetNsConnPidFile) - b, err := os.ReadFile(pidFile) + pid, err := readPidFile(pidFile) if err == nil { - var i int - i, err = strconv.Atoi(strings.TrimSpace(string(b))) - if err == nil { - // kill the slirp process so we do not leak it - err = unix.Kill(i, unix.SIGTERM) - if err == unix.ESRCH { - err = nil - } + // kill the slirp/pasta process so we do not leak it + err = unix.Kill(pid, unix.SIGTERM) + if err == unix.ESRCH { + err = nil } } return err @@ -294,6 +316,13 @@ func (n *Netns) setupMounts() error { return wrapError("create new mount namespace", err) } + // Ensure we mount private in our mountns to prevent accidentally + // overwriting the host mounts in case the default propagation is shared. 
+ err = unix.Mount("", "/", "", unix.MS_PRIVATE|unix.MS_REC, "") + if err != nil { + return wrapError("make tree private in new mount namespace", err) + } + xdgRuntimeDir, err := homedir.GetRuntimeDir() if err != nil { return fmt.Errorf("could not get runtime directory: %w", err) @@ -301,7 +330,7 @@ func (n *Netns) setupMounts() error { newXDGRuntimeDir := n.getPath(xdgRuntimeDir) // 1. Mount the netns into the new run to keep them accessible. // Otherwise cni setup will fail because it cannot access the netns files. - err = mountAndMkdirDest(xdgRuntimeDir, newXDGRuntimeDir, none, unix.MS_BIND|unix.MS_SHARED|unix.MS_REC) + err = mountAndMkdirDest(xdgRuntimeDir, newXDGRuntimeDir, none, unix.MS_BIND|unix.MS_REC) if err != nil { return err } @@ -309,7 +338,7 @@ func (n *Netns) setupMounts() error { // 2. Also keep /run/systemd if it exists. // Many files are symlinked into this dir, for example /dev/log. runSystemd := "/run/systemd" - _, err = os.Stat(runSystemd) + err = fileutils.Exists(runSystemd) if err == nil { newRunSystemd := n.getPath(runSystemd) err = mountAndMkdirDest(runSystemd, newRunSystemd, none, unix.MS_BIND|unix.MS_REC) @@ -448,7 +477,7 @@ func (n *Netns) mountCNIVarDir() error { // while we could always use /var there are cases where a user might store the cni // configs under /var/custom and this would break for { - if _, err := os.Stat(varTarget); err == nil { + if err := fileutils.Exists(varTarget); err == nil { varDir = n.getPath(varTarget) break } @@ -556,15 +585,12 @@ func (n *Netns) Run(lock *lockfile.LockFile, toRun func() error) error { logrus.Errorf("Failed to decrement ref count: %v", err) return inErr } - if count == 0 { + // runInner() already cleans up the netns when it created a new one on errors + // so we only need to do that if there was no error. 
+ if inErr == nil && count == 0 { err = n.cleanup() if err != nil { - err = wrapError("cleanup", err) - if inErr == nil { - return err - } - logrus.Errorf("Failed to cleanup rootless netns: %v", err) - return inErr + return wrapError("cleanup", err) } } @@ -599,3 +625,11 @@ func refCount(dir string, inc int) (int, error) { return currentCount, nil } + +func readPidFile(path string) (int, error) { + b, err := os.ReadFile(path) + if err != nil { + return 0, err + } + return strconv.Atoi(strings.TrimSpace(string(b))) +} diff --git a/vendor/github.com/containers/common/libnetwork/netavark/config.go b/vendor/github.com/containers/common/libnetwork/netavark/config.go index bcd1eaea36e..3a77d3ab2fd 100644 --- a/vendor/github.com/containers/common/libnetwork/netavark/config.go +++ b/vendor/github.com/containers/common/libnetwork/netavark/config.go @@ -376,6 +376,11 @@ func (n *netavarkNetwork) NetworkRemove(nameOrID string) error { return fmt.Errorf("default network %s cannot be removed", n.defaultNetwork) } + // remove the ipam bucket for this network + if err := n.removeNetworkIPAMBucket(network); err != nil { + return err + } + file := filepath.Join(n.networkConfigDir, network.Name+".json") // make sure to not error for ErrNotExist if err := os.Remove(file); err != nil && !errors.Is(err, os.ErrNotExist) { diff --git a/vendor/github.com/containers/common/libnetwork/netavark/ipam.go b/vendor/github.com/containers/common/libnetwork/netavark/ipam.go index b9a48d456a7..d348ada0b29 100644 --- a/vendor/github.com/containers/common/libnetwork/netavark/ipam.go +++ b/vendor/github.com/containers/common/libnetwork/netavark/ipam.go @@ -4,6 +4,7 @@ package netavark import ( "encoding/json" + "errors" "fmt" "net" @@ -357,6 +358,26 @@ func (n *netavarkNetwork) deallocIPs(opts *types.NetworkOptions) error { return err } +func (n *netavarkNetwork) removeNetworkIPAMBucket(network *types.Network) error { + if !requiresIPAMAlloc(network) { + return nil + } + db, err := n.openDB() + if err != nil { + return err + } + defer db.Close() + + return db.Update(func(tx *bbolt.Tx) error { + // Ignore ErrBucketNotFound, can happen if the network never allocated any ips, + // i.e. because no container was started. + if err := tx.DeleteBucket([]byte(network.Name)); err != nil && !errors.Is(err, bbolt.ErrBucketNotFound) { + return err + } + return nil + }) +} + // requiresIPAMAlloc return true when we have to allocate ips for this network // it checks the ipam driver and if subnets are set func requiresIPAMAlloc(network *types.Network) bool { diff --git a/vendor/github.com/containers/common/libnetwork/netavark/network.go b/vendor/github.com/containers/common/libnetwork/netavark/network.go index 9f4ee3135cf..d79fdff43af 100644 --- a/vendor/github.com/containers/common/libnetwork/netavark/network.go +++ b/vendor/github.com/containers/common/libnetwork/netavark/network.go @@ -135,7 +135,11 @@ func NewNetworkInterface(conf *InitConfig) (types.ContainerNetwork, error) { } var netns *rootlessnetns.Netns - if unshare.IsRootless() { + // Do not use unshare.IsRootless() here. We only care if we are running re-exec in the userns, + // IsRootless() also returns true if we are root in a userns which is not what we care about and + // causes issues as this slower more complicated rootless-netns logic should not be used as root. 
+ _, useRootlessNetns := os.LookupEnv(unshare.UsernsEnvName) + if useRootlessNetns { netns, err = rootlessnetns.New(conf.NetworkRunDir, rootlessnetns.Netavark, conf.Config) if err != nil { return nil, err @@ -147,7 +151,7 @@ func NewNetworkInterface(conf *InitConfig) (types.ContainerNetwork, error) { networkRunDir: conf.NetworkRunDir, netavarkBinary: conf.NetavarkBinary, aardvarkBinary: conf.AardvarkBinary, - networkRootless: unshare.IsRootless(), + networkRootless: useRootlessNetns, ipamDBPath: filepath.Join(conf.NetworkRunDir, "ipam.db"), firewallDriver: conf.Config.Network.FirewallDriver, defaultNetwork: defaultNetworkName, diff --git a/vendor/github.com/containers/common/libnetwork/resolvconf/resolv.go b/vendor/github.com/containers/common/libnetwork/resolvconf/resolv.go index 472fb9452c4..0d2b69d2597 100644 --- a/vendor/github.com/containers/common/libnetwork/resolvconf/resolv.go +++ b/vendor/github.com/containers/common/libnetwork/resolvconf/resolv.go @@ -7,6 +7,7 @@ import ( "path/filepath" "strings" + "github.com/containers/storage/pkg/fileutils" "github.com/opencontainers/runtime-spec/specs-go" "github.com/sirupsen/logrus" "golang.org/x/exp/slices" @@ -61,7 +62,7 @@ func getDefaultResolvConf(params *Params) ([]byte, bool, error) { if ns.Path != "" && !strings.HasPrefix(ns.Path, "/proc/") { // check for netns created by "ip netns" path := filepath.Join("/etc/netns", filepath.Base(ns.Path), "resolv.conf") - _, err := os.Stat(path) + err := fileutils.Exists(path) if err == nil { resolveConf = path } diff --git a/vendor/github.com/containers/common/pkg/apparmor/apparmor_linux.go b/vendor/github.com/containers/common/pkg/apparmor/apparmor_linux.go index 5cbb6ba9fc7..bc97a2804a3 100644 --- a/vendor/github.com/containers/common/pkg/apparmor/apparmor_linux.go +++ b/vendor/github.com/containers/common/pkg/apparmor/apparmor_linux.go @@ -16,6 +16,7 @@ import ( "text/template" "github.com/containers/common/pkg/apparmor/internal/supported" + "github.com/containers/storage/pkg/fileutils" "github.com/containers/storage/pkg/unshare" runcaa "github.com/opencontainers/runc/libcontainer/apparmor" "github.com/sirupsen/logrus" @@ -75,7 +76,7 @@ func (p *profileData) generateDefault(apparmorParserPath string, out io.Writer) // macrosExists checks if the passed macro exists. 
func macroExists(m string) bool { - _, err := os.Stat(path.Join(profileDirectory, m)) + err := fileutils.Exists(path.Join(profileDirectory, m)) return err == nil } diff --git a/vendor/github.com/containers/common/pkg/auth/auth.go b/vendor/github.com/containers/common/pkg/auth/auth.go index 3b6a05f0ab9..a3d333a99ad 100644 --- a/vendor/github.com/containers/common/pkg/auth/auth.go +++ b/vendor/github.com/containers/common/pkg/auth/auth.go @@ -16,6 +16,7 @@ import ( "github.com/containers/image/v5/pkg/docker/config" "github.com/containers/image/v5/pkg/sysregistriesv2" "github.com/containers/image/v5/types" + "github.com/containers/storage/pkg/fileutils" "github.com/containers/storage/pkg/homedir" "github.com/sirupsen/logrus" ) @@ -69,7 +70,7 @@ func CheckAuthFile(pathOption string) error { if pathOption == "" { return nil } - if _, err := os.Stat(pathOption); err != nil { + if err := fileutils.Exists(pathOption); err != nil { return fmt.Errorf("credential file is not accessible: %w", err) } return nil diff --git a/vendor/github.com/containers/common/pkg/cgroups/cgroups_linux.go b/vendor/github.com/containers/common/pkg/cgroups/cgroups_linux.go index 3b553692012..717685789b6 100644 --- a/vendor/github.com/containers/common/pkg/cgroups/cgroups_linux.go +++ b/vendor/github.com/containers/common/pkg/cgroups/cgroups_linux.go @@ -17,6 +17,7 @@ import ( "syscall" "time" + "github.com/containers/storage/pkg/fileutils" "github.com/containers/storage/pkg/unshare" systemdDbus "github.com/coreos/go-systemd/v22/dbus" "github.com/godbus/dbus/v5" @@ -367,7 +368,7 @@ func Load(path string) (*CgroupControl, error) { // check that the cgroup exists at least under one controller for name := range handlers { p := control.getCgroupv1Path(name) - if _, err := os.Stat(p); err == nil { + if err := fileutils.Exists(p); err == nil { oneExists = true break } @@ -575,7 +576,7 @@ func readCgroup2MapFile(ctr *CgroupControl, name string) (map[string][]string, e func (c *CgroupControl) createCgroupDirectory(controller string) (bool, error) { cPath := c.getCgroupv1Path(controller) - _, err := os.Stat(cPath) + err := fileutils.Exists(cPath) if err == nil { return false, nil } diff --git a/vendor/github.com/containers/common/pkg/cgroups/utils_linux.go b/vendor/github.com/containers/common/pkg/cgroups/utils_linux.go index be9d11584cc..8cd4a052901 100644 --- a/vendor/github.com/containers/common/pkg/cgroups/utils_linux.go +++ b/vendor/github.com/containers/common/pkg/cgroups/utils_linux.go @@ -13,6 +13,7 @@ import ( "strings" "sync" + "github.com/containers/storage/pkg/fileutils" "github.com/opencontainers/runc/libcontainer/cgroups" "github.com/opencontainers/runc/libcontainer/configs" "github.com/sirupsen/logrus" @@ -235,7 +236,7 @@ func MoveUnderCgroup(cgroup, subtree string, processes []uint32) error { cgroupRoot = filepath.Join(cgroupRoot, "unified") // Ignore the unified mount if it doesn't exist - if _, err := os.Stat(cgroupRoot); err != nil && os.IsNotExist(err) { + if err := fileutils.Exists(cgroupRoot); err != nil && os.IsNotExist(err) { continue } } else if parts[1] != "" { diff --git a/vendor/github.com/containers/common/pkg/config/config.go b/vendor/github.com/containers/common/pkg/config/config.go index 15c91860c18..d8e0646e67c 100644 --- a/vendor/github.com/containers/common/pkg/config/config.go +++ b/vendor/github.com/containers/common/pkg/config/config.go @@ -12,6 +12,7 @@ import ( "github.com/containers/common/internal/attributedstring" "github.com/containers/common/libnetwork/types" 
"github.com/containers/common/pkg/capabilities" + "github.com/containers/storage/pkg/fileutils" "github.com/containers/storage/pkg/unshare" units "github.com/docker/go-units" selinux "github.com/opencontainers/selinux/go-selinux" @@ -328,6 +329,11 @@ type EngineConfig struct { // this slice takes precedence. HooksDir attributedstring.Slice `toml:"hooks_dir,omitempty"` + // Location of CDI configuration files. These define mounts devices and + // other configs according to the CDI spec. In particular this is used + // for GPU passthrough. + CdiSpecDirs attributedstring.Slice `toml:"cdi_spec_dirs,omitempty"` + // ImageBuildFormat (DEPRECATED) indicates the default image format to // building container images. Should use ImageDefaultFormat ImageBuildFormat string `toml:"image_build_format,omitempty"` @@ -710,7 +716,7 @@ func (c *Config) CheckCgroupsAndAdjustConfig() { if hasSession { for _, part := range strings.Split(session, ",") { if strings.HasPrefix(part, "unix:path=") { - _, err := os.Stat(strings.TrimPrefix(part, "unix:path=")) + err := fileutils.Exists(strings.TrimPrefix(part, "unix:path=")) hasSession = err == nil break } @@ -772,10 +778,10 @@ func (m *MachineConfig) URI() string { } func (c *EngineConfig) findRuntime() string { - // Search for crun first followed by runc, kata, runsc + // Search for crun first followed by runc, runj, kata, runsc, ocijail for _, name := range []string{"crun", "runc", "runj", "kata", "runsc", "ocijail"} { for _, v := range c.OCIRuntimes[name] { - if _, err := os.Stat(v); err == nil { + if err := fileutils.Exists(v); err == nil { return name } } @@ -1184,7 +1190,7 @@ func (c *Config) FindInitBinary() (string, error) { return c.Engine.InitPath, nil } // keep old default working to guarantee backwards compat - if _, err := os.Stat(DefaultInitPath); err == nil { + if err := fileutils.Exists(DefaultInitPath); err == nil { return DefaultInitPath, nil } return c.FindHelperBinary(defaultInitName, true) diff --git a/vendor/github.com/containers/common/pkg/config/config_local.go b/vendor/github.com/containers/common/pkg/config/config_local.go index e9826d62ca6..3b8979ce2d4 100644 --- a/vendor/github.com/containers/common/pkg/config/config_local.go +++ b/vendor/github.com/containers/common/pkg/config/config_local.go @@ -9,6 +9,7 @@ import ( "regexp" "strings" + "github.com/containers/storage/pkg/fileutils" units "github.com/docker/go-units" "tags.cncf.io/container-device-interface/pkg/parser" ) @@ -83,7 +84,7 @@ func (c *ContainersConfig) validateTZ() error { for _, paths := range lookupPaths { zonePath := filepath.Join(paths, c.TZ) - if _, err := os.Stat(zonePath); err == nil { + if err := fileutils.Exists(zonePath); err == nil { // found zone information return nil } diff --git a/vendor/github.com/containers/common/pkg/config/containers.conf b/vendor/github.com/containers/common/pkg/config/containers.conf index 6e0044f6d9d..f22cd20df25 100644 --- a/vendor/github.com/containers/common/pkg/config/containers.conf +++ b/vendor/github.com/containers/common/pkg/config/containers.conf @@ -544,6 +544,12 @@ default_sysctls = [ # "/usr/share/containers/oci/hooks.d", #] +# Directories to scan for CDI Spec files. +# +#cdi_spec_dirs = [ +# "/etc/cdi", +#] + # Manifest Type (oci, v2s2, or v2s1) to use when pulling, pushing, building # container images. By default image pulled and pushed match the format of the # source image. Building/committing defaults to OCI. 
diff --git a/vendor/github.com/containers/common/pkg/config/containers.conf-freebsd b/vendor/github.com/containers/common/pkg/config/containers.conf-freebsd index f471e307905..165453b9cd7 100644 --- a/vendor/github.com/containers/common/pkg/config/containers.conf-freebsd +++ b/vendor/github.com/containers/common/pkg/config/containers.conf-freebsd @@ -414,6 +414,12 @@ default_sysctls = [ # "/usr/local/share/containers/oci/hooks.d", #] +# Directories to scan for CDI Spec files. +# +#cdi_spec_dirs = [ +# "/etc/cdi", +#] + # Manifest Type (oci, v2s2, or v2s1) to use when pulling, pushing, building # container images. By default image pulled and pushed match the format of the # source image. Building/committing defaults to OCI. diff --git a/vendor/github.com/containers/common/pkg/config/default.go b/vendor/github.com/containers/common/pkg/config/default.go index b08f16592f4..5faba8c9fd2 100644 --- a/vendor/github.com/containers/common/pkg/config/default.go +++ b/vendor/github.com/containers/common/pkg/config/default.go @@ -13,6 +13,7 @@ import ( nettypes "github.com/containers/common/libnetwork/types" "github.com/containers/common/pkg/apparmor" "github.com/containers/common/pkg/cgroupv2" + "github.com/containers/storage/pkg/fileutils" "github.com/containers/storage/pkg/homedir" "github.com/containers/storage/pkg/unshare" "github.com/containers/storage/types" @@ -74,6 +75,8 @@ var ( ErrInvalidArg = errors.New("invalid argument") // DefaultHooksDirs defines the default hooks directory. DefaultHooksDirs = []string{"/usr/share/containers/oci/hooks.d"} + // DefaultCdiSpecDirs defines the default cdi spec directories. + DefaultCdiSpecDirs = []string{"/etc/cdi"} // DefaultCapabilities is the default for the default_capabilities option in the containers.conf file. DefaultCapabilities = []string{ "CAP_CHOWN", @@ -204,8 +207,8 @@ func defaultConfig() (*Config, error) { } sigPath := filepath.Join(configHome, DefaultRootlessSignaturePolicyPath) defaultEngineConfig.SignaturePolicyPath = sigPath - if _, err := os.Stat(sigPath); err != nil { - if _, err := os.Stat(DefaultSignaturePolicyPath); err == nil { + if err := fileutils.Exists(sigPath); err != nil { + if err := fileutils.Exists(DefaultSignaturePolicyPath); err == nil { defaultEngineConfig.SignaturePolicyPath = DefaultSignaturePolicyPath } } @@ -347,6 +350,7 @@ func defaultEngineConfig() (*EngineConfig, error) { c.HelperBinariesDir.Set(append([]string{additionalHelperBinariesDir}, c.HelperBinariesDir.Get()...)) } c.HooksDir.Set(DefaultHooksDirs) + c.CdiSpecDirs.Set(DefaultCdiSpecDirs) c.ImageDefaultTransport = _defaultTransport c.ImageVolumeMode = _defaultImageVolumeMode diff --git a/vendor/github.com/containers/common/pkg/config/modules.go b/vendor/github.com/containers/common/pkg/config/modules.go index f21671f6b1d..4f23694b77e 100644 --- a/vendor/github.com/containers/common/pkg/config/modules.go +++ b/vendor/github.com/containers/common/pkg/config/modules.go @@ -2,9 +2,9 @@ package config import ( "fmt" - "os" "path/filepath" + "github.com/containers/storage/pkg/fileutils" "github.com/containers/storage/pkg/homedir" "github.com/containers/storage/pkg/unshare" "github.com/hashicorp/go-multierror" @@ -76,7 +76,7 @@ func ModuleDirectories() ([]string, error) { // Public API for shell completions // Resolve the specified path to a module. 
func resolveModule(path string, dirs []string) (string, error) { if filepath.IsAbs(path) { - _, err := os.Stat(path) + err := fileutils.Exists(path) return path, err } @@ -85,7 +85,7 @@ func resolveModule(path string, dirs []string) (string, error) { var multiErr error for _, d := range dirs { candidate := filepath.Join(d, path) - _, err := os.Stat(candidate) + err := fileutils.Exists(candidate) if err == nil { return candidate, nil } diff --git a/vendor/github.com/containers/common/pkg/config/new.go b/vendor/github.com/containers/common/pkg/config/new.go index fb59473f006..51da47db1f8 100644 --- a/vendor/github.com/containers/common/pkg/config/new.go +++ b/vendor/github.com/containers/common/pkg/config/new.go @@ -11,6 +11,7 @@ import ( "sync" "github.com/BurntSushi/toml" + "github.com/containers/storage/pkg/fileutils" "github.com/sirupsen/logrus" ) @@ -101,7 +102,7 @@ func newLocked(options *Options) (*Config, error) { // The _OVERRIDE variable _must_ always win. That's a contract we need // to honor (for the Podman CI). if path := os.Getenv(containersConfOverrideEnv); path != "" { - if _, err := os.Stat(path); err != nil { + if err := fileutils.Exists(path); err != nil { return nil, fmt.Errorf("%s file: %w", containersConfOverrideEnv, err) } options.additionalConfigs = append(options.additionalConfigs, path) @@ -152,7 +153,7 @@ func NewConfig(userConfigPath string) (*Config, error) { // file settings. func systemConfigs() (configs []string, finalErr error) { if path := os.Getenv(containersConfEnv); path != "" { - if _, err := os.Stat(path); err != nil { + if err := fileutils.Exists(path); err != nil { return nil, fmt.Errorf("%s file: %w", containersConfEnv, err) } return append(configs, path), nil diff --git a/vendor/github.com/containers/common/pkg/config/pull_policy.go b/vendor/github.com/containers/common/pkg/config/pull_policy.go index be6030fdb81..d85d30ffed4 100644 --- a/vendor/github.com/containers/common/pkg/config/pull_policy.go +++ b/vendor/github.com/containers/common/pkg/config/pull_policy.go @@ -2,6 +2,7 @@ package config import ( "fmt" + "strings" ) // PullPolicy determines how and which images are being pulled from a container @@ -73,14 +74,14 @@ func (p PullPolicy) Validate() error { // * "newer" <-> PullPolicyNewer (also "ifnewer") // * "never" <-> PullPolicyNever func ParsePullPolicy(s string) (PullPolicy, error) { - switch s { - case "always", "Always": + switch strings.ToLower(s) { + case "always": return PullPolicyAlways, nil - case "missing", "Missing", "ifnotpresent", "IfNotPresent", "": + case "missing", "ifmissing", "ifnotpresent", "": return PullPolicyMissing, nil - case "newer", "Newer", "ifnewer", "IfNewer": + case "newer", "ifnewer": return PullPolicyNewer, nil - case "never", "Never": + case "never": return PullPolicyNever, nil default: return PullPolicyUnsupported, fmt.Errorf("unsupported pull policy %q", s) diff --git a/vendor/github.com/containers/common/pkg/hooks/1.0.0/hook.go b/vendor/github.com/containers/common/pkg/hooks/1.0.0/hook.go index 71f940a64cb..be78b0cfbd6 100644 --- a/vendor/github.com/containers/common/pkg/hooks/1.0.0/hook.go +++ b/vendor/github.com/containers/common/pkg/hooks/1.0.0/hook.go @@ -5,9 +5,9 @@ import ( "encoding/json" "errors" "fmt" - "os" "regexp" + "github.com/containers/storage/pkg/fileutils" rspec "github.com/opencontainers/runtime-spec/specs-go" ) @@ -44,7 +44,7 @@ func (hook *Hook) Validate(extensionStages []string) (err error) { return errors.New("missing required property: hook.path") } - if _, err := 
os.Stat(hook.Hook.Path); err != nil { + if err := fileutils.Exists(hook.Hook.Path); err != nil { return err } diff --git a/vendor/github.com/containers/common/pkg/manifests/manifests.go b/vendor/github.com/containers/common/pkg/manifests/manifests.go index 30f099a06ea..247846a74b3 100644 --- a/vendor/github.com/containers/common/pkg/manifests/manifests.go +++ b/vendor/github.com/containers/common/pkg/manifests/manifests.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" "os" + "strings" "github.com/containers/common/internal" "github.com/containers/image/v5/manifest" @@ -80,10 +81,18 @@ func Create() List { } } +func sliceToMap(s []string) map[string]string { + m := make(map[string]string, len(s)) + for _, spec := range s { + key, value, _ := strings.Cut(spec, "=") + m[key] = value + } + return m +} + // AddInstance adds an entry for the specified manifest digest, with assorted // additional information specified in parameters, to the list or index. func (l *list) AddInstance(manifestDigest digest.Digest, manifestSize int64, manifestType, osName, architecture, osVersion string, osFeatures []string, variant string, features, annotations []string) error { // nolint:revive - // FIXME: the annotations argument is currently ignored if err := l.Remove(manifestDigest); err != nil && !errors.Is(err, os.ErrNotExist) { return err } @@ -116,10 +125,11 @@ func (l *list) AddInstance(manifestDigest digest.Digest, manifestSize int64, man ociv1platform = nil } l.oci.Manifests = append(l.oci.Manifests, v1.Descriptor{ - MediaType: manifestType, - Size: manifestSize, - Digest: manifestDigest, - Platform: ociv1platform, + MediaType: manifestType, + Size: manifestSize, + Digest: manifestDigest, + Platform: ociv1platform, + Annotations: sliceToMap(annotations), }) return nil diff --git a/vendor/github.com/containers/common/pkg/parse/parse.go b/vendor/github.com/containers/common/pkg/parse/parse.go index 284751e523f..e73e7cbf6bb 100644 --- a/vendor/github.com/containers/common/pkg/parse/parse.go +++ b/vendor/github.com/containers/common/pkg/parse/parse.go @@ -6,10 +6,11 @@ package parse import ( "errors" "fmt" - "os" "path" "path/filepath" "strings" + + "github.com/containers/storage/pkg/fileutils" ) // ValidateVolumeOpts validates a volume's options @@ -175,7 +176,7 @@ func ValidateVolumeHostDir(hostDir string) error { return errors.New("host directory cannot be empty") } if filepath.IsAbs(hostDir) { - if _, err := os.Stat(hostDir); err != nil { + if err := fileutils.Exists(hostDir); err != nil { return err } } diff --git a/vendor/github.com/containers/common/pkg/subscriptions/subscriptions.go b/vendor/github.com/containers/common/pkg/subscriptions/subscriptions.go index 6845914aa28..ded66365bb4 100644 --- a/vendor/github.com/containers/common/pkg/subscriptions/subscriptions.go +++ b/vendor/github.com/containers/common/pkg/subscriptions/subscriptions.go @@ -9,6 +9,7 @@ import ( "strings" "github.com/containers/common/pkg/umask" + "github.com/containers/storage/pkg/fileutils" "github.com/containers/storage/pkg/idtools" rspec "github.com/opencontainers/runtime-spec/specs-go" "github.com/opencontainers/selinux/go-selinux/label" @@ -182,7 +183,7 @@ func MountsWithUIDGID(mountLabel, containerRunDir, mountFile, mountPoint string, mountFiles = append(mountFiles, mountFile) } for _, file := range mountFiles { - if _, err := os.Stat(file); err == nil { + if err := fileutils.Exists(file); err == nil { mounts, err := addSubscriptionsFromMountsFile(file, mountLabel, containerRunDir, uid, gid) if err != nil { logrus.Warnf("Failed 
to mount subscriptions, skipping entry in %s: %v", file, err) @@ -197,7 +198,7 @@ func MountsWithUIDGID(mountLabel, containerRunDir, mountFile, mountPoint string, return subscriptionMounts } // Add FIPS mode subscription if /etc/system-fips exists on the host - _, err := os.Stat("/etc/system-fips") + err := fileutils.Exists("/etc/system-fips") switch { case err == nil: if err := addFIPSModeSubscription(&subscriptionMounts, containerRunDir, mountPoint, mountLabel, uid, gid); err != nil { @@ -240,7 +241,7 @@ func addSubscriptionsFromMountsFile(filePath, mountLabel, containerRunDir string ctrDirOrFileOnHost := filepath.Join(containerRunDir, ctrDirOrFile) // In the event of a restart, don't want to copy subscriptions over again as they already would exist in ctrDirOrFileOnHost - _, err = os.Stat(ctrDirOrFileOnHost) + err = fileutils.Exists(ctrDirOrFileOnHost) if errors.Is(err, os.ErrNotExist) { hostDirOrFile, err = resolveSymbolicLink(hostDirOrFile) if err != nil { @@ -315,7 +316,7 @@ func addSubscriptionsFromMountsFile(filePath, mountLabel, containerRunDir string func addFIPSModeSubscription(mounts *[]rspec.Mount, containerRunDir, mountPoint, mountLabel string, uid, gid int) error { subscriptionsDir := "/run/secrets" ctrDirOnHost := filepath.Join(containerRunDir, subscriptionsDir) - if _, err := os.Stat(ctrDirOnHost); errors.Is(err, os.ErrNotExist) { + if err := fileutils.Exists(ctrDirOnHost); errors.Is(err, os.ErrNotExist) { if err = idtools.MkdirAllAs(ctrDirOnHost, 0o755, uid, gid); err != nil { //nolint return err } @@ -325,7 +326,7 @@ func addFIPSModeSubscription(mounts *[]rspec.Mount, containerRunDir, mountPoint, } fipsFile := filepath.Join(ctrDirOnHost, "system-fips") // In the event of restart, it is possible for the FIPS mode file to already exist - if _, err := os.Stat(fipsFile); errors.Is(err, os.ErrNotExist) { + if err := fileutils.Exists(fipsFile); errors.Is(err, os.ErrNotExist) { file, err := os.Create(fipsFile) if err != nil { return fmt.Errorf("creating system-fips file in container for FIPS mode: %w", err) @@ -346,7 +347,7 @@ func addFIPSModeSubscription(mounts *[]rspec.Mount, containerRunDir, mountPoint, srcBackendDir := "/usr/share/crypto-policies/back-ends/FIPS" destDir := "/etc/crypto-policies/back-ends" srcOnHost := filepath.Join(mountPoint, srcBackendDir) - if _, err := os.Stat(srcOnHost); err != nil { + if err := fileutils.Exists(srcOnHost); err != nil { if errors.Is(err, os.ErrNotExist) { return nil } diff --git a/vendor/github.com/containers/common/pkg/umask/umask.go b/vendor/github.com/containers/common/pkg/umask/umask.go index 93f1d2b3c02..e63a2d7d114 100644 --- a/vendor/github.com/containers/common/pkg/umask/umask.go +++ b/vendor/github.com/containers/common/pkg/umask/umask.go @@ -4,6 +4,8 @@ import ( "fmt" "os" "path/filepath" + + "github.com/containers/storage/pkg/fileutils" ) // MkdirAllIgnoreUmask creates a directory by ignoring the currently set umask. 
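Every conversion in the hunks above and below keeps the caller's control flow intact: fileutils.Exists and fileutils.Lexists return only an error, so err == nil still means the path is present, and os.IsNotExist / errors.Is(err, os.ErrNotExist) keep working unchanged. The sketch below is a minimal illustration of the shape such helpers can take on Unix, assuming a faccessat(2)-based probe; it is not the vendored implementation, which may differ in detail.

//go:build !windows

package fileutils // illustrative sketch only, not the vendored code

import (
	"os"

	"golang.org/x/sys/unix"
)

// Exists reports, through its error value, whether path exists, following a
// trailing symlink. Unlike os.Stat it never has to fill in an os.FileInfo,
// which is wasted work when only an existence check is needed.
func Exists(path string) error {
	// F_OK asks only "is it there?"; the final symlink is resolved.
	if err := unix.Faccessat(unix.AT_FDCWD, path, unix.F_OK, 0); err != nil {
		return &os.PathError{Op: "faccessat", Path: path, Err: err}
	}
	return nil
}

// Lexists is like Exists but does not follow a trailing symlink, so a
// dangling link still counts as existing.
func Lexists(path string) error {
	if err := unix.Faccessat(unix.AT_FDCWD, path, unix.F_OK, unix.AT_SYMLINK_NOFOLLOW); err != nil {
		return &os.PathError{Op: "faccessat", Path: path, Err: err}
	}
	return nil
}

Because the returned error wraps the ENOENT errno in an os.PathError, both os.IsNotExist and errors.Is(err, os.ErrNotExist) behave exactly as they did with the os.Stat form, which is why the call sites above could switch without touching their error handling.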
@@ -13,7 +15,7 @@ func MkdirAllIgnoreUmask(dir string, mode os.FileMode) error { // Find all parent directories which would have been created by MkdirAll for { - if _, err := os.Stat(parent); err == nil { + if err := fileutils.Exists(parent); err == nil { break } else if !os.IsNotExist(err) { return fmt.Errorf("cannot stat %s: %w", dir, err) diff --git a/vendor/github.com/containers/common/pkg/util/util.go b/vendor/github.com/containers/common/pkg/util/util.go index 8229296d798..5a30107c786 100644 --- a/vendor/github.com/containers/common/pkg/util/util.go +++ b/vendor/github.com/containers/common/pkg/util/util.go @@ -7,6 +7,7 @@ import ( "regexp" "time" + "github.com/containers/storage/pkg/fileutils" "github.com/fsnotify/fsnotify" "github.com/sirupsen/logrus" "golang.org/x/exp/slices" @@ -56,7 +57,7 @@ func WaitForFile(path string, chWait chan error, timeout time.Duration) (bool, e case e := <-chWait: return true, e case <-inotifyEvents: - _, err := os.Stat(path) + err := fileutils.Exists(path) if err == nil { return false, nil } @@ -68,7 +69,7 @@ func WaitForFile(path string, chWait chan error, timeout time.Duration) (bool, e // if the inotify watcher could not have been created. It is // also useful when using inotify as if for any reasons we missed // a notification, we won't hang the process. - _, err := os.Stat(path) + err := fileutils.Exists(path) if err == nil { return false, nil } diff --git a/vendor/github.com/containers/common/pkg/version/version.go b/vendor/github.com/containers/common/pkg/version/version.go index 0d830060113..5632ed41956 100644 --- a/vendor/github.com/containers/common/pkg/version/version.go +++ b/vendor/github.com/containers/common/pkg/version/version.go @@ -3,9 +3,10 @@ package version import ( "bytes" "fmt" - "os" "os/exec" "strings" + + "github.com/containers/storage/pkg/fileutils" ) const ( @@ -15,21 +16,35 @@ const ( // Note: This function is copied from containers/podman libpod/util.go // Please see https://github.com/containers/common/pull/1460 func queryPackageVersion(cmdArg ...string) string { + err := fileutils.Exists(cmdArg[0]) + if err != nil { + return "" + } output := UnknownPackage if 1 < len(cmdArg) { cmd := exec.Command(cmdArg[0], cmdArg[1:]...) 
if outp, err := cmd.Output(); err == nil { output = string(outp) - deb := false if cmdArg[0] == "/usr/bin/dlocate" { // can return multiple matches l := strings.Split(output, "\n") output = l[0] - deb = true + r := strings.Split(output, ": ") + regexpFormat := `^..\s` + r[0] + `\s` + cmd = exec.Command(cmdArg[0], "-P", regexpFormat, "-l") + cmd.Env = []string{"COLUMNS=160"} // show entire value + // dlocate always returns exit code 1 for list command + if outp, _ = cmd.Output(); len(outp) > 0 { + lines := strings.Split(string(outp), "\n") + if len(lines) > 1 { + line := lines[len(lines)-2] // trailing newline + f := strings.Fields(line) + if len(f) >= 2 { + return f[1] + "_" + f[2] + } + } + } } else if cmdArg[0] == "/usr/bin/dpkg" { - deb = true - } - if deb { r := strings.Split(output, ": ") queryFormat := `${Package}_${Version}_${Architecture}` cmd = exec.Command("/usr/bin/dpkg-query", "-f", queryFormat, "-W", r[0]) @@ -49,26 +64,40 @@ func queryPackageVersion(cmdArg ...string) string { // Note: This function is copied from containers/podman libpod/util.go // Please see https://github.com/containers/common/pull/1460 func Package(program string) string { // program is full path - _, err := os.Stat(program) + err := fileutils.Exists(program) if err != nil { return UnknownPackage } - packagers := [][]string{ - {"/usr/bin/rpm", "-q", "-f"}, - {"/usr/bin/dlocate", "-F"}, // Debian, Ubuntu (quick) - {"/usr/bin/dpkg", "-S"}, // Debian, Ubuntu (slow) - {"/usr/bin/pacman", "-Qo"}, // Arch - {"/usr/bin/qfile", "-qv"}, // Gentoo (quick) - {"/usr/bin/equery", "b"}, // Gentoo (slow) - {"/sbin/apk", "info", "-W"}, // Alpine - {"/usr/local/sbin/pkg", "which", "-q"}, // FreeBSD + + type Packager struct { + Format string + Command []string + } + packagers := []Packager{ + {"rpm", []string{"/usr/bin/rpm", "-q", "-f"}}, + {"deb", []string{"/usr/bin/dlocate", "-F"}}, // Debian, Ubuntu (quick) + {"deb", []string{"/usr/bin/dpkg", "-S"}}, // Debian, Ubuntu (slow) + {"pacman", []string{"/usr/bin/pacman", "-Qo"}}, // Arch + {"gentoo", []string{"/usr/bin/qfile", "-qv"}}, // Gentoo (quick) + {"gentoo", []string{"/usr/bin/equery", "b"}}, // Gentoo (slow) + {"apk", []string{"/sbin/apk", "info", "-W"}}, // Alpine + {"pkg", []string{"/usr/local/sbin/pkg", "which", "-q"}}, // FreeBSD } - for _, cmd := range packagers { + lastformat := "" + for _, packager := range packagers { + if packager.Format == lastformat { + continue + } + cmd := packager.Command cmd = append(cmd, program) if out := queryPackageVersion(cmd...); out != UnknownPackage { + if out == "" { + continue + } return out } + lastformat = packager.Format } return UnknownPackage } diff --git a/vendor/github.com/containers/storage/.cirrus.yml b/vendor/github.com/containers/storage/.cirrus.yml index 13bc20e7e90..4e314277438 100644 --- a/vendor/github.com/containers/storage/.cirrus.yml +++ b/vendor/github.com/containers/storage/.cirrus.yml @@ -23,7 +23,7 @@ env: # GCE project where images live IMAGE_PROJECT: "libpod-218412" # VM Image built in containers/automation_images - IMAGE_SUFFIX: "c20240102t155643z-f39f38d13" + IMAGE_SUFFIX: "c20240320t153921z-f39f38d13" FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}" DEBIAN_CACHE_IMAGE_NAME: "debian-${IMAGE_SUFFIX}" @@ -116,6 +116,7 @@ debian_testing_task: &debian_testing lint_task: + alias: lint env: CIRRUS_WORKING_DIR: "/go/src/github.com/containers/storage" container: @@ -134,6 +135,7 @@ lint_task: # Update metadata on VM images referenced by this repository state meta_task: + alias: meta container: image: 
"quay.io/libpod/imgts:latest" @@ -156,6 +158,7 @@ meta_task: vendor_task: + alias: vendor container: image: golang modules_cache: @@ -166,13 +169,20 @@ vendor_task: cross_task: + alias: cross container: image: golang:1.20 build_script: make cross -# Represent overall pass/fail status from required dependent tasks +# Status aggregator for all tests. This task simply ensures a defined +# set of tasks all passed, and allows confirming that based on the status +# of this task. success_task: + alias: success + # N/B: The prow merge-bot (tide) is sensitized to this exact name, DO NOT CHANGE IT. + # Ref: https://github.com/openshift/release/pull/49820 + name: "Total Success" depends_on: - lint - fedora_testing diff --git a/vendor/github.com/containers/storage/VERSION b/vendor/github.com/containers/storage/VERSION index 3f4830156cb..3fcaa732e5d 100644 --- a/vendor/github.com/containers/storage/VERSION +++ b/vendor/github.com/containers/storage/VERSION @@ -1 +1 @@ -1.53.0 +1.53.1-dev diff --git a/vendor/github.com/containers/storage/drivers/aufs/aufs.go b/vendor/github.com/containers/storage/drivers/aufs/aufs.go index 0b17662109c..e00314d3fd2 100644 --- a/vendor/github.com/containers/storage/drivers/aufs/aufs.go +++ b/vendor/github.com/containers/storage/drivers/aufs/aufs.go @@ -41,6 +41,7 @@ import ( "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/chrootarchive" "github.com/containers/storage/pkg/directory" + "github.com/containers/storage/pkg/fileutils" "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/locker" mountpk "github.com/containers/storage/pkg/mount" @@ -243,7 +244,7 @@ func (a *Driver) Metadata(id string) (map[string]string, error) { // Exists returns true if the given id is registered with // this driver func (a *Driver) Exists(id string) bool { - if _, err := os.Lstat(path.Join(a.rootPath(), "layers", id)); err != nil { + if err := fileutils.Lexists(path.Join(a.rootPath(), "layers", id)); err != nil { return false } return true @@ -431,7 +432,7 @@ func atomicRemove(source string) error { case err == nil, os.IsNotExist(err): case os.IsExist(err): // Got error saying the target dir already exists, maybe the source doesn't exist due to a previous (failed) remove - if _, e := os.Stat(source); !os.IsNotExist(e) { + if e := fileutils.Exists(source); !os.IsNotExist(e) { return fmt.Errorf("target rename dir '%s' exists but should not, this needs to be manually cleaned up: %w", target, err) } default: diff --git a/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go b/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go index fd93d4e84da..11ae563647e 100644 --- a/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go +++ b/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go @@ -32,6 +32,7 @@ import ( graphdriver "github.com/containers/storage/drivers" "github.com/containers/storage/pkg/directory" + "github.com/containers/storage/pkg/fileutils" "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/mount" "github.com/containers/storage/pkg/parsers" @@ -589,11 +590,11 @@ func (d *Driver) setStorageSize(dir string, driver *Driver) error { // Remove the filesystem with given id. 
func (d *Driver) Remove(id string) error { dir := d.subvolumesDirID(id) - if _, err := os.Stat(dir); err != nil { + if err := fileutils.Exists(dir); err != nil { return err } quotasDir := d.quotasDirID(id) - if _, err := os.Stat(quotasDir); err == nil { + if err := fileutils.Exists(quotasDir); err == nil { if err := os.Remove(quotasDir); err != nil { return err } @@ -669,7 +670,7 @@ func (d *Driver) ReadWriteDiskUsage(id string) (*directory.DiskUsage, error) { // Exists checks if the id exists in the filesystem. func (d *Driver) Exists(id string) bool { dir := d.subvolumesDirID(id) - _, err := os.Stat(dir) + err := fileutils.Exists(dir) return err == nil } diff --git a/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go b/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go index 5d8df8a78ea..6ded90f7f74 100644 --- a/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go +++ b/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go @@ -22,6 +22,7 @@ import ( graphdriver "github.com/containers/storage/drivers" "github.com/containers/storage/pkg/devicemapper" "github.com/containers/storage/pkg/dmesg" + "github.com/containers/storage/pkg/fileutils" "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/loopback" "github.com/containers/storage/pkg/mount" @@ -257,7 +258,7 @@ func (devices *DeviceSet) hasImage(name string) bool { dirname := devices.loopbackDir() filename := path.Join(dirname, name) - _, err := os.Stat(filename) + err := fileutils.Exists(filename) return err == nil } @@ -1192,7 +1193,7 @@ func (devices *DeviceSet) growFS(info *devInfo) error { defer devices.deactivateDevice(info) fsMountPoint := "/run/containers/storage/mnt" - if _, err := os.Stat(fsMountPoint); os.IsNotExist(err) { + if err := fileutils.Exists(fsMountPoint); os.IsNotExist(err) { if err := os.MkdirAll(fsMountPoint, 0o700); err != nil { return err } diff --git a/vendor/github.com/containers/storage/drivers/devmapper/driver.go b/vendor/github.com/containers/storage/drivers/devmapper/driver.go index 8b8a1d17780..b188530318e 100644 --- a/vendor/github.com/containers/storage/drivers/devmapper/driver.go +++ b/vendor/github.com/containers/storage/drivers/devmapper/driver.go @@ -12,6 +12,7 @@ import ( graphdriver "github.com/containers/storage/drivers" "github.com/containers/storage/pkg/devicemapper" "github.com/containers/storage/pkg/directory" + "github.com/containers/storage/pkg/fileutils" "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/locker" "github.com/containers/storage/pkg/mount" @@ -222,7 +223,7 @@ func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) { } idFile := path.Join(mp, "id") - if _, err := os.Stat(idFile); err != nil && os.IsNotExist(err) { + if err := fileutils.Exists(idFile); err != nil && os.IsNotExist(err) { // Create an "id" file with the container/image id in it to help reconstruct this in case // of later problems if err := os.WriteFile(idFile, []byte(id), 0o600); err != nil { diff --git a/vendor/github.com/containers/storage/drivers/driver.go b/vendor/github.com/containers/storage/drivers/driver.go index aa99fdead26..049e295a798 100644 --- a/vendor/github.com/containers/storage/drivers/driver.go +++ b/vendor/github.com/containers/storage/drivers/driver.go @@ -10,6 +10,7 @@ import ( "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/directory" + "github.com/containers/storage/pkg/fileutils" "github.com/containers/storage/pkg/idtools" 
digest "github.com/opencontainers/go-digest" "github.com/sirupsen/logrus" @@ -471,7 +472,7 @@ func ScanPriorDrivers(root string) map[string]bool { for driver := range drivers { p := filepath.Join(root, driver) - if _, err := os.Stat(p); err == nil { + if err := fileutils.Exists(p); err == nil { driversMap[driver] = true } } diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay.go b/vendor/github.com/containers/storage/drivers/overlay/overlay.go index f007aa9437b..f26cf695bde 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/overlay.go +++ b/vendor/github.com/containers/storage/drivers/overlay/overlay.go @@ -24,6 +24,7 @@ import ( "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/chrootarchive" "github.com/containers/storage/pkg/directory" + "github.com/containers/storage/pkg/fileutils" "github.com/containers/storage/pkg/fsutils" "github.com/containers/storage/pkg/idmap" "github.com/containers/storage/pkg/idtools" @@ -574,7 +575,7 @@ func parseOptions(options []string) (*overlayOptions, error) { case "mount_program": logrus.Debugf("overlay: mount_program=%s", val) if val != "" { - _, err := os.Stat(val) + err := fileutils.Exists(val) if err != nil { return nil, fmt.Errorf("overlay: can't stat program %q: %w", val, err) } @@ -676,7 +677,7 @@ func SupportsNativeOverlay(home, runhome string) (bool, error) { } for _, dir := range []string{home, runhome} { - if _, err := os.Stat(dir); err != nil { + if err := fileutils.Exists(dir); err != nil { _ = idtools.MkdirAllAs(dir, 0o700, 0, 0) } } @@ -854,7 +855,7 @@ func (d *Driver) Status() [][2]string { // LowerDir, UpperDir, WorkDir and MergeDir used to store data. func (d *Driver) Metadata(id string) (map[string]string, error) { dir := d.dir(id) - if _, err := os.Stat(dir); err != nil { + if err := fileutils.Exists(dir); err != nil { return nil, err } @@ -1016,7 +1017,7 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, readOnl rootGID = int(st.GID()) } - if _, err := system.Lstat(dir); err == nil { + if err := fileutils.Lexists(dir); err == nil { logrus.Warnf("Trying to create a layer %#v while directory %q already exists; removing it first", id, dir) // Don’t just os.RemoveAll(dir) here; d.Remove also removes the link in linkDir, // so that we can’t end up with two symlinks in linkDir pointing to the same layer. 
@@ -1144,7 +1145,7 @@ func (d *Driver) getLower(parent string) (string, error) { parentDir := d.dir(parent) // Ensure parent exists - if _, err := os.Lstat(parentDir); err != nil { + if err := fileutils.Lexists(parentDir); err != nil { return "", err } @@ -1197,10 +1198,10 @@ func (d *Driver) dir2(id string, useImageStore bool) (string, string, bool) { newpath := path.Join(homedir, id) - if _, err := os.Stat(newpath); err != nil { + if err := fileutils.Exists(newpath); err != nil { for _, p := range d.getAllImageStores() { l := path.Join(p, d.name, id) - _, err = os.Stat(l) + err = fileutils.Exists(l) if err == nil { return l, homedir, true } @@ -1340,7 +1341,7 @@ func (d *Driver) recreateSymlinks() error { linkPath := path.Join(d.home, linkDir, strings.Trim(string(data), "\n")) // Check if the symlink exists, and if it doesn't, create it again with the // name we got from the "link" file - _, err = os.Lstat(linkPath) + err = fileutils.Lexists(linkPath) if err != nil && os.IsNotExist(err) { if err := os.Symlink(path.Join("..", dir.Name(), "diff"), linkPath); err != nil { errs = multierror.Append(errs, err) @@ -1417,7 +1418,7 @@ func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) { func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountOpts) (_ string, retErr error) { dir, _, inAdditionalStore := d.dir2(id, false) - if _, err := os.Stat(dir); err != nil { + if err := fileutils.Exists(dir); err != nil { return "", err } @@ -1528,8 +1529,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO composeFsLayersDir := filepath.Join(dir, "composefs-layers") maybeAddComposefsMount := func(lowerID string, i int, readWrite bool) (string, error) { composefsBlob := d.getComposefsData(lowerID) - _, err = os.Stat(composefsBlob) - if err != nil { + if err := fileutils.Exists(composefsBlob); err != nil { if os.IsNotExist(err) { return "", nil } @@ -1633,11 +1633,11 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO absLowers = append(absLowers, lower) diffN = 1 - _, err = os.Stat(dumbJoin(lower, "..", nameWithSuffix("diff", diffN))) + err = fileutils.Exists(dumbJoin(lower, "..", nameWithSuffix("diff", diffN))) for err == nil { absLowers = append(absLowers, dumbJoin(lower, "..", nameWithSuffix("diff", diffN))) diffN++ - _, err = os.Stat(dumbJoin(lower, "..", nameWithSuffix("diff", diffN))) + err = fileutils.Exists(dumbJoin(lower, "..", nameWithSuffix("diff", diffN))) } } @@ -1660,15 +1660,17 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO return "", err } // if it is in an additional store, do not fail if the directory already exists - if _, err2 := os.Stat(diffDir); err2 != nil { + if err2 := fileutils.Exists(diffDir); err2 != nil { return "", err } } mergedDir := path.Join(dir, "merged") - // Create the driver merged dir - if err := idtools.MkdirAs(mergedDir, 0o700, rootUID, rootGID); err != nil && !os.IsExist(err) { - return "", err + // Attempt to create the merged dir only if it doesn't exist. + if err := fileutils.Exists(mergedDir); err != nil && os.IsNotExist(err) { + if err := idtools.MkdirAs(mergedDir, 0o700, rootUID, rootGID); err != nil && !os.IsExist(err) { + return "", err + } } if count := d.ctr.Increment(mergedDir); count > 1 { return mergedDir, nil @@ -1834,14 +1836,14 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO // Put unmounts the mount path created for the give id. 
func (d *Driver) Put(id string) error { dir, _, inAdditionalStore := d.dir2(id, false) - if _, err := os.Stat(dir); err != nil { + if err := fileutils.Exists(dir); err != nil { return err } mountpoint := path.Join(dir, "merged") if count := d.ctr.Decrement(mountpoint); count > 0 { return nil } - if _, err := os.ReadFile(path.Join(dir, lowerFile)); err != nil && !os.IsNotExist(err) { + if err := fileutils.Exists(path.Join(dir, lowerFile)); err != nil && !os.IsNotExist(err) { return err } @@ -1849,7 +1851,7 @@ func (d *Driver) Put(id string) error { mappedRoot := filepath.Join(d.home, id, "mapped") // It should not happen, but cleanup any mapped mount if it was leaked. - if _, err := os.Stat(mappedRoot); err == nil { + if err := fileutils.Exists(mappedRoot); err == nil { mounts, err := os.ReadDir(mappedRoot) if err == nil { // Go through all of the mapped mounts. @@ -1920,7 +1922,7 @@ func (d *Driver) Put(id string) error { // Exists checks to see if the id is already mounted. func (d *Driver) Exists(id string) bool { - _, err := os.Stat(d.dir(id)) + err := fileutils.Exists(d.dir(id)) return err == nil } @@ -2332,7 +2334,7 @@ func (d *Driver) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMapp } for err == nil { i++ - _, err = os.Stat(nameWithSuffix(diffDir, i)) + err = fileutils.Exists(nameWithSuffix(diffDir, i)) } for i > 0 { @@ -2417,7 +2419,7 @@ func (d *Driver) getAdditionalLayerPath(dgst digest.Digest, ref string) (string, filepath.Join(target, "info"), filepath.Join(target, "blob"), } { - if _, err := os.Stat(p); err != nil { + if err := fileutils.Exists(p); err != nil { wrapped := fmt.Errorf("failed to stat additional layer %q: %w", p, err) return "", fmt.Errorf("%v: %w", wrapped, graphdriver.ErrLayerUnknown) } diff --git a/vendor/github.com/containers/storage/drivers/vfs/driver.go b/vendor/github.com/containers/storage/drivers/vfs/driver.go index 9b552254b9d..db9032117da 100644 --- a/vendor/github.com/containers/storage/drivers/vfs/driver.go +++ b/vendor/github.com/containers/storage/drivers/vfs/driver.go @@ -12,6 +12,7 @@ import ( graphdriver "github.com/containers/storage/drivers" "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/directory" + "github.com/containers/storage/pkg/fileutils" "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/parsers" "github.com/containers/storage/pkg/system" @@ -210,7 +211,7 @@ func (d *Driver) dir2(id string, useImageStore bool) string { } else { homedir = filepath.Join(d.home, "dir", filepath.Base(id)) } - if _, err := os.Stat(homedir); err != nil { + if err := fileutils.Exists(homedir); err != nil { additionalHomes := d.additionalHomes[:] if d.imageStore != "" { additionalHomes = append(additionalHomes, d.imageStore) @@ -269,7 +270,7 @@ func (d *Driver) ReadWriteDiskUsage(id string) (*directory.DiskUsage, error) { // Exists checks to see if the directory exists for the given id. 
func (d *Driver) Exists(id string) bool { - _, err := os.Stat(d.dir(id)) + err := fileutils.Exists(d.dir(id)) return err == nil } diff --git a/vendor/github.com/containers/storage/drivers/windows/windows.go b/vendor/github.com/containers/storage/drivers/windows/windows.go index 8c2dc18aec3..18f90fdc535 100644 --- a/vendor/github.com/containers/storage/drivers/windows/windows.go +++ b/vendor/github.com/containers/storage/drivers/windows/windows.go @@ -26,6 +26,7 @@ import ( graphdriver "github.com/containers/storage/drivers" "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/directory" + "github.com/containers/storage/pkg/fileutils" "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/ioutils" "github.com/containers/storage/pkg/longpath" @@ -231,7 +232,7 @@ func (d *Driver) create(id, parent, mountLabel string, readOnly bool, storageOpt if err != nil { return err } - if _, err := os.Stat(filepath.Join(parentPath, "Files")); err == nil { + if err := fileutils.Exists(filepath.Join(parentPath, "Files")); err == nil { // This is a legitimate parent layer (not the empty "-init" layer), // so include it in the layer chain. layerChain = []string{parentPath} @@ -266,7 +267,7 @@ func (d *Driver) create(id, parent, mountLabel string, readOnly bool, storageOpt } } - if _, err := os.Lstat(d.dir(parent)); err != nil { + if err := fileutils.Lexists(d.dir(parent)); err != nil { if err2 := hcsshim.DestroyLayer(d.info, id); err2 != nil { logrus.Warnf("Failed to DestroyLayer %s: %s", id, err2) } diff --git a/vendor/github.com/containers/storage/pkg/archive/archive.go b/vendor/github.com/containers/storage/pkg/archive/archive.go index 70f76d66d4f..77c9c818c9b 100644 --- a/vendor/github.com/containers/storage/pkg/archive/archive.go +++ b/vendor/github.com/containers/storage/pkg/archive/archive.go @@ -1023,7 +1023,7 @@ loop: // Not the root directory, ensure that the parent directory exists parent := filepath.Dir(hdr.Name) parentPath := filepath.Join(dest, parent) - if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { + if err := fileutils.Lexists(parentPath); err != nil && os.IsNotExist(err) { err = idtools.MkdirAllAndChownNew(parentPath, 0o777, rootIDs) if err != nil { return err diff --git a/vendor/github.com/containers/storage/pkg/archive/changes.go b/vendor/github.com/containers/storage/pkg/archive/changes.go index 01c6f30c2de..4487845497d 100644 --- a/vendor/github.com/containers/storage/pkg/archive/changes.go +++ b/vendor/github.com/containers/storage/pkg/archive/changes.go @@ -13,6 +13,7 @@ import ( "syscall" "time" + "github.com/containers/storage/pkg/fileutils" "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/pools" "github.com/containers/storage/pkg/system" @@ -106,7 +107,7 @@ func aufsDeletedFile(root, path string, fi os.FileInfo) (string, error) { func aufsWhiteoutPresent(root, path string) (bool, error) { f := filepath.Join(filepath.Dir(path), WhiteoutPrefix+filepath.Base(path)) - _, err := os.Stat(filepath.Join(root, f)) + err := fileutils.Exists(filepath.Join(root, f)) if err == nil { return true, nil } diff --git a/vendor/github.com/containers/storage/pkg/archive/copy.go b/vendor/github.com/containers/storage/pkg/archive/copy.go index 55f753bf41f..4d46167d705 100644 --- a/vendor/github.com/containers/storage/pkg/archive/copy.go +++ b/vendor/github.com/containers/storage/pkg/archive/copy.go @@ -8,6 +8,7 @@ import ( "path/filepath" "strings" + "github.com/containers/storage/pkg/fileutils" 
"github.com/sirupsen/logrus" ) @@ -94,7 +95,7 @@ func TarResource(sourceInfo CopyInfo) (content io.ReadCloser, err error) { // items in the resulting tar archive to match the given rebaseName if not "". func TarResourceRebase(sourcePath, rebaseName string) (content io.ReadCloser, err error) { sourcePath = normalizePath(sourcePath) - if _, err = os.Lstat(sourcePath); err != nil { + if err = fileutils.Lexists(sourcePath); err != nil { // Catches the case where the source does not exist or is not a // directory if asserted to be a directory, as this also causes an // error. diff --git a/vendor/github.com/containers/storage/pkg/archive/diff.go b/vendor/github.com/containers/storage/pkg/archive/diff.go index 71355185987..ceaa8b0b799 100644 --- a/vendor/github.com/containers/storage/pkg/archive/diff.go +++ b/vendor/github.com/containers/storage/pkg/archive/diff.go @@ -10,6 +10,7 @@ import ( "runtime" "strings" + "github.com/containers/storage/pkg/fileutils" "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/pools" "github.com/containers/storage/pkg/system" @@ -84,7 +85,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, parent := filepath.Dir(hdr.Name) parentPath := filepath.Join(dest, parent) - if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { + if err := fileutils.Lexists(parentPath); err != nil && os.IsNotExist(err) { err = os.MkdirAll(parentPath, 0o755) if err != nil { return 0, err @@ -130,7 +131,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, if strings.HasPrefix(base, WhiteoutPrefix) { dir := filepath.Dir(path) if base == WhiteoutOpaqueDir { - _, err := os.Lstat(dir) + err := fileutils.Lexists(dir) if err != nil { return 0, err } diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go b/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go index f221a22835c..5ff9f6b5117 100644 --- a/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go +++ b/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go @@ -9,6 +9,7 @@ import ( "sync" "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/fileutils" "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/unshare" ) @@ -76,7 +77,7 @@ func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions rootIDs := idMappings.RootPair() dest = filepath.Clean(dest) - if _, err := os.Stat(dest); os.IsNotExist(err) { + if err := fileutils.Exists(dest); os.IsNotExist(err) { if err := idtools.MkdirAllAndChownNew(dest, 0o755, rootIDs); err != nil { return err } diff --git a/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go b/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go index 1e3ad86d1a1..3393af6685e 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go +++ b/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go @@ -7,6 +7,7 @@ import ( "fmt" "io" "os" + "runtime" "sort" "strconv" "strings" @@ -21,6 +22,7 @@ import ( jsoniter "github.com/json-iterator/go" digest "github.com/opencontainers/go-digest" "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" ) const ( @@ -30,7 +32,7 @@ const ( digestSha256Empty = "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" ) -type metadata struct { +type cacheFile struct { tagLen int digestLen int tags []byte @@ -38,13 +40,22 @@ type metadata struct { } type layer struct { - id string - metadata 
*metadata - target string + id string + cacheFile *cacheFile + target string + // mmapBuffer is nil when the cache file is fully loaded in memory. + // Otherwise it points to a mmap'ed buffer that is referenced by cacheFile.vdata. + mmapBuffer []byte + + // reloadWithMmap is set when the current process generates the cache file, + // and cacheFile reuses the memory buffer used by the generation function. + // Next time the layer cache is used, attempt to reload the file using + // mmap. + reloadWithMmap bool } type layersCache struct { - layers []layer + layers []*layer refs int store storage.Store mutex sync.RWMutex @@ -56,14 +67,29 @@ var ( cache *layersCache ) +func (c *layer) release() { + runtime.SetFinalizer(c, nil) + if c.mmapBuffer != nil { + unix.Munmap(c.mmapBuffer) + } +} + +func layerFinalizer(c *layer) { + c.release() +} + func (c *layersCache) release() { cacheMutex.Lock() defer cacheMutex.Unlock() c.refs-- - if c.refs == 0 { - cache = nil + if c.refs != 0 { + return + } + for _, l := range c.layers { + l.release() } + cache = nil } func getLayersCacheRef(store storage.Store) *layersCache { @@ -91,90 +117,160 @@ func getLayersCache(store storage.Store) (*layersCache, error) { return c, nil } +// loadLayerBigData attempts to load the specified cacheKey from a file and mmap its content. +// If the cache is not backed by a file, then it loads the entire content in memory. +// Returns the cache content, and if mmap'ed, the mmap buffer to Munmap. +func (c *layersCache) loadLayerBigData(layerID, bigDataKey string) ([]byte, []byte, error) { + inputFile, err := c.store.LayerBigData(layerID, bigDataKey) + if err != nil { + return nil, nil, err + } + defer inputFile.Close() + + // if the cache is backed by a file, attempt to mmap it. + if osFile, ok := inputFile.(*os.File); ok { + st, err := osFile.Stat() + if err != nil { + logrus.Warningf("Error stat'ing cache file for layer %q: %v", layerID, err) + goto fallback + } + size := st.Size() + if size == 0 { + logrus.Warningf("Cache file size is zero for layer %q: %v", layerID, err) + goto fallback + } + buf, err := unix.Mmap(int(osFile.Fd()), 0, int(size), unix.PROT_READ, unix.MAP_SHARED) + if err != nil { + logrus.Warningf("Error mmap'ing cache file for layer %q: %v", layerID, err) + goto fallback + } + // best effort advise to the kernel. 
+ _ = unix.Madvise(buf, unix.MADV_RANDOM) + + return buf, buf, nil + } +fallback: + buf, err := io.ReadAll(inputFile) + return buf, nil, err +} + +func (c *layersCache) loadLayerCache(layerID string) (_ *layer, errRet error) { + buffer, mmapBuffer, err := c.loadLayerBigData(layerID, cacheKey) + if err != nil && !errors.Is(err, os.ErrNotExist) { + return nil, err + } + // there is no existing cache to load + if err != nil || buffer == nil { + return nil, nil + } + defer func() { + if errRet != nil && mmapBuffer != nil { + unix.Munmap(mmapBuffer) + } + }() + cacheFile, err := readCacheFileFromMemory(buffer) + if err != nil { + return nil, err + } + return c.createLayer(layerID, cacheFile, mmapBuffer) +} + +func (c *layersCache) createCacheFileFromTOC(layerID string) (*layer, error) { + clFile, err := c.store.LayerBigData(layerID, chunkedLayerDataKey) + if err != nil && !errors.Is(err, os.ErrNotExist) { + return nil, err + } + var lcd chunkedLayerData + if err == nil && clFile != nil { + defer clFile.Close() + cl, err := io.ReadAll(clFile) + if err != nil { + return nil, fmt.Errorf("open manifest file: %w", err) + } + json := jsoniter.ConfigCompatibleWithStandardLibrary + + if err := json.Unmarshal(cl, &lcd); err != nil { + return nil, err + } + } + manifestReader, err := c.store.LayerBigData(layerID, bigDataKey) + if err != nil { + return nil, err + } + defer manifestReader.Close() + + manifest, err := io.ReadAll(manifestReader) + if err != nil { + return nil, fmt.Errorf("read manifest file: %w", err) + } + + cacheFile, err := writeCache(manifest, lcd.Format, layerID, c.store) + if err != nil { + return nil, err + } + l, err := c.createLayer(layerID, cacheFile, nil) + if err != nil { + return nil, err + } + l.reloadWithMmap = true + return l, nil +} + func (c *layersCache) load() error { c.mutex.Lock() defer c.mutex.Unlock() + loadedLayers := make(map[string]*layer) + for _, r := range c.layers { + loadedLayers[r.id] = r + } allLayers, err := c.store.Layers() if err != nil { return err } - existingLayers := make(map[string]string) - for _, r := range c.layers { - existingLayers[r.id] = r.target - } - currentLayers := make(map[string]string) + var newLayers []*layer for _, r := range allLayers { - currentLayers[r.ID] = r.ID - if _, found := existingLayers[r.ID]; found { - continue - } - - bigData, err := c.store.LayerBigData(r.ID, cacheKey) - // if the cache already exists, read and use it - if err == nil { - defer bigData.Close() - metadata, err := readMetadataFromCache(bigData) - if err == nil { - c.addLayer(r.ID, metadata) + // The layer is present in the store and it is already loaded. Attempt to + // re-use it if mmap'ed. + if l, found := loadedLayers[r.ID]; found { + // If the layer is not marked for re-load, move it to newLayers. + if !l.reloadWithMmap { + delete(loadedLayers, r.ID) + newLayers = append(newLayers, l) continue } - logrus.Warningf("Error reading cache file for layer %q: %v", r.ID, err) - } else if !errors.Is(err, os.ErrNotExist) { - return err - } - - var lcd chunkedLayerData - - clFile, err := c.store.LayerBigData(r.ID, chunkedLayerDataKey) - if err != nil && !errors.Is(err, os.ErrNotExist) { - return err - } - if clFile != nil { - cl, err := io.ReadAll(clFile) - if err != nil { - return fmt.Errorf("open manifest file for layer %q: %w", r.ID, err) - } - json := jsoniter.ConfigCompatibleWithStandardLibrary - if err := json.Unmarshal(cl, &lcd); err != nil { - return err - } } - - // otherwise create it from the layer TOC. 
- manifestReader, err := c.store.LayerBigData(r.ID, bigDataKey) + // try to read the existing cache file. + l, err := c.loadLayerCache(r.ID) if err != nil { + logrus.Warningf("Error loading cache file for layer %q: %v", r.ID, err) + } + if l != nil { + newLayers = append(newLayers, l) continue } - defer manifestReader.Close() - - manifest, err := io.ReadAll(manifestReader) + // the cache file is either not present or broken. Try to generate it from the TOC. + l, err = c.createCacheFileFromTOC(r.ID) if err != nil { - return fmt.Errorf("open manifest file for layer %q: %w", r.ID, err) - } - - metadata, err := writeCache(manifest, lcd.Format, r.ID, c.store) - if err == nil { - c.addLayer(r.ID, metadata) + logrus.Warningf("Error creating cache file for layer %q: %v", r.ID, err) } - } - - var newLayers []layer - for _, l := range c.layers { - if _, found := currentLayers[l.id]; found { + if l != nil { newLayers = append(newLayers, l) } } + // The layers that are still in loadedLayers are either stale or fully loaded in memory. Clean them up. + for _, l := range loadedLayers { + l.release() + } c.layers = newLayers - return nil } // calculateHardLinkFingerprint calculates a hash that can be used to verify if a file // is usable for deduplication with hardlinks. // To calculate the digest, it uses the file payload digest, UID, GID, mode and xattrs. -func calculateHardLinkFingerprint(f *internal.FileMetadata) (string, error) { +func calculateHardLinkFingerprint(f *fileMetadata) (string, error) { digester := digest.Canonical.Digester() modeString := fmt.Sprintf("%d:%d:%o", f.UID, f.GID, f.Mode) @@ -214,7 +310,7 @@ func generateFileLocation(path string, offset, len uint64) []byte { // generateTag generates a tag in the form $DIGEST$OFFSET@LEN. // the [OFFSET; LEN] points to the variable length data where the file locations -// are stored. $DIGEST has length digestLen stored in the metadata file header. +// are stored. $DIGEST has length digestLen stored in the cache file file header. 
func generateTag(digest string, offset, len uint64) string { return fmt.Sprintf("%s%.20d@%.20d", digest, offset, len) } @@ -231,13 +327,13 @@ type setBigData interface { // - digest(file.payload)) // - digest(digest(file.payload) + file.UID + file.GID + file.mode + file.xattrs) // - digest(i) for each i in chunks(file payload) -func writeCache(manifest []byte, format graphdriver.DifferOutputFormat, id string, dest setBigData) (*metadata, error) { +func writeCache(manifest []byte, format graphdriver.DifferOutputFormat, id string, dest setBigData) (*cacheFile, error) { var vdata bytes.Buffer tagLen := 0 digestLen := 0 var tagsBuffer bytes.Buffer - toc, err := prepareMetadata(manifest, format) + toc, err := prepareCacheFile(manifest, format) if err != nil { return nil, err } @@ -272,7 +368,6 @@ func writeCache(manifest []byte, format graphdriver.DifferOutputFormat, id strin if _, err := vdata.Write(location); err != nil { return nil, err } - digestLen = len(k.Digest) } if k.ChunkDigest != "" { @@ -369,7 +464,7 @@ func writeCache(manifest []byte, format graphdriver.DifferOutputFormat, id strin logrus.Debugf("Written lookaside cache for layer %q with length %v", id, counter.Count) - return &metadata{ + return &cacheFile{ digestLen: digestLen, tagLen: tagLen, tags: tagsBuffer.Bytes(), @@ -377,7 +472,9 @@ func writeCache(manifest []byte, format graphdriver.DifferOutputFormat, id strin }, nil } -func readMetadataFromCache(bigData io.Reader) (*metadata, error) { +func readCacheFileFromMemory(bigDataBuffer []byte) (*cacheFile, error) { + bigData := bytes.NewReader(bigDataBuffer) + var version, tagLen, digestLen, tagsLen, vdataLen uint64 if err := binary.Read(bigData, binary.LittleEndian, &version); err != nil { return nil, err @@ -403,12 +500,10 @@ func readMetadataFromCache(bigData io.Reader) (*metadata, error) { return nil, err } - vdata := make([]byte, vdataLen) - if _, err := bigData.Read(vdata); err != nil { - return nil, err - } + // retrieve the unread part of the buffer. + vdata := bigDataBuffer[len(bigDataBuffer)-bigData.Len():] - return &metadata{ + return &cacheFile{ tagLen: int(tagLen), digestLen: int(digestLen), tags: tags, @@ -416,7 +511,7 @@ func readMetadataFromCache(bigData io.Reader) (*metadata, error) { }, nil } -func prepareMetadata(manifest []byte, format graphdriver.DifferOutputFormat) ([]*internal.FileMetadata, error) { +func prepareCacheFile(manifest []byte, format graphdriver.DifferOutputFormat) ([]*fileMetadata, error) { toc, err := unmarshalToc(manifest) if err != nil { // ignore errors here. They might be caused by a different manifest format. 
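The lookaside cache written above is, at its core, a sorted array of fixed-width records: each tag is the digest followed by two zero-padded 20-digit numbers (offset and length into vdata), exactly as generateTag formats them, so a lookup can binary-search the raw tags byte slice without decoding anything first. The self-contained snippet below illustrates that layout and the search; it is deliberately simplified (plain strings, short fake digests, no cacheFile struct and no mmap handling) and is not the vendored code.

package main

import (
	"fmt"
	"sort"
	"strings"
)

// Same shape as generateTag above: "<digest><offset>@<length>" with fixed,
// zero-padded widths so every record has the same length.
func generateTag(digest string, offset, length uint64) string {
	return fmt.Sprintf("%s%.20d@%.20d", digest, offset, length)
}

// findTag binary-searches the concatenated, sorted records for a digest and
// returns the whole matching record.
func findTag(digest, tags string, tagLen, digestLen int) (string, bool) {
	if len(digest) != digestLen {
		return "", false
	}
	n := len(tags) / tagLen
	i := sort.Search(n, func(i int) bool {
		return strings.Compare(tags[i*tagLen:i*tagLen+digestLen], digest) >= 0
	})
	if i < n && tags[i*tagLen:i*tagLen+digestLen] == digest {
		return tags[i*tagLen : (i+1)*tagLen], true
	}
	return "", false
}

func main() {
	const digestLen = 8 // the real cache uses full-length sha256 digests
	records := []string{
		generateTag("cccccccc", 192, 32),
		generateTag("aaaaaaaa", 0, 64),
		generateTag("bbbbbbbb", 64, 128),
	}
	sort.Strings(records) // records must be sorted for sort.Search to work
	tags := strings.Join(records, "")
	tagLen := len(records[0])

	if rec, ok := findTag("bbbbbbbb", tags, tagLen, digestLen); ok {
		// In the real cache the two numbers locate the file-location string
		// inside vdata, which after this patch may be a slice of an mmap'ed
		// cache file rather than heap memory.
		fmt.Println("found:", rec)
	}
}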
@@ -424,10 +519,17 @@ func prepareMetadata(manifest []byte, format graphdriver.DifferOutputFormat) ([] return nil, nil //nolint: nilnil } + var entries []fileMetadata + for i := range toc.Entries { + entries = append(entries, fileMetadata{ + FileMetadata: toc.Entries[i], + }) + } + switch format { case graphdriver.DifferOutputFormatDir: case graphdriver.DifferOutputFormatFlat: - toc.Entries, err = makeEntriesFlat(toc.Entries) + entries, err = makeEntriesFlat(entries) if err != nil { return nil, err } @@ -435,19 +537,19 @@ func prepareMetadata(manifest []byte, format graphdriver.DifferOutputFormat) ([] return nil, fmt.Errorf("unknown format %q", format) } - var r []*internal.FileMetadata + var r []*fileMetadata chunkSeen := make(map[string]bool) - for i := range toc.Entries { - d := toc.Entries[i].Digest + for i := range entries { + d := entries[i].Digest if d != "" { - r = append(r, &toc.Entries[i]) + r = append(r, &entries[i]) continue } // chunks do not use hard link dedup so keeping just one candidate is enough cd := toc.Entries[i].ChunkDigest if cd != "" && !chunkSeen[cd] { - r = append(r, &toc.Entries[i]) + r = append(r, &entries[i]) chunkSeen[cd] = true } } @@ -455,41 +557,43 @@ func prepareMetadata(manifest []byte, format graphdriver.DifferOutputFormat) ([] return r, nil } -func (c *layersCache) addLayer(id string, metadata *metadata) error { +func (c *layersCache) createLayer(id string, cacheFile *cacheFile, mmapBuffer []byte) (*layer, error) { target, err := c.store.DifferTarget(id) if err != nil { - return fmt.Errorf("get checkout directory layer %q: %w", id, err) + return nil, fmt.Errorf("get checkout directory layer %q: %w", id, err) } - - l := layer{ - id: id, - metadata: metadata, - target: target, + l := &layer{ + id: id, + cacheFile: cacheFile, + target: target, + mmapBuffer: mmapBuffer, } - c.layers = append(c.layers, l) - return nil + if mmapBuffer != nil { + runtime.SetFinalizer(l, layerFinalizer) + } + return l, nil } func byteSliceAsString(b []byte) string { return *(*string)(unsafe.Pointer(&b)) } -func findTag(digest string, metadata *metadata) (string, uint64, uint64) { - if len(digest) != metadata.digestLen { +func findTag(digest string, cacheFile *cacheFile) (string, uint64, uint64) { + if len(digest) != cacheFile.digestLen { return "", 0, 0 } - nElements := len(metadata.tags) / metadata.tagLen + nElements := len(cacheFile.tags) / cacheFile.tagLen i := sort.Search(nElements, func(i int) bool { - d := byteSliceAsString(metadata.tags[i*metadata.tagLen : i*metadata.tagLen+metadata.digestLen]) + d := byteSliceAsString(cacheFile.tags[i*cacheFile.tagLen : i*cacheFile.tagLen+cacheFile.digestLen]) return strings.Compare(d, digest) >= 0 }) if i < nElements { - d := string(metadata.tags[i*metadata.tagLen : i*metadata.tagLen+len(digest)]) + d := string(cacheFile.tags[i*cacheFile.tagLen : i*cacheFile.tagLen+len(digest)]) if digest == d { - startOff := i*metadata.tagLen + metadata.digestLen - parts := strings.Split(string(metadata.tags[startOff:(i+1)*metadata.tagLen]), "@") + startOff := i*cacheFile.tagLen + cacheFile.digestLen + parts := strings.Split(string(cacheFile.tags[startOff:(i+1)*cacheFile.tagLen]), "@") off, _ := strconv.ParseInt(parts[0], 10, 64) @@ -509,9 +613,9 @@ func (c *layersCache) findDigestInternal(digest string) (string, string, int64, defer c.mutex.RUnlock() for _, layer := range c.layers { - digest, off, tagLen := findTag(digest, layer.metadata) + digest, off, tagLen := findTag(digest, layer.cacheFile) if digest != "" { - position := 
string(layer.metadata.vdata[off : off+tagLen]) + position := string(layer.cacheFile.vdata[off : off+tagLen]) parts := strings.SplitN(position, ":", 3) if len(parts) != 3 { continue @@ -527,7 +631,7 @@ func (c *layersCache) findDigestInternal(digest string) (string, string, int64, // findFileInOtherLayers finds the specified file in other layers. // file is the file to look for. -func (c *layersCache) findFileInOtherLayers(file *internal.FileMetadata, useHardLinks bool) (string, string, error) { +func (c *layersCache) findFileInOtherLayers(file *fileMetadata, useHardLinks bool) (string, string, error) { digest := file.Digest if useHardLinks { var err error @@ -548,45 +652,9 @@ func (c *layersCache) findChunkInOtherLayers(chunk *internal.FileMetadata) (stri } func unmarshalToc(manifest []byte) (*internal.TOC, error) { - var buf bytes.Buffer - count := 0 var toc internal.TOC iter := jsoniter.ParseBytes(jsoniter.ConfigFastest, manifest) - for field := iter.ReadObject(); field != ""; field = iter.ReadObject() { - if strings.ToLower(field) != "entries" { - iter.Skip() - continue - } - for iter.ReadArray() { - for field := iter.ReadObject(); field != ""; field = iter.ReadObject() { - switch strings.ToLower(field) { - case "type", "name", "linkname", "digest", "chunkdigest", "chunktype", "modtime", "accesstime", "changetime": - count += len(iter.ReadStringAsSlice()) - case "xattrs": - for key := iter.ReadObject(); key != ""; key = iter.ReadObject() { - count += len(iter.ReadStringAsSlice()) - } - default: - iter.Skip() - } - } - } - break - } - - buf.Grow(count) - - getString := func(b []byte) string { - from := buf.Len() - buf.Write(b) - to := buf.Len() - return byteSliceAsString(buf.Bytes()[from:to]) - } - - pool := iter.Pool() - pool.ReturnIterator(iter) - iter = pool.BorrowIterator(manifest) for field := iter.ReadObject(); field != ""; field = iter.ReadObject() { if strings.ToLower(field) == "version" { @@ -602,11 +670,11 @@ func unmarshalToc(manifest []byte) (*internal.TOC, error) { for field := iter.ReadObject(); field != ""; field = iter.ReadObject() { switch strings.ToLower(field) { case "type": - m.Type = getString(iter.ReadStringAsSlice()) + m.Type = iter.ReadString() case "name": - m.Name = getString(iter.ReadStringAsSlice()) + m.Name = iter.ReadString() case "linkname": - m.Linkname = getString(iter.ReadStringAsSlice()) + m.Linkname = iter.ReadString() case "mode": m.Mode = iter.ReadInt64() case "size": @@ -616,19 +684,19 @@ func unmarshalToc(manifest []byte) (*internal.TOC, error) { case "gid": m.GID = iter.ReadInt() case "modtime": - time, err := time.Parse(time.RFC3339, byteSliceAsString(iter.ReadStringAsSlice())) + time, err := time.Parse(time.RFC3339, iter.ReadString()) if err != nil { return nil, err } m.ModTime = &time case "accesstime": - time, err := time.Parse(time.RFC3339, byteSliceAsString(iter.ReadStringAsSlice())) + time, err := time.Parse(time.RFC3339, iter.ReadString()) if err != nil { return nil, err } m.AccessTime = &time case "changetime": - time, err := time.Parse(time.RFC3339, byteSliceAsString(iter.ReadStringAsSlice())) + time, err := time.Parse(time.RFC3339, iter.ReadString()) if err != nil { return nil, err } @@ -638,7 +706,7 @@ func unmarshalToc(manifest []byte) (*internal.TOC, error) { case "devminor": m.Devminor = iter.ReadInt64() case "digest": - m.Digest = getString(iter.ReadStringAsSlice()) + m.Digest = iter.ReadString() case "offset": m.Offset = iter.ReadInt64() case "endoffset": @@ -648,14 +716,13 @@ func unmarshalToc(manifest []byte) (*internal.TOC, 
error) { case "chunkoffset": m.ChunkOffset = iter.ReadInt64() case "chunkdigest": - m.ChunkDigest = getString(iter.ReadStringAsSlice()) + m.ChunkDigest = iter.ReadString() case "chunktype": - m.ChunkType = getString(iter.ReadStringAsSlice()) + m.ChunkType = iter.ReadString() case "xattrs": m.Xattrs = make(map[string]string) for key := iter.ReadObject(); key != ""; key = iter.ReadObject() { - value := iter.ReadStringAsSlice() - m.Xattrs[key] = getString(value) + m.Xattrs[key] = iter.ReadString() } default: iter.Skip() @@ -677,6 +744,5 @@ func unmarshalToc(manifest []byte) (*internal.TOC, error) { return nil, fmt.Errorf("unexpected data after manifest") } - toc.StringsBuf = buf return &toc, nil } diff --git a/vendor/github.com/containers/storage/pkg/chunked/internal/compression.go b/vendor/github.com/containers/storage/pkg/chunked/internal/compression.go index caa581efe61..1136d67b80e 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/internal/compression.go +++ b/vendor/github.com/containers/storage/pkg/chunked/internal/compression.go @@ -21,9 +21,6 @@ import ( type TOC struct { Version int `json:"version"` Entries []FileMetadata `json:"entries"` - - // internal: used by unmarshalToc - StringsBuf bytes.Buffer `json:"-"` } type FileMetadata struct { @@ -48,9 +45,6 @@ type FileMetadata struct { ChunkOffset int64 `json:"chunkOffset,omitempty"` ChunkDigest string `json:"chunkDigest,omitempty"` ChunkType string `json:"chunkType,omitempty"` - - // internal: computed by mergeTOCEntries. - Chunks []*FileMetadata `json:"-"` } const ( diff --git a/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go b/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go index f300df3472d..7bbfd7beddd 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go +++ b/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go @@ -58,6 +58,21 @@ const ( copyGoRoutines = 32 ) +// fileMetadata is a wrapper around internal.FileMetadata with additional private fields that +// are not part of the TOC document. +// Type: TypeChunk entries are stored in Chunks, the primary [fileMetadata] entries never use TypeChunk. +type fileMetadata struct { + internal.FileMetadata + + // chunks stores the TypeChunk entries relevant to this entry when FileMetadata.Type == TypeReg. + chunks []*internal.FileMetadata + + // skipSetAttrs is set when the file attributes must not be + // modified, e.g. it is a hard link from a different source, + // or a composefs file. + skipSetAttrs bool +} + type compressedFileType int type chunkedDiffer struct { @@ -138,7 +153,8 @@ func doHardLink(srcFd int, destDirFd int, destBase string) error { return err } -func copyFileContent(srcFd int, destFile string, dirfd int, mode os.FileMode, useHardLinks bool) (*os.File, int64, error) { +func copyFileContent(srcFd int, fileMetadata *fileMetadata, dirfd int, mode os.FileMode, useHardLinks bool) (*os.File, int64, error) { + destFile := fileMetadata.Name src := fmt.Sprintf("/proc/self/fd/%d", srcFd) st, err := os.Stat(src) if err != nil { @@ -156,6 +172,8 @@ func copyFileContent(srcFd int, destFile string, dirfd int, mode os.FileMode, us err := doHardLink(srcFd, int(destDir.Fd()), destBase) if err == nil { + // if the file was deduplicated with a hard link, skip overriding file metadata. + fileMetadata.skipSetAttrs = true return nil, st.Size(), nil } } @@ -354,7 +372,7 @@ func makeCopyBuffer() []byte { // name is the path to the file to copy in source. 
// dirfd is an open file descriptor to the destination root directory. // useHardLinks defines whether the deduplication can be performed using hard links. -func copyFileFromOtherLayer(file *internal.FileMetadata, source string, name string, dirfd int, useHardLinks bool) (bool, *os.File, int64, error) { +func copyFileFromOtherLayer(file *fileMetadata, source string, name string, dirfd int, useHardLinks bool) (bool, *os.File, int64, error) { srcDirfd, err := unix.Open(source, unix.O_RDONLY, 0) if err != nil { return false, nil, 0, fmt.Errorf("open source file: %w", err) @@ -367,7 +385,7 @@ func copyFileFromOtherLayer(file *internal.FileMetadata, source string, name str } defer srcFile.Close() - dstFile, written, err := copyFileContent(int(srcFile.Fd()), file.Name, dirfd, 0, useHardLinks) + dstFile, written, err := copyFileContent(int(srcFile.Fd()), file, dirfd, 0, useHardLinks) if err != nil { return false, nil, 0, fmt.Errorf("copy content to %q: %w", file.Name, err) } @@ -376,7 +394,7 @@ func copyFileFromOtherLayer(file *internal.FileMetadata, source string, name str // canDedupMetadataWithHardLink says whether it is possible to deduplicate file with otherFile. // It checks that the two files have the same UID, GID, file mode and xattrs. -func canDedupMetadataWithHardLink(file *internal.FileMetadata, otherFile *internal.FileMetadata) bool { +func canDedupMetadataWithHardLink(file *fileMetadata, otherFile *fileMetadata) bool { if file.UID != otherFile.UID { return false } @@ -394,7 +412,7 @@ func canDedupMetadataWithHardLink(file *internal.FileMetadata, otherFile *intern // canDedupFileWithHardLink checks if the specified file can be deduplicated by an // open file, given its descriptor and stat data. -func canDedupFileWithHardLink(file *internal.FileMetadata, fd int, s os.FileInfo) bool { +func canDedupFileWithHardLink(file *fileMetadata, fd int, s os.FileInfo) bool { st, ok := s.Sys().(*syscall.Stat_t) if !ok { return false @@ -420,11 +438,13 @@ func canDedupFileWithHardLink(file *internal.FileMetadata, fd int, s os.FileInfo xattrs[x] = string(v) } // fill only the attributes used by canDedupMetadataWithHardLink. - otherFile := internal.FileMetadata{ - UID: int(st.Uid), - GID: int(st.Gid), - Mode: int64(st.Mode), - Xattrs: xattrs, + otherFile := fileMetadata{ + FileMetadata: internal.FileMetadata{ + UID: int(st.Uid), + GID: int(st.Gid), + Mode: int64(st.Mode), + Xattrs: xattrs, + }, } return canDedupMetadataWithHardLink(file, &otherFile) } @@ -434,7 +454,7 @@ func canDedupFileWithHardLink(file *internal.FileMetadata, fd int, s os.FileInfo // ostreeRepos is a list of OSTree repos. // dirfd is an open fd to the destination checkout. // useHardLinks defines whether the deduplication can be performed using hard links. 
-func findFileInOSTreeRepos(file *internal.FileMetadata, ostreeRepos []string, dirfd int, useHardLinks bool) (bool, *os.File, int64, error) { +func findFileInOSTreeRepos(file *fileMetadata, ostreeRepos []string, dirfd int, useHardLinks bool) (bool, *os.File, int64, error) { digest, err := digest.Parse(file.Digest) if err != nil { logrus.Debugf("could not parse digest: %v", err) @@ -467,7 +487,7 @@ func findFileInOSTreeRepos(file *internal.FileMetadata, ostreeRepos []string, di continue } - dstFile, written, err := copyFileContent(fd, file.Name, dirfd, 0, useHardLinks) + dstFile, written, err := copyFileContent(fd, file, dirfd, 0, useHardLinks) if err != nil { logrus.Debugf("could not copyFileContent: %v", err) return false, nil, 0, nil @@ -487,7 +507,7 @@ func findFileInOSTreeRepos(file *internal.FileMetadata, ostreeRepos []string, di // file is the file to look for. // dirfd is an open file descriptor to the checkout root directory. // useHardLinks defines whether the deduplication can be performed using hard links. -func findFileInOtherLayers(cache *layersCache, file *internal.FileMetadata, dirfd int, useHardLinks bool) (bool, *os.File, int64, error) { +func findFileInOtherLayers(cache *layersCache, file *fileMetadata, dirfd int, useHardLinks bool) (bool, *os.File, int64, error) { target, name, err := cache.findFileInOtherLayers(file, useHardLinks) if err != nil || name == "" { return false, nil, 0, err @@ -495,7 +515,7 @@ func findFileInOtherLayers(cache *layersCache, file *internal.FileMetadata, dirf return copyFileFromOtherLayer(file, target, name, dirfd, useHardLinks) } -func maybeDoIDRemap(manifest []internal.FileMetadata, options *archive.TarOptions) error { +func maybeDoIDRemap(manifest []fileMetadata, options *archive.TarOptions) error { if options.ChownOpts == nil && len(options.UIDMaps) == 0 || len(options.GIDMaps) == 0 { return nil } @@ -529,7 +549,7 @@ func mapToSlice(inputMap map[uint32]struct{}) []uint32 { return out } -func collectIDs(entries []internal.FileMetadata) ([]uint32, []uint32) { +func collectIDs(entries []fileMetadata) ([]uint32, []uint32) { uids := make(map[uint32]struct{}) gids := make(map[uint32]struct{}) for _, entry := range entries { @@ -549,7 +569,7 @@ type missingFileChunk struct { Gap int64 Hole bool - File *internal.FileMetadata + File *fileMetadata CompressedSize int64 UncompressedSize int64 @@ -582,7 +602,10 @@ func (o *originFile) OpenFile() (io.ReadCloser, error) { } // setFileAttrs sets the file attributes for file given metadata -func setFileAttrs(dirfd int, file *os.File, mode os.FileMode, metadata *internal.FileMetadata, options *archive.TarOptions, usePath bool) error { +func setFileAttrs(dirfd int, file *os.File, mode os.FileMode, metadata *fileMetadata, options *archive.TarOptions, usePath bool) error { + if metadata.skipSetAttrs { + return nil + } if file == nil || file.Fd() < 0 { return errors.New("invalid file") } @@ -944,14 +967,14 @@ type destinationFile struct { dirfd int file *os.File hash hash.Hash - metadata *internal.FileMetadata + metadata *fileMetadata options *archive.TarOptions skipValidation bool to io.Writer recordFsVerity recordFsVerityFunc } -func openDestinationFile(dirfd int, metadata *internal.FileMetadata, options *archive.TarOptions, skipValidation bool, recordFsVerity recordFsVerityFunc) (*destinationFile, error) { +func openDestinationFile(dirfd int, metadata *fileMetadata, options *archive.TarOptions, skipValidation bool, recordFsVerity recordFsVerityFunc) (*destinationFile, error) { file, err := 
openFileUnderRoot(metadata.Name, dirfd, newFileFlags, 0) if err != nil { return nil, err @@ -1314,7 +1337,7 @@ func (c *chunkedDiffer) retrieveMissingFiles(stream ImageSourceSeekable, dest st return nil } -func safeMkdir(dirfd int, mode os.FileMode, name string, metadata *internal.FileMetadata, options *archive.TarOptions) error { +func safeMkdir(dirfd int, mode os.FileMode, name string, metadata *fileMetadata, options *archive.TarOptions) error { parent := filepath.Dir(name) base := filepath.Base(name) @@ -1343,7 +1366,7 @@ func safeMkdir(dirfd int, mode os.FileMode, name string, metadata *internal.File return setFileAttrs(dirfd, file, mode, metadata, options, false) } -func safeLink(dirfd int, mode os.FileMode, metadata *internal.FileMetadata, options *archive.TarOptions) error { +func safeLink(dirfd int, mode os.FileMode, metadata *fileMetadata, options *archive.TarOptions) error { sourceFile, err := openFileUnderRoot(metadata.Linkname, dirfd, unix.O_PATH|unix.O_RDONLY|unix.O_NOFOLLOW, 0) if err != nil { return err @@ -1385,7 +1408,7 @@ func safeLink(dirfd int, mode os.FileMode, metadata *internal.FileMetadata, opti return setFileAttrs(dirfd, newFile, mode, metadata, options, false) } -func safeSymlink(dirfd int, mode os.FileMode, metadata *internal.FileMetadata, options *archive.TarOptions) error { +func safeSymlink(dirfd int, mode os.FileMode, metadata *fileMetadata, options *archive.TarOptions) error { destDir, destBase := filepath.Dir(metadata.Name), filepath.Base(metadata.Name) destDirFd := dirfd if destDir != "." { @@ -1473,7 +1496,7 @@ type hardLinkToCreate struct { dest string dirfd int mode os.FileMode - metadata *internal.FileMetadata + metadata *fileMetadata } func parseBooleanPullOption(storeOpts *storage.StoreOptions, name string, def bool) bool { @@ -1498,7 +1521,7 @@ func reopenFileReadOnly(f *os.File) (*os.File, error) { return os.NewFile(uintptr(fd), f.Name()), nil } -func (c *chunkedDiffer) findAndCopyFile(dirfd int, r *internal.FileMetadata, copyOptions *findAndCopyFileOptions, mode os.FileMode) (bool, error) { +func (c *chunkedDiffer) findAndCopyFile(dirfd int, r *fileMetadata, copyOptions *findAndCopyFileOptions, mode os.FileMode) (bool, error) { finalizeFile := func(dstFile *os.File) error { if dstFile == nil { return nil @@ -1549,8 +1572,8 @@ func (c *chunkedDiffer) findAndCopyFile(dirfd int, r *internal.FileMetadata, cop return false, nil } -func makeEntriesFlat(mergedEntries []internal.FileMetadata) ([]internal.FileMetadata, error) { - var new []internal.FileMetadata +func makeEntriesFlat(mergedEntries []fileMetadata) ([]fileMetadata, error) { + var new []fileMetadata hashes := make(map[string]string) for i := range mergedEntries { @@ -1572,6 +1595,7 @@ func makeEntriesFlat(mergedEntries []internal.FileMetadata) ([]internal.FileMeta hashes[d] = d mergedEntries[i].Name = fmt.Sprintf("%s/%s", d[0:2], d[2:]) + mergedEntries[i].skipSetAttrs = true new = append(new, mergedEntries[i]) } @@ -1729,13 +1753,12 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff var missingParts []missingPart - output.UIDs, output.GIDs = collectIDs(toc.Entries) - mergedEntries, totalSize, err := c.mergeTocEntries(c.fileType, toc.Entries) if err != nil { return output, err } + output.UIDs, output.GIDs = collectIDs(mergedEntries) output.Size = totalSize if err := maybeDoIDRemap(mergedEntries, options); err != nil { @@ -1789,7 +1812,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff njob int index int mode os.FileMode - 
metadata *internal.FileMetadata + metadata *fileMetadata found bool err error @@ -1961,7 +1984,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff remainingSize := r.Size // the file is missing, attempt to find individual chunks. - for _, chunk := range r.Chunks { + for _, chunk := range r.chunks { compressedSize := int64(chunk.EndOffset - chunk.Offset) size := remainingSize if chunk.ChunkSize > 0 { @@ -2045,7 +2068,7 @@ func mustSkipFile(fileType compressedFileType, e internal.FileMetadata) bool { return false } -func (c *chunkedDiffer) mergeTocEntries(fileType compressedFileType, entries []internal.FileMetadata) ([]internal.FileMetadata, int64, error) { +func (c *chunkedDiffer) mergeTocEntries(fileType compressedFileType, entries []internal.FileMetadata) ([]fileMetadata, int64, error) { var totalFilesSize int64 countNextChunks := func(start int) int { @@ -2069,11 +2092,11 @@ func (c *chunkedDiffer) mergeTocEntries(fileType compressedFileType, entries []i } } - mergedEntries := make([]internal.FileMetadata, size) + mergedEntries := make([]fileMetadata, size) m := 0 for i := 0; i < len(entries); i++ { - e := entries[i] - if mustSkipFile(fileType, e) { + e := fileMetadata{FileMetadata: entries[i]} + if mustSkipFile(fileType, entries[i]) { continue } @@ -2086,12 +2109,12 @@ func (c *chunkedDiffer) mergeTocEntries(fileType compressedFileType, entries []i if e.Type == TypeReg { nChunks := countNextChunks(i + 1) - e.Chunks = make([]*internal.FileMetadata, nChunks+1) + e.chunks = make([]*internal.FileMetadata, nChunks+1) for j := 0; j <= nChunks; j++ { // we need a copy here, otherwise we override the // .Size later copy := entries[i+j] - e.Chunks[j] = © + e.chunks[j] = © e.EndOffset = entries[i+j].EndOffset } i += nChunks @@ -2110,10 +2133,10 @@ func (c *chunkedDiffer) mergeTocEntries(fileType compressedFileType, entries []i } lastChunkOffset := mergedEntries[i].EndOffset - for j := len(mergedEntries[i].Chunks) - 1; j >= 0; j-- { - mergedEntries[i].Chunks[j].EndOffset = lastChunkOffset - mergedEntries[i].Chunks[j].Size = mergedEntries[i].Chunks[j].EndOffset - mergedEntries[i].Chunks[j].Offset - lastChunkOffset = mergedEntries[i].Chunks[j].Offset + for j := len(mergedEntries[i].chunks) - 1; j >= 0; j-- { + mergedEntries[i].chunks[j].EndOffset = lastChunkOffset + mergedEntries[i].chunks[j].Size = mergedEntries[i].chunks[j].EndOffset - mergedEntries[i].chunks[j].Offset + lastChunkOffset = mergedEntries[i].chunks[j].Offset } } return mergedEntries, totalFilesSize, nil diff --git a/vendor/github.com/containers/storage/pkg/fileutils/exists_unix.go b/vendor/github.com/containers/storage/pkg/fileutils/exists_unix.go new file mode 100644 index 00000000000..f3087d7df6d --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/fileutils/exists_unix.go @@ -0,0 +1,34 @@ +//go:build !windows +// +build !windows + +package fileutils + +import ( + "os" + + "golang.org/x/sys/unix" +) + +// Exists checks whether a file or directory exists at the given path. +// If the path is a symlink, the symlink is followed. +func Exists(path string) error { + // It uses unix.Faccessat which is a faster operation compared to os.Stat for + // simply checking the existence of a file. + err := unix.Faccessat(unix.AT_FDCWD, path, unix.F_OK, 0) + if err != nil { + return &os.PathError{Op: "faccessat", Path: path, Err: err} + } + return nil +} + +// Lexists checks whether a file or directory exists at the given path. +// If the path is a symlink, the symlink itself is checked. 
+func Lexists(path string) error { + // It uses unix.Faccessat which is a faster operation compared to os.Stat for + // simply checking the existence of a file. + err := unix.Faccessat(unix.AT_FDCWD, path, unix.F_OK, unix.AT_SYMLINK_NOFOLLOW) + if err != nil { + return &os.PathError{Op: "faccessat", Path: path, Err: err} + } + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/fileutils/exists_windows.go b/vendor/github.com/containers/storage/pkg/fileutils/exists_windows.go new file mode 100644 index 00000000000..355cf046477 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/fileutils/exists_windows.go @@ -0,0 +1,18 @@ +package fileutils + +import ( + "os" +) + +// Exists checks whether a file or directory exists at the given path. +func Exists(path string) error { + _, err := os.Stat(path) + return err +} + +// Lexists checks whether a file or directory exists at the given path, without +// resolving symlinks +func Lexists(path string) error { + _, err := os.Lstat(path) + return err +} diff --git a/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go b/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go index 9d0714b1b93..85ce2d5260a 100644 --- a/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go +++ b/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go @@ -344,7 +344,7 @@ func ReadSymlinkedPath(path string) (realPath string, err error) { if realPath, err = filepath.EvalSymlinks(realPath); err != nil { return "", fmt.Errorf("failed to canonicalise path for %q: %w", path, err) } - if _, err := os.Stat(realPath); err != nil { + if err := Exists(realPath); err != nil { return "", fmt.Errorf("failed to stat target %q of %q: %w", realPath, path, err) } return realPath, nil @@ -352,7 +352,7 @@ func ReadSymlinkedPath(path string) (realPath string, err error) { // CreateIfNotExists creates a file or a directory only if it does not already exist. 
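// Usage sketch for the new existence helpers (the variable name `path` is a
// placeholder, not taken from this patch): callers elsewhere in this update swap
// a bare os.Stat for fileutils.Exists while keeping the usual os.IsNotExist
// handling, roughly:
//
//	if err := fileutils.Exists(path); err != nil {
//		if os.IsNotExist(err) {
//			// path is absent; create it or fall back to defaults
//		} else {
//			return err
//		}
//	}
//
// Lexists follows the same pattern when the symlink itself, rather than its
// target, should be checked.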
func CreateIfNotExists(path string, isDir bool) error { - if _, err := os.Stat(path); err != nil { + if err := Exists(path); err != nil { if os.IsNotExist(err) { if isDir { return os.MkdirAll(path, 0o755) diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go b/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go index d7cb4ac2fd7..7900af38a96 100644 --- a/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go +++ b/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go @@ -13,6 +13,7 @@ import ( "sync" "syscall" + "github.com/containers/storage/pkg/fileutils" "github.com/containers/storage/pkg/system" "github.com/moby/sys/user" ) @@ -55,7 +56,7 @@ func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chown if dirPath == "/" { break } - if _, err := os.Stat(dirPath); err != nil && os.IsNotExist(err) { + if err := fileutils.Exists(dirPath); err != nil && os.IsNotExist(err) { paths = append(paths, dirPath) } } diff --git a/vendor/github.com/containers/storage/store.go b/vendor/github.com/containers/storage/store.go index c6f1251893e..253218e5adb 100644 --- a/vendor/github.com/containers/storage/store.go +++ b/vendor/github.com/containers/storage/store.go @@ -2820,22 +2820,42 @@ func (s *store) mount(id string, options drivers.MountOpts) (string, error) { } defer s.stopUsingGraphDriver() - rlstore, err := s.getLayerStoreLocked() + rlstore, lstores, err := s.bothLayerStoreKindsLocked() if err != nil { return "", err } - if err := rlstore.startWriting(); err != nil { - return "", err - } - defer rlstore.stopWriting() - if options.UidMaps != nil || options.GidMaps != nil { options.DisableShifting = !s.canUseShifting(options.UidMaps, options.GidMaps) } - if rlstore.Exists(id) { - return rlstore.Mount(id, options) + // function used to have a scope for rlstore.StopWriting() + tryMount := func() (string, error) { + if err := rlstore.startWriting(); err != nil { + return "", err + } + defer rlstore.stopWriting() + if rlstore.Exists(id) { + return rlstore.Mount(id, options) + } + return "", nil + } + mountPoint, err := tryMount() + if mountPoint != "" || err != nil { + return mountPoint, err + } + + // check if the layer is in a read-only store, and return a better error message + for _, store := range lstores { + if err := store.startReading(); err != nil { + return "", err + } + exists := store.Exists(id) + store.stopReading() + if exists { + return "", fmt.Errorf("mounting read/only store images is not allowed: %w", ErrLayerUnknown) + } } + return "", ErrLayerUnknown } diff --git a/vendor/github.com/containers/storage/types/options.go b/vendor/github.com/containers/storage/types/options.go index ad0bfa43a64..03e5f7ab643 100644 --- a/vendor/github.com/containers/storage/types/options.go +++ b/vendor/github.com/containers/storage/types/options.go @@ -11,6 +11,7 @@ import ( "github.com/BurntSushi/toml" cfg "github.com/containers/storage/pkg/config" + "github.com/containers/storage/pkg/fileutils" "github.com/containers/storage/pkg/homedir" "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/unshare" @@ -76,7 +77,7 @@ func loadDefaultStoreOptions() { if path, ok := os.LookupEnv("XDG_CONFIG_HOME"); ok { homeConfigFile := filepath.Join(path, "containers", "storage.conf") - if _, err := os.Stat(homeConfigFile); err == nil { + if err := fileutils.Exists(homeConfigFile); err == nil { // user storage.conf in XDG_CONFIG_HOME if it exists defaultOverrideConfigFile = homeConfigFile } else { @@ -87,7 +88,7 @@ 
func loadDefaultStoreOptions() { } } - _, err := os.Stat(defaultOverrideConfigFile) + err := fileutils.Exists(defaultOverrideConfigFile) if err == nil { // The DefaultConfigFile() function returns the path // of the used storage.conf file, by returning defaultConfigFile @@ -150,7 +151,7 @@ func loadStoreOptionsFromConfFile(storageConf string) (StoreOptions, error) { return storageOpts, err } } - _, err = os.Stat(storageConf) + err = fileutils.Exists(storageConf) if err != nil && !os.IsNotExist(err) { return storageOpts, err } diff --git a/vendor/github.com/containers/storage/types/utils.go b/vendor/github.com/containers/storage/types/utils.go index 5b4b31b80ac..b313a472884 100644 --- a/vendor/github.com/containers/storage/types/utils.go +++ b/vendor/github.com/containers/storage/types/utils.go @@ -7,6 +7,7 @@ import ( "strconv" "strings" + "github.com/containers/storage/pkg/fileutils" "github.com/containers/storage/pkg/homedir" "github.com/sirupsen/logrus" ) @@ -31,7 +32,7 @@ func DefaultConfigFile() (string, error) { return path, nil } if !usePerUserStorage() { - if _, err := os.Stat(defaultOverrideConfigFile); err == nil { + if err := fileutils.Exists(defaultOverrideConfigFile); err == nil { return defaultOverrideConfigFile, nil } return defaultConfigFile, nil diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md index 1f72cdde187..05c7359e481 100644 --- a/vendor/github.com/klauspost/compress/README.md +++ b/vendor/github.com/klauspost/compress/README.md @@ -55,6 +55,10 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp * s2: Do 2 overlapping match checks https://github.com/klauspost/compress/pull/839 * flate: Add amd64 assembly matchlen https://github.com/klauspost/compress/pull/837 * gzip: Copy bufio.Reader on Reset by @thatguystone in https://github.com/klauspost/compress/pull/860 + +
+ See changes to v1.16.x + * July 1st, 2023 - [v1.16.7](https://github.com/klauspost/compress/releases/tag/v1.16.7) * zstd: Fix default level first dictionary encode https://github.com/klauspost/compress/pull/829 @@ -93,6 +97,7 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp * s2: Add LZ4 block converter. https://github.com/klauspost/compress/pull/748 * s2: Support io.ReaderAt in ReadSeeker. https://github.com/klauspost/compress/pull/747 * s2c/s2sx: Use concurrent decoding. https://github.com/klauspost/compress/pull/746 +
See changes to v1.15.x @@ -560,6 +565,8 @@ the stateless compress described below. For compression performance, see: [this spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing). +To disable all assembly add `-tags=noasm`. This works across all packages. + # Stateless compression This package offers stateless compression as a special option for gzip/deflate. diff --git a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go index 2aa6a95a028..2754bac6f16 100644 --- a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go +++ b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go @@ -51,7 +51,7 @@ func emitCopy(dst []byte, offset, length int) int { i := 0 // The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The // threshold for this loop is a little higher (at 68 = 64 + 4), and the - // length emitted down below is is a little lower (at 60 = 64 - 4), because + // length emitted down below is a little lower (at 60 = 64 - 4), because // it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed // by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as // a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go index 9f17ce601ff..03744fbc765 100644 --- a/vendor/github.com/klauspost/compress/zstd/blockdec.go +++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go @@ -554,6 +554,9 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) { if debugDecoder { printf("Compression modes: 0b%b", compMode) } + if compMode&3 != 0 { + return errors.New("corrupt block: reserved bits not zero") + } for i := uint(0); i < 3; i++ { mode := seqCompMode((compMode >> (6 - i*2)) & 3) if debugDecoder { diff --git a/vendor/github.com/klauspost/compress/zstd/blockenc.go b/vendor/github.com/klauspost/compress/zstd/blockenc.go index 2cfe925ade5..32a7f401d5d 100644 --- a/vendor/github.com/klauspost/compress/zstd/blockenc.go +++ b/vendor/github.com/klauspost/compress/zstd/blockenc.go @@ -427,6 +427,16 @@ func (b *blockEnc) encodeLits(lits []byte, raw bool) error { return nil } +// encodeRLE will encode an RLE block. +func (b *blockEnc) encodeRLE(val byte, length uint32) { + var bh blockHeader + bh.setLast(b.last) + bh.setSize(length) + bh.setType(blockTypeRLE) + b.output = bh.appendTo(b.output) + b.output = append(b.output, val) +} + // fuzzFseEncoder can be used to fuzz the FSE encoder. func fuzzFseEncoder(data []byte) int { if len(data) > maxSequences || len(data) < 2 { @@ -479,6 +489,16 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { if len(b.sequences) == 0 { return b.encodeLits(b.literals, rawAllLits) } + if len(b.sequences) == 1 && len(org) > 0 && len(b.literals) <= 1 { + // Check common RLE cases. + seq := b.sequences[0] + if seq.litLen == uint32(len(b.literals)) && seq.offset-3 == 1 { + // Offset == 1 and 0 or 1 literals. + b.encodeRLE(org[0], b.sequences[0].matchLen+zstdMinMatch+seq.litLen) + return nil + } + } + // We want some difference to at least account for the headers. 
saved := b.size - len(b.literals) - (b.size >> 6) if saved < 16 { diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go index f04aaa21eb8..bbca17234aa 100644 --- a/vendor/github.com/klauspost/compress/zstd/decoder.go +++ b/vendor/github.com/klauspost/compress/zstd/decoder.go @@ -82,7 +82,7 @@ var ( // can run multiple concurrent stateless decodes. It is even possible to // use stateless decodes while a stream is being decoded. // -// The Reset function can be used to initiate a new stream, which is will considerably +// The Reset function can be used to initiate a new stream, which will considerably // reduce the allocations normally caused by NewReader. func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) { initPredefined() diff --git a/vendor/github.com/klauspost/compress/zstd/enc_best.go b/vendor/github.com/klauspost/compress/zstd/enc_best.go index 87f42879a87..4613724e9d1 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_best.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_best.go @@ -135,8 +135,20 @@ func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) { break } + // Add block to history s := e.addBlock(src) blk.size = len(src) + + // Check RLE first + if len(src) > zstdMinMatch { + ml := matchLen(src[1:], src) + if ml == len(src)-1 { + blk.literals = append(blk.literals, src[0]) + blk.sequences = append(blk.sequences, seq{litLen: 1, matchLen: uint32(len(src)-1) - zstdMinMatch, offset: 1 + 3}) + return + } + } + if len(src) < minNonLiteralBlockSize { blk.extraLits = len(src) blk.literals = blk.literals[:len(src)] diff --git a/vendor/github.com/klauspost/compress/zstd/enc_better.go b/vendor/github.com/klauspost/compress/zstd/enc_better.go index 20d25b0e052..a4f5bf91fc6 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_better.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_better.go @@ -102,9 +102,20 @@ func (e *betterFastEncoder) Encode(blk *blockEnc, src []byte) { e.cur = e.maxMatchOff break } - + // Add block to history s := e.addBlock(src) blk.size = len(src) + + // Check RLE first + if len(src) > zstdMinMatch { + ml := matchLen(src[1:], src) + if ml == len(src)-1 { + blk.literals = append(blk.literals, src[0]) + blk.sequences = append(blk.sequences, seq{litLen: 1, matchLen: uint32(len(src)-1) - zstdMinMatch, offset: 1 + 3}) + return + } + } + if len(src) < minNonLiteralBlockSize { blk.extraLits = len(src) blk.literals = blk.literals[:len(src)] diff --git a/vendor/github.com/onsi/gomega/CHANGELOG.md b/vendor/github.com/onsi/gomega/CHANGELOG.md index 9a14b81517e..01ec5245cdc 100644 --- a/vendor/github.com/onsi/gomega/CHANGELOG.md +++ b/vendor/github.com/onsi/gomega/CHANGELOG.md @@ -1,3 +1,16 @@ +## 1.32.0 + +### Maintenance +- Migrate github.com/golang/protobuf to google.golang.org/protobuf [436a197] + + This release drops the deprecated github.com/golang/protobuf and adopts google.golang.org/protobuf. Care was taken to ensure the release is backwards compatible (thanks @jbduncan !). Please open an issue if you run into one. 
+ +- chore: test with Go 1.22 (#733) [32ef35e] +- Bump golang.org/x/net from 0.19.0 to 0.20.0 (#717) [a0d0387] +- Bump github-pages and jekyll-feed in /docs (#732) [b71e477] +- docs: fix typo and broken anchor link to gstruct [f460154] +- docs: fix HaveEach matcher signature [a2862e4] + ## 1.31.1 ### Fixes diff --git a/vendor/github.com/onsi/gomega/gomega_dsl.go b/vendor/github.com/onsi/gomega/gomega_dsl.go index 5b46a165815..ffb81b1feb3 100644 --- a/vendor/github.com/onsi/gomega/gomega_dsl.go +++ b/vendor/github.com/onsi/gomega/gomega_dsl.go @@ -22,7 +22,7 @@ import ( "github.com/onsi/gomega/types" ) -const GOMEGA_VERSION = "1.31.1" +const GOMEGA_VERSION = "1.32.0" const nilGomegaPanic = `You are trying to make an assertion, but haven't registered Gomega's fail handler. If you're using Ginkgo then you probably forgot to put your assertion in an It(). diff --git a/vendor/github.com/ulikunitz/xz/README.md b/vendor/github.com/ulikunitz/xz/README.md index 55471852132..56d49275a74 100644 --- a/vendor/github.com/ulikunitz/xz/README.md +++ b/vendor/github.com/ulikunitz/xz/README.md @@ -75,3 +75,14 @@ To decompress it use the following command. $ gxz -d bigfile.xz +## Security & Vulnerabilities + +The security policy is documented in [SECURITY.md](SECURITY.md). + +The software is not affected by the supply chain attack on the original xz +implementation, [CVE-2024-3094](https://nvd.nist.gov/vuln/detail/CVE-2024-3094). +This implementation doesn't share any files with the original xz implementation +and no patches or pull requests are accepted without a review. + +All security advisories for this project are published under +[github.com/ulikunitz/xz/security/advisories](https://github.com/ulikunitz/xz/security/advisories?state=published). diff --git a/vendor/github.com/ulikunitz/xz/SECURITY.md b/vendor/github.com/ulikunitz/xz/SECURITY.md index 5f7ec01b3b6..1bdc88878d4 100644 --- a/vendor/github.com/ulikunitz/xz/SECURITY.md +++ b/vendor/github.com/ulikunitz/xz/SECURITY.md @@ -6,5 +6,14 @@ Currently the last minor version v0.5.x is supported. ## Reporting a Vulnerability -Report a vulnerability by creating a Github issue at -. Expect a response in a week. +You can privately report a vulnerability following this +[procedure](https://docs.github.com/en/code-security/security-advisories/guidance-on-reporting-and-writing-information-about-vulnerabilities/privately-reporting-a-security-vulnerability#privately-reporting-a-security-vulnerability). +Alternatively you can create a Github issue at +. + +In both cases expect a response in at least 7 days. + +## Security Advisories + +All security advisories for this project are published under +[github.com/ulikunitz/xz/security/advisories](https://github.com/ulikunitz/xz/security/advisories?state=published). diff --git a/vendor/github.com/ulikunitz/xz/TODO.md b/vendor/github.com/ulikunitz/xz/TODO.md index a3d6f19250c..c466ffeda5c 100644 --- a/vendor/github.com/ulikunitz/xz/TODO.md +++ b/vendor/github.com/ulikunitz/xz/TODO.md @@ -86,6 +86,11 @@ ## Log +### 2024-04-03 + +Release v0.5.12 updates README.md and SECURITY.md to address the supply chain +attack on the original xz implementation. + ### 2022-12-12 Matt Dantay (@bodgit) reported an issue with the LZMA reader. The implementation @@ -99,7 +104,7 @@ it. Mituo Heijo has fuzzed xz and found a bug in the function readIndexBody. The function allocated a slice of records immediately after reading the value -without further checks. Sincex the number has been too large the make function +without further checks. 
Since the number has been too large the make function did panic. The fix is to check the number against the expected number of records before allocating the records. diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s b/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s index 66aebae2588..c672ccf6986 100644 --- a/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s +++ b/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s @@ -33,6 +33,9 @@ #define CONSTBASE R16 #define BLOCKS R17 +// for VPERMXOR +#define MASK R18 + DATA consts<>+0x00(SB)/8, $0x3320646e61707865 DATA consts<>+0x08(SB)/8, $0x6b20657479622d32 DATA consts<>+0x10(SB)/8, $0x0000000000000001 @@ -53,7 +56,11 @@ DATA consts<>+0x80(SB)/8, $0x6b2065746b206574 DATA consts<>+0x88(SB)/8, $0x6b2065746b206574 DATA consts<>+0x90(SB)/8, $0x0000000100000000 DATA consts<>+0x98(SB)/8, $0x0000000300000002 -GLOBL consts<>(SB), RODATA, $0xa0 +DATA consts<>+0xa0(SB)/8, $0x5566774411223300 +DATA consts<>+0xa8(SB)/8, $0xddeeffcc99aabb88 +DATA consts<>+0xb0(SB)/8, $0x6677445522330011 +DATA consts<>+0xb8(SB)/8, $0xeeffccddaabb8899 +GLOBL consts<>(SB), RODATA, $0xc0 //func chaCha20_ctr32_vsx(out, inp *byte, len int, key *[8]uint32, counter *uint32) TEXT ·chaCha20_ctr32_vsx(SB),NOSPLIT,$64-40 @@ -70,6 +77,9 @@ TEXT ·chaCha20_ctr32_vsx(SB),NOSPLIT,$64-40 MOVD $48, R10 MOVD $64, R11 SRD $6, LEN, BLOCKS + // for VPERMXOR + MOVD $consts<>+0xa0(SB), MASK + MOVD $16, R20 // V16 LXVW4X (CONSTBASE)(R0), VS48 ADD $80,CONSTBASE @@ -87,6 +97,10 @@ TEXT ·chaCha20_ctr32_vsx(SB),NOSPLIT,$64-40 // V28 LXVW4X (CONSTBASE)(R11), VS60 + // Load mask constants for VPERMXOR + LXVW4X (MASK)(R0), V20 + LXVW4X (MASK)(R20), V21 + // splat slot from V19 -> V26 VSPLTW $0, V19, V26 @@ -97,7 +111,7 @@ TEXT ·chaCha20_ctr32_vsx(SB),NOSPLIT,$64-40 MOVD $10, R14 MOVD R14, CTR - + PCALIGN $16 loop_outer_vsx: // V0, V1, V2, V3 LXVW4X (R0)(CONSTBASE), VS32 @@ -128,22 +142,17 @@ loop_outer_vsx: VSPLTISW $12, V28 VSPLTISW $8, V29 VSPLTISW $7, V30 - + PCALIGN $16 loop_vsx: VADDUWM V0, V4, V0 VADDUWM V1, V5, V1 VADDUWM V2, V6, V2 VADDUWM V3, V7, V3 - VXOR V12, V0, V12 - VXOR V13, V1, V13 - VXOR V14, V2, V14 - VXOR V15, V3, V15 - - VRLW V12, V27, V12 - VRLW V13, V27, V13 - VRLW V14, V27, V14 - VRLW V15, V27, V15 + VPERMXOR V12, V0, V21, V12 + VPERMXOR V13, V1, V21, V13 + VPERMXOR V14, V2, V21, V14 + VPERMXOR V15, V3, V21, V15 VADDUWM V8, V12, V8 VADDUWM V9, V13, V9 @@ -165,15 +174,10 @@ loop_vsx: VADDUWM V2, V6, V2 VADDUWM V3, V7, V3 - VXOR V12, V0, V12 - VXOR V13, V1, V13 - VXOR V14, V2, V14 - VXOR V15, V3, V15 - - VRLW V12, V29, V12 - VRLW V13, V29, V13 - VRLW V14, V29, V14 - VRLW V15, V29, V15 + VPERMXOR V12, V0, V20, V12 + VPERMXOR V13, V1, V20, V13 + VPERMXOR V14, V2, V20, V14 + VPERMXOR V15, V3, V20, V15 VADDUWM V8, V12, V8 VADDUWM V9, V13, V9 @@ -195,15 +199,10 @@ loop_vsx: VADDUWM V2, V7, V2 VADDUWM V3, V4, V3 - VXOR V15, V0, V15 - VXOR V12, V1, V12 - VXOR V13, V2, V13 - VXOR V14, V3, V14 - - VRLW V15, V27, V15 - VRLW V12, V27, V12 - VRLW V13, V27, V13 - VRLW V14, V27, V14 + VPERMXOR V15, V0, V21, V15 + VPERMXOR V12, V1, V21, V12 + VPERMXOR V13, V2, V21, V13 + VPERMXOR V14, V3, V21, V14 VADDUWM V10, V15, V10 VADDUWM V11, V12, V11 @@ -225,15 +224,10 @@ loop_vsx: VADDUWM V2, V7, V2 VADDUWM V3, V4, V3 - VXOR V15, V0, V15 - VXOR V12, V1, V12 - VXOR V13, V2, V13 - VXOR V14, V3, V14 - - VRLW V15, V29, V15 - VRLW V12, V29, V12 - VRLW V13, V29, V13 - VRLW V14, V29, V14 + VPERMXOR V15, V0, V20, V15 + VPERMXOR V12, V1, V20, V12 + VPERMXOR V13, V2, V20, V13 + VPERMXOR V14, V3, 
V20, V14 VADDUWM V10, V15, V10 VADDUWM V11, V12, V11 @@ -249,48 +243,48 @@ loop_vsx: VRLW V6, V30, V6 VRLW V7, V30, V7 VRLW V4, V30, V4 - BC 16, LT, loop_vsx + BDNZ loop_vsx VADDUWM V12, V26, V12 - WORD $0x13600F8C // VMRGEW V0, V1, V27 - WORD $0x13821F8C // VMRGEW V2, V3, V28 + VMRGEW V0, V1, V27 + VMRGEW V2, V3, V28 - WORD $0x10000E8C // VMRGOW V0, V1, V0 - WORD $0x10421E8C // VMRGOW V2, V3, V2 + VMRGOW V0, V1, V0 + VMRGOW V2, V3, V2 - WORD $0x13A42F8C // VMRGEW V4, V5, V29 - WORD $0x13C63F8C // VMRGEW V6, V7, V30 + VMRGEW V4, V5, V29 + VMRGEW V6, V7, V30 XXPERMDI VS32, VS34, $0, VS33 XXPERMDI VS32, VS34, $3, VS35 XXPERMDI VS59, VS60, $0, VS32 XXPERMDI VS59, VS60, $3, VS34 - WORD $0x10842E8C // VMRGOW V4, V5, V4 - WORD $0x10C63E8C // VMRGOW V6, V7, V6 + VMRGOW V4, V5, V4 + VMRGOW V6, V7, V6 - WORD $0x13684F8C // VMRGEW V8, V9, V27 - WORD $0x138A5F8C // VMRGEW V10, V11, V28 + VMRGEW V8, V9, V27 + VMRGEW V10, V11, V28 XXPERMDI VS36, VS38, $0, VS37 XXPERMDI VS36, VS38, $3, VS39 XXPERMDI VS61, VS62, $0, VS36 XXPERMDI VS61, VS62, $3, VS38 - WORD $0x11084E8C // VMRGOW V8, V9, V8 - WORD $0x114A5E8C // VMRGOW V10, V11, V10 + VMRGOW V8, V9, V8 + VMRGOW V10, V11, V10 - WORD $0x13AC6F8C // VMRGEW V12, V13, V29 - WORD $0x13CE7F8C // VMRGEW V14, V15, V30 + VMRGEW V12, V13, V29 + VMRGEW V14, V15, V30 XXPERMDI VS40, VS42, $0, VS41 XXPERMDI VS40, VS42, $3, VS43 XXPERMDI VS59, VS60, $0, VS40 XXPERMDI VS59, VS60, $3, VS42 - WORD $0x118C6E8C // VMRGOW V12, V13, V12 - WORD $0x11CE7E8C // VMRGOW V14, V15, V14 + VMRGOW V12, V13, V12 + VMRGOW V14, V15, V14 VSPLTISW $4, V27 VADDUWM V26, V27, V26 @@ -431,7 +425,7 @@ tail_vsx: ADD $-1, R11, R12 ADD $-1, INP ADD $-1, OUT - + PCALIGN $16 looptail_vsx: // Copying the result to OUT // in bytes. @@ -439,7 +433,7 @@ looptail_vsx: MOVBZU 1(INP), TMP XOR KEY, TMP, KEY MOVBU KEY, 1(OUT) - BC 16, LT, looptail_vsx + BDNZ looptail_vsx // Clear the stack values STXVW4X VS48, (R11)(R0) diff --git a/vendor/golang.org/x/crypto/ssh/server.go b/vendor/golang.org/x/crypto/ssh/server.go index c2dfe3268c5..e2ae4f891bb 100644 --- a/vendor/golang.org/x/crypto/ssh/server.go +++ b/vendor/golang.org/x/crypto/ssh/server.go @@ -426,6 +426,35 @@ func (l ServerAuthError) Error() string { return "[" + strings.Join(errs, ", ") + "]" } +// ServerAuthCallbacks defines server-side authentication callbacks. +type ServerAuthCallbacks struct { + // PasswordCallback behaves like [ServerConfig.PasswordCallback]. + PasswordCallback func(conn ConnMetadata, password []byte) (*Permissions, error) + + // PublicKeyCallback behaves like [ServerConfig.PublicKeyCallback]. + PublicKeyCallback func(conn ConnMetadata, key PublicKey) (*Permissions, error) + + // KeyboardInteractiveCallback behaves like [ServerConfig.KeyboardInteractiveCallback]. + KeyboardInteractiveCallback func(conn ConnMetadata, client KeyboardInteractiveChallenge) (*Permissions, error) + + // GSSAPIWithMICConfig behaves like [ServerConfig.GSSAPIWithMICConfig]. + GSSAPIWithMICConfig *GSSAPIWithMICConfig +} + +// PartialSuccessError can be returned by any of the [ServerConfig] +// authentication callbacks to indicate to the client that authentication has +// partially succeeded, but further steps are required. +type PartialSuccessError struct { + // Next defines the authentication callbacks to apply to further steps. The + // available methods communicated to the client are based on the non-nil + // ServerAuthCallbacks fields. 
+ Next ServerAuthCallbacks +} + +func (p *PartialSuccessError) Error() string { + return "ssh: authenticated with partial success" +} + // ErrNoAuth is the error value returned if no // authentication method has been passed yet. This happens as a normal // part of the authentication loop, since the client first tries @@ -439,8 +468,18 @@ func (s *connection) serverAuthenticate(config *ServerConfig) (*Permissions, err var perms *Permissions authFailures := 0 + noneAuthCount := 0 var authErrs []error var displayedBanner bool + partialSuccessReturned := false + // Set the initial authentication callbacks from the config. They can be + // changed if a PartialSuccessError is returned. + authConfig := ServerAuthCallbacks{ + PasswordCallback: config.PasswordCallback, + PublicKeyCallback: config.PublicKeyCallback, + KeyboardInteractiveCallback: config.KeyboardInteractiveCallback, + GSSAPIWithMICConfig: config.GSSAPIWithMICConfig, + } userAuthLoop: for { @@ -471,6 +510,11 @@ userAuthLoop: return nil, errors.New("ssh: client attempted to negotiate for unknown service: " + userAuthReq.Service) } + if s.user != userAuthReq.User && partialSuccessReturned { + return nil, fmt.Errorf("ssh: client changed the user after a partial success authentication, previous user %q, current user %q", + s.user, userAuthReq.User) + } + s.user = userAuthReq.User if !displayedBanner && config.BannerCallback != nil { @@ -491,20 +535,18 @@ userAuthLoop: switch userAuthReq.Method { case "none": - if config.NoClientAuth { + noneAuthCount++ + // We don't allow none authentication after a partial success + // response. + if config.NoClientAuth && !partialSuccessReturned { if config.NoClientAuthCallback != nil { perms, authErr = config.NoClientAuthCallback(s) } else { authErr = nil } } - - // allow initial attempt of 'none' without penalty - if authFailures == 0 { - authFailures-- - } case "password": - if config.PasswordCallback == nil { + if authConfig.PasswordCallback == nil { authErr = errors.New("ssh: password auth not configured") break } @@ -518,17 +560,17 @@ userAuthLoop: return nil, parseError(msgUserAuthRequest) } - perms, authErr = config.PasswordCallback(s, password) + perms, authErr = authConfig.PasswordCallback(s, password) case "keyboard-interactive": - if config.KeyboardInteractiveCallback == nil { + if authConfig.KeyboardInteractiveCallback == nil { authErr = errors.New("ssh: keyboard-interactive auth not configured") break } prompter := &sshClientKeyboardInteractive{s} - perms, authErr = config.KeyboardInteractiveCallback(s, prompter.Challenge) + perms, authErr = authConfig.KeyboardInteractiveCallback(s, prompter.Challenge) case "publickey": - if config.PublicKeyCallback == nil { + if authConfig.PublicKeyCallback == nil { authErr = errors.New("ssh: publickey auth not configured") break } @@ -562,11 +604,18 @@ userAuthLoop: if !ok { candidate.user = s.user candidate.pubKeyData = pubKeyData - candidate.perms, candidate.result = config.PublicKeyCallback(s, pubKey) - if candidate.result == nil && candidate.perms != nil && candidate.perms.CriticalOptions != nil && candidate.perms.CriticalOptions[sourceAddressCriticalOption] != "" { - candidate.result = checkSourceAddress( + candidate.perms, candidate.result = authConfig.PublicKeyCallback(s, pubKey) + _, isPartialSuccessError := candidate.result.(*PartialSuccessError) + + if (candidate.result == nil || isPartialSuccessError) && + candidate.perms != nil && + candidate.perms.CriticalOptions != nil && + candidate.perms.CriticalOptions[sourceAddressCriticalOption] != "" 
{ + if err := checkSourceAddress( s.RemoteAddr(), - candidate.perms.CriticalOptions[sourceAddressCriticalOption]) + candidate.perms.CriticalOptions[sourceAddressCriticalOption]); err != nil { + candidate.result = err + } } cache.add(candidate) } @@ -578,8 +627,8 @@ userAuthLoop: if len(payload) > 0 { return nil, parseError(msgUserAuthRequest) } - - if candidate.result == nil { + _, isPartialSuccessError := candidate.result.(*PartialSuccessError) + if candidate.result == nil || isPartialSuccessError { okMsg := userAuthPubKeyOkMsg{ Algo: algo, PubKey: pubKeyData, @@ -629,11 +678,11 @@ userAuthLoop: perms = candidate.perms } case "gssapi-with-mic": - if config.GSSAPIWithMICConfig == nil { + if authConfig.GSSAPIWithMICConfig == nil { authErr = errors.New("ssh: gssapi-with-mic auth not configured") break } - gssapiConfig := config.GSSAPIWithMICConfig + gssapiConfig := authConfig.GSSAPIWithMICConfig userAuthRequestGSSAPI, err := parseGSSAPIPayload(userAuthReq.Payload) if err != nil { return nil, parseError(msgUserAuthRequest) @@ -689,49 +738,70 @@ userAuthLoop: break userAuthLoop } - authFailures++ - if config.MaxAuthTries > 0 && authFailures >= config.MaxAuthTries { - // If we have hit the max attempts, don't bother sending the - // final SSH_MSG_USERAUTH_FAILURE message, since there are - // no more authentication methods which can be attempted, - // and this message may cause the client to re-attempt - // authentication while we send the disconnect message. - // Continue, and trigger the disconnect at the start of - // the loop. - // - // The SSH specification is somewhat confusing about this, - // RFC 4252 Section 5.1 requires each authentication failure - // be responded to with a respective SSH_MSG_USERAUTH_FAILURE - // message, but Section 4 says the server should disconnect - // after some number of attempts, but it isn't explicit which - // message should take precedence (i.e. should there be a failure - // message than a disconnect message, or if we are going to - // disconnect, should we only send that message.) - // - // Either way, OpenSSH disconnects immediately after the last - // failed authnetication attempt, and given they are typically - // considered the golden implementation it seems reasonable - // to match that behavior. - continue + var failureMsg userAuthFailureMsg + + if partialSuccess, ok := authErr.(*PartialSuccessError); ok { + // After a partial success error we don't allow changing the user + // name and execute the NoClientAuthCallback. + partialSuccessReturned = true + + // In case a partial success is returned, the server may send + // a new set of authentication methods. + authConfig = partialSuccess.Next + + // Reset pubkey cache, as the new PublicKeyCallback might + // accept a different set of public keys. + cache = pubKeyCache{} + + // Send back a partial success message to the user. + failureMsg.PartialSuccess = true + } else { + // Allow initial attempt of 'none' without penalty. + if authFailures > 0 || userAuthReq.Method != "none" || noneAuthCount != 1 { + authFailures++ + } + if config.MaxAuthTries > 0 && authFailures >= config.MaxAuthTries { + // If we have hit the max attempts, don't bother sending the + // final SSH_MSG_USERAUTH_FAILURE message, since there are + // no more authentication methods which can be attempted, + // and this message may cause the client to re-attempt + // authentication while we send the disconnect message. + // Continue, and trigger the disconnect at the start of + // the loop. 
+ // + // The SSH specification is somewhat confusing about this, + // RFC 4252 Section 5.1 requires each authentication failure + // be responded to with a respective SSH_MSG_USERAUTH_FAILURE + // message, but Section 4 says the server should disconnect + // after some number of attempts, but it isn't explicit which + // message should take precedence (i.e. should there be a failure + // message than a disconnect message, or if we are going to + // disconnect, should we only send that message.) + // + // Either way, OpenSSH disconnects immediately after the last + // failed authentication attempt, and given they are typically + // considered the golden implementation it seems reasonable + // to match that behavior. + continue + } } - var failureMsg userAuthFailureMsg - if config.PasswordCallback != nil { + if authConfig.PasswordCallback != nil { failureMsg.Methods = append(failureMsg.Methods, "password") } - if config.PublicKeyCallback != nil { + if authConfig.PublicKeyCallback != nil { failureMsg.Methods = append(failureMsg.Methods, "publickey") } - if config.KeyboardInteractiveCallback != nil { + if authConfig.KeyboardInteractiveCallback != nil { failureMsg.Methods = append(failureMsg.Methods, "keyboard-interactive") } - if config.GSSAPIWithMICConfig != nil && config.GSSAPIWithMICConfig.Server != nil && - config.GSSAPIWithMICConfig.AllowLogin != nil { + if authConfig.GSSAPIWithMICConfig != nil && authConfig.GSSAPIWithMICConfig.Server != nil && + authConfig.GSSAPIWithMICConfig.AllowLogin != nil { failureMsg.Methods = append(failureMsg.Methods, "gssapi-with-mic") } if len(failureMsg.Methods) == 0 { - return nil, errors.New("ssh: no authentication methods configured but NoClientAuth is also false") + return nil, errors.New("ssh: no authentication methods available") } if err := s.transport.writePacket(Marshal(&failureMsg)); err != nil { diff --git a/vendor/golang.org/x/sync/semaphore/semaphore.go b/vendor/golang.org/x/sync/semaphore/semaphore.go index 30f632c577b..b618162aab6 100644 --- a/vendor/golang.org/x/sync/semaphore/semaphore.go +++ b/vendor/golang.org/x/sync/semaphore/semaphore.go @@ -35,11 +35,25 @@ type Weighted struct { // Acquire acquires the semaphore with a weight of n, blocking until resources // are available or ctx is done. On success, returns nil. On failure, returns // ctx.Err() and leaves the semaphore unchanged. -// -// If ctx is already done, Acquire may still succeed without blocking. func (s *Weighted) Acquire(ctx context.Context, n int64) error { + done := ctx.Done() + s.mu.Lock() + select { + case <-done: + // ctx becoming done has "happened before" acquiring the semaphore, + // whether it became done before the call began or while we were + // waiting for the mutex. We prefer to fail even if we could acquire + // the mutex without blocking. + s.mu.Unlock() + return ctx.Err() + default: + } if s.size-s.cur >= n && s.waiters.Len() == 0 { + // Since we hold s.mu and haven't synchronized since checking done, if + // ctx becomes done before we return here, it becoming done must have + // "happened concurrently" with this call - it cannot "happen before" + // we return in this branch. So, we're ok to always acquire here. s.cur += n s.mu.Unlock() return nil @@ -48,7 +62,7 @@ func (s *Weighted) Acquire(ctx context.Context, n int64) error { if n > s.size { // Don't make other Acquire calls block on one that's doomed to fail. 
s.mu.Unlock() - <-ctx.Done() + <-done return ctx.Err() } @@ -58,14 +72,14 @@ func (s *Weighted) Acquire(ctx context.Context, n int64) error { s.mu.Unlock() select { - case <-ctx.Done(): - err := ctx.Err() + case <-done: s.mu.Lock() select { case <-ready: - // Acquired the semaphore after we were canceled. Rather than trying to - // fix up the queue, just pretend we didn't notice the cancelation. - err = nil + // Acquired the semaphore after we were canceled. + // Pretend we didn't and put the tokens back. + s.cur -= n + s.notifyWaiters() default: isFront := s.waiters.Front() == elem s.waiters.Remove(elem) @@ -75,9 +89,19 @@ func (s *Weighted) Acquire(ctx context.Context, n int64) error { } } s.mu.Unlock() - return err + return ctx.Err() case <-ready: + // Acquired the semaphore. Check that ctx isn't already done. + // We check the done channel instead of calling ctx.Err because we + // already have the channel, and ctx.Err is O(n) with the nesting + // depth of ctx. + select { + case <-done: + s.Release(n) + return ctx.Err() + default: + } return nil } } diff --git a/vendor/golang.org/x/sys/unix/mmap_nomremap.go b/vendor/golang.org/x/sys/unix/mmap_nomremap.go index 4b68e59780a..7f602ffd26d 100644 --- a/vendor/golang.org/x/sys/unix/mmap_nomremap.go +++ b/vendor/golang.org/x/sys/unix/mmap_nomremap.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build aix || darwin || dragonfly || freebsd || openbsd || solaris +//go:build aix || darwin || dragonfly || freebsd || openbsd || solaris || zos package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go index b473038c615..27c41b6f0a1 100644 --- a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go @@ -1520,6 +1520,14 @@ func (m *mmapper) Munmap(data []byte) (err error) { return nil } +func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) { + return mapper.Mmap(fd, offset, length, prot, flags) +} + +func Munmap(b []byte) (err error) { + return mapper.Munmap(b) +} + func Read(fd int, p []byte) (n int, err error) { n, err = read(fd, p) if raceenabled { diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index 6395a031d45..6525c62f3c2 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -165,6 +165,7 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys CreateFile(name *uint16, access uint32, mode uint32, sa *SecurityAttributes, createmode uint32, attrs uint32, templatefile Handle) (handle Handle, err error) [failretval==InvalidHandle] = CreateFileW //sys CreateNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *SecurityAttributes) (handle Handle, err error) [failretval==InvalidHandle] = CreateNamedPipeW //sys ConnectNamedPipe(pipe Handle, overlapped *Overlapped) (err error) +//sys DisconnectNamedPipe(pipe Handle) (err error) //sys GetNamedPipeInfo(pipe Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) //sys GetNamedPipeHandleState(pipe Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW //sys SetNamedPipeHandleState(pipe Handle, state 
*uint32, maxCollectionCount *uint32, collectDataTimeout *uint32) (err error) = SetNamedPipeHandleState @@ -348,8 +349,19 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys SetProcessPriorityBoost(process Handle, disable bool) (err error) = kernel32.SetProcessPriorityBoost //sys GetProcessWorkingSetSizeEx(hProcess Handle, lpMinimumWorkingSetSize *uintptr, lpMaximumWorkingSetSize *uintptr, flags *uint32) //sys SetProcessWorkingSetSizeEx(hProcess Handle, dwMinimumWorkingSetSize uintptr, dwMaximumWorkingSetSize uintptr, flags uint32) (err error) +//sys ClearCommBreak(handle Handle) (err error) +//sys ClearCommError(handle Handle, lpErrors *uint32, lpStat *ComStat) (err error) +//sys EscapeCommFunction(handle Handle, dwFunc uint32) (err error) +//sys GetCommState(handle Handle, lpDCB *DCB) (err error) +//sys GetCommModemStatus(handle Handle, lpModemStat *uint32) (err error) //sys GetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) +//sys PurgeComm(handle Handle, dwFlags uint32) (err error) +//sys SetCommBreak(handle Handle) (err error) +//sys SetCommMask(handle Handle, dwEvtMask uint32) (err error) +//sys SetCommState(handle Handle, lpDCB *DCB) (err error) //sys SetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) +//sys SetupComm(handle Handle, dwInQueue uint32, dwOutQueue uint32) (err error) +//sys WaitCommEvent(handle Handle, lpEvtMask *uint32, lpOverlapped *Overlapped) (err error) //sys GetActiveProcessorCount(groupNumber uint16) (ret uint32) //sys GetMaximumProcessorCount(groupNumber uint16) (ret uint32) //sys EnumWindows(enumFunc uintptr, param unsafe.Pointer) (err error) = user32.EnumWindows @@ -1834,3 +1846,73 @@ func ResizePseudoConsole(pconsole Handle, size Coord) error { // accept arguments that can be casted to uintptr, and Coord can't. return resizePseudoConsole(pconsole, *((*uint32)(unsafe.Pointer(&size)))) } + +// DCB constants. See https://learn.microsoft.com/en-us/windows/win32/api/winbase/ns-winbase-dcb. +const ( + CBR_110 = 110 + CBR_300 = 300 + CBR_600 = 600 + CBR_1200 = 1200 + CBR_2400 = 2400 + CBR_4800 = 4800 + CBR_9600 = 9600 + CBR_14400 = 14400 + CBR_19200 = 19200 + CBR_38400 = 38400 + CBR_57600 = 57600 + CBR_115200 = 115200 + CBR_128000 = 128000 + CBR_256000 = 256000 + + DTR_CONTROL_DISABLE = 0x00000000 + DTR_CONTROL_ENABLE = 0x00000010 + DTR_CONTROL_HANDSHAKE = 0x00000020 + + RTS_CONTROL_DISABLE = 0x00000000 + RTS_CONTROL_ENABLE = 0x00001000 + RTS_CONTROL_HANDSHAKE = 0x00002000 + RTS_CONTROL_TOGGLE = 0x00003000 + + NOPARITY = 0 + ODDPARITY = 1 + EVENPARITY = 2 + MARKPARITY = 3 + SPACEPARITY = 4 + + ONESTOPBIT = 0 + ONE5STOPBITS = 1 + TWOSTOPBITS = 2 +) + +// EscapeCommFunction constants. See https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-escapecommfunction. +const ( + SETXOFF = 1 + SETXON = 2 + SETRTS = 3 + CLRRTS = 4 + SETDTR = 5 + CLRDTR = 6 + SETBREAK = 8 + CLRBREAK = 9 +) + +// PurgeComm constants. See https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-purgecomm. +const ( + PURGE_TXABORT = 0x0001 + PURGE_RXABORT = 0x0002 + PURGE_TXCLEAR = 0x0004 + PURGE_RXCLEAR = 0x0008 +) + +// SetCommMask constants. See https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-setcommmask. 
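// Usage sketch for the new serial-port wrappers (the device name \\.\COM3 and the
// chosen line settings are placeholders, not taken from this patch): open the port
// with the package's existing CreateFile helper, then adjust the DCB returned by
// GetCommState before writing it back with SetCommState, roughly:
//
//	h, err := windows.CreateFile(windows.StringToUTF16Ptr(`\\.\COM3`),
//		windows.GENERIC_READ|windows.GENERIC_WRITE, 0, nil,
//		windows.OPEN_EXISTING, 0, 0)
//	if err != nil {
//		return err
//	}
//	defer windows.CloseHandle(h)
//
//	var dcb windows.DCB
//	if err := windows.GetCommState(h, &dcb); err != nil {
//		return err
//	}
//	dcb.BaudRate = windows.CBR_115200
//	dcb.ByteSize = 8
//	dcb.Parity = windows.NOPARITY
//	dcb.StopBits = windows.ONESTOPBIT
//	if err := windows.SetCommState(h, &dcb); err != nil {
//		return err
//	}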
+const ( + EV_RXCHAR = 0x0001 + EV_RXFLAG = 0x0002 + EV_TXEMPTY = 0x0004 + EV_CTS = 0x0008 + EV_DSR = 0x0010 + EV_RLSD = 0x0020 + EV_BREAK = 0x0040 + EV_ERR = 0x0080 + EV_RING = 0x0100 +) diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go index 359780f6ace..d8cb71db0a6 100644 --- a/vendor/golang.org/x/sys/windows/types_windows.go +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -3380,3 +3380,27 @@ type BLOB struct { Size uint32 BlobData *byte } + +type ComStat struct { + Flags uint32 + CBInQue uint32 + CBOutQue uint32 +} + +type DCB struct { + DCBlength uint32 + BaudRate uint32 + Flags uint32 + wReserved uint16 + XonLim uint16 + XoffLim uint16 + ByteSize uint8 + Parity uint8 + StopBits uint8 + XonChar byte + XoffChar byte + ErrorChar byte + EofChar byte + EvtChar byte + wReserved1 uint16 +} diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index e8791c82c30..5c6035ddfa9 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -188,6 +188,8 @@ var ( procAssignProcessToJobObject = modkernel32.NewProc("AssignProcessToJobObject") procCancelIo = modkernel32.NewProc("CancelIo") procCancelIoEx = modkernel32.NewProc("CancelIoEx") + procClearCommBreak = modkernel32.NewProc("ClearCommBreak") + procClearCommError = modkernel32.NewProc("ClearCommError") procCloseHandle = modkernel32.NewProc("CloseHandle") procClosePseudoConsole = modkernel32.NewProc("ClosePseudoConsole") procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe") @@ -212,7 +214,9 @@ var ( procDeleteProcThreadAttributeList = modkernel32.NewProc("DeleteProcThreadAttributeList") procDeleteVolumeMountPointW = modkernel32.NewProc("DeleteVolumeMountPointW") procDeviceIoControl = modkernel32.NewProc("DeviceIoControl") + procDisconnectNamedPipe = modkernel32.NewProc("DisconnectNamedPipe") procDuplicateHandle = modkernel32.NewProc("DuplicateHandle") + procEscapeCommFunction = modkernel32.NewProc("EscapeCommFunction") procExitProcess = modkernel32.NewProc("ExitProcess") procExpandEnvironmentStringsW = modkernel32.NewProc("ExpandEnvironmentStringsW") procFindClose = modkernel32.NewProc("FindClose") @@ -236,6 +240,8 @@ var ( procGenerateConsoleCtrlEvent = modkernel32.NewProc("GenerateConsoleCtrlEvent") procGetACP = modkernel32.NewProc("GetACP") procGetActiveProcessorCount = modkernel32.NewProc("GetActiveProcessorCount") + procGetCommModemStatus = modkernel32.NewProc("GetCommModemStatus") + procGetCommState = modkernel32.NewProc("GetCommState") procGetCommTimeouts = modkernel32.NewProc("GetCommTimeouts") procGetCommandLineW = modkernel32.NewProc("GetCommandLineW") procGetComputerNameExW = modkernel32.NewProc("GetComputerNameExW") @@ -322,6 +328,7 @@ var ( procProcess32NextW = modkernel32.NewProc("Process32NextW") procProcessIdToSessionId = modkernel32.NewProc("ProcessIdToSessionId") procPulseEvent = modkernel32.NewProc("PulseEvent") + procPurgeComm = modkernel32.NewProc("PurgeComm") procQueryDosDeviceW = modkernel32.NewProc("QueryDosDeviceW") procQueryFullProcessImageNameW = modkernel32.NewProc("QueryFullProcessImageNameW") procQueryInformationJobObject = modkernel32.NewProc("QueryInformationJobObject") @@ -335,6 +342,9 @@ var ( procResetEvent = modkernel32.NewProc("ResetEvent") procResizePseudoConsole = modkernel32.NewProc("ResizePseudoConsole") procResumeThread = modkernel32.NewProc("ResumeThread") + procSetCommBreak = 
modkernel32.NewProc("SetCommBreak") + procSetCommMask = modkernel32.NewProc("SetCommMask") + procSetCommState = modkernel32.NewProc("SetCommState") procSetCommTimeouts = modkernel32.NewProc("SetCommTimeouts") procSetConsoleCursorPosition = modkernel32.NewProc("SetConsoleCursorPosition") procSetConsoleMode = modkernel32.NewProc("SetConsoleMode") @@ -342,7 +352,6 @@ var ( procSetDefaultDllDirectories = modkernel32.NewProc("SetDefaultDllDirectories") procSetDllDirectoryW = modkernel32.NewProc("SetDllDirectoryW") procSetEndOfFile = modkernel32.NewProc("SetEndOfFile") - procSetFileValidData = modkernel32.NewProc("SetFileValidData") procSetEnvironmentVariableW = modkernel32.NewProc("SetEnvironmentVariableW") procSetErrorMode = modkernel32.NewProc("SetErrorMode") procSetEvent = modkernel32.NewProc("SetEvent") @@ -351,6 +360,7 @@ var ( procSetFileInformationByHandle = modkernel32.NewProc("SetFileInformationByHandle") procSetFilePointer = modkernel32.NewProc("SetFilePointer") procSetFileTime = modkernel32.NewProc("SetFileTime") + procSetFileValidData = modkernel32.NewProc("SetFileValidData") procSetHandleInformation = modkernel32.NewProc("SetHandleInformation") procSetInformationJobObject = modkernel32.NewProc("SetInformationJobObject") procSetNamedPipeHandleState = modkernel32.NewProc("SetNamedPipeHandleState") @@ -361,6 +371,7 @@ var ( procSetStdHandle = modkernel32.NewProc("SetStdHandle") procSetVolumeLabelW = modkernel32.NewProc("SetVolumeLabelW") procSetVolumeMountPointW = modkernel32.NewProc("SetVolumeMountPointW") + procSetupComm = modkernel32.NewProc("SetupComm") procSizeofResource = modkernel32.NewProc("SizeofResource") procSleepEx = modkernel32.NewProc("SleepEx") procTerminateJobObject = modkernel32.NewProc("TerminateJobObject") @@ -379,6 +390,7 @@ var ( procVirtualQueryEx = modkernel32.NewProc("VirtualQueryEx") procVirtualUnlock = modkernel32.NewProc("VirtualUnlock") procWTSGetActiveConsoleSessionId = modkernel32.NewProc("WTSGetActiveConsoleSessionId") + procWaitCommEvent = modkernel32.NewProc("WaitCommEvent") procWaitForMultipleObjects = modkernel32.NewProc("WaitForMultipleObjects") procWaitForSingleObject = modkernel32.NewProc("WaitForSingleObject") procWriteConsoleW = modkernel32.NewProc("WriteConsoleW") @@ -1641,6 +1653,22 @@ func CancelIoEx(s Handle, o *Overlapped) (err error) { return } +func ClearCommBreak(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procClearCommBreak.Addr(), 1, uintptr(handle), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func ClearCommError(handle Handle, lpErrors *uint32, lpStat *ComStat) (err error) { + r1, _, e1 := syscall.Syscall(procClearCommError.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(lpErrors)), uintptr(unsafe.Pointer(lpStat))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func CloseHandle(handle Handle) (err error) { r1, _, e1 := syscall.Syscall(procCloseHandle.Addr(), 1, uintptr(handle), 0, 0) if r1 == 0 { @@ -1845,6 +1873,14 @@ func DeviceIoControl(handle Handle, ioControlCode uint32, inBuffer *byte, inBuff return } +func DisconnectNamedPipe(pipe Handle) (err error) { + r1, _, e1 := syscall.Syscall(procDisconnectNamedPipe.Addr(), 1, uintptr(pipe), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetProcessHandle Handle, lpTargetHandle *Handle, dwDesiredAccess uint32, bInheritHandle bool, dwOptions uint32) (err error) { var _p0 uint32 if bInheritHandle { @@ -1857,6 +1893,14 @@ func 
DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetP return } +func EscapeCommFunction(handle Handle, dwFunc uint32) (err error) { + r1, _, e1 := syscall.Syscall(procEscapeCommFunction.Addr(), 2, uintptr(handle), uintptr(dwFunc), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func ExitProcess(exitcode uint32) { syscall.Syscall(procExitProcess.Addr(), 1, uintptr(exitcode), 0, 0) return @@ -2058,6 +2102,22 @@ func GetActiveProcessorCount(groupNumber uint16) (ret uint32) { return } +func GetCommModemStatus(handle Handle, lpModemStat *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetCommModemStatus.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(lpModemStat)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func GetCommState(handle Handle, lpDCB *DCB) (err error) { + r1, _, e1 := syscall.Syscall(procGetCommState.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(lpDCB)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func GetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) { r1, _, e1 := syscall.Syscall(procGetCommTimeouts.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(timeouts)), 0) if r1 == 0 { @@ -2810,6 +2870,14 @@ func PulseEvent(event Handle) (err error) { return } +func PurgeComm(handle Handle, dwFlags uint32) (err error) { + r1, _, e1 := syscall.Syscall(procPurgeComm.Addr(), 2, uintptr(handle), uintptr(dwFlags), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func QueryDosDevice(deviceName *uint16, targetPath *uint16, max uint32) (n uint32, err error) { r0, _, e1 := syscall.Syscall(procQueryDosDeviceW.Addr(), 3, uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath)), uintptr(max)) n = uint32(r0) @@ -2924,6 +2992,30 @@ func ResumeThread(thread Handle) (ret uint32, err error) { return } +func SetCommBreak(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procSetCommBreak.Addr(), 1, uintptr(handle), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetCommMask(handle Handle, dwEvtMask uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetCommMask.Addr(), 2, uintptr(handle), uintptr(dwEvtMask), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetCommState(handle Handle, lpDCB *DCB) (err error) { + r1, _, e1 := syscall.Syscall(procSetCommState.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(lpDCB)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func SetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) { r1, _, e1 := syscall.Syscall(procSetCommTimeouts.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(timeouts)), 0) if r1 == 0 { @@ -2989,14 +3081,6 @@ func SetEndOfFile(handle Handle) (err error) { return } -func SetFileValidData(handle Handle, validDataLength int64) (err error) { - r1, _, e1 := syscall.Syscall(procSetFileValidData.Addr(), 2, uintptr(handle), uintptr(validDataLength), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - func SetEnvironmentVariable(name *uint16, value *uint16) (err error) { r1, _, e1 := syscall.Syscall(procSetEnvironmentVariableW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(value)), 0) if r1 == 0 { @@ -3060,6 +3144,14 @@ func SetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetim return } +func SetFileValidData(handle Handle, validDataLength int64) (err error) { + r1, _, e1 := syscall.Syscall(procSetFileValidData.Addr(), 2, uintptr(handle), uintptr(validDataLength), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func 
SetHandleInformation(handle Handle, mask uint32, flags uint32) (err error) { r1, _, e1 := syscall.Syscall(procSetHandleInformation.Addr(), 3, uintptr(handle), uintptr(mask), uintptr(flags)) if r1 == 0 { @@ -3145,6 +3237,14 @@ func SetVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16) (err erro return } +func SetupComm(handle Handle, dwInQueue uint32, dwOutQueue uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetupComm.Addr(), 3, uintptr(handle), uintptr(dwInQueue), uintptr(dwOutQueue)) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func SizeofResource(module Handle, resInfo Handle) (size uint32, err error) { r0, _, e1 := syscall.Syscall(procSizeofResource.Addr(), 2, uintptr(module), uintptr(resInfo), 0) size = uint32(r0) @@ -3291,6 +3391,14 @@ func WTSGetActiveConsoleSessionId() (sessionID uint32) { return } +func WaitCommEvent(handle Handle, lpEvtMask *uint32, lpOverlapped *Overlapped) (err error) { + r1, _, e1 := syscall.Syscall(procWaitCommEvent.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(lpEvtMask)), uintptr(unsafe.Pointer(lpOverlapped))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func waitForMultipleObjects(count uint32, handles uintptr, waitAll bool, waitMilliseconds uint32) (event uint32, err error) { var _p0 uint32 if waitAll { diff --git a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go index 03543bd4bb8..137cc8df1d8 100644 --- a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go +++ b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go @@ -47,7 +47,7 @@ import ( func Find(importPath, srcDir string) (filename, path string) { cmd := exec.Command("go", "list", "-json", "-export", "--", importPath) cmd.Dir = srcDir - out, err := cmd.CombinedOutput() + out, err := cmd.Output() if err != nil { return "", "" } diff --git a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go index 11d5c8c3adf..6a57ce3b136 100644 --- a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go +++ b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go @@ -29,9 +29,13 @@ import ( "strconv" "strings" + "golang.org/x/tools/internal/aliases" "golang.org/x/tools/internal/typeparams" + "golang.org/x/tools/internal/typesinternal" ) +// TODO(adonovan): think about generic aliases. + // A Path is an opaque name that identifies a types.Object // relative to its package. Conceptually, the name consists of a // sequence of destructuring operations applied to the package scope @@ -223,7 +227,7 @@ func (enc *Encoder) For(obj types.Object) (Path, error) { // Reject obviously non-viable cases. switch obj := obj.(type) { case *types.TypeName: - if _, ok := obj.Type().(*types.TypeParam); !ok { + if _, ok := aliases.Unalias(obj.Type()).(*types.TypeParam); !ok { // With the exception of type parameters, only package-level type names // have a path. return "", fmt.Errorf("no path for %v", obj) @@ -310,7 +314,7 @@ func (enc *Encoder) For(obj types.Object) (Path, error) { } // Inspect declared methods of defined types. 
- if T, ok := o.Type().(*types.Named); ok { + if T, ok := aliases.Unalias(o.Type()).(*types.Named); ok { path = append(path, opType) // The method index here is always with respect // to the underlying go/types data structures, @@ -395,13 +399,8 @@ func (enc *Encoder) concreteMethod(meth *types.Func) (Path, bool) { return "", false } - recvT := meth.Type().(*types.Signature).Recv().Type() - if ptr, ok := recvT.(*types.Pointer); ok { - recvT = ptr.Elem() - } - - named, ok := recvT.(*types.Named) - if !ok { + _, named := typesinternal.ReceiverNamed(meth.Type().(*types.Signature).Recv()) + if named == nil { return "", false } @@ -444,6 +443,8 @@ func (enc *Encoder) concreteMethod(meth *types.Func) (Path, bool) { // nil, it will be allocated as necessary. func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName]bool) []byte { switch T := T.(type) { + case *aliases.Alias: + return find(obj, aliases.Unalias(T), path, seen) case *types.Basic, *types.Named: // Named types belonging to pkg were handled already, // so T must belong to another package. No path. @@ -616,6 +617,7 @@ func Object(pkg *types.Package, p Path) (types.Object, error) { // Inv: t != nil, obj == nil + t = aliases.Unalias(t) switch code { case opElem: hasElem, ok := t.(hasElem) // Pointer, Slice, Array, Chan, Map diff --git a/vendor/golang.org/x/tools/internal/aliases/aliases.go b/vendor/golang.org/x/tools/internal/aliases/aliases.go new file mode 100644 index 00000000000..f89112c8ee5 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/aliases/aliases.go @@ -0,0 +1,28 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package aliases + +import ( + "go/token" + "go/types" +) + +// Package aliases defines backward compatible shims +// for the types.Alias type representation added in 1.22. +// This defines placeholders for x/tools until 1.26. + +// NewAlias creates a new TypeName in Package pkg that +// is an alias for the type rhs. +// +// When GoVersion>=1.22 and GODEBUG=gotypesalias=1, +// the Type() of the return value is a *types.Alias. +func NewAlias(pos token.Pos, pkg *types.Package, name string, rhs types.Type) *types.TypeName { + if enabled() { + tname := types.NewTypeName(pos, pkg, name, nil) + newAlias(tname, rhs) + return tname + } + return types.NewTypeName(pos, pkg, name, rhs) +} diff --git a/vendor/golang.org/x/tools/internal/aliases/aliases_go121.go b/vendor/golang.org/x/tools/internal/aliases/aliases_go121.go new file mode 100644 index 00000000000..1872b56ff8f --- /dev/null +++ b/vendor/golang.org/x/tools/internal/aliases/aliases_go121.go @@ -0,0 +1,30 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.22 +// +build !go1.22 + +package aliases + +import ( + "go/types" +) + +// Alias is a placeholder for a go/types.Alias for <=1.21. +// It will never be created by go/types. +type Alias struct{} + +func (*Alias) String() string { panic("unreachable") } + +func (*Alias) Underlying() types.Type { panic("unreachable") } + +func (*Alias) Obj() *types.TypeName { panic("unreachable") } + +// Unalias returns the type t for go <=1.21. +func Unalias(t types.Type) types.Type { return t } + +// Always false for go <=1.21. Ignores GODEBUG. 
+func enabled() bool { return false } + +func newAlias(name *types.TypeName, rhs types.Type) *Alias { panic("unreachable") } diff --git a/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go b/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go new file mode 100644 index 00000000000..8b92116284d --- /dev/null +++ b/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go @@ -0,0 +1,72 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.22 +// +build go1.22 + +package aliases + +import ( + "go/ast" + "go/parser" + "go/token" + "go/types" + "os" + "strings" + "sync" +) + +// Alias is an alias of types.Alias. +type Alias = types.Alias + +// Unalias is a wrapper of types.Unalias. +func Unalias(t types.Type) types.Type { return types.Unalias(t) } + +// newAlias is an internal alias around types.NewAlias. +// Direct usage is discouraged as the moment. +// Try to use NewAlias instead. +func newAlias(tname *types.TypeName, rhs types.Type) *Alias { + a := types.NewAlias(tname, rhs) + // TODO(go.dev/issue/65455): Remove kludgy workaround to set a.actual as a side-effect. + Unalias(a) + return a +} + +// enabled returns true when types.Aliases are enabled. +func enabled() bool { + // Use the gotypesalias value in GODEBUG if set. + godebug := os.Getenv("GODEBUG") + value := -1 // last set value. + for _, f := range strings.Split(godebug, ",") { + switch f { + case "gotypesalias=1": + value = 1 + case "gotypesalias=0": + value = 0 + } + } + switch value { + case 0: + return false + case 1: + return true + default: + return aliasesDefault() + } +} + +// aliasesDefault reports if aliases are enabled by default. +func aliasesDefault() bool { + // Dynamically check if Aliases will be produced from go/types. 
+ aliasesDefaultOnce.Do(func() { + fset := token.NewFileSet() + f, _ := parser.ParseFile(fset, "a.go", "package p; type A = int", 0) + pkg, _ := new(types.Config).Check("p", fset, []*ast.File{f}, nil) + _, gotypesaliasDefault = pkg.Scope().Lookup("A").Type().(*types.Alias) + }) + return gotypesaliasDefault +} + +var gotypesaliasDefault bool +var aliasesDefaultOnce sync.Once diff --git a/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go index 2d078ccb19c..39df91124a4 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go @@ -259,13 +259,6 @@ func Import(packages map[string]*types.Package, path, srcDir string, lookup func return } -func deref(typ types.Type) types.Type { - if p, _ := typ.(*types.Pointer); p != nil { - return p.Elem() - } - return typ -} - type byPath []*types.Package func (a byPath) Len() int { return len(a) } diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go index 2ee8c70164f..638fc1d3b86 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go @@ -23,6 +23,7 @@ import ( "strings" "golang.org/x/tools/go/types/objectpath" + "golang.org/x/tools/internal/aliases" "golang.org/x/tools/internal/tokeninternal" ) @@ -506,13 +507,13 @@ func (p *iexporter) doDecl(obj types.Object) { case *types.TypeName: t := obj.Type() - if tparam, ok := t.(*types.TypeParam); ok { + if tparam, ok := aliases.Unalias(t).(*types.TypeParam); ok { w.tag('P') w.pos(obj.Pos()) constraint := tparam.Constraint() if p.version >= iexportVersionGo1_18 { implicit := false - if iface, _ := constraint.(*types.Interface); iface != nil { + if iface, _ := aliases.Unalias(constraint).(*types.Interface); iface != nil { implicit = iface.IsImplicit() } w.bool(implicit) @@ -738,6 +739,8 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { }() } switch t := t.(type) { + // TODO(adonovan): support types.Alias. + case *types.Named: if targs := t.TypeArgs(); targs.Len() > 0 { w.startType(instanceType) @@ -843,7 +846,7 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { for i := 0; i < n; i++ { ft := t.EmbeddedType(i) tPkg := pkg - if named, _ := ft.(*types.Named); named != nil { + if named, _ := aliases.Unalias(ft).(*types.Named); named != nil { w.pos(named.Obj().Pos()) } else { w.pos(token.NoPos) diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go index 9fffa9ad05c..4d50eb8e587 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go @@ -22,6 +22,8 @@ import ( "strings" "golang.org/x/tools/go/types/objectpath" + "golang.org/x/tools/internal/aliases" + "golang.org/x/tools/internal/typesinternal" ) type intReader struct { @@ -522,7 +524,7 @@ func canReuse(def *types.Named, rhs types.Type) bool { if def == nil { return true } - iface, _ := rhs.(*types.Interface) + iface, _ := aliases.Unalias(rhs).(*types.Interface) if iface == nil { return true } @@ -587,14 +589,13 @@ func (r *importReader) obj(name string) { // If the receiver has any targs, set those as the // rparams of the method (since those are the // typeparams being used in the method sig/body). 
- base := baseType(recv.Type()) - assert(base != nil) - targs := base.TypeArgs() + _, recvNamed := typesinternal.ReceiverNamed(recv) + targs := recvNamed.TypeArgs() var rparams []*types.TypeParam if targs.Len() > 0 { rparams = make([]*types.TypeParam, targs.Len()) for i := range rparams { - rparams[i] = targs.At(i).(*types.TypeParam) + rparams[i] = aliases.Unalias(targs.At(i)).(*types.TypeParam) } } msig := r.signature(recv, rparams, nil) @@ -624,7 +625,7 @@ func (r *importReader) obj(name string) { } constraint := r.typ() if implicit { - iface, _ := constraint.(*types.Interface) + iface, _ := aliases.Unalias(constraint).(*types.Interface) if iface == nil { errorf("non-interface constraint marked implicit") } @@ -831,7 +832,7 @@ func (r *importReader) typ() types.Type { } func isInterface(t types.Type) bool { - _, ok := t.(*types.Interface) + _, ok := aliases.Unalias(t).(*types.Interface) return ok } @@ -1030,7 +1031,7 @@ func (r *importReader) tparamList() []*types.TypeParam { for i := range xs { // Note: the standard library importer is tolerant of nil types here, // though would panic in SetTypeParams. - xs[i] = r.typ().(*types.TypeParam) + xs[i] = aliases.Unalias(r.typ()).(*types.TypeParam) } return xs } @@ -1077,13 +1078,3 @@ func (r *importReader) byte() byte { } return x } - -func baseType(typ types.Type) *types.Named { - // pointer receivers are never types.Named types - if p, _ := typ.(*types.Pointer); p != nil { - typ = p.Elem() - } - // receiver base types are always (possibly generic) types.Named types - n, _ := typ.(*types.Named) - return n -} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/support_go117.go b/vendor/golang.org/x/tools/internal/gcimporter/support_go117.go deleted file mode 100644 index d892273efb6..00000000000 --- a/vendor/golang.org/x/tools/internal/gcimporter/support_go117.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.18 -// +build !go1.18 - -package gcimporter - -import "go/types" - -const iexportVersion = iexportVersionGo1_11 - -func additionalPredeclared() []types.Type { - return nil -} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/support_go118.go b/vendor/golang.org/x/tools/internal/gcimporter/support_go118.go index edbe6ea7041..0cd3b91b65a 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/support_go118.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/support_go118.go @@ -2,9 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build go1.18 -// +build go1.18 - package gcimporter import "go/types" diff --git a/vendor/golang.org/x/tools/internal/gcimporter/unified_no.go b/vendor/golang.org/x/tools/internal/gcimporter/unified_no.go index 286bf445483..38b624cadab 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/unified_no.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/unified_no.go @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-//go:build !(go1.18 && goexperiment.unified) -// +build !go1.18 !goexperiment.unified +//go:build !goexperiment.unified +// +build !goexperiment.unified package gcimporter diff --git a/vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go b/vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go index b5d69ffbe68..b5118d0b3a5 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build go1.18 && goexperiment.unified -// +build go1.18,goexperiment.unified +//go:build goexperiment.unified +// +build goexperiment.unified package gcimporter diff --git a/vendor/golang.org/x/tools/internal/gcimporter/ureader_no.go b/vendor/golang.org/x/tools/internal/gcimporter/ureader_no.go deleted file mode 100644 index 8eb20729c2a..00000000000 --- a/vendor/golang.org/x/tools/internal/gcimporter/ureader_no.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.18 -// +build !go1.18 - -package gcimporter - -import ( - "fmt" - "go/token" - "go/types" -) - -func UImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) { - err = fmt.Errorf("go/tools compiled with a Go version earlier than 1.18 cannot read unified IR export data") - return -} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go index b977435f626..f4edc46ab74 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go @@ -4,9 +4,6 @@ // Derived from go/internal/gcimporter/ureader.go -//go:build go1.18 -// +build go1.18 - package gcimporter import ( @@ -16,6 +13,7 @@ import ( "sort" "strings" + "golang.org/x/tools/internal/aliases" "golang.org/x/tools/internal/pkgbits" ) @@ -553,7 +551,7 @@ func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) { // If the underlying type is an interface, we need to // duplicate its methods so we can replace the receiver // parameter's type (#49906). 
- if iface, ok := underlying.(*types.Interface); ok && iface.NumExplicitMethods() != 0 { + if iface, ok := aliases.Unalias(underlying).(*types.Interface); ok && iface.NumExplicitMethods() != 0 { methods := make([]*types.Func, iface.NumExplicitMethods()) for i := range methods { fn := iface.ExplicitMethod(i) diff --git a/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go b/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go index 7e638ec24fc..ff9437a36cd 100644 --- a/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go +++ b/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go @@ -34,30 +34,16 @@ func GetLines(file *token.File) []int { lines []int _ []struct{} } - type tokenFile118 struct { - _ *token.FileSet // deleted in go1.19 - tokenFile119 - } - - type uP = unsafe.Pointer - switch unsafe.Sizeof(*file) { - case unsafe.Sizeof(tokenFile118{}): - var ptr *tokenFile118 - *(*uP)(uP(&ptr)) = uP(file) - ptr.mu.Lock() - defer ptr.mu.Unlock() - return ptr.lines - case unsafe.Sizeof(tokenFile119{}): - var ptr *tokenFile119 - *(*uP)(uP(&ptr)) = uP(file) - ptr.mu.Lock() - defer ptr.mu.Unlock() - return ptr.lines - - default: + if unsafe.Sizeof(*file) != unsafe.Sizeof(tokenFile119{}) { panic("unexpected token.File size") } + var ptr *tokenFile119 + type uP = unsafe.Pointer + *(*uP)(uP(&ptr)) = uP(file) + ptr.mu.Lock() + defer ptr.mu.Unlock() + return ptr.lines } // AddExistingFiles adds the specified files to the FileSet if they diff --git a/vendor/golang.org/x/tools/internal/typeparams/common.go b/vendor/golang.org/x/tools/internal/typeparams/common.go index cdab9885314..8c3a42dc311 100644 --- a/vendor/golang.org/x/tools/internal/typeparams/common.go +++ b/vendor/golang.org/x/tools/internal/typeparams/common.go @@ -2,20 +2,10 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package typeparams contains common utilities for writing tools that interact -// with generic Go code, as introduced with Go 1.18. -// -// Many of the types and functions in this package are proxies for the new APIs -// introduced in the standard library with Go 1.18. For example, the -// typeparams.Union type is an alias for go/types.Union, and the ForTypeSpec -// function returns the value of the go/ast.TypeSpec.TypeParams field. At Go -// versions older than 1.18 these helpers are implemented as stubs, allowing -// users of this package to write code that handles generic constructs inline, -// even if the Go version being used to compile does not support generics. -// -// Additionally, this package contains common utilities for working with the -// new generic constructs, to supplement the standard library APIs. Notably, -// the StructuralTerms API computes a minimal representation of the structural +// Package typeparams contains common utilities for writing tools that +// interact with generic Go code, as introduced with Go 1.18. It +// supplements the standard library APIs. Notably, the StructuralTerms +// API computes a minimal representation of the structural // restrictions on a type parameter. 
// // An external version of these APIs is available in the @@ -27,6 +17,9 @@ import ( "go/ast" "go/token" "go/types" + + "golang.org/x/tools/internal/aliases" + "golang.org/x/tools/internal/typesinternal" ) // UnpackIndexExpr extracts data from AST nodes that represent index @@ -72,9 +65,9 @@ func PackIndexExpr(x ast.Expr, lbrack token.Pos, indices []ast.Expr, rbrack toke } } -// IsTypeParam reports whether t is a type parameter. +// IsTypeParam reports whether t is a type parameter (or an alias of one). func IsTypeParam(t types.Type) bool { - _, ok := t.(*types.TypeParam) + _, ok := aliases.Unalias(t).(*types.TypeParam) return ok } @@ -90,13 +83,8 @@ func OriginMethod(fn *types.Func) *types.Func { if recv == nil { return fn } - base := recv.Type() - p, isPtr := base.(*types.Pointer) - if isPtr { - base = p.Elem() - } - named, isNamed := base.(*types.Named) - if !isNamed { + _, named := typesinternal.ReceiverNamed(recv) + if named == nil { // Receiver is a *types.Interface. return fn } @@ -158,6 +146,9 @@ func OriginMethod(fn *types.Func) *types.Func { // In this case, GenericAssignableTo reports that instantiations of Container // are assignable to the corresponding instantiation of Interface. func GenericAssignableTo(ctxt *types.Context, V, T types.Type) bool { + V = aliases.Unalias(V) + T = aliases.Unalias(T) + // If V and T are not both named, or do not have matching non-empty type // parameter lists, fall back on types.AssignableTo. diff --git a/vendor/golang.org/x/tools/internal/typeparams/coretype.go b/vendor/golang.org/x/tools/internal/typeparams/coretype.go index 7ea8840eab7..e66e9d0f48c 100644 --- a/vendor/golang.org/x/tools/internal/typeparams/coretype.go +++ b/vendor/golang.org/x/tools/internal/typeparams/coretype.go @@ -5,7 +5,10 @@ package typeparams import ( + "fmt" "go/types" + + "golang.org/x/tools/internal/aliases" ) // CoreType returns the core type of T or nil if T does not have a core type. @@ -109,7 +112,7 @@ func CoreType(T types.Type) types.Type { // _NormalTerms makes no guarantees about the order of terms, except that it // is deterministic. func _NormalTerms(typ types.Type) ([]*types.Term, error) { - switch typ := typ.(type) { + switch typ := aliases.Unalias(typ).(type) { case *types.TypeParam: return StructuralTerms(typ) case *types.Union: @@ -120,3 +123,15 @@ func _NormalTerms(typ types.Type) ([]*types.Term, error) { return []*types.Term{types.NewTerm(false, typ)}, nil } } + +// MustDeref returns the type of the variable pointed to by t. +// It panics if t's core type is not a pointer. +// +// TODO(adonovan): ideally this would live in typesinternal, but that +// creates an import cycle. Move there when we melt this package down. +func MustDeref(t types.Type) types.Type { + if ptr, ok := CoreType(t).(*types.Pointer); ok { + return ptr.Elem() + } + panic(fmt.Sprintf("%v is not a pointer", t)) +} diff --git a/vendor/golang.org/x/tools/internal/typesinternal/recv.go b/vendor/golang.org/x/tools/internal/typesinternal/recv.go new file mode 100644 index 00000000000..fea7c8b75e8 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typesinternal/recv.go @@ -0,0 +1,43 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typesinternal + +import ( + "go/types" + + "golang.org/x/tools/internal/aliases" +) + +// ReceiverNamed returns the named type (if any) associated with the +// type of recv, which may be of the form N or *N, or aliases thereof. 
+// It also reports whether a Pointer was present. +func ReceiverNamed(recv *types.Var) (isPtr bool, named *types.Named) { + t := recv.Type() + if ptr, ok := aliases.Unalias(t).(*types.Pointer); ok { + isPtr = true + t = ptr.Elem() + } + named, _ = aliases.Unalias(t).(*types.Named) + return +} + +// Unpointer returns T given *T or an alias thereof. +// For all other types it is the identity function. +// It does not look at underlying types. +// The result may be an alias. +// +// Use this function to strip off the optional pointer on a receiver +// in a field or method selection, without losing the named type +// (which is needed to compute the method set). +// +// See also [typeparams.MustDeref], which removes one level of +// indirection from the type, regardless of named types (analogous to +// a LOAD instruction). +func Unpointer(t types.Type) types.Type { + if ptr, ok := aliases.Unalias(t).(*types.Pointer); ok { + return ptr.Elem() + } + return t +} diff --git a/vendor/golang.org/x/tools/internal/typesinternal/types_118.go b/vendor/golang.org/x/tools/internal/typesinternal/types_118.go index a42b072a67d..ef7ea290c0b 100644 --- a/vendor/golang.org/x/tools/internal/typesinternal/types_118.go +++ b/vendor/golang.org/x/tools/internal/typesinternal/types_118.go @@ -2,9 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build go1.18 -// +build go1.18 - package typesinternal import ( diff --git a/vendor/golang.org/x/tools/internal/versions/features.go b/vendor/golang.org/x/tools/internal/versions/features.go new file mode 100644 index 00000000000..b53f1786161 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/versions/features.go @@ -0,0 +1,43 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package versions + +// This file contains predicates for working with file versions to +// decide when a tool should consider a language feature enabled. + +// GoVersions that features in x/tools can be gated to. +const ( + Go1_18 = "go1.18" + Go1_19 = "go1.19" + Go1_20 = "go1.20" + Go1_21 = "go1.21" + Go1_22 = "go1.22" +) + +// Future is an invalid unknown Go version sometime in the future. +// Do not use directly with Compare. +const Future = "" + +// AtLeast reports whether the file version v comes after a Go release. +// +// Use this predicate to enable a behavior once a certain Go release +// has happened (and stays enabled in the future). +func AtLeast(v, release string) bool { + if v == Future { + return true // an unknown future version is always after y. + } + return Compare(Lang(v), Lang(release)) >= 0 +} + +// Before reports whether the file version v is strictly before a Go release. +// +// Use this predicate to disable a behavior once a certain Go release +// has happened (and stays enabled in the future). +func Before(v, release string) bool { + if v == Future { + return false // an unknown future version happens after y. + } + return Compare(Lang(v), Lang(release)) < 0 +} diff --git a/vendor/golang.org/x/tools/internal/versions/toolchain.go b/vendor/golang.org/x/tools/internal/versions/toolchain.go new file mode 100644 index 00000000000..377bf7a53b4 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/versions/toolchain.go @@ -0,0 +1,14 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package versions + +// toolchain is maximum version (<1.22) that the go toolchain used +// to build the current tool is known to support. +// +// When a tool is built with >=1.22, the value of toolchain is unused. +// +// x/tools does not support building with go <1.18. So we take this +// as the minimum possible maximum. +var toolchain string = Go1_18 diff --git a/vendor/golang.org/x/tools/internal/versions/toolchain_go119.go b/vendor/golang.org/x/tools/internal/versions/toolchain_go119.go new file mode 100644 index 00000000000..f65beed9d83 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/versions/toolchain_go119.go @@ -0,0 +1,14 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.19 +// +build go1.19 + +package versions + +func init() { + if Compare(toolchain, Go1_19) < 0 { + toolchain = Go1_19 + } +} diff --git a/vendor/golang.org/x/tools/internal/versions/toolchain_go120.go b/vendor/golang.org/x/tools/internal/versions/toolchain_go120.go new file mode 100644 index 00000000000..1a9efa126cd --- /dev/null +++ b/vendor/golang.org/x/tools/internal/versions/toolchain_go120.go @@ -0,0 +1,14 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.20 +// +build go1.20 + +package versions + +func init() { + if Compare(toolchain, Go1_20) < 0 { + toolchain = Go1_20 + } +} diff --git a/vendor/golang.org/x/tools/internal/versions/toolchain_go121.go b/vendor/golang.org/x/tools/internal/versions/toolchain_go121.go new file mode 100644 index 00000000000..b7ef216dfec --- /dev/null +++ b/vendor/golang.org/x/tools/internal/versions/toolchain_go121.go @@ -0,0 +1,14 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.21 +// +build go1.21 + +package versions + +func init() { + if Compare(toolchain, Go1_21) < 0 { + toolchain = Go1_21 + } +} diff --git a/vendor/golang.org/x/tools/internal/versions/types_go121.go b/vendor/golang.org/x/tools/internal/versions/types_go121.go index a7b79207aee..b4345d3349e 100644 --- a/vendor/golang.org/x/tools/internal/versions/types_go121.go +++ b/vendor/golang.org/x/tools/internal/versions/types_go121.go @@ -12,9 +12,19 @@ import ( "go/types" ) -// FileVersions always reports the a file's Go version as the -// zero version at this Go version. -func FileVersions(info *types.Info, file *ast.File) string { return "" } +// FileVersion returns a language version (<=1.21) derived from runtime.Version() +// or an unknown future version. +func FileVersion(info *types.Info, file *ast.File) string { + // In x/tools built with Go <= 1.21, we do not have Info.FileVersions + // available. We use a go version derived from the toolchain used to + // compile the tool by default. + // This will be <= go1.21. We take this as the maximum version that + // this tool can support. + // + // There are no features currently in x/tools that need to tell fine grained + // differences for versions <1.22. + return toolchain +} -// InitFileVersions is a noop at this Go version. +// InitFileVersions is a noop when compiled with this Go version. 
func InitFileVersions(*types.Info) {} diff --git a/vendor/golang.org/x/tools/internal/versions/types_go122.go b/vendor/golang.org/x/tools/internal/versions/types_go122.go index 7b9ba89a822..e8180632a52 100644 --- a/vendor/golang.org/x/tools/internal/versions/types_go122.go +++ b/vendor/golang.org/x/tools/internal/versions/types_go122.go @@ -12,10 +12,27 @@ import ( "go/types" ) -// FileVersions maps a file to the file's semantic Go version. -// The reported version is the zero version if a version cannot be determined. -func FileVersions(info *types.Info, file *ast.File) string { - return info.FileVersions[file] +// FileVersions returns a file's Go version. +// The reported version is an unknown Future version if a +// version cannot be determined. +func FileVersion(info *types.Info, file *ast.File) string { + // In tools built with Go >= 1.22, the Go version of a file + // follow a cascades of sources: + // 1) types.Info.FileVersion, which follows the cascade: + // 1.a) file version (ast.File.GoVersion), + // 1.b) the package version (types.Config.GoVersion), or + // 2) is some unknown Future version. + // + // File versions require a valid package version to be provided to types + // in Config.GoVersion. Config.GoVersion is either from the package's module + // or the toolchain (go run). This value should be provided by go/packages + // or unitchecker.Config.GoVersion. + if v := info.FileVersions[file]; IsValid(v) { + return v + } + // Note: we could instead return runtime.Version() [if valid]. + // This would act as a max version on what a tool can support. + return Future } // InitFileVersions initializes info to record Go versions for Go files. diff --git a/vendor/golang.org/x/tools/internal/versions/versions.go b/vendor/golang.org/x/tools/internal/versions/versions.go index e16f6c33a52..8d1f7453dbf 100644 --- a/vendor/golang.org/x/tools/internal/versions/versions.go +++ b/vendor/golang.org/x/tools/internal/versions/versions.go @@ -4,6 +4,10 @@ package versions +import ( + "strings" +) + // Note: If we use build tags to use go/versions when go >=1.22, // we run into go.dev/issue/53737. Under some operations users would see an // import of "go/versions" even if they would not compile the file. @@ -45,6 +49,7 @@ func IsValid(x string) bool { return isValid(stripGo(x)) } // stripGo converts from a "go1.21" version to a "1.21" version. // If v does not start with "go", stripGo returns the empty string (a known invalid version). func stripGo(v string) string { + v, _, _ = strings.Cut(v, "-") // strip -bigcorp suffix. 
if len(v) < 2 || v[:2] != "go" { return "" } diff --git a/vendor/modules.txt b/vendor/modules.txt index c213909eb1f..abd8f2f953e 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -106,7 +106,7 @@ github.com/containernetworking/cni/pkg/version # github.com/containernetworking/plugins v1.4.0 ## explicit; go 1.20 github.com/containernetworking/plugins/pkg/ns -# github.com/containers/common v0.58.1-0.20240318131753-6f1c96f53a78 +# github.com/containers/common v0.58.1-0.20240410144442-8db59bf2fcce ## explicit; go 1.20 github.com/containers/common/internal github.com/containers/common/internal/attributedstring @@ -250,7 +250,7 @@ github.com/containers/ocicrypt/keywrap/pkcs7 github.com/containers/ocicrypt/spec github.com/containers/ocicrypt/utils github.com/containers/ocicrypt/utils/keyprovider -# github.com/containers/storage v1.53.0 +# github.com/containers/storage v1.53.1-0.20240411065836-1fd0dc1d20e5 ## explicit; go 1.20 github.com/containers/storage github.com/containers/storage/drivers @@ -484,7 +484,7 @@ github.com/josharian/intern # github.com/json-iterator/go v1.1.12 ## explicit; go 1.12 github.com/json-iterator/go -# github.com/klauspost/compress v1.17.7 +# github.com/klauspost/compress v1.17.8 ## explicit; go 1.20 github.com/klauspost/compress github.com/klauspost/compress/flate @@ -589,7 +589,7 @@ github.com/onsi/ginkgo/v2/internal/parallel_support github.com/onsi/ginkgo/v2/internal/testingtproxy github.com/onsi/ginkgo/v2/reporters github.com/onsi/ginkgo/v2/types -# github.com/onsi/gomega v1.31.1 +# github.com/onsi/gomega v1.32.0 ## explicit; go 1.20 github.com/onsi/gomega github.com/onsi/gomega/format @@ -710,7 +710,7 @@ github.com/tchap/go-patricia/v2/patricia # github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 ## explicit github.com/titanous/rocacheck -# github.com/ulikunitz/xz v0.5.11 +# github.com/ulikunitz/xz v0.5.12 ## explicit; go 1.12 github.com/ulikunitz/xz github.com/ulikunitz/xz/internal/hash @@ -780,7 +780,7 @@ go.opentelemetry.io/otel/metric/embedded ## explicit; go 1.20 go.opentelemetry.io/otel/trace go.opentelemetry.io/otel/trace/embedded -# golang.org/x/crypto v0.21.0 +# golang.org/x/crypto v0.22.0 ## explicit; go 1.18 golang.org/x/crypto/argon2 golang.org/x/crypto/bcrypt @@ -811,12 +811,12 @@ golang.org/x/crypto/ssh/agent golang.org/x/crypto/ssh/internal/bcrypt_pbkdf golang.org/x/crypto/twofish golang.org/x/crypto/xts -# golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 +# golang.org/x/exp v0.0.0-20240325151524-a685a6edb6d8 ## explicit; go 1.20 golang.org/x/exp/constraints golang.org/x/exp/maps golang.org/x/exp/slices -# golang.org/x/mod v0.15.0 +# golang.org/x/mod v0.16.0 ## explicit; go 1.18 golang.org/x/mod/semver # golang.org/x/net v0.22.0 @@ -831,18 +831,18 @@ golang.org/x/net/http2/hpack golang.org/x/net/idna golang.org/x/net/internal/timeseries golang.org/x/net/trace -# golang.org/x/sync v0.6.0 +# golang.org/x/sync v0.7.0 ## explicit; go 1.18 golang.org/x/sync/errgroup golang.org/x/sync/semaphore -# golang.org/x/sys v0.18.0 +# golang.org/x/sys v0.19.0 ## explicit; go 1.18 golang.org/x/sys/cpu golang.org/x/sys/plan9 golang.org/x/sys/unix golang.org/x/sys/windows golang.org/x/sys/windows/registry -# golang.org/x/term v0.18.0 +# golang.org/x/term v0.19.0 ## explicit; go 1.18 golang.org/x/term # golang.org/x/text v0.14.0 @@ -867,8 +867,8 @@ golang.org/x/text/secure/bidirule golang.org/x/text/transform golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm -# golang.org/x/tools v0.18.0 -## explicit; go 1.18 +# golang.org/x/tools 
v0.19.0 +## explicit; go 1.19 golang.org/x/tools/cmd/stringer golang.org/x/tools/cover golang.org/x/tools/go/ast/inspector @@ -876,6 +876,7 @@ golang.org/x/tools/go/gcexportdata golang.org/x/tools/go/internal/packagesdriver golang.org/x/tools/go/packages golang.org/x/tools/go/types/objectpath +golang.org/x/tools/internal/aliases golang.org/x/tools/internal/event golang.org/x/tools/internal/event/core golang.org/x/tools/internal/event/keys @@ -997,14 +998,13 @@ k8s.io/klog ## explicit; go 1.12 sigs.k8s.io/yaml sigs.k8s.io/yaml/goyaml.v2 -# tags.cncf.io/container-device-interface v0.6.2 -## explicit; go 1.19 -tags.cncf.io/container-device-interface/internal/multierror +# tags.cncf.io/container-device-interface v0.7.1 +## explicit; go 1.20 tags.cncf.io/container-device-interface/internal/validation tags.cncf.io/container-device-interface/internal/validation/k8s tags.cncf.io/container-device-interface/pkg/cdi tags.cncf.io/container-device-interface/pkg/parser -# tags.cncf.io/container-device-interface/specs-go v0.6.0 +# tags.cncf.io/container-device-interface/specs-go v0.7.0 ## explicit; go 1.19 tags.cncf.io/container-device-interface/specs-go # github.com/opencontainers/runtime-spec => github.com/opencontainers/runtime-spec v1.1.0 diff --git a/vendor/tags.cncf.io/container-device-interface/internal/multierror/multierror.go b/vendor/tags.cncf.io/container-device-interface/internal/multierror/multierror.go deleted file mode 100644 index 07aca4a1d3e..00000000000 --- a/vendor/tags.cncf.io/container-device-interface/internal/multierror/multierror.go +++ /dev/null @@ -1,82 +0,0 @@ -/* - Copyright © 2022 The CDI Authors - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package multierror - -import ( - "strings" -) - -// New combines several errors into a single error. Parameters that are nil are -// ignored. If no errors are passed in or all parameters are nil, then the -// result is also nil. -func New(errors ...error) error { - // Filter out nil entries. - numErrors := 0 - for _, err := range errors { - if err != nil { - errors[numErrors] = err - numErrors++ - } - } - if numErrors == 0 { - return nil - } - return multiError(errors[0:numErrors]) -} - -// multiError is the underlying implementation used by New. -// -// Beware that a null multiError is not the same as a nil error. -type multiError []error - -// multiError returns all individual error strings concatenated with "\n" -func (e multiError) Error() string { - var builder strings.Builder - for i, err := range e { - if i > 0 { - _, _ = builder.WriteString("\n") - } - _, _ = builder.WriteString(err.Error()) - } - return builder.String() -} - -// Append returns a new multi error all errors concatenated. Errors that are -// multi errors get flattened, nil is ignored. 
-func Append(err error, errors ...error) error { - var result multiError - if m, ok := err.(multiError); ok { - result = m - } else if err != nil { - result = append(result, err) - } - - for _, e := range errors { - if e == nil { - continue - } - if m, ok := e.(multiError); ok { - result = append(result, m...) - } else { - result = append(result, e) - } - } - if len(result) == 0 { - return nil - } - return result -} diff --git a/vendor/tags.cncf.io/container-device-interface/internal/validation/k8s/objectmeta.go b/vendor/tags.cncf.io/container-device-interface/internal/validation/k8s/objectmeta.go index 5cf63dabf47..fb86c67ac03 100644 --- a/vendor/tags.cncf.io/container-device-interface/internal/validation/k8s/objectmeta.go +++ b/vendor/tags.cncf.io/container-device-interface/internal/validation/k8s/objectmeta.go @@ -20,10 +20,9 @@ limitations under the License. package k8s import ( + "errors" "fmt" "strings" - - "tags.cncf.io/container-device-interface/internal/multierror" ) // TotalAnnotationSizeLimitB defines the maximum size of all annotations in characters. @@ -31,17 +30,17 @@ const TotalAnnotationSizeLimitB int = 256 * (1 << 10) // 256 kB // ValidateAnnotations validates that a set of annotations are correctly defined. func ValidateAnnotations(annotations map[string]string, path string) error { - errors := multierror.New() + errs := []error{} for k := range annotations { // The rule is QualifiedName except that case doesn't matter, so convert to lowercase before checking. for _, msg := range IsQualifiedName(strings.ToLower(k)) { - errors = multierror.Append(errors, fmt.Errorf("%v.%v is invalid: %v", path, k, msg)) + errs = append(errs, fmt.Errorf("%v.%v is invalid: %v", path, k, msg)) } } if err := ValidateAnnotationsSize(annotations); err != nil { - errors = multierror.Append(errors, fmt.Errorf("%v is too long: %v", path, err)) + errs = append(errs, fmt.Errorf("%v is too long: %v", path, err)) } - return errors + return errors.Join(errs...) } // ValidateAnnotationsSize validates that a set of annotations is not too large. diff --git a/vendor/tags.cncf.io/container-device-interface/pkg/cdi/cache.go b/vendor/tags.cncf.io/container-device-interface/pkg/cdi/cache.go index c807b55fd49..c2f7fe34637 100644 --- a/vendor/tags.cncf.io/container-device-interface/pkg/cdi/cache.go +++ b/vendor/tags.cncf.io/container-device-interface/pkg/cdi/cache.go @@ -28,12 +28,11 @@ import ( "github.com/fsnotify/fsnotify" oci "github.com/opencontainers/runtime-spec/specs-go" - "tags.cncf.io/container-device-interface/internal/multierror" cdi "tags.cncf.io/container-device-interface/specs-go" ) // Option is an option to change some aspect of default CDI behavior. -type Option func(*Cache) error +type Option func(*Cache) // Cache stores CDI Specs loaded from Spec directories. type Cache struct { @@ -54,16 +53,27 @@ type Cache struct { // is detected. This option can be used to disable this behavior when a // manually refreshed mode is preferable. func WithAutoRefresh(autoRefresh bool) Option { - return func(c *Cache) error { + return func(c *Cache) { c.autoRefresh = autoRefresh - return nil } } // NewCache creates a new CDI Cache. The cache is populated from a set // of CDI Spec directories. These can be specified using a WithSpecDirs // option. The default set of directories is exposed in DefaultSpecDirs. +// +// Note: +// +// The error returned by this function is always nil and it is only +// returned to maintain API compatibility with consumers. 
func NewCache(options ...Option) (*Cache, error) { + return newCache(options...), nil +} + +// newCache creates a CDI cache with the supplied options. +// This function allows testing without handling the nil error returned by the +// NewCache function. +func newCache(options ...Option) *Cache { c := &Cache{ autoRefresh: true, watch: &watch{}, @@ -73,7 +83,8 @@ func NewCache(options ...Option) (*Cache, error) { c.Lock() defer c.Unlock() - return c, c.configure(options...) + c.configure(options...) + return c } // Configure applies options to the Cache. Updates and refreshes the @@ -86,18 +97,16 @@ func (c *Cache) Configure(options ...Option) error { c.Lock() defer c.Unlock() - return c.configure(options...) + c.configure(options...) + + return nil } // Configure the Cache. Start/stop CDI Spec directory watch, refresh // the Cache if necessary. -func (c *Cache) configure(options ...Option) error { - var err error - +func (c *Cache) configure(options ...Option) { for _, o := range options { - if err = o(c); err != nil { - return fmt.Errorf("failed to apply cache options: %w", err) - } + o(c) } c.dirErrors = make(map[string]error) @@ -108,8 +117,6 @@ func (c *Cache) configure(options ...Option) error { c.watch.start(&c.Mutex, c.refresh, c.dirErrors) } c.refresh() - - return nil } // Refresh rescans the CDI Spec directories and refreshes the Cache. @@ -125,11 +132,11 @@ func (c *Cache) Refresh() error { } // collect and return cached errors, much like refresh() does it - var result error - for _, errors := range c.errors { - result = multierror.Append(result, errors...) + errs := []error{} + for _, specErrs := range c.errors { + errs = append(errs, errors.Join(specErrs...)) } - return result + return errors.Join(errs...) } // Refresh the Cache by rescanning CDI Spec directories and files. @@ -139,12 +146,10 @@ func (c *Cache) refresh() error { devices = map[string]*Device{} conflicts = map[string]struct{}{} specErrors = map[string][]error{} - result []error ) // collect errors per spec file path and once globally collectError := func(err error, paths ...string) { - result = append(result, err) for _, path := range paths { specErrors[path] = append(specErrors[path], err) } @@ -197,7 +202,11 @@ func (c *Cache) refresh() error { c.devices = devices c.errors = specErrors - return multierror.New(result...) + errs := []error{} + for _, specErrs := range specErrors { + errs = append(errs, errors.Join(specErrs...)) + } + return errors.Join(errs...) } // RefreshIfRequired triggers a refresh if necessary. diff --git a/vendor/tags.cncf.io/container-device-interface/pkg/cdi/container-edits.go b/vendor/tags.cncf.io/container-device-interface/pkg/cdi/container-edits.go index 688ddf78b6b..e5b50db1c53 100644 --- a/vendor/tags.cncf.io/container-device-interface/pkg/cdi/container-edits.go +++ b/vendor/tags.cncf.io/container-device-interface/pkg/cdi/container-edits.go @@ -144,6 +144,20 @@ func (e *ContainerEdits) Apply(spec *oci.Spec) error { } } + if e.IntelRdt != nil { + // The specgen is missing functionality to set all parameters so we + // just piggy-back on it to initialize all structs and the copy over. 
+ specgen.SetLinuxIntelRdtClosID(e.IntelRdt.ClosID) + spec.Linux.IntelRdt = e.IntelRdt.ToOCI() + } + + for _, additionalGID := range e.AdditionalGIDs { + if additionalGID == 0 { + continue + } + specgen.AddProcessAdditionalGid(additionalGID) + } + return nil } @@ -171,6 +185,11 @@ func (e *ContainerEdits) Validate() error { return err } } + if e.IntelRdt != nil { + if err := ValidateIntelRdt(e.IntelRdt); err != nil { + return err + } + } return nil } @@ -192,6 +211,10 @@ func (e *ContainerEdits) Append(o *ContainerEdits) *ContainerEdits { e.DeviceNodes = append(e.DeviceNodes, o.DeviceNodes...) e.Hooks = append(e.Hooks, o.Hooks...) e.Mounts = append(e.Mounts, o.Mounts...) + if o.IntelRdt != nil { + e.IntelRdt = o.IntelRdt + } + e.AdditionalGIDs = append(e.AdditionalGIDs, o.AdditionalGIDs...) return e } @@ -202,7 +225,25 @@ func (e *ContainerEdits) isEmpty() bool { if e == nil { return false } - return len(e.Env)+len(e.DeviceNodes)+len(e.Hooks)+len(e.Mounts) == 0 + if len(e.Env) > 0 { + return false + } + if len(e.DeviceNodes) > 0 { + return false + } + if len(e.Hooks) > 0 { + return false + } + if len(e.Mounts) > 0 { + return false + } + if len(e.AdditionalGIDs) > 0 { + return false + } + if e.IntelRdt != nil { + return false + } + return true } // ValidateEnv validates the given environment variables. @@ -280,6 +321,15 @@ func (m *Mount) Validate() error { return nil } +// ValidateIntelRdt validates the IntelRdt configuration. +func ValidateIntelRdt(i *specs.IntelRdt) error { + // ClosID must be a valid Linux filename + if len(i.ClosID) >= 4096 || i.ClosID == "." || i.ClosID == ".." || strings.ContainsAny(i.ClosID, "/\n") { + return errors.New("invalid ClosID") + } + return nil +} + // Ensure OCI Spec hooks are not nil so we can add hooks. func ensureOCIHooks(spec *oci.Spec) { if spec.Hooks == nil { diff --git a/vendor/tags.cncf.io/container-device-interface/pkg/cdi/default-cache.go b/vendor/tags.cncf.io/container-device-interface/pkg/cdi/default-cache.go new file mode 100644 index 00000000000..7886ee517f2 --- /dev/null +++ b/vendor/tags.cncf.io/container-device-interface/pkg/cdi/default-cache.go @@ -0,0 +1,70 @@ +/* + Copyright © 2024 The CDI Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package cdi + +import ( + "sync" + + oci "github.com/opencontainers/runtime-spec/specs-go" +) + +var ( + defaultCache *Cache + getDefaultOnce sync.Once +) + +func getOrCreateDefaultCache(options ...Option) (*Cache, bool) { + var created bool + getDefaultOnce.Do(func() { + defaultCache = newCache(options...) + created = true + }) + return defaultCache, created +} + +// GetDefaultCache returns the default CDI cache instance. +func GetDefaultCache() *Cache { + cache, _ := getOrCreateDefaultCache() + return cache +} + +// Configure applies options to the default CDI cache. Updates and refreshes +// the default cache if options are not empty. +func Configure(options ...Option) error { + cache, created := getOrCreateDefaultCache(options...) 
+ if len(options) == 0 || created { + return nil + } + return cache.Configure(options...) +} + +// Refresh explicitly refreshes the default CDI cache instance. +func Refresh() error { + return GetDefaultCache().Refresh() +} + +// InjectDevices injects the given qualified devices to the given OCI Spec. +// using the default CDI cache instance to resolve devices. +func InjectDevices(ociSpec *oci.Spec, devices ...string) ([]string, error) { + return GetDefaultCache().InjectDevices(ociSpec, devices...) +} + +// GetErrors returns all errors encountered during the last refresh of +// the default CDI cache instance. +func GetErrors() map[string][]error { + return GetDefaultCache().GetErrors() +} diff --git a/vendor/tags.cncf.io/container-device-interface/pkg/cdi/doc.go b/vendor/tags.cncf.io/container-device-interface/pkg/cdi/doc.go index 1897ef1fca3..0ea07145522 100644 --- a/vendor/tags.cncf.io/container-device-interface/pkg/cdi/doc.go +++ b/vendor/tags.cncf.io/container-device-interface/pkg/cdi/doc.go @@ -29,8 +29,22 @@ // the vast majority of CDI consumers need. The API should be usable both // by OCI runtime clients and runtime implementations. // +// # Default CDI Cache +// +// There is a default CDI cache instance which is always implicitly +// available and instantiated the first time it is referenced directly +// or indirectly. The most frequently used cache functions are available +// as identically named package level functions which operate on the +// default cache instance. Moreover, the registry also operates on the +// same default cache. We plan to deprecate the registry and eventually +// remove it in a future release. +// // # CDI Registry // +// Note: the Registry and its related interfaces are deprecated and will +// be removed in a future version. Please use the default cache and its +// related package-level function instead. +// // The primary interface to interact with CDI devices is the Registry. It // is essentially a cache of all Specs and devices discovered in standard // CDI directories on the host. The registry has two main functionality, diff --git a/vendor/tags.cncf.io/container-device-interface/pkg/cdi/registry.go b/vendor/tags.cncf.io/container-device-interface/pkg/cdi/registry.go index 7f12c777e8f..3113a05a225 100644 --- a/vendor/tags.cncf.io/container-device-interface/pkg/cdi/registry.go +++ b/vendor/tags.cncf.io/container-device-interface/pkg/cdi/registry.go @@ -29,6 +29,11 @@ import ( // // The most commonly used Registry functions are for refreshing the // registry and injecting CDI devices into an OCI Spec. +// +// Deprecated: Registry is deprecated and will be removed in a future +// version. Please update your code to use the corresponding package- +// level functions Configure(), Refresh(), InjectDevices(), GetErrors(), +// and GetDefaultCache(). type Registry interface { RegistryResolver RegistryRefresher @@ -54,6 +59,10 @@ type Registry interface { // // GetSpecDirErrors returns any errors related to the configured // Spec directories. +// +// Deprecated: RegistryRefresher is deprecated and will be removed +// in a future version. Please use the default cache and its related +// package-level functions instead. type RegistryRefresher interface { Configure(...Option) error Refresh() error @@ -68,6 +77,10 @@ type RegistryRefresher interface { // InjectDevices takes an OCI Spec and injects into it a set of // CDI devices given by qualified name. It returns the names of // any unresolved devices and an error if injection fails. 
+// +// Deprecated: RegistryRefresher is deprecated and will be removed +// in a future version. Please use the default cache and its related +// package-level functions instead. type RegistryResolver interface { InjectDevices(spec *oci.Spec, device ...string) (unresolved []string, err error) } @@ -79,6 +92,12 @@ type RegistryResolver interface { // // ListDevices returns a slice with the names of qualified device // known. The returned slice is sorted. +// +// Deprecated: RegistryDeviceDB is deprecated and will be removed +// in a future version. Please use the default cache and its related +// package-level functions instead. +// and will be removed in a future version. Please use the default +// cache and its related package-level functions instead. type RegistryDeviceDB interface { GetDevice(device string) *Device ListDevices() []string @@ -99,6 +118,10 @@ type RegistryDeviceDB interface { // // WriteSpec writes the Spec with the given content and name to the // last Spec directory. +// +// Deprecated: RegistrySpecDB is deprecated and will be removed +// in a future version. Please use the default cache and its related +// package-level functions instead. type RegistrySpecDB interface { ListVendors() []string ListClasses() []string @@ -121,30 +144,35 @@ var ( // GetRegistry returns the CDI registry. If any options are given, those // are applied to the registry. +// +// Deprecated: GetRegistry is deprecated and will be removed in a future +// version. Please use the default cache and its related package-level +// functions instead. func GetRegistry(options ...Option) Registry { - var new bool initOnce.Do(func() { - reg, _ = getRegistry(options...) - new = true + reg = ®istry{GetDefaultCache()} }) - if !new && len(options) > 0 { - reg.Configure(options...) - reg.Refresh() + if len(options) > 0 { + // We don't care about errors here + _ = reg.Configure(options...) } return reg } // DeviceDB returns the registry interface for querying devices. +// +// Deprecated: DeviceDB is deprecated and will be removed in a future +// version. Please use the default cache and its related package-level +// functions instead. func (r *registry) DeviceDB() RegistryDeviceDB { return r } // SpecDB returns the registry interface for querying Specs. +// +// Deprecated: SpecDB is deprecated and will be removed in a future +// version. Please use the default cache and its related package-level +// functions instead. func (r *registry) SpecDB() RegistrySpecDB { return r } - -func getRegistry(options ...Option) (*registry, error) { - c, err := NewCache(options...) - return ®istry{c}, err -} diff --git a/vendor/tags.cncf.io/container-device-interface/pkg/cdi/spec-dirs.go b/vendor/tags.cncf.io/container-device-interface/pkg/cdi/spec-dirs.go index f339349bbac..09005d690c8 100644 --- a/vendor/tags.cncf.io/container-device-interface/pkg/cdi/spec-dirs.go +++ b/vendor/tags.cncf.io/container-device-interface/pkg/cdi/spec-dirs.go @@ -44,13 +44,12 @@ var ( // WithSpecDirs returns an option to override the CDI Spec directories. 
func WithSpecDirs(dirs ...string) Option { - return func(c *Cache) error { + return func(c *Cache) { specDirs := make([]string, len(dirs)) for i, dir := range dirs { specDirs[i] = filepath.Clean(dir) } c.specDirs = specDirs - return nil } } diff --git a/vendor/tags.cncf.io/container-device-interface/pkg/cdi/spec.go b/vendor/tags.cncf.io/container-device-interface/pkg/cdi/spec.go index 8bd63cc5295..1a0a662b806 100644 --- a/vendor/tags.cncf.io/container-device-interface/pkg/cdi/spec.go +++ b/vendor/tags.cncf.io/container-device-interface/pkg/cdi/spec.go @@ -19,7 +19,6 @@ package cdi import ( "encoding/json" "fmt" - "io/ioutil" "os" "path/filepath" "strings" @@ -62,7 +61,7 @@ type Spec struct { // assigned the given priority. If reading or parsing the Spec // data fails ReadSpec returns a nil Spec and an error. func ReadSpec(path string, priority int) (*Spec, error) { - data, err := ioutil.ReadFile(path) + data, err := os.ReadFile(path) switch { case os.IsNotExist(err): return nil, err diff --git a/vendor/tags.cncf.io/container-device-interface/pkg/cdi/version.go b/vendor/tags.cncf.io/container-device-interface/pkg/cdi/version.go index a6178127847..e226694458d 100644 --- a/vendor/tags.cncf.io/container-device-interface/pkg/cdi/version.go +++ b/vendor/tags.cncf.io/container-device-interface/pkg/cdi/version.go @@ -39,6 +39,7 @@ const ( v040 version = "v0.4.0" v050 version = "v0.5.0" v060 version = "v0.6.0" + v070 version = "v0.7.0" // vEarliest is the earliest supported version of the CDI specification vEarliest version = v030 @@ -54,6 +55,7 @@ var validSpecVersions = requiredVersionMap{ v040: requiresV040, v050: requiresV050, v060: requiresV060, + v070: requiresV070, } // MinimumRequiredVersion determines the minimum spec version for the input spec. @@ -118,6 +120,29 @@ func (r requiredVersionMap) requiredVersion(spec *cdi.Spec) version { return minVersion } +// requiresV070 returns true if the spec uses v0.7.0 features +func requiresV070(spec *cdi.Spec) bool { + if spec.ContainerEdits.IntelRdt != nil { + return true + } + // The v0.7.0 spec allows additional GIDs to be specified at a spec level. + if len(spec.ContainerEdits.AdditionalGIDs) > 0 { + return true + } + + for _, d := range spec.Devices { + if d.ContainerEdits.IntelRdt != nil { + return true + } + // The v0.7.0 spec allows additional GIDs to be specified at a device level. + if len(d.ContainerEdits.AdditionalGIDs) > 0 { + return true + } + } + + return false +} + // requiresV060 returns true if the spec uses v0.6.0 features func requiresV060(spec *cdi.Spec) bool { // The v0.6.0 spec allows annotations to be specified at a spec level diff --git a/vendor/tags.cncf.io/container-device-interface/specs-go/config.go b/vendor/tags.cncf.io/container-device-interface/specs-go/config.go index 4043b858f2f..a311ffa1dbd 100644 --- a/vendor/tags.cncf.io/container-device-interface/specs-go/config.go +++ b/vendor/tags.cncf.io/container-device-interface/specs-go/config.go @@ -3,7 +3,7 @@ package specs import "os" // CurrentVersion is the current version of the Spec. -const CurrentVersion = "0.6.0" +const CurrentVersion = "0.7.0" // Spec is the base configuration for CDI type Spec struct { @@ -25,10 +25,12 @@ type Device struct { // ContainerEdits are edits a container runtime must make to the OCI spec to expose the device. 
type ContainerEdits struct { - Env []string `json:"env,omitempty"` - DeviceNodes []*DeviceNode `json:"deviceNodes,omitempty"` - Hooks []*Hook `json:"hooks,omitempty"` - Mounts []*Mount `json:"mounts,omitempty"` + Env []string `json:"env,omitempty"` + DeviceNodes []*DeviceNode `json:"deviceNodes,omitempty"` + Hooks []*Hook `json:"hooks,omitempty"` + Mounts []*Mount `json:"mounts,omitempty"` + IntelRdt *IntelRdt `json:"intelRdt,omitempty"` + AdditionalGIDs []uint32 `json:"additionalGids,omitempty"` } // DeviceNode represents a device node that needs to be added to the OCI spec. @@ -60,3 +62,12 @@ type Hook struct { Env []string `json:"env,omitempty"` Timeout *int `json:"timeout,omitempty"` } + +// IntelRdt describes the Linux IntelRdt parameters to set in the OCI spec. +type IntelRdt struct { + ClosID string `json:"closID,omitempty"` + L3CacheSchema string `json:"l3CacheSchema,omitempty"` + MemBwSchema string `json:"memBwSchema,omitempty"` + EnableCMT bool `json:"enableCMT,omitempty"` + EnableMBM bool `json:"enableMBM,omitempty"` +} diff --git a/vendor/tags.cncf.io/container-device-interface/specs-go/oci.go b/vendor/tags.cncf.io/container-device-interface/specs-go/oci.go index 229ad52e0c4..1b4d576bf13 100644 --- a/vendor/tags.cncf.io/container-device-interface/specs-go/oci.go +++ b/vendor/tags.cncf.io/container-device-interface/specs-go/oci.go @@ -36,3 +36,14 @@ func (d *DeviceNode) ToOCI() spec.LinuxDevice { GID: d.GID, } } + +// ToOCI returns the opencontainers runtime Spec LinuxIntelRdt for this IntelRdt config. +func (i *IntelRdt) ToOCI() *spec.LinuxIntelRdt { + return &spec.LinuxIntelRdt{ + ClosID: i.ClosID, + L3CacheSchema: i.L3CacheSchema, + MemBwSchema: i.MemBwSchema, + EnableCMT: i.EnableCMT, + EnableMBM: i.EnableMBM, + } +} From 9dcd1cc9a6b7704ffd8c7d1abf5e18f4a2fe6380 Mon Sep 17 00:00:00 2001 From: Giuseppe Scrivano Date: Thu, 11 Apr 2024 10:37:58 +0200 Subject: [PATCH 2/5] chroot: use fileutils.(Le|E)xists Signed-off-by: Giuseppe Scrivano --- chroot/run_freebsd.go | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/chroot/run_freebsd.go b/chroot/run_freebsd.go index 52763ee970f..535491d6a99 100644 --- a/chroot/run_freebsd.go +++ b/chroot/run_freebsd.go @@ -4,8 +4,10 @@ package chroot import ( + "errors" "fmt" "io" + "io/fs" "os" "os/exec" "path/filepath" @@ -13,6 +15,7 @@ import ( "syscall" "github.com/containers/buildah/pkg/jail" + "github.com/containers/storage/pkg/fileutils" "github.com/containers/storage/pkg/mount" "github.com/containers/storage/pkg/unshare" "github.com/opencontainers/runtime-spec/specs-go" @@ -178,9 +181,9 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func( } } target := filepath.Join(spec.Root.Path, m.Destination) - if _, err := os.Stat(target); err != nil { + if err := fileutils.Exists(target); err != nil { // If the target can't be stat()ted, check the error. - if !os.IsNotExist(err) { + if !errors.Is(err, fs.ErrNotExist) { return undoBinds, fmt.Errorf("examining %q for mounting in mount namespace: %w", target, err) } // The target isn't there yet, so create it, and make a @@ -211,11 +214,11 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func( // Do the bind mount. 
if !srcinfo.IsDir() { logrus.Debugf("emulating file mount %q on %q", m.Source, target) - _, err := os.Stat(target) + err := fileutils.Exists(target) if err == nil { save := saveDir(spec, target) - if _, err := os.Stat(save); err != nil { - if os.IsNotExist(err) { + if err := fileutils.Exists(save); err != nil { + if errors.Is(err, fs.ErrNotExist) { err = os.MkdirAll(save, 0111) } if err != nil { @@ -224,7 +227,7 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func( removes = append(removes, save) } savePath := filepath.Join(save, filepath.Base(target)) - if _, err := os.Stat(target); err == nil { + if err := fileutils.Exists(target); err == nil { logrus.Debugf("moving %q to %q", target, savePath) if err := os.Rename(target, savePath); err != nil { return undoBinds, fmt.Errorf("moving %q to %q: %w", target, savePath, err) From 7053383792d97d3ef6e5969685e7b965be115bb8 Mon Sep 17 00:00:00 2001 From: Giuseppe Scrivano Date: Thu, 11 Apr 2024 10:43:18 +0200 Subject: [PATCH 3/5] buildah: use fileutils.(Le|E)xists Signed-off-by: Giuseppe Scrivano --- common.go | 4 ++-- run_common.go | 6 ++++-- run_linux.go | 3 ++- 3 files changed, 8 insertions(+), 5 deletions(-) diff --git a/common.go b/common.go index ea28be1846f..165c49106f2 100644 --- a/common.go +++ b/common.go @@ -3,7 +3,6 @@ package buildah import ( "context" "io" - "os" "path/filepath" "time" @@ -15,6 +14,7 @@ import ( "github.com/containers/image/v5/types" encconfig "github.com/containers/ocicrypt/config" "github.com/containers/storage" + "github.com/containers/storage/pkg/fileutils" "github.com/containers/storage/pkg/unshare" ) @@ -59,7 +59,7 @@ func getSystemContext(store storage.Store, defaults *types.SystemContext, signat if store != nil { if sc.SystemRegistriesConfPath == "" && unshare.IsRootless() { userRegistriesFile := filepath.Join(store.GraphRoot(), "registries.conf") - if _, err := os.Stat(userRegistriesFile); err == nil { + if err := fileutils.Exists(userRegistriesFile); err == nil { sc.SystemRegistriesConfPath = userRegistriesFile } } diff --git a/run_common.go b/run_common.go index 06fbbddd460..fd537851eec 100644 --- a/run_common.go +++ b/run_common.go @@ -9,6 +9,7 @@ import ( "errors" "fmt" "io" + "io/fs" "net" "os" "os/exec" @@ -40,6 +41,7 @@ import ( "github.com/containers/common/pkg/subscriptions" imageTypes "github.com/containers/image/v5/types" "github.com/containers/storage" + "github.com/containers/storage/pkg/fileutils" "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/ioutils" "github.com/containers/storage/pkg/lockfile" @@ -1414,8 +1416,8 @@ func runSetupBuiltinVolumes(mountLabel, mountPoint, containerDir string, builtin // If we need to, create the directory that we'll use to hold // the volume contents. If we do need to create it, then we'll // need to populate it, too, so make a note of that. 
- if _, err := os.Stat(volumePath); err != nil { - if !errors.Is(err, os.ErrNotExist) { + if err := fileutils.Exists(volumePath); err != nil { + if !errors.Is(err, fs.ErrNotExist) { return nil, err } logrus.Debugf("setting up built-in volume path at %q for %q", volumePath, volume) diff --git a/run_linux.go b/run_linux.go index 0054d7e0cc9..300f6590bfc 100644 --- a/run_linux.go +++ b/run_linux.go @@ -34,6 +34,7 @@ import ( "github.com/containers/common/pkg/config" "github.com/containers/common/pkg/hooks" hooksExec "github.com/containers/common/pkg/hooks/exec" + "github.com/containers/storage/pkg/fileutils" "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/ioutils" "github.com/containers/storage/pkg/lockfile" @@ -1331,7 +1332,7 @@ func setupSpecialMountSpecChanges(spec *specs.Spec, shmSize string) ([]specs.Mou // if userns and host ipc bind mount shm if isUserns && !isIpcns { // bind mount /dev/shm when it exists - if _, err := os.Stat("/dev/shm"); err == nil { + if err := fileutils.Exists("/dev/shm"); err == nil { shmMount := specs.Mount{ Source: "/dev/shm", Type: "bind", From 441bdc9274eac5e75c823b2879f2b391f5aef47b Mon Sep 17 00:00:00 2001 From: Giuseppe Scrivano Date: Thu, 11 Apr 2024 10:46:51 +0200 Subject: [PATCH 4/5] pkg/parse: use fileutils.(Le|E)xists Signed-off-by: Giuseppe Scrivano --- pkg/parse/parse.go | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/pkg/parse/parse.go b/pkg/parse/parse.go index 3ec612c9bb5..fd802775a2f 100644 --- a/pkg/parse/parse.go +++ b/pkg/parse/parse.go @@ -7,6 +7,7 @@ package parse import ( "errors" "fmt" + "io/fs" "net" "os" "path/filepath" @@ -26,6 +27,7 @@ import ( "github.com/containers/common/pkg/parse" "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/types" + "github.com/containers/storage/pkg/fileutils" "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/unshare" storageTypes "github.com/containers/storage/types" @@ -252,14 +254,14 @@ func parseSecurityOpts(securityOpts []string, commonOpts *define.CommonBuildOpti } if commonOpts.SeccompProfilePath == "" { - if _, err := os.Stat(SeccompOverridePath); err == nil { + if err := fileutils.Exists(SeccompOverridePath); err == nil { commonOpts.SeccompProfilePath = SeccompOverridePath } else { - if !errors.Is(err, os.ErrNotExist) { + if !errors.Is(err, fs.ErrNotExist) { return err } - if _, err := os.Stat(SeccompDefaultPath); err != nil { - if !errors.Is(err, os.ErrNotExist) { + if err := fileutils.Exists(SeccompDefaultPath); err != nil { + if !errors.Is(err, fs.ErrNotExist) { return err } } else { @@ -982,7 +984,7 @@ func IDMappingOptionsFromFlagSet(flags *pflag.FlagSet, persistentFlags *pflag.Fl usernsOption.Host = true default: how = strings.TrimPrefix(how, "ns:") - if _, err := os.Stat(how); err != nil { + if err := fileutils.Exists(how); err != nil { return nil, nil, fmt.Errorf("checking %s namespace: %w", string(specs.UserNamespace), err) } logrus.Debugf("setting %q namespace to %q", string(specs.UserNamespace), how) @@ -1077,7 +1079,7 @@ func NamespaceOptionsFromFlagSet(flags *pflag.FlagSet, findFlagFunc func(name st how = strings.TrimPrefix(how, "ns:") // if not a path we assume it is a comma separated network list, see setupNamespaces() in run_linux.go if filepath.IsAbs(how) || what != string(specs.NetworkNamespace) { - if _, err := os.Stat(how); err != nil { + if err := fileutils.Exists(how); err != nil { return nil, define.NetworkDefault, fmt.Errorf("checking %s 
namespace: %w", what, err) } } @@ -1243,7 +1245,7 @@ func Secrets(secrets []string) (map[string]define.Secret, error) { if err != nil { return nil, fmt.Errorf("could not parse secrets: %w", err) } - _, err = os.Stat(fullPath) + err = fileutils.Exists(fullPath) if err != nil { return nil, fmt.Errorf("could not parse secrets: %w", err) } @@ -1293,10 +1295,10 @@ func ContainerIgnoreFile(contextDir, path string, containerFiles []string) ([]st containerfile = filepath.Join(contextDir, containerfile) } containerfileIgnore := "" - if _, err := os.Stat(containerfile + ".containerignore"); err == nil { + if err := fileutils.Exists(containerfile + ".containerignore"); err == nil { containerfileIgnore = containerfile + ".containerignore" } - if _, err := os.Stat(containerfile + ".dockerignore"); err == nil { + if err := fileutils.Exists(containerfile + ".dockerignore"); err == nil { containerfileIgnore = containerfile + ".dockerignore" } if containerfileIgnore != "" { From 8bdd6a66d440a7a80c4cd95158c656bfddc6bbf1 Mon Sep 17 00:00:00 2001 From: Giuseppe Scrivano Date: Thu, 11 Apr 2024 10:57:30 +0200 Subject: [PATCH 5/5] internal: use fileutils.(Le|E)xists Signed-off-by: Giuseppe Scrivano --- internal/mkcw/attest.go | 3 ++- internal/parse/parse.go | 4 ++-- internal/source/add.go | 3 ++- internal/source/create.go | 4 ++-- internal/source/pull.go | 3 ++- 5 files changed, 10 insertions(+), 7 deletions(-) diff --git a/internal/mkcw/attest.go b/internal/mkcw/attest.go index 91362d37e2d..400884a9968 100644 --- a/internal/mkcw/attest.go +++ b/internal/mkcw/attest.go @@ -15,6 +15,7 @@ import ( "strings" "github.com/containers/buildah/internal/mkcw/types" + "github.com/containers/storage/pkg/fileutils" "github.com/sirupsen/logrus" ) @@ -224,7 +225,7 @@ func GenerateMeasurement(workloadConfig WorkloadConfig, firmwareLibrary string) } } for _, candidate := range pathsToCheck { - if _, err := os.Lstat(candidate); err == nil { + if err := fileutils.Lexists(candidate); err == nil { var stdout, stderr bytes.Buffer logrus.Debugf("krunfw_measurement -c %s -m %s %s", cpuString, memoryString, candidate) cmd := exec.Command("krunfw_measurement", "-c", cpuString, "-m", memoryString, candidate) diff --git a/internal/parse/parse.go b/internal/parse/parse.go index 89ff7d399af..8a0fa48539f 100644 --- a/internal/parse/parse.go +++ b/internal/parse/parse.go @@ -2,11 +2,11 @@ package parse import ( "fmt" - "os" "path/filepath" "strings" "github.com/containers/common/pkg/parse" + "github.com/containers/storage/pkg/fileutils" specs "github.com/opencontainers/runtime-spec/specs-go" ) @@ -15,7 +15,7 @@ func ValidateVolumeMountHostDir(hostDir string) error { if !filepath.IsAbs(hostDir) { return fmt.Errorf("invalid host path, must be an absolute path %q", hostDir) } - if _, err := os.Stat(hostDir); err != nil { + if err := fileutils.Exists(hostDir); err != nil { return err } return nil diff --git a/internal/source/add.go b/internal/source/add.go index 64e944554c1..28506e58f1d 100644 --- a/internal/source/add.go +++ b/internal/source/add.go @@ -10,6 +10,7 @@ import ( "github.com/containers/image/v5/types" "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/fileutils" specV1 "github.com/opencontainers/image-spec/specs-go/v1" ) @@ -44,7 +45,7 @@ func (o *AddOptions) annotations() (map[string]string, error) { // tar ball. Add attempts to auto-tar and auto-compress only if necessary. 
func Add(ctx context.Context, sourcePath string, artifactPath string, options AddOptions) error { // Let's first make sure `sourcePath` exists and that we can access it. - if _, err := os.Stat(sourcePath); err != nil { + if err := fileutils.Exists(sourcePath); err != nil { return err } diff --git a/internal/source/create.go b/internal/source/create.go index c335cd0e566..55d4b83bf65 100644 --- a/internal/source/create.go +++ b/internal/source/create.go @@ -3,9 +3,9 @@ package source import ( "context" "fmt" - "os" "time" + "github.com/containers/storage/pkg/fileutils" spec "github.com/opencontainers/image-spec/specs-go" specV1 "github.com/opencontainers/image-spec/specs-go/v1" ) @@ -32,7 +32,7 @@ func (o *CreateOptions) createdTime() *time.Time { // Create creates an empty source image at the specified `sourcePath`. Note // that `sourcePath` must not exist. func Create(ctx context.Context, sourcePath string, options CreateOptions) error { - if _, err := os.Stat(sourcePath); err == nil { + if err := fileutils.Exists(sourcePath); err == nil { return fmt.Errorf("creating source image: %q already exists", sourcePath) } diff --git a/internal/source/pull.go b/internal/source/pull.go index f8743ddf8f9..7e6f8ed53cd 100644 --- a/internal/source/pull.go +++ b/internal/source/pull.go @@ -12,6 +12,7 @@ import ( "github.com/containers/image/v5/signature" "github.com/containers/image/v5/transports/alltransports" "github.com/containers/image/v5/types" + "github.com/containers/storage/pkg/fileutils" ) // PullOptions includes data to alter certain knobs when pulling a source @@ -27,7 +28,7 @@ type PullOptions struct { // Pull `imageInput` from a container registry to `sourcePath`. func Pull(ctx context.Context, imageInput string, sourcePath string, options PullOptions) error { - if _, err := os.Stat(sourcePath); err == nil { + if err := fileutils.Exists(sourcePath); err == nil { return fmt.Errorf("%q already exists", sourcePath) }