From 1b00f9e633b7c3c9aff3d5ab47b50d484e1dc5b2 Mon Sep 17 00:00:00 2001 From: Aditya R Date: Wed, 30 Aug 2023 14:08:23 +0530 Subject: [PATCH 01/24] manifest: addCompression use default from containers.conf Replaces: https://github.com/containers/buildah/pull/5014 Signed-off-by: Aditya R Signed-off-by: Daniel J Walsh Signed-off-by: tomsweeneyredhat --- cmd/buildah/from.go | 6 ------ cmd/buildah/main.go | 11 ++++++----- cmd/buildah/manifest.go | 2 +- tests/bud.bats | 28 +++++++++++++++++++++++++++- 4 files changed, 34 insertions(+), 13 deletions(-) diff --git a/cmd/buildah/from.go b/cmd/buildah/from.go index 3ee6f8adcc7..135f764dd2f 100644 --- a/cmd/buildah/from.go +++ b/cmd/buildah/from.go @@ -13,7 +13,6 @@ import ( "github.com/containers/buildah/pkg/cli" "github.com/containers/buildah/pkg/parse" "github.com/containers/common/pkg/auth" - "github.com/containers/common/pkg/config" "github.com/sirupsen/logrus" "github.com/spf13/cobra" ) @@ -188,11 +187,6 @@ func onBuild(builder *buildah.Builder, quiet bool) error { } func fromCmd(c *cobra.Command, args []string, iopts fromReply) error { - defaultContainerConfig, err := config.Default() - if err != nil { - return fmt.Errorf("failed to get container config: %w", err) - } - if len(args) == 0 { return errors.New("an image name (or \"scratch\") must be specified") } diff --git a/cmd/buildah/main.go b/cmd/buildah/main.go index 2e4fa0c06c5..2f602c81643 100644 --- a/cmd/buildah/main.go +++ b/cmd/buildah/main.go @@ -59,8 +59,9 @@ var rootCmd = &cobra.Command{ } var ( - globalFlagResults globalFlags - exitCode int + globalFlagResults globalFlags + exitCode int + defaultContainerConfig *config.Config ) func init() { @@ -79,12 +80,12 @@ func init() { defaultStoreDriverOptions = optionSlice } - containerConfig, err := config.Default() + defaultContainerConfig, err = config.Default() if err != nil { logrus.Errorf(err.Error()) os.Exit(1) } - containerConfig.CheckCgroupsAndAdjustConfig() + defaultContainerConfig.CheckCgroupsAndAdjustConfig() cobra.OnInitialize(initConfig) // Disable the implicit `completion` command in cobra. 
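For reference, the practical effect of wiring the flag default to containers.conf is that the two invocations below become equivalent once the configuration carries the new key (registry name, file path, and omitted TLS/auth flags are illustrative; the bats test added later in this patch exercises the same flow via CONTAINERS_CONF):

    # containers.conf (or a file pointed to by CONTAINERS_CONF):
    #   [engine]
    #   add_compression = ["zstd"]

    buildah manifest push --all --add-compression zstd foo docker://registry.example.com/list
    CONTAINERS_CONF=/path/to/containers.conf buildah manifest push --all foo docker://registry.example.com/list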
@@ -98,7 +99,7 @@ func init() { rootCmd.PersistentFlags().StringVar(&globalFlagResults.UserShortNameAliasConfPath, "short-name-alias-conf", "", "path to short name alias cache file (not usually used)") rootCmd.PersistentFlags().StringVar(&globalFlagResults.Root, "root", storageOptions.GraphRoot, "storage root dir") rootCmd.PersistentFlags().StringVar(&globalFlagResults.RunRoot, "runroot", storageOptions.RunRoot, "storage state dir") - rootCmd.PersistentFlags().StringVar(&globalFlagResults.CgroupManager, "cgroup-manager", containerConfig.Engine.CgroupManager, "cgroup manager") + rootCmd.PersistentFlags().StringVar(&globalFlagResults.CgroupManager, "cgroup-manager", defaultContainerConfig.Engine.CgroupManager, "cgroup manager") rootCmd.PersistentFlags().StringVar(&globalFlagResults.StorageDriver, "storage-driver", storageOptions.GraphDriverName, "storage-driver") rootCmd.PersistentFlags().StringSliceVar(&globalFlagResults.StorageOpts, "storage-opt", defaultStoreDriverOptions, "storage driver option") rootCmd.PersistentFlags().StringSliceVar(&globalFlagResults.UserNSUID, "userns-uid-map", []string{}, "default `ctrID:hostID:length` UID mapping to use") diff --git a/cmd/buildah/manifest.go b/cmd/buildah/manifest.go index 9568d4bc8a3..b417493b9e8 100644 --- a/cmd/buildah/manifest.go +++ b/cmd/buildah/manifest.go @@ -231,7 +231,7 @@ func init() { flags.StringVar(&manifestPushOpts.compressionFormat, "compression-format", "", "compression format to use") flags.IntVar(&manifestPushOpts.compressionLevel, "compression-level", 0, "compression level to use") flags.StringVarP(&manifestPushOpts.format, "format", "f", "", "manifest type (oci or v2s2) to attempt to use when pushing the manifest list (default is manifest type of source)") - flags.StringSliceVar(&manifestPushOpts.addCompression, "add-compression", nil, "add instances with selected compression while pushing") + flags.StringArrayVar(&manifestPushOpts.addCompression, "add-compression", defaultContainerConfig.Engine.AddCompression.Get(), "add instances with selected compression while pushing") flags.BoolVarP(&manifestPushOpts.removeSignatures, "remove-signatures", "", false, "don't copy signatures when pushing images") flags.StringVar(&manifestPushOpts.signBy, "sign-by", "", "sign the image using a GPG key with the specified `FINGERPRINT`") flags.StringVar(&manifestPushOpts.signaturePolicy, "signature-policy", "", "`pathname` of signature policy file (not usually used)") diff --git a/tests/bud.bats b/tests/bud.bats index 878a1597a0a..ae299852648 100644 --- a/tests/bud.bats +++ b/tests/bud.bats @@ -12,11 +12,37 @@ load helpers } @test "bud: build manifest list and --add-compression zstd" { + start_registry + run_buildah login --tls-verify=false --authfile ${TEST_SCRATCH_DIR}/test.auth --username testuser --password testpassword localhost:${REGISTRY_PORT} + run_buildah build $WITH_POLICY_JSON -t image1 --platform linux/amd64 -f $BUDFILES/dockerfile/Dockerfile + run_buildah build $WITH_POLICY_JSON -t image2 --platform linux/arm64 -f $BUDFILES/dockerfile/Dockerfile + + run_buildah manifest create foo + run_buildah manifest add foo image1 + run_buildah manifest add foo image2 + + run_buildah manifest push $WITH_POLICY_JSON --authfile ${TEST_SCRATCH_DIR}/test.auth --all --add-compression zstd --tls-verify=false foo docker://localhost:${REGISTRY_PORT}/list + + run_buildah manifest inspect --authfile ${TEST_SCRATCH_DIR}/test.auth --tls-verify=false localhost:${REGISTRY_PORT}/list + list="$output" + + validate_instance_compression "0" "$list" "amd64" 
"gzip" + validate_instance_compression "1" "$list" "arm64" "gzip" + validate_instance_compression "2" "$list" "amd64" "zstd" + validate_instance_compression "3" "$list" "arm64" "zstd" +} + +@test "bud: build manifest list and --add-compression with containers.conf" { local contextdir=${TEST_SCRATCH_DIR}/bud/platform mkdir -p $contextdir cat > $contextdir/Dockerfile1 << _EOF FROM alpine +_EOF + + cat > $contextdir/containers.conf << _EOF +[engine] +add_compression = ["zstd"] _EOF start_registry @@ -28,7 +54,7 @@ _EOF run_buildah manifest add foo image1 run_buildah manifest add foo image2 - run_buildah manifest push $WITH_POLICY_JSON --authfile ${TEST_SCRATCH_DIR}/test.auth --all --add-compression zstd --tls-verify=false foo docker://localhost:${REGISTRY_PORT}/list + CONTAINERS_CONF=$contextdir/containers.conf run_buildah manifest push $WITH_POLICY_JSON --authfile ${TEST_SCRATCH_DIR}/test.auth --all --tls-verify=false foo docker://localhost:${REGISTRY_PORT}/list run_buildah manifest inspect --authfile ${TEST_SCRATCH_DIR}/test.auth --tls-verify=false localhost:${REGISTRY_PORT}/list list="$output" From 413a3595916f9cee15c12f2f363178c78d13c183 Mon Sep 17 00:00:00 2001 From: Daniel J Walsh Date: Tue, 28 Nov 2023 20:00:39 -0500 Subject: [PATCH 02/24] Ignore errors if label.Relabel returns ENOSUP This is a common mistake by users and is ignored in some places but not everywhere. This change will help this to be ignored everwhere. Signed-off-by: Daniel J Walsh Signed-off-by: tomsweeneyredhat --- internal/mkcw/embed/entrypoint_amd64.gz | Bin 327 -> 405 bytes run_common.go | 24 +++++++++++++++++------- run_linux.go | 7 +++---- 3 files changed, 20 insertions(+), 11 deletions(-) diff --git a/internal/mkcw/embed/entrypoint_amd64.gz b/internal/mkcw/embed/entrypoint_amd64.gz index f8218a05ef74ee3dfdcdba928caa037f3c3fa20e..980a5a65806dc6281745ffb689858c4e8dbc88c4 100755 GIT binary patch literal 405 zcmV;G0c!pqiwFp%I$vb~17&V>a(QrXX>N1??V7(%!!QuWFQF|J4D<<*2S#KeCI*DE z0F{W45byvlF=^CdSBWdi#4GfXDhBc-ya$f|gf<{im4W4clJn)e+{KQ!==^#fUxYyb zo)FH!xL#y@wEpZ!J>-!?F$ya(QrXX>N1??b@+Ugg_7m;Ngz5&_XQj<_RdQER3Pi zTq2E$FR%y)n?p$0y=dpF84Gz5-^0CyB^B8K->h5~B{?nrZ00000 z006)bJrq*0d=h!a=0}<-nO9lLy5=O~W>c|HEcmgmRx-^hEk()Cb+ayOk@7~#D(6xr zYcm)g6NRc!y3rz`P-ici!lq7z7Qb=M6YAdm5AX1Y?*+ODC-dH Date: Tue, 12 Dec 2023 12:18:20 -0500 Subject: [PATCH 03/24] mkcw: populate the rootfs using an overlay When using the working container's rootfs to populate a plaintext disk image with mkfs, instead of writing .krun_config.json to the rootfs and then removing it afterward (since we don't want it to show up if the same working container is later committed to non confidential-workload image), mount an overlay filesystem using a temporary directory as the upper and the rootfs as the lower, create the .krun_config.json file in the overlay filesystem, and use the overlay filesystem as the source directory for mkfs. Add the necessary stubs to allow pkg/overlay to at least compile on non-Linux systems. Change the naming scheme for a test so that the path names it uses for temporary directories don't include "," or "=", which can confuse the kernel. Creating confidential workload images will now only be possible on Linux systems, but we exec'd out to sevctl to read platform certificates, and that requires kernel support with vendor firmware, so I don't know that anyone will actually be impacted by the change. Teach pkg/overlay.MountWithOptions() to accept `nil` as a pointer to a struct parameter that is otherwise optional. 
Signed-off-by: Nalin Dahyabhai Signed-off-by: tomsweeneyredhat --- convertcw.go | 1 + convertcw_test.go | 2 +- define/types.go | 2 +- image.go | 17 ++++++ internal/mkcw/archive.go | 85 ++++++++++++++++++++++++++++-- pkg/overlay/overlay.go | 4 +- pkg/overlay/overlay_freebsd.go | 3 ++ pkg/overlay/overlay_linux.go | 3 ++ pkg/overlay/overlay_unsupported.go | 20 +++++++ tests/mkcw.bats | 11 ++-- 10 files changed, 135 insertions(+), 13 deletions(-) create mode 100644 pkg/overlay/overlay_unsupported.go diff --git a/convertcw.go b/convertcw.go index 85576f425ab..ede4f24c86b 100644 --- a/convertcw.go +++ b/convertcw.go @@ -171,6 +171,7 @@ func CWConvertImage(ctx context.Context, systemContext *types.SystemContext, sto Slop: options.Slop, FirmwareLibrary: options.FirmwareLibrary, Logger: logger, + GraphOptions: store.GraphOptions(), } rc, workloadConfig, err := mkcw.Archive(sourceDir, &source.OCIv1, archiveOptions) if err != nil { diff --git a/convertcw_test.go b/convertcw_test.go index 7e5263935e0..c70c18991bf 100644 --- a/convertcw_test.go +++ b/convertcw_test.go @@ -72,7 +72,7 @@ func TestCWConvertImage(t *testing.T) { for _, status := range []int{http.StatusOK, http.StatusInternalServerError} { for _, ignoreChainRetrievalErrors := range []bool{false, true} { for _, ignoreAttestationErrors := range []bool{false, true} { - t.Run(fmt.Sprintf("status=%d,ignoreChainRetrievalErrors=%v,ignoreAttestationErrors=%v", status, ignoreChainRetrievalErrors, ignoreAttestationErrors), func(t *testing.T) { + t.Run(fmt.Sprintf("status~%d~ignoreChainRetrievalErrors~%v~ignoreAttestationErrors~%v", status, ignoreChainRetrievalErrors, ignoreAttestationErrors), func(t *testing.T) { // create a per-test Store object storeOptions := storage.StoreOptions{ GraphRoot: t.TempDir(), diff --git a/define/types.go b/define/types.go index 9b908984eaf..250dfbcbbec 100644 --- a/define/types.go +++ b/define/types.go @@ -121,7 +121,7 @@ type ConfidentialWorkloadOptions struct { AttestationURL string CPUs int Memory int - TempDir string + TempDir string // used for the temporary plaintext copy of the disk image TeeType TeeType IgnoreAttestationErrors bool WorkloadID string diff --git a/image.go b/image.go index 7318e04bdac..163699e0540 100644 --- a/image.go +++ b/image.go @@ -171,6 +171,22 @@ func (i *containerImageRef) extractConfidentialWorkloadFS(options ConfidentialWo if err := json.Unmarshal(i.oconfig, &image); err != nil { return nil, fmt.Errorf("recreating OCI configuration for %q: %w", i.containerID, err) } + if options.TempDir == "" { + cdir, err := i.store.ContainerDirectory(i.containerID) + if err != nil { + return nil, fmt.Errorf("getting the per-container data directory for %q: %w", i.containerID, err) + } + tempdir, err := os.MkdirTemp(cdir, "buildah-rootfs") + if err != nil { + return nil, fmt.Errorf("creating a temporary data directory to hold a rootfs image for %q: %w", i.containerID, err) + } + defer func() { + if err := os.RemoveAll(tempdir); err != nil { + logrus.Warnf("removing temporary directory %q: %v", tempdir, err) + } + }() + options.TempDir = tempdir + } mountPoint, err := i.store.Mount(i.containerID, i.mountLabel) if err != nil { return nil, fmt.Errorf("mounting container %q: %w", i.containerID, err) @@ -186,6 +202,7 @@ func (i *containerImageRef) extractConfidentialWorkloadFS(options ConfidentialWo DiskEncryptionPassphrase: options.DiskEncryptionPassphrase, Slop: options.Slop, FirmwareLibrary: options.FirmwareLibrary, + GraphOptions: i.store.GraphOptions(), } rc, _, err := mkcw.Archive(mountPoint, &image, 
archiveOptions) if err != nil { diff --git a/internal/mkcw/archive.go b/internal/mkcw/archive.go index a0677e42650..6caea17df33 100644 --- a/internal/mkcw/archive.go +++ b/internal/mkcw/archive.go @@ -17,7 +17,12 @@ import ( "strings" "time" + "github.com/containers/buildah/internal/tmpdir" + "github.com/containers/buildah/pkg/overlay" "github.com/containers/luksy" + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/mount" + "github.com/containers/storage/pkg/system" "github.com/docker/docker/pkg/ioutils" "github.com/docker/go-units" digest "github.com/opencontainers/go-digest" @@ -48,6 +53,7 @@ type ArchiveOptions struct { DiskEncryptionPassphrase string FirmwareLibrary string Logger *logrus.Logger + GraphOptions []string // passed in from a storage Store, probably } type chainRetrievalError struct { @@ -107,6 +113,9 @@ func Archive(path string, ociConfig *v1.Image, options ArchiveOptions) (io.ReadC Memory: memory, AttestationURL: options.AttestationURL, } + if options.TempDir == "" { + options.TempDir = tmpdir.GetTempDir() + } // Do things which are specific to the type of TEE we're building for. var chainBytes []byte @@ -165,6 +174,77 @@ func Archive(path string, ociConfig *v1.Image, options ArchiveOptions) (io.ReadC workloadConfig.TeeData = string(encodedTeeData) } + // We're going to want to add some content to the rootfs, so set up an + // overlay that uses it as a lower layer so that we can write to it. + st, err := system.Stat(path) + if err != nil { + return nil, WorkloadConfig{}, fmt.Errorf("reading information about the container root filesystem: %w", err) + } + // Create a temporary directory to hold all of this. Use tmpdir.GetTempDir() + // instead of the passed-in location, which a crafty caller might have put in an + // overlay filesystem in storage because there tends to be more room there than + // in, say, /var/tmp, and the plaintext disk image, which we put in the passed-in + // location, can get quite large. + rootfsParentDir, err := os.MkdirTemp(tmpdir.GetTempDir(), "buildah-rootfs") + if err != nil { + return nil, WorkloadConfig{}, fmt.Errorf("setting up parent for container root filesystem: %w", err) + } + defer func() { + if err := os.RemoveAll(rootfsParentDir); err != nil { + logger.Warnf("cleaning up parent for container root filesystem: %v", err) + } + }() + // Create a mountpoint for the new overlay, which we'll use as the rootfs. + rootfsDir := filepath.Join(rootfsParentDir, "rootfs") + if err := idtools.MkdirAndChown(rootfsDir, fs.FileMode(st.Mode()), idtools.IDPair{UID: int(st.UID()), GID: int(st.GID())}); err != nil { + return nil, WorkloadConfig{}, fmt.Errorf("creating mount target for container root filesystem: %w", err) + } + defer func() { + if err := os.Remove(rootfsDir); err != nil { + logger.Warnf("removing mount target for container root filesystem: %v", err) + } + }() + // Create a directory to hold all of the overlay package's working state. + tempDir := filepath.Join(rootfsParentDir, "tmp") + if err = os.Mkdir(tempDir, 0o700); err != nil { + return nil, WorkloadConfig{}, err + } + // Create some working state in there. 
+ overlayTempDir, err := overlay.TempDir(tempDir, int(st.UID()), int(st.GID())) + if err != nil { + return nil, WorkloadConfig{}, fmt.Errorf("setting up mount of container root filesystem: %w", err) + } + defer func() { + if err := overlay.RemoveTemp(overlayTempDir); err != nil { + logger.Warnf("cleaning up mount of container root filesystem: %v", err) + } + }() + // Create a mount point using that working state. + rootfsMount, err := overlay.Mount(overlayTempDir, path, rootfsDir, 0, 0, options.GraphOptions) + if err != nil { + return nil, WorkloadConfig{}, fmt.Errorf("setting up support for overlay of container root filesystem: %w", err) + } + defer func() { + if err := overlay.Unmount(overlayTempDir); err != nil { + logger.Warnf("unmounting support for overlay of container root filesystem: %v", err) + } + }() + // Follow through on the overlay or bind mount, whatever the overlay package decided + // to leave to us to do. + rootfsMountOptions := strings.Join(rootfsMount.Options, ",") + logrus.Debugf("mounting %q to %q as %q with options %v", rootfsMount.Source, rootfsMount.Destination, rootfsMount.Type, rootfsMountOptions) + if err := mount.Mount(rootfsMount.Source, rootfsMount.Destination, rootfsMount.Type, rootfsMountOptions); err != nil { + return nil, WorkloadConfig{}, fmt.Errorf("mounting overlay of container root filesystem: %w", err) + } + defer func() { + logrus.Debugf("unmounting %q", rootfsMount.Destination) + if err := mount.Unmount(rootfsMount.Destination); err != nil { + logger.Warnf("unmounting overlay of container root filesystem: %v", err) + } + }() + // Pretend that we didn't have to do any of the preceding. + path = rootfsDir + // Write part of the config blob where the krun init process will be // looking for it. The oci2cw tool used `buildah inspect` output, but // init is just looking for fields that have the right names in any @@ -178,11 +258,6 @@ func Archive(path string, ociConfig *v1.Image, options ArchiveOptions) (io.ReadC if err := ioutils.AtomicWriteFile(krunConfigPath, krunConfigBytes, 0o600); err != nil { return nil, WorkloadConfig{}, fmt.Errorf("saving krun config: %w", err) } - defer func() { - if err := os.Remove(krunConfigPath); err != nil { - logger.Warnf("removing krun configuration file: %v", err) - } - }() // Encode the workload config, in case it fails for any reason. 
cleanedUpWorkloadConfig := workloadConfig diff --git a/pkg/overlay/overlay.go b/pkg/overlay/overlay.go index e416ecd780e..bbcc8eac695 100644 --- a/pkg/overlay/overlay.go +++ b/pkg/overlay/overlay.go @@ -6,6 +6,7 @@ import ( "os/exec" "path/filepath" "strings" + "syscall" "errors" @@ -14,7 +15,6 @@ import ( "github.com/containers/storage/pkg/unshare" "github.com/opencontainers/runtime-spec/specs-go" "github.com/sirupsen/logrus" - "golang.org/x/sys/unix" ) // Options type holds various configuration options for overlay @@ -180,7 +180,7 @@ func Unmount(contentDir string) error { } // Ignore EINVAL as the specified merge dir is not a mount point - if err := unix.Unmount(mergeDir, 0); err != nil && !errors.Is(err, os.ErrNotExist) && err != unix.EINVAL { + if err := system.Unmount(mergeDir); err != nil && !errors.Is(err, os.ErrNotExist) && !errors.Is(err, syscall.EINVAL) { return fmt.Errorf("unmount overlay %s: %w", mergeDir, err) } return nil diff --git a/pkg/overlay/overlay_freebsd.go b/pkg/overlay/overlay_freebsd.go index e814a327c7a..b064ec57837 100644 --- a/pkg/overlay/overlay_freebsd.go +++ b/pkg/overlay/overlay_freebsd.go @@ -18,6 +18,9 @@ import ( // But allows api to set custom workdir, upperdir and other overlay options // Following API is being used by podman at the moment func MountWithOptions(contentDir, source, dest string, opts *Options) (mount specs.Mount, Err error) { + if opts == nil { + opts = &Options{} + } if opts.ReadOnly { // Read-only overlay mounts can be simulated with nullfs mount.Source = source diff --git a/pkg/overlay/overlay_linux.go b/pkg/overlay/overlay_linux.go index 9bd72bc2406..46d0c44aa1f 100644 --- a/pkg/overlay/overlay_linux.go +++ b/pkg/overlay/overlay_linux.go @@ -17,6 +17,9 @@ import ( // But allows api to set custom workdir, upperdir and other overlay options // Following API is being used by podman at the moment func MountWithOptions(contentDir, source, dest string, opts *Options) (mount specs.Mount, Err error) { + if opts == nil { + opts = &Options{} + } mergeDir := filepath.Join(contentDir, "merge") // Create overlay mount options for rw/ro. diff --git a/pkg/overlay/overlay_unsupported.go b/pkg/overlay/overlay_unsupported.go new file mode 100644 index 00000000000..538db65e0f7 --- /dev/null +++ b/pkg/overlay/overlay_unsupported.go @@ -0,0 +1,20 @@ +//go:build !freebsd && !linux +// +build !freebsd,!linux + +package overlay + +import ( + "fmt" + "runtime" + + "github.com/opencontainers/runtime-spec/specs-go" +) + +// MountWithOptions creates a subdir of the contentDir based on the source directory +// from the source system. It then mounts up the source directory on to the +// generated mount point and returns the mount point to the caller. 
+// But allows api to set custom workdir, upperdir and other overlay options +// Following API is being used by podman at the moment +func MountWithOptions(contentDir, source, dest string, opts *Options) (mount specs.Mount, err error) { + return mount, fmt.Errorf("read/write overlay mounts not supported on %q", runtime.GOOS) +} diff --git a/tests/mkcw.bats b/tests/mkcw.bats index 1aa32c1c9c8..c1a185ef59a 100644 --- a/tests/mkcw.bats +++ b/tests/mkcw.bats @@ -49,12 +49,15 @@ function mkcw_check_image() { skip "cryptsetup not found" fi _prefetch busybox + _prefetch bash echo -n mkcw-convert > "$TEST_SCRATCH_DIR"/key + # image has one layer, check with all-lower-case TEE type name run_buildah mkcw --ignore-attestation-errors --type snp --passphrase=mkcw-convert busybox busybox-cw mkcw_check_image busybox-cw - run_buildah mkcw --ignore-attestation-errors --type SNP --passphrase=mkcw-convert busybox busybox-cw - mkcw_check_image busybox-cw + # image has multiple layers, check with all-upper-case TEE type name + run_buildah mkcw --ignore-attestation-errors --type SNP --passphrase=mkcw-convert bash bash-cw + mkcw_check_image bash-cw } @test "mkcw-commit" { @@ -63,10 +66,10 @@ function mkcw_check_image() { if ! which cryptsetup > /dev/null 2> /dev/null ; then skip "cryptsetup not found" fi - _prefetch busybox + _prefetch bash echo -n "mkcw commit" > "$TEST_SCRATCH_DIR"/key - run_buildah from busybox + run_buildah from bash ctrID="$output" run_buildah commit --iidfile "$TEST_SCRATCH_DIR"/iid --cw type=SEV,ignore_attestation_errors,passphrase="mkcw commit" "$ctrID" mkcw_check_image $(cat "$TEST_SCRATCH_DIR"/iid) From d49e484126a0c467f2e6f11c0be7bee2a097fa02 Mon Sep 17 00:00:00 2001 From: Nalin Dahyabhai Date: Tue, 12 Dec 2023 14:54:42 -0500 Subject: [PATCH 04/24] commit: add a --add-file flag Add a flag to `buildah commit` which allows adding arbitrary files to the image while we're committing it. When not squashing, they'll take the form of a second new layer. Signed-off-by: Nalin Dahyabhai Signed-off-by: tomsweeneyredhat --- cmd/buildah/commit.go | 26 +++++ commit.go | 6 ++ docs/buildah-commit.1.md | 8 ++ image.go | 208 ++++++++++++++++++++++++++++++++------- tests/commit.bats | 62 ++++++++++++ 5 files changed, 273 insertions(+), 37 deletions(-) diff --git a/cmd/buildah/commit.go b/cmd/buildah/commit.go index 983c569b6e7..eca8608b023 100644 --- a/cmd/buildah/commit.go +++ b/cmd/buildah/commit.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" "os" + "strings" "time" "github.com/containers/buildah" @@ -49,6 +50,7 @@ type commitInputOptions struct { encryptionKeys []string encryptLayers []int unsetenvs []string + addFile []string } func init() { @@ -77,6 +79,7 @@ func commitListFlagSet(cmd *cobra.Command, opts *commitInputOptions) { flags := cmd.Flags() flags.SetInterspersed(false) + flags.StringArrayVar(&opts.addFile, "add-file", nil, "add contents of a file to the image at a specified path (`source:destination`)") flags.StringVar(&opts.authfile, "authfile", auth.GetDefaultAuthFile(), "path of the authentication file. 
Use REGISTRY_AUTH_FILE environment variable to override") _ = cmd.RegisterFlagCompletionFunc("authfile", completion.AutocompleteDefault) flags.StringVar(&opts.blobCache, "blob-cache", "", "assume image blobs in the specified directory will be available for pushing") @@ -223,6 +226,28 @@ func commitCmd(c *cobra.Command, args []string, iopts commitInputOptions) error } } + var addFiles map[string]string + if len(iopts.addFile) > 0 { + addFiles = make(map[string]string) + for _, spec := range iopts.addFile { + specSlice := strings.SplitN(spec, ":", 2) + if len(specSlice) == 1 { + specSlice = []string{specSlice[0], specSlice[0]} + } + if len(specSlice) != 2 { + return fmt.Errorf("parsing add-file argument %q: expected 1 or 2 parts, got %d", spec, len(strings.SplitN(spec, ":", 2))) + } + st, err := os.Stat(specSlice[0]) + if err != nil { + return fmt.Errorf("parsing add-file argument %q: source %q: %w", spec, specSlice[0], err) + } + if st.IsDir() { + return fmt.Errorf("parsing add-file argument %q: source %q is not a regular file", spec, specSlice[0]) + } + addFiles[specSlice[1]] = specSlice[0] + } + } + options := buildah.CommitOptions{ PreferredManifestType: format, Manifest: iopts.manifest, @@ -239,6 +264,7 @@ func commitCmd(c *cobra.Command, args []string, iopts commitInputOptions) error UnsetEnvs: iopts.unsetenvs, OverrideChanges: iopts.changes, OverrideConfig: overrideConfig, + ExtraImageContent: addFiles, } exclusiveFlags := 0 if c.Flag("reference-time").Changed { diff --git a/commit.go b/commit.go index ef55e5419a7..222f969a0f0 100644 --- a/commit.go +++ b/commit.go @@ -118,6 +118,12 @@ type CommitOptions struct { // to the configuration of the image that is being committed, after // OverrideConfig is applied. OverrideChanges []string + // ExtraImageContent is a map which describes additional content to add + // to the committted image. The map's keys are filesystem paths in the + // image and the corresponding values are the paths of files whose + // contents will be used in their place. The contents will be owned by + // 0:0 and have mode 0644. Currently only accepts regular files. + ExtraImageContent map[string]string } var ( diff --git a/docs/buildah-commit.1.md b/docs/buildah-commit.1.md index 2bad2149f82..5e1b88eee74 100644 --- a/docs/buildah-commit.1.md +++ b/docs/buildah-commit.1.md @@ -19,6 +19,14 @@ The image ID of the image that was created. On error, 1 is returned and errno i ## OPTIONS +**--add-file** *source[:destination]* + +Read the contents of the file `source` and add it to the committed image as a +file at `destination`. If `destination` is not specified, the path of `source` +will be used. The new file will be owned by UID 0, GID 0, have 0644 +permissions, and be given a current timestamp unless the **--timestamp** option +is also specified. This option can be specified multiple times. + **--authfile** *path* Path of the authentication file. Default is ${XDG_RUNTIME_DIR}/containers/auth.json. If XDG_RUNTIME_DIR is not set, the default is /run/containers/$UID/auth.json. This file is created using `buildah login`. diff --git a/image.go b/image.go index 163699e0540..d1c3a3c11d7 100644 --- a/image.go +++ b/image.go @@ -45,9 +45,9 @@ const ( Dockerv2ImageManifest = define.Dockerv2ImageManifest ) -// ExtractRootfsOptions is consumed by ExtractRootfs() which allows -// users to preserve nature of various modes like setuid, setgid and xattrs -// over the extracted file system objects. 
+// ExtractRootfsOptions is consumed by ExtractRootfs() which allows users to +// control whether various information like the like setuid and setgid bits and +// xattrs are preserved when extracting file system objects. type ExtractRootfsOptions struct { StripSetuidBit bool // strip the setuid bit off of items being extracted. StripSetgidBit bool // strip the setgid bit off of items being extracted. @@ -82,6 +82,7 @@ type containerImageRef struct { postEmptyLayers []v1.History overrideChanges []string overrideConfig *manifest.Schema2Config + extraImageContent map[string]string } type blobLayerInfo struct { @@ -204,6 +205,9 @@ func (i *containerImageRef) extractConfidentialWorkloadFS(options ConfidentialWo FirmwareLibrary: options.FirmwareLibrary, GraphOptions: i.store.GraphOptions(), } + if len(i.extraImageContent) > 0 { + logrus.Warnf("ignoring extra requested content %v, not implemented (yet)", i.extraImageContent) + } rc, _, err := mkcw.Archive(mountPoint, &image, archiveOptions) if err != nil { if _, err2 := i.store.Unmount(i.containerID, false); err2 != nil { @@ -228,9 +232,8 @@ func (i *containerImageRef) extractConfidentialWorkloadFS(options ConfidentialWo } // Extract the container's whole filesystem as if it were a single layer. -// Takes ExtractRootfsOptions as argument which allows caller to configure -// preserve nature of setuid,setgid,sticky and extended attributes -// on extracted rootfs. +// The ExtractRootfsOptions control whether or not to preserve setuid and +// setgid bits and extended attributes on contents. func (i *containerImageRef) extractRootfs(opts ExtractRootfsOptions) (io.ReadCloser, chan error, error) { var uidMap, gidMap []idtools.IDMap mountPoint, err := i.store.Mount(i.containerID, i.mountLabel) @@ -241,6 +244,27 @@ func (i *containerImageRef) extractRootfs(opts ExtractRootfsOptions) (io.ReadClo errChan := make(chan error, 1) go func() { defer close(errChan) + if len(i.extraImageContent) > 0 { + // Abuse the tar format and _prepend_ the synthesized + // data items to the archive we'll get from + // copier.Get(), in a way that looks right to a reader + // as long as we DON'T Close() the tar Writer. + filename, _, _, err := i.makeExtraImageContentDiff(false) + if err != nil { + errChan <- err + return + } + file, err := os.Open(filename) + if err != nil { + errChan <- err + return + } + defer file.Close() + if _, err = io.Copy(pipeWriter, file); err != nil { + errChan <- err + return + } + } if i.idMappingOptions != nil { uidMap, gidMap = convertRuntimeIDMaps(i.idMappingOptions.UIDMap, i.idMappingOptions.GIDMap) } @@ -311,8 +335,8 @@ func (i *containerImageRef) createConfigsAndManifests() (v1.Image, v1.Manifest, dimage.RootFS.Type = docker.TypeLayers dimage.RootFS.DiffIDs = []digest.Digest{} // Only clear the history if we're squashing, otherwise leave it be so - // that we can append entries to it. Clear the parent, too, we no - // longer include its layers and history. + // that we can append entries to it. Clear the parent, too, to reflect + // that we no longer include its layers and history. if i.confidentialWorkload.Convert || i.squash || i.omitHistory { dimage.Parent = "" dimage.History = []docker.V2S2History{} @@ -385,8 +409,9 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System if err != nil { return nil, fmt.Errorf("unable to read layer %q: %w", layerID, err) } - // Walk the list of parent layers, prepending each as we go. 
If we're squashing, - // stop at the layer ID of the top layer, which we won't really be using anyway. + // Walk the list of parent layers, prepending each as we go. If we're squashing + // or making a confidential workload, we're only producing one layer, so stop at + // the layer ID of the top layer, which we won't really be using anyway. for layer != nil { layers = append(append([]string{}, layerID), layers...) layerID = layer.Parent @@ -399,6 +424,14 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System return nil, fmt.Errorf("unable to read layer %q: %w", layerID, err) } } + layer = nil + + // If we're slipping in a synthesized layer, we need to add a placeholder for it + // to the list. + const synthesizedLayerID = "(synthesized layer)" + if len(i.extraImageContent) > 0 && !i.confidentialWorkload.Convert && !i.squash { + layers = append(layers, synthesizedLayerID) + } logrus.Debugf("layer list: %q", layers) // Make a temporary directory to hold blobs. @@ -424,6 +457,8 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System } // Extract each layer and compute its digests, both compressed (if requested) and uncompressed. + var extraImageContentDiff string + var extraImageContentDiffDigest digest.Digest blobLayers := make(map[digest.Digest]blobLayerInfo) for _, layerID := range layers { what := fmt.Sprintf("layer %q", layerID) @@ -434,16 +469,32 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System omediaType := v1.MediaTypeImageLayer dmediaType := docker.V2S2MediaTypeUncompressedLayer // Look up this layer. - layer, err := i.store.Layer(layerID) - if err != nil { - return nil, fmt.Errorf("unable to locate layer %q: %w", layerID, err) + var layerUncompressedDigest digest.Digest + var layerUncompressedSize int64 + if layerID != synthesizedLayerID { + layer, err := i.store.Layer(layerID) + if err != nil { + return nil, fmt.Errorf("unable to locate layer %q: %w", layerID, err) + } + layerID = layer.ID + layerUncompressedDigest = layer.UncompressedDigest + layerUncompressedSize = layer.UncompressedSize + } else { + diffFilename, digest, size, err := i.makeExtraImageContentDiff(true) + if err != nil { + return nil, fmt.Errorf("unable to generate layer for additional content: %w", err) + } + extraImageContentDiff = diffFilename + extraImageContentDiffDigest = digest + layerUncompressedDigest = digest + layerUncompressedSize = size } // If we already know the digest of the contents of parent // layers, reuse their blobsums, diff IDs, and sizes. - if !i.confidentialWorkload.Convert && !i.squash && layerID != i.layerID && layer.UncompressedDigest != "" { - layerBlobSum := layer.UncompressedDigest - layerBlobSize := layer.UncompressedSize - diffID := layer.UncompressedDigest + if !i.confidentialWorkload.Convert && !i.squash && layerID != i.layerID && layerID != synthesizedLayerID && layerUncompressedDigest != "" { + layerBlobSum := layerUncompressedDigest + layerBlobSize := layerUncompressedSize + diffID := layerUncompressedDigest // Note this layer in the manifest, using the appropriate blobsum. 
olayerDescriptor := v1.Descriptor{ MediaType: omediaType, @@ -461,7 +512,7 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System oimage.RootFS.DiffIDs = append(oimage.RootFS.DiffIDs, diffID) dimage.RootFS.DiffIDs = append(dimage.RootFS.DiffIDs, diffID) blobLayers[diffID] = blobLayerInfo{ - ID: layer.ID, + ID: layerID, Size: layerBlobSize, } continue @@ -491,15 +542,22 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System return nil, err } } else { - // If we're up to the final layer, but we don't want to - // include a diff for it, we're done. - if i.emptyLayer && layerID == i.layerID { - continue - } - // Extract this layer, one of possibly many. - rc, err = i.store.Diff("", layerID, diffOptions) - if err != nil { - return nil, fmt.Errorf("extracting %s: %w", what, err) + if layerID != synthesizedLayerID { + // If we're up to the final layer, but we don't want to + // include a diff for it, we're done. + if i.emptyLayer && layerID == i.layerID { + continue + } + // Extract this layer, one of possibly many. + rc, err = i.store.Diff("", layerID, diffOptions) + if err != nil { + return nil, fmt.Errorf("extracting %s: %w", what, err) + } + } else { + // Slip in additional content as an additional layer. + if rc, err = os.Open(extraImageContentDiff); err != nil { + return nil, err + } } } srcHasher := digest.Canonical.Digester() @@ -641,20 +699,19 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System } } - // Calculate base image history for special scenarios - // when base layers does not contains any history. - // We will ignore sanity checks if baseImage history is null - // but still add new history for docker parity. - baseImageHistoryLen := len(oimage.History) // Only attempt to append history if history was not disabled explicitly. if !i.omitHistory { + // Keep track of how many entries the base image's history had + // before we started adding to it. + baseImageHistoryLen := len(oimage.History) appendHistory(i.preEmptyLayers) created := time.Now().UTC() if i.created != nil { created = (*i.created).UTC() } comment := i.historyComment - // Add a comment for which base image is being used + // Add a comment indicating which base image was used, if it wasn't + // just an image ID. if strings.Contains(i.parent, i.fromImageID) && i.fromImageName != i.fromImageID { comment += "FROM " + i.fromImageName } @@ -676,10 +733,24 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System dimage.History = append(dimage.History, dnews) appendHistory(i.postEmptyLayers) - // Sanity check that we didn't just create a mismatch between non-empty layers in the - // history and the number of diffIDs. Following sanity check is ignored if build history - // is disabled explicitly by the user. - // Disable sanity check when baseImageHistory is null for docker parity + // Add a history entry for the extra image content if we added a layer for it. + if extraImageContentDiff != "" { + createdBy := fmt.Sprintf(`/bin/sh -c #(nop) ADD dir:%s in /",`, extraImageContentDiffDigest.Encoded()) + onews := v1.History{ + Created: &created, + CreatedBy: createdBy, + } + oimage.History = append(oimage.History, onews) + dnews := docker.V2S2History{ + Created: created, + CreatedBy: createdBy, + } + dimage.History = append(dimage.History, dnews) + } + + // Confidence check that we didn't just create a mismatch between non-empty layers in the + // history and the number of diffIDs. 
Only applicable if the base image (if there was + // one) provided us at least one entry to use as a starting point. if baseImageHistoryLen != 0 { expectedDiffIDs := expectedOCIDiffIDs(oimage) if len(oimage.RootFS.DiffIDs) != expectedDiffIDs { @@ -876,6 +947,68 @@ func (i *containerImageSource) GetBlob(ctx context.Context, blob types.BlobInfo, return ioutils.NewReadCloserWrapper(layerReadCloser, closer), size, nil } +// makeExtraImageContentDiff creates an archive file containing the contents of +// files named in i.extraImageContent. The footer that marks the end of the +// archive may be omitted. +func (i *containerImageRef) makeExtraImageContentDiff(includeFooter bool) (string, digest.Digest, int64, error) { + cdir, err := i.store.ContainerDirectory(i.containerID) + if err != nil { + return "", "", -1, err + } + diff, err := os.CreateTemp(cdir, "extradiff") + if err != nil { + return "", "", -1, err + } + defer diff.Close() + digester := digest.Canonical.Digester() + counter := ioutils.NewWriteCounter(digester.Hash()) + tw := tar.NewWriter(io.MultiWriter(diff, counter)) + created := time.Now() + if i.created != nil { + created = *i.created + } + for path, contents := range i.extraImageContent { + if err := func() error { + content, err := os.Open(contents) + if err != nil { + return err + } + defer content.Close() + st, err := content.Stat() + if err != nil { + return err + } + if err := tw.WriteHeader(&tar.Header{ + Name: path, + Typeflag: tar.TypeReg, + Mode: 0o644, + ModTime: created, + Size: st.Size(), + }); err != nil { + return err + } + if _, err := io.Copy(tw, content); err != nil { + return err + } + if err := tw.Flush(); err != nil { + return err + } + return nil + }(); err != nil { + return "", "", -1, err + } + } + if !includeFooter { + return diff.Name(), "", -1, err + } + tw.Close() + return diff.Name(), digester.Digest(), counter.Count, err +} + +// makeContainerImageRef creates a containers/image/v5/types.ImageReference +// which is mainly used for representing the working container as a source +// image that can be copied, which is how we commit container to create the +// image. 
func (b *Builder) makeContainerImageRef(options CommitOptions) (*containerImageRef, error) { var name reference.Named container, err := b.store.Container(b.ContainerID) @@ -952,6 +1085,7 @@ func (b *Builder) makeContainerImageRef(options CommitOptions) (*containerImageR postEmptyLayers: b.AppendedEmptyLayers, overrideChanges: options.OverrideChanges, overrideConfig: options.OverrideConfig, + extraImageContent: copyStringStringMap(options.ExtraImageContent), } return ref, nil } diff --git a/tests/commit.bats b/tests/commit.bats index 8caa73b3d53..48c4bace3f2 100644 --- a/tests/commit.bats +++ b/tests/commit.bats @@ -326,3 +326,65 @@ load helpers # instead of name/name because the names are gone assert "$output" =~ $(id -u)/$(id -g) } + +@test "commit-with-extra-files" { + _prefetch busybox + run_buildah from --quiet --pull=false $WITH_POLICY_JSON busybox + cid=$output + createrandom ${BATS_TMPDIR}/randomfile1 + createrandom ${BATS_TMPDIR}/randomfile2 + + for method in --squash=false --squash=true ; do + run_buildah commit $method --add-file ${BATS_TMPDIR}/randomfile1:/randomfile1 $cid with-random-1 + run_buildah commit $method --add-file ${BATS_TMPDIR}/randomfile2:/in-a-subdir/randomfile2 $cid with-random-2 + run_buildah commit $method --add-file ${BATS_TMPDIR}/randomfile1:/randomfile1 --add-file ${BATS_TMPDIR}/randomfile2:/in-a-subdir/randomfile2 $cid with-random-both + + # first one should have the first file and not the second, and the shell should be there + run_buildah from --quiet --pull=false $WITH_POLICY_JSON with-random-1 + cid=$output + run_buildah mount $cid + mountpoint=$output + test -s $mountpoint/bin/sh || test -L $mountpoint/bin/sh + cmp ${BATS_TMPDIR}/randomfile1 $mountpoint/randomfile1 + run stat -c %u:%g $mountpoint + [ $status -eq 0 ] + rootowner=$output + run stat -c %u:%g:%A $mountpoint/randomfile1 + [ $status -eq 0 ] + assert ${rootowner}:-rw-r--r-- + ! test -f $mountpoint/randomfile2 + + # second one should have the second file and not the first, and the shell should be there + run_buildah from --quiet --pull=false $WITH_POLICY_JSON with-random-2 + cid=$output + run_buildah mount $cid + mountpoint=$output + test -s $mountpoint/bin/sh || test -L $mountpoint/bin/sh + cmp ${BATS_TMPDIR}/randomfile2 $mountpoint/in-a-subdir/randomfile2 + run stat -c %u:%g $mountpoint + [ $status -eq 0 ] + rootowner=$output + run stat -c %u:%g:%A $mountpoint/in-a-subdir/randomfile2 + [ $status -eq 0 ] + assert ${rootowner}:-rw-r--r-- + ! 
test -f $mountpoint/randomfile1 + + # third one should have both files, and the shell should be there + run_buildah from --quiet --pull=false $WITH_POLICY_JSON with-random-both + cid=$output + run_buildah mount $cid + mountpoint=$output + test -s $mountpoint/bin/sh || test -L $mountpoint/bin/sh + cmp ${BATS_TMPDIR}/randomfile1 $mountpoint/randomfile1 + run stat -c %u:%g $mountpoint + [ $status -eq 0 ] + rootowner=$output + run stat -c %u:%g:%A $mountpoint/randomfile1 + [ $status -eq 0 ] + assert ${rootowner}:-rw-r--r-- + cmp ${BATS_TMPDIR}/randomfile2 $mountpoint/in-a-subdir/randomfile2 + run stat -c %u:%g:%A $mountpoint/in-a-subdir/randomfile2 + [ $status -eq 0 ] + assert ${rootowner}:-rw-r--r-- + done +} From 47ecb727f735cc686767976f6cef4f4622a2c565 Mon Sep 17 00:00:00 2001 From: Daniel J Walsh Date: Sun, 17 Dec 2023 07:00:58 -0500 Subject: [PATCH 05/24] Document use of containers-transports values in buildah Fixes: https://github.com/containers/buildah/issues/4740 Signed-off-by: Daniel J Walsh Signed-off-by: tomsweeneyredhat --- docs/buildah-build.1.md | 11 ++++++++++- docs/buildah-commit.1.md | 7 ++++++- docs/buildah-push.1.md | 2 +- 3 files changed, 17 insertions(+), 3 deletions(-) diff --git a/docs/buildah-build.1.md b/docs/buildah-build.1.md index fca504fb582..41037e790ad 100644 --- a/docs/buildah-build.1.md +++ b/docs/buildah-build.1.md @@ -708,6 +708,8 @@ Valid _type_ values are: If no type is specified, the value defaults to **local**. Alternatively, instead of a comma-separated sequence, the value of **--output** can be just a destination (in the `**dest** format) (e.g. `--output some-path`, `--output -`) where `--output some-path` is treated as if **type=local** and `--output -` is treated as if **type=tar**. +Note: The **--tag** option can also be used to change the file image format to supported `containers-transports(5)`. + **--pid** *how* Sets the configuration for PID namespaces when handling `RUN` instructions. @@ -872,6 +874,13 @@ Specifies the name which will be assigned to the resulting image if the build process completes successfully. If _imageName_ does not include a registry name component, the registry name *localhost* will be prepended to the image name. +The **--tag** option supports all transports from `containers-transports(5)`. +If no transport is specified, the `container-storage` (i.e., local storage) transport is used. + + __buildah build --tag=oci-archive:./foo.ociarchive .__ + + __buildah build -t quay.io/username/foo .__ + **--target** *stageName* Set the target build stage to build. When building a Containerfile with multiple build stages, --target @@ -1310,7 +1319,7 @@ registries.conf is the configuration file which specifies which container regist Signature policy file. This defines the trust policy for container images. Controls which container registries can be used for image, and whether or not the tool should trust the images. ## SEE ALSO -buildah(1), cpp(1), buildah-login(1), docker-login(1), namespaces(7), pid\_namespaces(7), containers-policy.json(5), containers-registries.conf(5), user\_namespaces(7), crun(1), runc(8), containers.conf(5), oci-hooks(5) +buildah(1), cpp(1), buildah-login(1), docker-login(1), namespaces(7), pid\_namespaces(7), containers-policy.json(5), containers-registries.conf(5), user\_namespaces(7), crun(1), runc(8), containers.conf(5), oci-hooks(5), containers-transports(5) ## FOOTNOTES 1: The Buildah project is committed to inclusivity, a core value of open source. 
The `master` and `slave` mount propagation terminology used here is problematic and divisive, and should be changed. However, these terms are currently used within the Linux kernel and must be used as-is at this time. When the kernel maintainers rectify this usage, Buildah will follow suit immediately. diff --git a/docs/buildah-commit.1.md b/docs/buildah-commit.1.md index 5e1b88eee74..5ea00e89770 100644 --- a/docs/buildah-commit.1.md +++ b/docs/buildah-commit.1.md @@ -14,6 +14,8 @@ with a registry name component, `localhost` will be added to the name. If name, the `buildah images` command will display `` in the `REPOSITORY` and `TAG` columns. +The *Image* value supports all transports from `containers-transports(5)`. If no transport is specified, the `container-storage` (i.e., local storage) transport is used. + ## RETURN VALUE The image ID of the image that was created. On error, 1 is returned and errno is returned. @@ -203,6 +205,9 @@ This example saves an image based on the container. This example saves an image named newImageName based on the container. `buildah commit --rm containerID newImageName` +This example commits to an OCI Directory named /tmp/newImageName based on the container. + `buildah commit --rm containerID oci-archive:/tmp/newImageName` + This example saves an image with no name, removes the working container, and creates a new container using the image's ID. `buildah from $(buildah commit --rm containerID)` @@ -268,4 +273,4 @@ registries.conf is the configuration file which specifies which container regist Signature policy file. This defines the trust policy for container images. Controls which container registries can be used for image, and whether or not the tool should trust the images. ## SEE ALSO -buildah(1), buildah-images(1), containers-policy.json(5), containers-registries.conf(5) +buildah(1), buildah-images(1), containers-policy.json(5), containers-registries.conf(5), containers-transports(5) diff --git a/docs/buildah-push.1.md b/docs/buildah-push.1.md index 1d57bc8bb5f..f83c90b4fda 100644 --- a/docs/buildah-push.1.md +++ b/docs/buildah-push.1.md @@ -181,4 +181,4 @@ registries.conf is the configuration file which specifies which container regist Signature policy file. This defines the trust policy for container images. Controls which container registries can be used for image, and whether or not the tool should trust the images. 
## SEE ALSO -buildah(1), buildah-login(1), containers-policy.json(5), docker-login(1), containers-registries.conf(5), buildah-manifest(1) +buildah(1), buildah-login(1), containers-policy.json(5), docker-login(1), containers-registries.conf(5), buildah-manifest(1), containers-transports(5) From f818763f3601bbe42b7fe611c73af2b5576a25fe Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philip=20Dub=C3=A9?= Date: Sat, 30 Dec 2023 18:10:49 +0000 Subject: [PATCH 06/24] Replace strings.SplitN with strings.Cut MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Introduced in go 1.18: https://github.com/golang/go/issues/46336 [NO NEW TESTS NEEDED] Signed-off-by: Philip Dubé Signed-off-by: tomsweeneyredhat --- imagebuildah/executor.go | 49 +++++------- imagebuildah/stage_executor.go | 45 +++++------ internal/source/add.go | 10 +-- internal/volumes/volumes.go | 133 ++++++++++++++++----------------- 4 files changed, 106 insertions(+), 131 deletions(-) diff --git a/imagebuildah/executor.go b/imagebuildah/executor.go index 917c84f6cee..e0a45988a8a 100644 --- a/imagebuildah/executor.go +++ b/imagebuildah/executor.go @@ -337,13 +337,13 @@ func newExecutor(logger *logrus.Logger, logPrefix string, store storage.Store, o // We have to be careful here - it's either an argument // and value, or just an argument, since they can be // separated by either "=" or whitespace. - list := strings.SplitN(arg.Value, "=", 2) + argName, argValue, hasValue := strings.Cut(arg.Value, "=") if !foundFirstStage { - if len(list) > 1 { - globalArgs[list[0]] = list[1] + if hasValue { + globalArgs[argName] = argValue } } - delete(exec.unusedArgs, list[0]) + delete(exec.unusedArgs, argName) } case "FROM": foundFirstStage = true @@ -496,12 +496,7 @@ func (b *Executor) buildStage(ctx context.Context, cleanupStages map[int]*StageE var labelLine string labels := append([]string{}, b.labels...) for _, labelSpec := range labels { - label := strings.SplitN(labelSpec, "=", 2) - key := label[0] - value := "" - if len(label) > 1 { - value = label[1] - } + key, value, _ := strings.Cut(labelSpec, "=") // check only for an empty key since docker allows empty values if key != "" { labelLine += fmt.Sprintf(" %q=%q", key, value) @@ -523,10 +518,8 @@ func (b *Executor) buildStage(ctx context.Context, cleanupStages map[int]*StageE if len(b.envs) > 0 { var envLine string for _, envSpec := range b.envs { - env := strings.SplitN(envSpec, "=", 2) - key := env[0] - if len(env) > 1 { - value := env[1] + key, value, hasValue := strings.Cut(envSpec, "=") + if hasValue { envLine += fmt.Sprintf(" %q=%q", key, value) } else { return "", nil, false, fmt.Errorf("BUG: unresolved environment variable: %q", key) @@ -844,24 +837,18 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image mountFlags := strings.TrimPrefix(flag, "--mount=") fields := strings.Split(mountFlags, ",") for _, field := range fields { - if strings.HasPrefix(field, "from=") { - fromField := strings.SplitN(field, "=", 2) - if len(fromField) > 1 { - mountFrom := fromField[1] - // Check if this base is a stage if yes - // add base to current stage's dependency tree - // but also confirm if this is not in additional context. 
- if _, ok := b.additionalBuildContexts[mountFrom]; !ok { - // Treat from as a rootfs we need to preserve - b.rootfsMap[mountFrom] = true - if _, ok := dependencyMap[mountFrom]; ok { - // update current stage's dependency info - currentStageInfo := dependencyMap[stage.Name] - currentStageInfo.Needs = append(currentStageInfo.Needs, mountFrom) - } + if mountFrom, hasFrom := strings.CutPrefix(field, "from="); hasFrom { + // Check if this base is a stage if yes + // add base to current stage's dependency tree + // but also confirm if this is not in additional context. + if _, ok := b.additionalBuildContexts[mountFrom]; !ok { + // Treat from as a rootfs we need to preserve + b.rootfsMap[mountFrom] = true + if _, ok := dependencyMap[mountFrom]; ok { + // update current stage's dependency info + currentStageInfo := dependencyMap[stage.Name] + currentStageInfo.Needs = append(currentStageInfo.Needs, mountFrom) } - } else { - return "", nil, fmt.Errorf("invalid value for field `from=`: %q", fromField[1]) } } } diff --git a/imagebuildah/stage_executor.go b/imagebuildah/stage_executor.go index 9398dcef8da..4bb86afc922 100644 --- a/imagebuildah/stage_executor.go +++ b/imagebuildah/stage_executor.go @@ -565,24 +565,23 @@ func (s *StageExecutor) runStageMountPoints(mountList []string) (map[string]inte stageMountPoints := make(map[string]internal.StageMountDetails) for _, flag := range mountList { if strings.Contains(flag, "from") { - arr := strings.SplitN(flag, ",", 2) - if len(arr) < 2 { + tokens := strings.Split(flag, ",") + if len(tokens) < 2 { return nil, fmt.Errorf("Invalid --mount command: %s", flag) } - tokens := strings.Split(flag, ",") - for _, val := range tokens { - kv := strings.SplitN(val, "=", 2) - switch kv[0] { + for _, token := range tokens { + key, val, hasVal := strings.Cut(token, "=") + switch key { case "from": - if len(kv) == 1 { + if !hasVal { return nil, fmt.Errorf("unable to resolve argument for `from=`: bad argument") } - if kv[1] == "" { + if val == "" { return nil, fmt.Errorf("unable to resolve argument for `from=`: from points to an empty value") } - from, fromErr := imagebuilder.ProcessWord(kv[1], s.stage.Builder.Arguments()) + from, fromErr := imagebuilder.ProcessWord(val, s.stage.Builder.Arguments()) if fromErr != nil { - return nil, fmt.Errorf("unable to resolve argument %q: %w", kv[1], fromErr) + return nil, fmt.Errorf("unable to resolve argument %q: %w", val, fromErr) } // If additional buildContext contains this // give priority to that and break if additional @@ -1748,9 +1747,9 @@ func (s *StageExecutor) getBuildArgsResolvedForRun() string { dockerConfig := s.stage.Builder.Config() for _, env := range dockerConfig.Env { - splitv := strings.SplitN(env, "=", 2) - if len(splitv) == 2 { - configuredEnvs[splitv[0]] = splitv[1] + key, val, hasVal := strings.Cut(env, "=") + if hasVal { + configuredEnvs[key] = val } } @@ -2102,8 +2101,8 @@ func (s *StageExecutor) commit(ctx context.Context, createdBy string, emptyLayer s.builder.SetPort(string(p)) } for _, envSpec := range config.Env { - spec := strings.SplitN(envSpec, "=", 2) - s.builder.SetEnv(spec[0], spec[1]) + key, val, _ := strings.Cut(envSpec, "=") + s.builder.SetEnv(key, val) } for _, envSpec := range s.executor.unsetEnvs { s.builder.UnsetEnv(envSpec) @@ -2139,12 +2138,8 @@ func (s *StageExecutor) commit(ctx context.Context, createdBy string, emptyLayer // an intermediate image, in such case we must // honor layer labels if they are configured. 
for _, labelString := range s.executor.layerLabels { - label := strings.SplitN(labelString, "=", 2) - if len(label) > 1 { - s.builder.SetLabel(label[0], label[1]) - } else { - s.builder.SetLabel(label[0], "") - } + labelk, labelv, _ := strings.Cut(labelString, "=") + s.builder.SetLabel(labelk, labelv) } } for k, v := range config.Labels { @@ -2157,12 +2152,8 @@ func (s *StageExecutor) commit(ctx context.Context, createdBy string, emptyLayer s.builder.UnsetLabel(key) } for _, annotationSpec := range s.executor.annotations { - annotation := strings.SplitN(annotationSpec, "=", 2) - if len(annotation) > 1 { - s.builder.SetAnnotation(annotation[0], annotation[1]) - } else { - s.builder.SetAnnotation(annotation[0], "") - } + annotationk, annotationv, _ := strings.Cut(annotationSpec, "=") + s.builder.SetAnnotation(annotationk, annotationv) } if imageRef != nil { logName := transports.ImageName(imageRef) diff --git a/internal/source/add.go b/internal/source/add.go index 8363c62e5a2..64e944554c1 100644 --- a/internal/source/add.go +++ b/internal/source/add.go @@ -26,14 +26,14 @@ func (o *AddOptions) annotations() (map[string]string, error) { annotations := make(map[string]string) for _, unparsed := range o.Annotations { - parsed := strings.SplitN(unparsed, "=", 2) - if len(parsed) != 2 { + key, value, hasValue := strings.Cut(unparsed, "=") + if !hasValue { return nil, fmt.Errorf("invalid annotation %q (expected format is \"key=value\")", unparsed) } - if _, exists := annotations[parsed[0]]; exists { - return nil, fmt.Errorf("annotation %q specified more than once", parsed[0]) + if _, exists := annotations[key]; exists { + return nil, fmt.Errorf("annotation %q specified more than once", key) } - annotations[parsed[0]] = parsed[1] + annotations[key] = value } return annotations, nil diff --git a/internal/volumes/volumes.go b/internal/volumes/volumes.go index a79b8df8c5e..88f0435ccc0 100644 --- a/internal/volumes/volumes.go +++ b/internal/volumes/volumes.go @@ -69,8 +69,8 @@ func GetBindMount(ctx *types.SystemContext, args []string, contextDir string, st fromImage := "" for _, val := range args { - kv := strings.SplitN(val, "=", 2) - switch kv[0] { + argName, argValue, hasArgValue := strings.Cut(val, "=") + switch argName { case "type": // This is already processed continue @@ -80,7 +80,7 @@ func GetBindMount(ctx *types.SystemContext, args []string, contextDir string, st case "ro", "nosuid", "nodev", "noexec": // TODO: detect duplication of these options. // (Is this necessary?) 
- newMount.Options = append(newMount.Options, kv[0]) + newMount.Options = append(newMount.Options, argName) mountReadability = true case "rw", "readwrite": newMount.Options = append(newMount.Options, "rw") @@ -90,27 +90,27 @@ func GetBindMount(ctx *types.SystemContext, args []string, contextDir string, st newMount.Options = append(newMount.Options, "ro") mountReadability = true case "shared", "rshared", "private", "rprivate", "slave", "rslave", "Z", "z", "U": - newMount.Options = append(newMount.Options, kv[0]) + newMount.Options = append(newMount.Options, argName) case "from": - if len(kv) == 1 { - return newMount, "", fmt.Errorf("%v: %w", kv[0], errBadOptionArg) + if !hasArgValue { + return newMount, "", fmt.Errorf("%v: %w", argName, errBadOptionArg) } - fromImage = kv[1] + fromImage = argValue case "bind-propagation": - if len(kv) == 1 { - return newMount, "", fmt.Errorf("%v: %w", kv[0], errBadOptionArg) + if !hasArgValue { + return newMount, "", fmt.Errorf("%v: %w", argName, errBadOptionArg) } - newMount.Options = append(newMount.Options, kv[1]) + newMount.Options = append(newMount.Options, argValue) case "src", "source": - if len(kv) == 1 { - return newMount, "", fmt.Errorf("%v: %w", kv[0], errBadOptionArg) + if !hasArgValue { + return newMount, "", fmt.Errorf("%v: %w", argName, errBadOptionArg) } - newMount.Source = kv[1] + newMount.Source = argValue case "target", "dst", "destination": - if len(kv) == 1 { - return newMount, "", fmt.Errorf("%v: %w", kv[0], errBadOptionArg) + if !hasArgValue { + return newMount, "", fmt.Errorf("%v: %w", argName, errBadOptionArg) } - targetPath := kv[1] + targetPath := argValue if !path.IsAbs(targetPath) { targetPath = filepath.Join(workDir, targetPath) } @@ -124,23 +124,20 @@ func GetBindMount(ctx *types.SystemContext, args []string, contextDir string, st return newMount, "", fmt.Errorf("cannot pass 'relabel' option more than once: %w", errBadOptionArg) } setRelabel = true - if len(kv) != 2 { - return newMount, "", fmt.Errorf("%s mount option must be 'private' or 'shared': %w", kv[0], errBadMntOption) - } - switch kv[1] { + switch argValue { case "private": newMount.Options = append(newMount.Options, "Z") case "shared": newMount.Options = append(newMount.Options, "z") default: - return newMount, "", fmt.Errorf("%s mount option must be 'private' or 'shared': %w", kv[0], errBadMntOption) + return newMount, "", fmt.Errorf("%s mount option must be 'private' or 'shared': %w", argName, errBadMntOption) } case "consistency": // Option for OS X only, has no meaning on other platforms // and can thus be safely ignored. // See also the handling of the equivalent "delegated" and "cached" in ValidateVolumeOpts default: - return newMount, "", fmt.Errorf("%v: %w", kv[0], errBadMntOption) + return newMount, "", fmt.Errorf("%v: %w", argName, errBadMntOption) } } @@ -244,15 +241,15 @@ func GetCacheMount(args []string, store storage.Store, imageMountLabel string, a sharing := "shared" for _, val := range args { - kv := strings.SplitN(val, "=", 2) - switch kv[0] { + argName, argValue, hasArgValue := strings.Cut(val, "=") + switch argName { case "type": // This is already processed continue case "nosuid", "nodev", "noexec": // TODO: detect duplication of these options. // (Is this necessary?) 
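Further down in this cache-mount switch, mode is parsed as octal and uid/gid as plain integers. A small standalone sketch of that numeric handling, with illustrative inputs and names:

```go
package main

import (
	"fmt"
	"strconv"
)

// parseCacheIDs shows the kind of numeric parsing the cache-mount code
// performs: "mode" is octal, "uid" and "gid" are plain base-10 integers.
func parseCacheIDs(modeStr, uidStr, gidStr string) (mode uint64, uid, gid int, err error) {
	mode, err = strconv.ParseUint(modeStr, 8, 32) // base 8: "0755" and "755" parse the same
	if err != nil {
		return 0, 0, 0, fmt.Errorf("unable to parse cache mode: %w", err)
	}
	if uid, err = strconv.Atoi(uidStr); err != nil {
		return 0, 0, 0, fmt.Errorf("unable to parse cache uid: %w", err)
	}
	if gid, err = strconv.Atoi(gidStr); err != nil {
		return 0, 0, 0, fmt.Errorf("unable to parse cache gid: %w", err)
	}
	return mode, uid, gid, nil
}

func main() {
	mode, uid, gid, err := parseCacheIDs("0755", "1000", "1000")
	fmt.Println(mode, uid, gid, err) // 493 1000 1000 <nil> (0755 octal == 493 decimal)
}
```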
- newMount.Options = append(newMount.Options, kv[0]) + newMount.Options = append(newMount.Options, argName) case "rw", "readwrite": newMount.Options = append(newMount.Options, "rw") case "readonly", "ro": @@ -260,33 +257,33 @@ func GetCacheMount(args []string, store storage.Store, imageMountLabel string, a newMount.Options = append(newMount.Options, "ro") setReadOnly = true case "Z", "z": - newMount.Options = append(newMount.Options, kv[0]) + newMount.Options = append(newMount.Options, argName) foundSElinuxLabel = true case "shared", "rshared", "private", "rprivate", "slave", "rslave", "U": - newMount.Options = append(newMount.Options, kv[0]) + newMount.Options = append(newMount.Options, argName) setShared = true case "sharing": - sharing = kv[1] + sharing = argValue case "bind-propagation": - if len(kv) == 1 { - return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadOptionArg) + if !hasArgValue { + return newMount, nil, fmt.Errorf("%v: %w", argName, errBadOptionArg) } - newMount.Options = append(newMount.Options, kv[1]) + newMount.Options = append(newMount.Options, argValue) case "id": - if len(kv) == 1 { - return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadOptionArg) + if !hasArgValue { + return newMount, nil, fmt.Errorf("%v: %w", argName, errBadOptionArg) } - id = kv[1] + id = argValue case "from": - if len(kv) == 1 { - return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadOptionArg) + if !hasArgValue { + return newMount, nil, fmt.Errorf("%v: %w", argName, errBadOptionArg) } - fromStage = kv[1] + fromStage = argValue case "target", "dst", "destination": - if len(kv) == 1 { - return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadOptionArg) + if !hasArgValue { + return newMount, nil, fmt.Errorf("%v: %w", argName, errBadOptionArg) } - targetPath := kv[1] + targetPath := argValue if !path.IsAbs(targetPath) { targetPath = filepath.Join(workDir, targetPath) } @@ -296,36 +293,36 @@ func GetCacheMount(args []string, store storage.Store, imageMountLabel string, a newMount.Destination = targetPath setDest = true case "src", "source": - if len(kv) == 1 { - return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadOptionArg) + if !hasArgValue { + return newMount, nil, fmt.Errorf("%v: %w", argName, errBadOptionArg) } - newMount.Source = kv[1] + newMount.Source = argValue case "mode": - if len(kv) == 1 { - return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadOptionArg) + if !hasArgValue { + return newMount, nil, fmt.Errorf("%v: %w", argName, errBadOptionArg) } - mode, err = strconv.ParseUint(kv[1], 8, 32) + mode, err = strconv.ParseUint(argValue, 8, 32) if err != nil { return newMount, nil, fmt.Errorf("unable to parse cache mode: %w", err) } case "uid": - if len(kv) == 1 { - return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadOptionArg) + if !hasArgValue { + return newMount, nil, fmt.Errorf("%v: %w", argName, errBadOptionArg) } - uid, err = strconv.Atoi(kv[1]) + uid, err = strconv.Atoi(argValue) if err != nil { return newMount, nil, fmt.Errorf("unable to parse cache uid: %w", err) } case "gid": - if len(kv) == 1 { - return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadOptionArg) + if !hasArgValue { + return newMount, nil, fmt.Errorf("%v: %w", argName, errBadOptionArg) } - gid, err = strconv.Atoi(kv[1]) + gid, err = strconv.Atoi(argValue) if err != nil { return newMount, nil, fmt.Errorf("unable to parse cache gid: %w", err) } default: - return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadMntOption) + return newMount, nil, fmt.Errorf("%v: %w", argName, errBadMntOption) } } @@ 
-590,42 +587,42 @@ func GetTmpfsMount(args []string) (specs.Mount, error) { setDest := false for _, val := range args { - kv := strings.SplitN(val, "=", 2) - switch kv[0] { + argName, argValue, hasArgValue := strings.Cut(val, "=") + switch argName { case "type": // This is already processed continue case "ro", "nosuid", "nodev", "noexec": - newMount.Options = append(newMount.Options, kv[0]) + newMount.Options = append(newMount.Options, argName) case "readonly": // Alias for "ro" newMount.Options = append(newMount.Options, "ro") case "tmpcopyup": //the path that is shadowed by the tmpfs mount is recursively copied up to the tmpfs itself. - newMount.Options = append(newMount.Options, kv[0]) + newMount.Options = append(newMount.Options, argName) case "tmpfs-mode": - if len(kv) == 1 { - return newMount, fmt.Errorf("%v: %w", kv[0], errBadOptionArg) + if !hasArgValue { + return newMount, fmt.Errorf("%v: %w", argName, errBadOptionArg) } - newMount.Options = append(newMount.Options, fmt.Sprintf("mode=%s", kv[1])) + newMount.Options = append(newMount.Options, fmt.Sprintf("mode=%s", argValue)) case "tmpfs-size": - if len(kv) == 1 { - return newMount, fmt.Errorf("%v: %w", kv[0], errBadOptionArg) + if !hasArgValue { + return newMount, fmt.Errorf("%v: %w", argName, errBadOptionArg) } - newMount.Options = append(newMount.Options, fmt.Sprintf("size=%s", kv[1])) + newMount.Options = append(newMount.Options, fmt.Sprintf("size=%s", argValue)) case "src", "source": return newMount, errors.New("source is not supported with tmpfs mounts") case "target", "dst", "destination": - if len(kv) == 1 { - return newMount, fmt.Errorf("%v: %w", kv[0], errBadOptionArg) + if !hasArgValue { + return newMount, fmt.Errorf("%v: %w", argName, errBadOptionArg) } - if err := parse.ValidateVolumeCtrDir(kv[1]); err != nil { + if err := parse.ValidateVolumeCtrDir(argValue); err != nil { return newMount, err } - newMount.Destination = kv[1] + newMount.Destination = argValue setDest = true default: - return newMount, fmt.Errorf("%v: %w", kv[0], errBadMntOption) + return newMount, fmt.Errorf("%v: %w", argName, errBadMntOption) } } From 702dfe84f67662b28a2c433a56cac969fd19c6dc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philip=20Dub=C3=A9?= Date: Sat, 30 Dec 2023 17:42:31 +0000 Subject: [PATCH 07/24] Replace map[K]bool with map[K]struct{} where it makes sense MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Philip Dubé Signed-off-by: tomsweeneyredhat --- chroot/run_test.go | 8 +++--- imagebuildah/build.go | 6 ++--- imagebuildah/executor.go | 47 +++++++++++++++++----------------- imagebuildah/stage_executor.go | 4 +-- internal/util/util.go | 5 ++++ pkg/overlay/overlay.go | 10 ++++---- pkg/parse/parse.go | 12 ++++----- 7 files changed, 50 insertions(+), 42 deletions(-) diff --git a/chroot/run_test.go b/chroot/run_test.go index bb883e53091..35f45e7df33 100644 --- a/chroot/run_test.go +++ b/chroot/run_test.go @@ -497,7 +497,7 @@ func TestMounts(t *testing.T) { }) }, func(t *testing.T, report *types.TestReport) { - foundMounts := make(map[string]bool) + foundBindDestinationMount := false for _, mount := range report.Spec.Mounts { if mount.Destination == bind.destination { allRequired := true @@ -516,10 +516,12 @@ func TestMounts(t *testing.T) { anyRejected = true } } - foundMounts[mount.Destination] = allRequired && !anyRejected + if allRequired && !anyRejected { + foundBindDestinationMount = true + } } } - if !foundMounts[bind.destination] { + if !foundBindDestinationMount { 
t.Errorf("added mount for %s not found with the right flags (%v) in %+v", bind.destination, bind.options, report.Spec.Mounts) } }, diff --git a/imagebuildah/build.go b/imagebuildah/build.go index 03081fde9fd..112bd2b62a7 100644 --- a/imagebuildah/build.go +++ b/imagebuildah/build.go @@ -651,7 +651,7 @@ func baseImages(dockerfilenames []string, dockerfilecontents [][]byte, from stri return nil, fmt.Errorf("reading multiple stages: %w", err) } var baseImages []string - nicknames := make(map[string]bool) + nicknames := make(map[string]struct{}) for stageIndex, stage := range stages { node := stage.Node // first line for node != nil { // each line @@ -673,7 +673,7 @@ func baseImages(dockerfilenames []string, dockerfilecontents [][]byte, from stri } } base := child.Next.Value - if base != "" && base != buildah.BaseImageFakeName && !nicknames[base] { + if base != "" && base != buildah.BaseImageFakeName && !internalUtil.SetHas(nicknames, base) { headingArgs := argsMapToSlice(stage.Builder.HeadingArgs) userArgs := argsMapToSlice(stage.Builder.Args) // append heading args so if --build-arg key=value is not @@ -692,7 +692,7 @@ func baseImages(dockerfilenames []string, dockerfilecontents [][]byte, from stri node = node.Next // next line } if stage.Name != strconv.Itoa(stageIndex) { - nicknames[stage.Name] = true + nicknames[stage.Name] = struct{}{} } } return baseImages, nil diff --git a/imagebuildah/executor.go b/imagebuildah/executor.go index e0a45988a8a..8e11bc2a757 100644 --- a/imagebuildah/executor.go +++ b/imagebuildah/executor.go @@ -14,6 +14,7 @@ import ( "github.com/containers/buildah" "github.com/containers/buildah/define" + internalUtil "github.com/containers/buildah/internal/util" "github.com/containers/buildah/pkg/parse" "github.com/containers/buildah/pkg/sshagent" "github.com/containers/buildah/util" @@ -41,19 +42,19 @@ import ( // complain if we're given values for arguments which have no corresponding ARG // instruction in the Dockerfile, since that's usually an indication of a user // error, but for these values we make exceptions and ignore them. -var builtinAllowedBuildArgs = map[string]bool{ - "HTTP_PROXY": true, - "http_proxy": true, - "HTTPS_PROXY": true, - "https_proxy": true, - "FTP_PROXY": true, - "ftp_proxy": true, - "NO_PROXY": true, - "no_proxy": true, - "TARGETARCH": true, - "TARGETOS": true, - "TARGETPLATFORM": true, - "TARGETVARIANT": true, +var builtinAllowedBuildArgs = map[string]struct{}{ + "HTTP_PROXY": {}, + "http_proxy": {}, + "HTTPS_PROXY": {}, + "https_proxy": {}, + "FTP_PROXY": {}, + "ftp_proxy": {}, + "NO_PROXY": {}, + "no_proxy": {}, + "TARGETARCH": {}, + "TARGETOS": {}, + "TARGETPLATFORM": {}, + "TARGETVARIANT": {}, } // Executor is a buildah-based implementation of the imagebuilder.Executor @@ -110,8 +111,8 @@ type Executor struct { forceRmIntermediateCtrs bool imageMap map[string]string // Used to map images that we create to handle the AS construct. containerMap map[string]*buildah.Builder // Used to map from image names to only-created-for-the-rootfs containers. - baseMap map[string]bool // Holds the names of every base image, as given. - rootfsMap map[string]bool // Holds the names of every stage whose rootfs is referenced in a COPY or ADD instruction. + baseMap map[string]struct{} // Holds the names of every base image, as given. + rootfsMap map[string]struct{} // Holds the names of every stage whose rootfs is referenced in a COPY or ADD instruction. 
blobDirectory string excludes []string groupAdd []string @@ -278,8 +279,8 @@ func newExecutor(logger *logrus.Logger, logPrefix string, store storage.Store, o forceRmIntermediateCtrs: options.ForceRmIntermediateCtrs, imageMap: make(map[string]string), containerMap: make(map[string]*buildah.Builder), - baseMap: make(map[string]bool), - rootfsMap: make(map[string]bool), + baseMap: make(map[string]struct{}), + rootfsMap: make(map[string]struct{}), blobDirectory: options.BlobDirectory, unusedArgs: make(map[string]struct{}), capabilities: capabilities, @@ -606,7 +607,7 @@ func markDependencyStagesForTarget(dependencyMap map[string]*stageDependencyInfo } func (b *Executor) warnOnUnsetBuildArgs(stages imagebuilder.Stages, dependencyMap map[string]*stageDependencyInfo, args map[string]string) { - argFound := make(map[string]bool) + argFound := make(map[string]struct{}) for _, stage := range stages { node := stage.Node // first line for node != nil { // each line @@ -617,12 +618,12 @@ func (b *Executor) warnOnUnsetBuildArgs(stages imagebuilder.Stages, dependencyMa if strings.Contains(argName, "=") { res := strings.Split(argName, "=") if res[1] != "" { - argFound[res[0]] = true + argFound[res[0]] = struct{}{} } } argHasValue := true if !strings.Contains(argName, "=") { - argHasValue = argFound[argName] + argHasValue = internalUtil.SetHas(argFound, argName) } if _, ok := args[argName]; !argHasValue && !ok { shouldWarn := true @@ -772,7 +773,7 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image if err != nil { return "", nil, fmt.Errorf("while replacing arg variables with values for format %q: %w", base, err) } - b.baseMap[baseWithArg] = true + b.baseMap[baseWithArg] = struct{}{} logrus.Debugf("base for stage %d: %q", stageIndex, base) // Check if selected base is not an additional // build context and if base is a valid stage @@ -794,7 +795,7 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image // was named using argument values, we might // not record the right value here. rootfs := strings.TrimPrefix(flag, "--from=") - b.rootfsMap[rootfs] = true + b.rootfsMap[rootfs] = struct{}{} logrus.Debugf("rootfs needed for COPY in stage %d: %q", stageIndex, rootfs) // Populate dependency tree and check // if following ADD or COPY needs any other @@ -843,7 +844,7 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image // but also confirm if this is not in additional context. 
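A simplified picture of the dependency bookkeeping referenced in the comment above: every stage gets a record, and a from= reference that names another stage is appended to the referencing stage's Needs list, so that later logic (markDependencyStagesForTarget) can tell which stages are actually required. The stage names below are hypothetical:

```go
package main

import "fmt"

// stageDeps is a stripped-down stand-in for the executor's per-stage
// dependency record (the real stageDependencyInfo carries more fields).
type stageDeps struct {
	Name  string
	Needs []string // names of stages this stage's from= references point at
}

func main() {
	deps := map[string]*stageDeps{
		"builder": {Name: "builder"},
		"final":   {Name: "final"},
	}

	// A RUN --mount=type=bind,from=builder,... line in stage "final"
	// makes "final" depend on "builder".
	mountFrom := "builder"
	if _, isStage := deps[mountFrom]; isStage {
		deps["final"].Needs = append(deps["final"].Needs, mountFrom)
	}

	fmt.Printf("%s needs %v\n", deps["final"].Name, deps["final"].Needs) // final needs [builder]
}
```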
if _, ok := b.additionalBuildContexts[mountFrom]; !ok { // Treat from as a rootfs we need to preserve - b.rootfsMap[mountFrom] = true + b.rootfsMap[mountFrom] = struct{}{} if _, ok := dependencyMap[mountFrom]; ok { // update current stage's dependency info currentStageInfo := dependencyMap[stage.Name] diff --git a/imagebuildah/stage_executor.go b/imagebuildah/stage_executor.go index 4bb86afc922..79d1818de27 100644 --- a/imagebuildah/stage_executor.go +++ b/imagebuildah/stage_executor.go @@ -1043,8 +1043,8 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, moreStages := s.index < len(s.stages)-1 lastStage := !moreStages onlyBaseImage := false - imageIsUsedLater := moreStages && (s.executor.baseMap[stage.Name] || s.executor.baseMap[strconv.Itoa(stage.Position)]) - rootfsIsUsedLater := moreStages && (s.executor.rootfsMap[stage.Name] || s.executor.rootfsMap[strconv.Itoa(stage.Position)]) + imageIsUsedLater := moreStages && (internalUtil.SetHas(s.executor.baseMap, stage.Name) || internalUtil.SetHas(s.executor.baseMap, strconv.Itoa(stage.Position))) + rootfsIsUsedLater := moreStages && (internalUtil.SetHas(s.executor.rootfsMap, stage.Name) || internalUtil.SetHas(s.executor.rootfsMap, strconv.Itoa(stage.Position))) // If the base image's name corresponds to the result of an earlier // stage, make sure that stage has finished building an image, and diff --git a/internal/util/util.go b/internal/util/util.go index 01f4b1051c7..dbcaa2375f7 100644 --- a/internal/util/util.go +++ b/internal/util/util.go @@ -97,3 +97,8 @@ func ExportFromReader(input io.Reader, opts define.BuildOutputOption) error { } return nil } + +func SetHas(m map[string]struct{}, k string) bool { + _, ok := m[k] + return ok +} diff --git a/pkg/overlay/overlay.go b/pkg/overlay/overlay.go index bbcc8eac695..666a0a01ae2 100644 --- a/pkg/overlay/overlay.go +++ b/pkg/overlay/overlay.go @@ -113,10 +113,10 @@ func MountReadOnly(contentDir, source, dest string, rootUID, rootGID int, graphO // findMountProgram finds if any mount program is specified in the graph options. func findMountProgram(graphOptions []string) string { - mountMap := map[string]bool{ - ".mount_program": true, - "overlay.mount_program": true, - "overlay2.mount_program": true, + mountMap := map[string]struct{}{ + ".mount_program": {}, + "overlay.mount_program": {}, + "overlay2.mount_program": {}, } for _, i := range graphOptions { @@ -126,7 +126,7 @@ func findMountProgram(graphOptions []string) string { } key := s[0] val := s[1] - if mountMap[key] { + if _, has := mountMap[key]; has { return val } } diff --git a/pkg/parse/parse.go b/pkg/parse/parse.go index d865f5044f9..65b4a8a9a67 100644 --- a/pkg/parse/parse.go +++ b/pkg/parse/parse.go @@ -1053,19 +1053,19 @@ func Device(device string) (string, string, string, error) { // isValidDeviceMode checks if the mode for device is valid or not. // isValid mode is a composition of r (read), w (write), and m (mknod). 
func isValidDeviceMode(mode string) bool { - var legalDeviceMode = map[rune]bool{ - 'r': true, - 'w': true, - 'm': true, + var legalDeviceMode = map[rune]struct{}{ + 'r': {}, + 'w': {}, + 'm': {}, } if mode == "" { return false } for _, c := range mode { - if !legalDeviceMode[c] { + if _, has := legalDeviceMode[c]; !has { return false } - legalDeviceMode[c] = false + delete(legalDeviceMode, c) } return true } From b8c0e21cf9f9b415618f1c9fea0fb9b21e49360a Mon Sep 17 00:00:00 2001 From: flouthoc Date: Wed, 10 Jan 2024 10:16:30 -0800 Subject: [PATCH 08/24] stage_executor,layers: burst cache if heredoc content is changed When using buildah with `--layers` then buildah must correctly burst layer cache if `heredoc` content is changed. Following is achieved via properly adding `heredoc` content to the history of the built image. Closes: https://github.com/containers/buildah/issues/5225 Signed-off-by: flouthoc Signed-off-by: tomsweeneyredhat --- imagebuildah/stage_executor.go | 9 ++++++++- tests/bud.bats | 34 ++++++++++++++++++++++++++++++++++ 2 files changed, 42 insertions(+), 1 deletion(-) diff --git a/imagebuildah/stage_executor.go b/imagebuildah/stage_executor.go index 79d1818de27..8bef0191f91 100644 --- a/imagebuildah/stage_executor.go +++ b/imagebuildah/stage_executor.go @@ -1725,7 +1725,14 @@ func (s *StageExecutor) getCreatedBy(node *parser.Node, addedContentSummary stri if buildArgs != "" { return "|" + strconv.Itoa(len(strings.Split(buildArgs, " "))) + " " + buildArgs + " /bin/sh -c " + node.Original[4:] } - return "/bin/sh -c " + node.Original[4:] + result := "/bin/sh -c " + node.Original[4:] + if len(node.Heredocs) > 0 { + for _, doc := range node.Heredocs { + heredocContent := strings.TrimSpace(doc.Content) + result = result + "\n" + heredocContent + } + } + return result case "ADD", "COPY": destination := node for destination.Next != nil { diff --git a/tests/bud.bats b/tests/bud.bats index ae299852648..c8cda872b56 100644 --- a/tests/bud.bats +++ b/tests/bud.bats @@ -293,6 +293,40 @@ _EOF run_buildah 1 run myctr ls -l subdir/ } +@test "bud --layers should not hit cache if heredoc is changed" { + local contextdir=${TEST_SCRATCH_DIR}/bud/platform + mkdir -p $contextdir + + cat > $contextdir/Dockerfile << _EOF +FROM alpine +RUN <> /hello +EOF +RUN cat hello +_EOF + + # on first run since there is no cache so `Cache burst` must be printed + run_buildah build $WITH_POLICY_JSON --layers -t source -f $contextdir/Dockerfile + expect_output --substring "Cache burst" + + # on second run since there is cache so `Cache burst` should not be printed + run_buildah build $WITH_POLICY_JSON --layers -t source -f $contextdir/Dockerfile + # output should not contain cache burst + assert "$output" !~ "Cache burst" + + cat > $contextdir/Dockerfile << _EOF +FROM alpine +RUN <> /hello +EOF +RUN cat hello +_EOF + + # on third run since we have changed heredoc so `Cache burst` must be printed. + run_buildah build $WITH_POLICY_JSON --layers -t source -f $contextdir/Dockerfile + expect_output --substring "Cache burst" +} + @test "bud build with heredoc content" { run_buildah build -t heredoc $WITH_POLICY_JSON -f $BUDFILES/heredoc/Containerfile . expect_output --substring "print first line from heredoc" From 6cacd1478223b9cffd9183d87e85784d792c3f63 Mon Sep 17 00:00:00 2001 From: flouthoc Date: Mon, 8 Jan 2024 13:15:58 -0800 Subject: [PATCH 09/24] stage_executor,heredoc: honor interpreter in heredoc If there are any shebang in heredoc file then buildah must honor that. 
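A rough standalone sketch of what honoring the shebang means here: pull the interpreter off a leading #! line so the heredoc can be run as `<interpreter> <script path>` rather than being handed to /bin/sh as-is. This is a simplified stand-in, not the patch's exact parseSheBang:

```go
package main

import (
	"fmt"
	"strings"
)

// interpreterFor returns the interpreter named on a leading "#!" line
// of a heredoc body, or "" if there is none. (The patch's parseSheBang
// looks at the line after the heredoc marker; this sketch just scans
// for the first non-empty line.)
func interpreterFor(body string) string {
	for _, line := range strings.Split(body, "\n") {
		if line == "" {
			continue
		}
		if rest, ok := strings.CutPrefix(line, "#!"); ok {
			return strings.TrimSpace(rest)
		}
		return "" // first non-empty line is not a shebang
	}
	return ""
}

func main() {
	body := "\n#!/usr/bin/env python3\nprint(\"hello from a heredoc\")\n"
	if interp := interpreterFor(body); interp != "" {
		// The RUN step can then be executed as: <interpreter> <script file>
		fmt.Println("run with:", interp)
	}
}
```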
Consider a case of ```Dockerfile FROM python:3.11-slim-bullseye RUN < Signed-off-by: tomsweeneyredhat --- imagebuildah/stage_executor.go | 18 ++++++++++++++++-- tests/bud.bats | 8 ++++++++ tests/bud/heredoc/Containerfile.she_bang | 6 ++++++ 3 files changed, 30 insertions(+), 2 deletions(-) create mode 100644 tests/bud/heredoc/Containerfile.she_bang diff --git a/imagebuildah/stage_executor.go b/imagebuildah/stage_executor.go index 8bef0191f91..b5cf7bd8273 100644 --- a/imagebuildah/stage_executor.go +++ b/imagebuildah/stage_executor.go @@ -683,6 +683,15 @@ func (s *StageExecutor) createNeededHeredocMountsForRun(files []imagebuilder.Fil return mountResult, nil } +func parseSheBang(data string) string { + lines := strings.Split(data, "\n") + if len(lines) > 2 && strings.HasPrefix(lines[1], "#!") { + shebang := strings.TrimLeft(lines[1], "#!") + return shebang + } + return "" +} + // Run executes a RUN instruction using the stage's current working container // as a root directory. func (s *StageExecutor) Run(run imagebuilder.Run, config docker.Config) error { @@ -693,12 +702,17 @@ func (s *StageExecutor) Run(run imagebuilder.Run, config docker.Config) error { if heredoc := buildkitparser.MustParseHeredoc(args[0]); heredoc != nil { if strings.HasPrefix(run.Files[0].Data, "#!") || strings.HasPrefix(run.Files[0].Data, "\n#!") { // This is a single heredoc with a shebang, so create a file - // and run it. + // and run it with program specified in shebang. heredocMount, err := s.createNeededHeredocMountsForRun(run.Files) if err != nil { return err } - args = []string{heredocMount[0].Destination} + shebangArgs := parseSheBang(run.Files[0].Data) + if shebangArgs != "" { + args = []string{shebangArgs + " " + heredocMount[0].Destination} + } else { + args = []string{heredocMount[0].Destination} + } heredocMounts = append(heredocMounts, heredocMount...) } else { args = []string{run.Files[0].Data} diff --git a/tests/bud.bats b/tests/bud.bats index c8cda872b56..7963cf22893 100644 --- a/tests/bud.bats +++ b/tests/bud.bats @@ -354,6 +354,14 @@ _EOF expect_output --substring "this is the output of test10" } +@test "bud build with heredoc content with inline interpreter" { + skip_if_in_container + _prefetch busybox + run_buildah build -t heredoc $WITH_POLICY_JSON -f $BUDFILES/heredoc/Containerfile.she_bang . + expect_output --substring "this is the output of test11" + expect_output --substring "this is the output of test12" +} + @test "bud build with heredoc verify mount leak" { skip_if_in_container _prefetch alpine diff --git a/tests/bud/heredoc/Containerfile.she_bang b/tests/bud/heredoc/Containerfile.she_bang new file mode 100644 index 00000000000..c848ef31987 --- /dev/null +++ b/tests/bud/heredoc/Containerfile.she_bang @@ -0,0 +1,6 @@ +FROM python:3.11-slim-bullseye +RUN < Date: Mon, 8 Jan 2024 09:58:08 -0500 Subject: [PATCH 10/24] docs: fix a couple of typos And don't refer to a filename in an example as a directory in accompanying text. Signed-off-by: Nalin Dahyabhai Signed-off-by: tomsweeneyredhat --- docs/buildah-build.1.md | 2 +- docs/buildah-commit.1.md | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/docs/buildah-build.1.md b/docs/buildah-build.1.md index 41037e790ad..53abe6added 100644 --- a/docs/buildah-build.1.md +++ b/docs/buildah-build.1.md @@ -875,7 +875,7 @@ process completes successfully. If _imageName_ does not include a registry name component, the registry name *localhost* will be prepended to the image name. 
The **--tag** option supports all transports from `containers-transports(5)`. -If no transport is specified, the `container-storage` (i.e., local storage) transport is used. +If no transport is specified, the `containers-storage` (i.e., local storage) transport is used. __buildah build --tag=oci-archive:./foo.ociarchive .__ diff --git a/docs/buildah-commit.1.md b/docs/buildah-commit.1.md index 5ea00e89770..fd87e5867ba 100644 --- a/docs/buildah-commit.1.md +++ b/docs/buildah-commit.1.md @@ -14,7 +14,7 @@ with a registry name component, `localhost` will be added to the name. If name, the `buildah images` command will display `` in the `REPOSITORY` and `TAG` columns. -The *Image* value supports all transports from `containers-transports(5)`. If no transport is specified, the `container-storage` (i.e., local storage) transport is used. +The *image* value supports all transports from `containers-transports(5)`. If no transport is specified, the `containers-storage` (i.e., local storage) transport is used. ## RETURN VALUE The image ID of the image that was created. On error, 1 is returned and errno is returned. @@ -202,11 +202,11 @@ Unset environment variables from the final image. This example saves an image based on the container. `buildah commit containerID newImageName` -This example saves an image named newImageName based on the container. +This example saves an image named newImageName based on the container and removes the working container. `buildah commit --rm containerID newImageName` -This example commits to an OCI Directory named /tmp/newImageName based on the container. - `buildah commit --rm containerID oci-archive:/tmp/newImageName` +This example commits to an OCI archive file named /tmp/newImageName based on the container. + `buildah commit containerID oci-archive:/tmp/newImageName` This example saves an image with no name, removes the working container, and creates a new container using the image's ID. `buildah from $(buildah commit --rm containerID)` From 6fe37a59b4866bcba8638c7a3c174942a31d46bf Mon Sep 17 00:00:00 2001 From: Nalin Dahyabhai Date: Wed, 17 Jan 2024 16:14:43 -0500 Subject: [PATCH 11/24] commit: force omitHistory if the parent has layers but no history If the parent image has layers but no history, force our own omitHistory setting on. The alternative is to create a history that only explains the presence of some of the layers in our output image, which looks broken to everyone who might consume that image, including ourselves if we try to use it as a base image later. Signed-off-by: Nalin Dahyabhai Signed-off-by: tomsweeneyredhat --- image.go | 12 +++++++++++- tests/bud.bats | 6 ++++++ tests/bud/no-history/Dockerfile | 10 ++++++++++ 3 files changed, 27 insertions(+), 1 deletion(-) create mode 100644 tests/bud/no-history/Dockerfile diff --git a/image.go b/image.go index d1c3a3c11d7..be1e239bd8d 100644 --- a/image.go +++ b/image.go @@ -1050,11 +1050,21 @@ func (b *Builder) makeContainerImageRef(options CommitOptions) (*containerImageR } parent := "" + forceOmitHistory := false if b.FromImageID != "" { parentDigest := digest.NewDigestFromEncoded(digest.Canonical, b.FromImageID) if parentDigest.Validate() == nil { parent = parentDigest.String() } + if !options.OmitHistory && len(b.OCIv1.History) == 0 && len(b.OCIv1.RootFS.DiffIDs) != 0 { + // Parent had layers, but no history. 
We shouldn't confuse + // our own confidence checks by adding history for layers + // that we're adding, creating an image with multiple layers, + // only some of which have history entries, which would be + // broken in confusing ways. + b.Logger.Debugf("parent image %q had no history but had %d layers, assuming OmitHistory", b.FromImageID, len(b.OCIv1.RootFS.DiffIDs)) + forceOmitHistory = true + } } ref := &containerImageRef{ @@ -1076,7 +1086,7 @@ func (b *Builder) makeContainerImageRef(options CommitOptions) (*containerImageR preferredManifestType: manifestType, squash: options.Squash, confidentialWorkload: options.ConfidentialWorkloadOptions, - omitHistory: options.OmitHistory, + omitHistory: options.OmitHistory || forceOmitHistory, emptyLayer: options.EmptyLayer && !options.Squash && !options.ConfidentialWorkloadOptions.Convert, idMappingOptions: &b.IDMappingOptions, parent: parent, diff --git a/tests/bud.bats b/tests/bud.bats index 7963cf22893..83bfd876d11 100644 --- a/tests/bud.bats +++ b/tests/bud.bats @@ -4379,6 +4379,12 @@ EOM expect_output "" } +@test "bud-implicit-no-history" { + _prefetch nixery.dev/shell + run_buildah build $WITH_POLICY_JSON --layers=false $BUDFILES/no-history + run_buildah build $WITH_POLICY_JSON --layers=true $BUDFILES/no-history +} + @test "bud with encrypted FROM image" { _prefetch busybox local contextdir=${TEST_SCRATCH_DIR}/tmp diff --git a/tests/bud/no-history/Dockerfile b/tests/bud/no-history/Dockerfile new file mode 100644 index 00000000000..ca69dc54806 --- /dev/null +++ b/tests/bud/no-history/Dockerfile @@ -0,0 +1,10 @@ +# The important thing about that first base image is that it has no history +# entries, but it does have at least one layer. + +FROM nixery.dev/shell AS first-stage +RUN date > /date1.txt +RUN sleep 1 > /sleep1.txt + +FROM first-stage +RUN date > /date2.txt +RUN sleep 1 > /sleep2.txt From bb70cfc50656c7b44f14453494310cb68f2f74ac Mon Sep 17 00:00:00 2001 From: Ed Santiago Date: Wed, 24 Jan 2024 12:29:42 -0700 Subject: [PATCH 12/24] Remove a bad FROM line Some new heredoc test added "FROM blah blah python whatever", an image that (presumably) exists on docker.io but does not exist in our cache. Plus, test was completely broken anyway. It would've found the "this is the output" lines even without python, as part of the verbose build. Solution: don't use python. You don't need python to test a shebang. You can use anything. 'cat' is traditional, but I choose 'rev' because that makes it nearly impossible for the test to match merely due to a build-step echo. Signed-off-by: Ed Santiago Signed-off-by: tomsweeneyredhat --- tests/bud.bats | 5 +++-- tests/bud/heredoc/Containerfile.she_bang | 9 +++++---- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/tests/bud.bats b/tests/bud.bats index 83bfd876d11..3ce72923803 100644 --- a/tests/bud.bats +++ b/tests/bud.bats @@ -358,8 +358,9 @@ _EOF skip_if_in_container _prefetch busybox run_buildah build -t heredoc $WITH_POLICY_JSON -f $BUDFILES/heredoc/Containerfile.she_bang . 
- expect_output --substring "this is the output of test11" - expect_output --substring "this is the output of test12" + expect_output --substring "# +this is the output of test11 +this is the output of test12" } @test "bud build with heredoc verify mount leak" { diff --git a/tests/bud/heredoc/Containerfile.she_bang b/tests/bud/heredoc/Containerfile.she_bang index c848ef31987..45576dc85ca 100644 --- a/tests/bud/heredoc/Containerfile.she_bang +++ b/tests/bud/heredoc/Containerfile.she_bang @@ -1,6 +1,7 @@ -FROM python:3.11-slim-bullseye +FROM alpine RUN < Date: Thu, 25 Jan 2024 16:30:15 +0000 Subject: [PATCH 13/24] Fix a build break on FreeBSD The build breaks trying to build libcontainer/userns which no longer builds on FreeBSD. Fortunately we only need this for userns.RunningInUserNS so this change moves that call to a linux-only file and adds a stub for FreeBSD. [NO NEW TESTS NEEDED] Signed-off-by: Doug Rabson Signed-off-by: tomsweeneyredhat --- add.go | 5 ++--- add_common.go | 8 ++++++++ add_linux.go | 9 +++++++++ 3 files changed, 19 insertions(+), 3 deletions(-) create mode 100644 add_common.go create mode 100644 add_linux.go diff --git a/add.go b/add.go index c61de5a49eb..da18429f627 100644 --- a/add.go +++ b/add.go @@ -23,7 +23,6 @@ import ( "github.com/containers/storage/pkg/idtools" "github.com/hashicorp/go-multierror" digest "github.com/opencontainers/go-digest" - "github.com/opencontainers/runc/libcontainer/userns" "github.com/opencontainers/runtime-spec/specs-go" "github.com/sirupsen/logrus" ) @@ -438,7 +437,7 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption ChmodDirs: nil, ChownFiles: nil, ChmodFiles: nil, - IgnoreDevices: userns.RunningInUserNS(), + IgnoreDevices: runningInUserNS(), } putErr = copier.Put(extractDirectory, extractDirectory, putOptions, io.TeeReader(pipeReader, hasher)) } @@ -579,7 +578,7 @@ func (b *Builder) Add(destination string, extract bool, options AddAndCopyOption ChmodDirs: nil, ChownFiles: nil, ChmodFiles: nil, - IgnoreDevices: userns.RunningInUserNS(), + IgnoreDevices: runningInUserNS(), } putErr = copier.Put(extractDirectory, extractDirectory, putOptions, io.TeeReader(pipeReader, hasher)) } diff --git a/add_common.go b/add_common.go new file mode 100644 index 00000000000..b1eef2c1962 --- /dev/null +++ b/add_common.go @@ -0,0 +1,8 @@ +//go:build !linux +// +build !linux + +package buildah + +func runningInUserNS() bool { + return false +} diff --git a/add_linux.go b/add_linux.go new file mode 100644 index 00000000000..78b74249627 --- /dev/null +++ b/add_linux.go @@ -0,0 +1,9 @@ +package buildah + +import ( + "github.com/opencontainers/runc/libcontainer/userns" +) + +func runningInUserNS() bool { + return userns.RunningInUserNS() +} From 78599048e092a48d9b3ce75a598835d2c0175c1c Mon Sep 17 00:00:00 2001 From: Daniel J Walsh Date: Mon, 29 Jan 2024 07:07:46 -0500 Subject: [PATCH 14/24] Allow users to specify no-dereference We have this same parsing code in 3 maybe 4 places in our sources, Someone needs to go through it all and get this to be parsed in less places. 
Signed-off-by: Daniel J Walsh Signed-off-by: tomsweeneyredhat --- internal/volumes/volumes.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/internal/volumes/volumes.go b/internal/volumes/volumes.go index 88f0435ccc0..f7ac14a59df 100644 --- a/internal/volumes/volumes.go +++ b/internal/volumes/volumes.go @@ -54,6 +54,7 @@ func CacheParent() string { return filepath.Join(tmpdir.GetTempDir(), buildahCacheDir+"-"+strconv.Itoa(unshare.GetRootlessUID())) } +// FIXME: this code needs to be merged with pkg/parse/parse.go ValidateVolumeOpts // GetBindMount parses a single bind mount entry from the --mount flag. // Returns specifiedMount and a string which contains name of image that we mounted otherwise its empty. // Caller is expected to perform unmount of any mounted images @@ -89,7 +90,10 @@ func GetBindMount(ctx *types.SystemContext, args []string, contextDir string, st // Alias for "ro" newMount.Options = append(newMount.Options, "ro") mountReadability = true - case "shared", "rshared", "private", "rprivate", "slave", "rslave", "Z", "z", "U": + case "shared", "rshared", "private", "rprivate", "slave", "rslave", "Z", "z", "U", "no-dereference": + if hasArgValue { + return newMount, "", fmt.Errorf("%v: %w", val, errBadOptionArg) + } newMount.Options = append(newMount.Options, argName) case "from": if !hasArgValue { From 638a825b16e6f1abe8d14132ab7393cd0dca5149 Mon Sep 17 00:00:00 2001 From: Doug Rabson Date: Thu, 25 Jan 2024 16:38:03 +0000 Subject: [PATCH 15/24] Fix FreeBSD version parsing It was generating an error when parsing "14.0-RELEASE-p4" due to a stupid bug when attempting to check that the version part only has two parts. Signed-off-by: Doug Rabson Signed-off-by: tomsweeneyredhat --- pkg/jail/jail.go | 62 ++++++++++++++++++++++++++----------------- pkg/jail/jail_test.go | 42 +++++++++++++++++++++++++++++ 2 files changed, 80 insertions(+), 24 deletions(-) create mode 100644 pkg/jail/jail_test.go diff --git a/pkg/jail/jail.go b/pkg/jail/jail.go index 3ecad963bf3..07651a59834 100644 --- a/pkg/jail/jail.go +++ b/pkg/jail/jail.go @@ -4,6 +4,7 @@ package jail import ( + "fmt" "strconv" "strings" "sync" @@ -187,6 +188,41 @@ func (j *jail) Set(jconf *config) error { return err } +func parseVersion(version string) (string, int, int, int, error) { + // Expected formats: + // .-RELEASE optionally followed by -p + // -STABLE + // -CURRENT + parts := strings.Split(string(version), "-") + if len(parts) < 2 || len(parts) > 3 { + return "", -1, -1, -1, fmt.Errorf("unexpected OS version: %s", version) + } + ver := strings.Split(parts[0], ".") + + if len(ver) != 2 { + return "", -1, -1, -1, fmt.Errorf("unexpected OS version: %s", version) + } + major, err := strconv.Atoi(ver[0]) + if err != nil { + return "", -1, -1, -1, fmt.Errorf("unexpected OS version: %s", version) + } + minor, err := strconv.Atoi(ver[1]) + if err != nil { + return "", -1, -1, -1, fmt.Errorf("unexpected OS version: %s", version) + } + patchlevel := 0 + if len(parts) == 3 { + if parts[1] != "RELEASE" || !strings.HasPrefix(parts[2], "p") { + return "", -1, -1, -1, fmt.Errorf("unexpected OS version: %s", version) + } + patchlevel, err = strconv.Atoi(strings.TrimPrefix(parts[2], "p")) + if err != nil { + return "", -1, -1, -1, fmt.Errorf("unexpected OS version: %s", version) + } + } + return parts[1], major, minor, patchlevel, nil +} + // Return true if its necessary to have a separate jail to own the vnet. 
For // FreeBSD 13.3 and later, we don't need a separate vnet jail since it is // possible to configure the network without either attaching to the container's @@ -194,36 +230,14 @@ func (j *jail) Set(jconf *config) error { // any reason, we fail to parse the OS version, we default to returning true. func NeedVnetJail() bool { needVnetJailOnce.Do(func() { + // FreeBSD 13.3 and later have support for 'ifconfig -j' and 'route -j' needVnetJail = true version, err := util.ReadKernelVersion() if err != nil { logrus.Errorf("failed to determine OS version: %v", err) return } - // Expected formats ".-" optionally - // followed by "-" - parts := strings.Split(string(version), "-") - if len(parts) < 2 { - logrus.Errorf("unexpected OS version: %s", version) - return - } - ver := strings.Split(parts[0], ".") - if len(parts) != 2 { - logrus.Errorf("unexpected OS version: %s", version) - return - } - - // FreeBSD 13.3 and later have support for 'ifconfig -j' and 'route -j' - major, err := strconv.Atoi(ver[0]) - if err != nil { - logrus.Errorf("unexpected OS version: %s", version) - return - } - minor, err := strconv.Atoi(ver[1]) - if err != nil { - logrus.Errorf("unexpected OS version: %s", version) - return - } + _, major, minor, _, err := parseVersion(version) if major > 13 || (major == 13 && minor > 2) { needVnetJail = false } diff --git a/pkg/jail/jail_test.go b/pkg/jail/jail_test.go new file mode 100644 index 00000000000..322d0044ff3 --- /dev/null +++ b/pkg/jail/jail_test.go @@ -0,0 +1,42 @@ +//go:build freebsd +// +build freebsd + +package jail + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestParseVersion(t *testing.T) { + tt := []struct { + version string + shouldFail bool + kind string + major, minor, patchlevel int + }{ + {"14.0-RELEASE", false, "RELEASE", 14, 0, 0}, + {"14.0-RELEASE-p3", false, "RELEASE", 14, 0, 3}, + {"13.2-STABLE", false, "STABLE", 13, 2, 0}, + {"14.0-STABLE", false, "STABLE", 14, 0, 0}, + {"15.0-CURRENT", false, "CURRENT", 15, 0, 0}, + + {"14-RELEASE", true, "", -1, -1, -1}, + {"14.1-STABLE-p1", true, "", -1, -1, -1}, + {"14-RELEASE-p3", true, "", -1, -1, -1}, + } + for _, tc := range tt { + kind, major, minor, patchlevel, err := parseVersion(tc.version) + if tc.shouldFail { + assert.Error(t, err) + } else { + assert.NoError(t, err) + assert.Equal(t, kind, tc.kind) + assert.Equal(t, major, tc.major) + assert.Equal(t, minor, tc.minor) + assert.Equal(t, patchlevel, tc.patchlevel) + } + + } +} From 58820ffd1dc94470bfeb6bf05d668e72d21fd369 Mon Sep 17 00:00:00 2001 From: Daniel J Walsh Date: Sun, 28 Jan 2024 07:25:10 -0500 Subject: [PATCH 16/24] Run codespell on code Signed-off-by: Daniel J Walsh Signed-off-by: tomsweeneyredhat --- Makefile | 2 +- commit.go | 2 +- docs/buildah-build.1.md | 2 +- imagebuildah/executor.go | 2 +- imagebuildah/stage_executor.go | 2 +- tests/bud.bats | 8 ++++---- 6 files changed, 9 insertions(+), 9 deletions(-) diff --git a/Makefile b/Makefile index 7a521601bd5..a100101fc45 100644 --- a/Makefile +++ b/Makefile @@ -123,7 +123,7 @@ gopath: test $(shell pwd) = $(shell cd ../../../../src/github.com/containers/buildah ; pwd) codespell: - codespell -S Makefile,buildah.spec.rpkg,AUTHORS,bin,vendor,.git,go.mod,go.sum,CHANGELOG.md,changelog.txt,seccomp.json,.cirrus.yml,"*.xz,*.gz,*.tar,*.tgz,*ico,*.png,*.1,*.5,*.orig,*.rej" -L passt,bu,uint,iff,od,erro -w + codespell -S 
Makefile,buildah.spec.rpkg,AUTHORS,bin,vendor,.git,go.mod,go.sum,CHANGELOG.md,changelog.txt,seccomp.json,.cirrus.yml,"*.xz,*.gz,*.tar,*.tgz,*ico,*.png,*.1,*.5,*.orig,*.rej" -L secon,passt,bu,uint,iff,od,erro -w .PHONY: validate validate: install.tools diff --git a/commit.go b/commit.go index 222f969a0f0..16ab7a4ce94 100644 --- a/commit.go +++ b/commit.go @@ -119,7 +119,7 @@ type CommitOptions struct { // OverrideConfig is applied. OverrideChanges []string // ExtraImageContent is a map which describes additional content to add - // to the committted image. The map's keys are filesystem paths in the + // to the committed image. The map's keys are filesystem paths in the // image and the corresponding values are the paths of files whose // contents will be used in their place. The contents will be owned by // 0:0 and have mode 0644. Currently only accepts regular files. diff --git a/docs/buildah-build.1.md b/docs/buildah-build.1.md index 53abe6added..505925771e4 100644 --- a/docs/buildah-build.1.md +++ b/docs/buildah-build.1.md @@ -797,7 +797,7 @@ environment variable. `export BUILDAH_RUNTIME=/usr/bin/crun` **--runtime-flag** *flag* -Adds global flags for the container rutime. To list the supported flags, please +Adds global flags for the container runtime. To list the supported flags, please consult the manpages of the selected container runtime. Note: Do not pass the leading `--` to the flag. To pass the runc flag `--log-format json` diff --git a/imagebuildah/executor.go b/imagebuildah/executor.go index 8e11bc2a757..2f4bd1c0f55 100644 --- a/imagebuildah/executor.go +++ b/imagebuildah/executor.go @@ -492,7 +492,7 @@ func (b *Executor) buildStage(ctx context.Context, cleanupStages map[int]*StageE // to the Dockerfile that would provide the same result. // Reason: Docker adds label modification as a last step which can be // processed like regular steps, and if no modification is done to - // layers, its easier to re-use cached layers. + // layers, its easier to reuse cached layers. if len(b.labels) > 0 { var labelLine string labels := append([]string{}, b.labels...) diff --git a/imagebuildah/stage_executor.go b/imagebuildah/stage_executor.go index b5cf7bd8273..5fa4a830d94 100644 --- a/imagebuildah/stage_executor.go +++ b/imagebuildah/stage_executor.go @@ -1524,7 +1524,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, } } - // Note: If the build has squash, we must try to re-use as many layers as possible if cache is found. + // Note: If the build has squash, we must try to reuse as many layers as possible if cache is found. // So only perform commit if it's the lastInstruction of lastStage. if cacheID != "" { logCacheHit(cacheID) diff --git a/tests/bud.bats b/tests/bud.bats index 3ce72923803..5fbe001d283 100644 --- a/tests/bud.bats +++ b/tests/bud.bats @@ -969,7 +969,7 @@ FROM one RUN echo "target stage" _EOF - # with --skip-unused-stages=true no warning should be printed since ARG is decalred in stage which is not used + # with --skip-unused-stages=true no warning should be printed since ARG is declared in stage which is not used run_buildah build $WITH_POLICY_JSON --skip-unused-stages=true -t source -f $contextdir/Dockerfile expect_output --substring "needed stage" expect_output --substring "target stage" @@ -1984,7 +1984,7 @@ _EOF # Reuse cached layers and check if --output still works as expected run_buildah build --output type=local,dest=$mytmpdir/rootfs $WITH_POLICY_JSON -t test-bud -f $mytmpdir/Containerfile . 
ls $mytmpdir/rootfs - # exported rootfs must contain only 'target' from last/final stage and not contain file `rouge` from first stage + # exported rootfs must contain only 'target' from last/final stage and not contain file `rogue` from first stage expect_output --substring 'target' # must not contain rogue from first stage assert "$output" =~ "rogue" @@ -2005,7 +2005,7 @@ _EOF # Reuse cached layers and check if --output still works as expected run_buildah build --output type=local,dest=$mytmpdir/rootfs $WITH_POLICY_JSON -t test-bud -f $mytmpdir/Containerfile . ls $mytmpdir/rootfs - # exported rootfs must contain only 'rouge' even if build from cache. + # exported rootfs must contain only 'rogue' even if build from cache. expect_output --substring 'rogue' } @@ -4615,7 +4615,7 @@ EOF # Build first in Docker format. Whether we do OCI or Docker first shouldn't matter, so we picked one. run_buildah build --iidfile ${TEST_SCRATCH_DIR}/first-docker --format docker --layers --quiet $WITH_POLICY_JSON $BUDFILES/cache-format - # Build in OCI format. Cache should not re-use the same images, so we should get a different image ID. + # Build in OCI format. Cache should not reuse the same images, so we should get a different image ID. run_buildah build --iidfile ${TEST_SCRATCH_DIR}/first-oci --format oci --layers --quiet $WITH_POLICY_JSON $BUDFILES/cache-format # Build in Docker format again. Cache traversal should 100% hit the Docker image, so we should get its image ID. From aa729810acb9efcd1a42b58df661afc91b3fd9df Mon Sep 17 00:00:00 2001 From: krumelmonster Date: Mon, 11 Dec 2023 16:22:21 +0100 Subject: [PATCH 17/24] docs: move footnotes to where they're applicable Followthrough on #5221, with thanks to @krumelmonster: move footnotes on divisive language to exactly where divisive language is used Signed-off-by: Ed Santiago Signed-off-by: tomsweeneyredhat --- docs/buildah-build.1.md | 3 +-- docs/buildah-from.1.md | 4 ++-- docs/buildah-run.1.md | 8 ++++---- 3 files changed, 7 insertions(+), 8 deletions(-) diff --git a/docs/buildah-build.1.md b/docs/buildah-build.1.md index 505925771e4..0fd01b009ba 100644 --- a/docs/buildah-build.1.md +++ b/docs/buildah-build.1.md @@ -1031,12 +1031,11 @@ Set the architecture variant of the image to be pulled. Mount a host directory into containers when executing *RUN* instructions during the build. The `OPTIONS` are a comma delimited list and can be: -[[1]](#Footnote1) * [rw|ro] * [U] * [z|Z|O] - * [`[r]shared`|`[r]slave`|`[r]private`] + * [`[r]shared`|`[r]slave`|`[r]private`] [[1]](#Footnote1) The `CONTAINER-DIR` must be an absolute path such as `/src/docs`. The `HOST-DIR` must be an absolute path as well. Buildah bind-mounts the `HOST-DIR` to the diff --git a/docs/buildah-from.1.md b/docs/buildah-from.1.md index 9303877422e..8ba56e7c172 100644 --- a/docs/buildah-from.1.md +++ b/docs/buildah-from.1.md @@ -587,12 +587,12 @@ Set the architecture variant of the image to be pulled. Create a bind mount. If you specify, ` -v /HOST-DIR:/CONTAINER-DIR`, Buildah bind mounts `/HOST-DIR` in the host to `/CONTAINER-DIR` in the Buildah - container. The `OPTIONS` are a comma delimited list and can be: [[1]](#Footnote1) + container. The `OPTIONS` are a comma delimited list and can be: * [rw|ro] * [U] * [z|Z|O] - * [`[r]shared`|`[r]slave`|`[r]private`|`[r]unbindable`] + * [`[r]shared`|`[r]slave`|`[r]private`|`[r]unbindable`] [[1]](#Footnote1) The `CONTAINER-DIR` must be an absolute path such as `/src/docs`. The `HOST-DIR` must be an absolute path as well. 
Buildah bind-mounts the `HOST-DIR` to the diff --git a/docs/buildah-run.1.md b/docs/buildah-run.1.md index d96ddef3d0d..1162dd27737 100644 --- a/docs/buildah-run.1.md +++ b/docs/buildah-run.1.md @@ -99,7 +99,7 @@ BUILDAH\_ISOLATION environment variable. `export BUILDAH_ISOLATION=oci` Attach a filesystem mount to the container -Current supported mount TYPES are bind, cache, secret and tmpfs. [[1]](#Footnote1) +Current supported mount TYPES are bind, cache, secret and tmpfs. e.g. @@ -119,7 +119,7 @@ Current supported mount TYPES are bind, cache, secret and tmpfs. [[1]](#Foo Options specific to bind: - · bind-propagation: shared, slave, private, rshared, rslave, or rprivate(default). See also mount(2). + · bind-propagation: shared, slave, private, rshared, rslave, or rprivate(default). See also mount(2). [[1]](#Footnote1) . bind-nonrecursive: do not setup a recursive bind mount. By default it is recursive. @@ -290,12 +290,12 @@ process. Create a bind mount. If you specify, ` -v /HOST-DIR:/CONTAINER-DIR`, Buildah bind mounts `/HOST-DIR` in the host to `/CONTAINER-DIR` in the Buildah -container. The `OPTIONS` are a comma delimited list and can be: [[1]](#Footnote1) +container. The `OPTIONS` are a comma delimited list and can be: * [rw|ro] * [U] * [z|Z] - * [`[r]shared`|`[r]slave`|`[r]private`] + * [`[r]shared`|`[r]slave`|`[r]private`] [[1]](#Footnote1) The `CONTAINER-DIR` must be an absolute path such as `/src/docs`. The `HOST-DIR` must be an absolute path as well. Buildah bind-mounts the `HOST-DIR` to the From 70fd07877581dac6afd1b8b89e6588f7574698d6 Mon Sep 17 00:00:00 2001 From: Giuseppe Scrivano Date: Thu, 1 Feb 2024 17:14:13 +0100 Subject: [PATCH 18/24] imagebuildah: fix crash with empty RUN fix a crash when RUN is executed without any argument. Closes: https://github.com/containers/buildah/issues/5312 Signed-off-by: Giuseppe Scrivano Signed-off-by: tomsweeneyredhat --- imagebuildah/stage_executor.go | 8 ++++++-- tests/run.bats | 15 +++++++++++++++ 2 files changed, 21 insertions(+), 2 deletions(-) diff --git a/imagebuildah/stage_executor.go b/imagebuildah/stage_executor.go index 5fa4a830d94..f130e45f34d 100644 --- a/imagebuildah/stage_executor.go +++ b/imagebuildah/stage_executor.go @@ -1735,11 +1735,15 @@ func (s *StageExecutor) getCreatedBy(node *parser.Node, addedContentSummary stri buildArgs := s.getBuildArgsKey() return "/bin/sh -c #(nop) ARG " + buildArgs case "RUN": + shArg := "" buildArgs := s.getBuildArgsResolvedForRun() + if len(node.Original) > 4 { + shArg = node.Original[4:] + } if buildArgs != "" { - return "|" + strconv.Itoa(len(strings.Split(buildArgs, " "))) + " " + buildArgs + " /bin/sh -c " + node.Original[4:] + return "|" + strconv.Itoa(len(strings.Split(buildArgs, " "))) + " " + buildArgs + " /bin/sh -c " + shArg } - result := "/bin/sh -c " + node.Original[4:] + result := "/bin/sh -c " + shArg if len(node.Heredocs) > 0 { for _, doc := range node.Heredocs { heredocContent := strings.TrimSpace(doc.Content) diff --git a/tests/run.bats b/tests/run.bats index dc40c081fea..43b84edc045 100644 --- a/tests/run.bats +++ b/tests/run.bats @@ -952,3 +952,18 @@ _EOF fi done } + +@test "empty run statement doesn't crash" { + skip_if_no_runtime + + _prefetch alpine + + cd ${TEST_SCRATCH_DIR} + + printf 'FROM alpine\nRUN \\\n echo && echo' > Dockerfile + run_buildah bud --pull=false --layers . + + printf 'FROM alpine\nRUN\n echo && echo' > Dockerfile + run_buildah ? bud --pull=false --layers . 
+ expect_output --substring -- "-c requires an argument" +} From 6a8e296ea5d8ff57ea22fff0760fd9573a3fe0f5 Mon Sep 17 00:00:00 2001 From: Daniel J Walsh Date: Wed, 17 Jan 2024 11:03:06 -0500 Subject: [PATCH 19/24] Make buildah match podman for handling of ulimits Podman currently sets the ulimits of nofile and nproc to max in rootless mode, if the user does not override. Buildah on the other hand just passes in the current defaults. Podman build should match podman run, and this will fix that problem. Fixes: https://github.com/containers/buildah/issues/5273 Signed-off-by: Daniel J Walsh Signed-off-by: tomsweeneyredhat --- define/types.go | 3 +++ run_linux.go | 34 ++++++++++++++++++++++++++++++ tests/bud.bats | 15 +++++++++++++ tests/bud/bud.limits/Containerfile | 4 ++++ tests/run.bats | 19 ++++++++++------- 5 files changed, 67 insertions(+), 8 deletions(-) create mode 100644 tests/bud/bud.limits/Containerfile diff --git a/define/types.go b/define/types.go index 250dfbcbbec..1aff0ecc5a7 100644 --- a/define/types.go +++ b/define/types.go @@ -54,6 +54,9 @@ const ( SNP TeeType = "snp" ) +// DefaultRlimitValue is the value set by default for nofile and nproc +const RLimitDefaultValue = uint64(1048576) + // TeeType is a supported trusted execution environment type. type TeeType string diff --git a/run_linux.go b/run_linux.go index 4630ac5efcc..c96fc0bffbe 100644 --- a/run_linux.go +++ b/run_linux.go @@ -854,6 +854,9 @@ func addRlimits(ulimit []string, g *generate.Generator, defaultUlimits []string) var ( ul *units.Ulimit err error + // setup rlimits + nofileSet bool + nprocSet bool ) ulimit = append(defaultUlimits, ulimit...) @@ -862,8 +865,39 @@ func addRlimits(ulimit []string, g *generate.Generator, defaultUlimits []string) return fmt.Errorf("ulimit option %q requires name=SOFT:HARD, failed to be parsed: %w", u, err) } + if strings.ToUpper(ul.Name) == "NOFILE" { + nofileSet = true + } + if strings.ToUpper(ul.Name) == "NPROC" { + nprocSet = true + } g.AddProcessRlimits("RLIMIT_"+strings.ToUpper(ul.Name), uint64(ul.Hard), uint64(ul.Soft)) } + if !nofileSet { + max := define.RLimitDefaultValue + var rlimit unix.Rlimit + if err := unix.Getrlimit(unix.RLIMIT_NOFILE, &rlimit); err == nil { + if max < rlimit.Max || unshare.IsRootless() { + max = rlimit.Max + } + } else { + logrus.Warnf("Failed to return RLIMIT_NOFILE ulimit %q", err) + } + g.AddProcessRlimits("RLIMIT_NOFILE", max, max) + } + if !nprocSet { + max := define.RLimitDefaultValue + var rlimit unix.Rlimit + if err := unix.Getrlimit(unix.RLIMIT_NPROC, &rlimit); err == nil { + if max < rlimit.Max || unshare.IsRootless() { + max = rlimit.Max + } + } else { + logrus.Warnf("Failed to return RLIMIT_NPROC ulimit %q", err) + } + g.AddProcessRlimits("RLIMIT_NPROC", max, max) + } + return nil } diff --git a/tests/bud.bats b/tests/bud.bats index 5fbe001d283..097537dbcd2 100644 --- a/tests/bud.bats +++ b/tests/bud.bats @@ -6602,3 +6602,18 @@ _EOF expect_output --substring "localhost/foo/bar" expect_output --substring "localhost/bar" } + +@test "build test default ulimits" { + skip_if_no_runtime + _prefetch alpine + + run podman run --rm alpine sh -c "echo -n Files=; awk '/open files/{print \$4 \"/\" \$5}' /proc/self/limits" + podman_files=$output + + run podman run --rm alpine sh -c "echo -n Processes=; awk '/processes/{print \$3 \"/\" \$4}' /proc/self/limits" + podman_processes=$output + + CONTAINERS_CONF=/dev/null run_buildah build --no-cache --pull=false $WITH_POLICY_JSON -t foo/bar $BUDFILES/bud.limits + expect_output --substring "$podman_files" + 
expect_output --substring "$podman_processes" +} diff --git a/tests/bud/bud.limits/Containerfile b/tests/bud/bud.limits/Containerfile new file mode 100644 index 00000000000..7d6eedd3586 --- /dev/null +++ b/tests/bud/bud.limits/Containerfile @@ -0,0 +1,4 @@ +# Containerfile +FROM alpine +RUN echo -n "Files="; awk '/open files/{print $4 "/" $5}' /proc/self/limits +RUN echo -n "Processes="; awk '/processes/{print $3 "/" $4}' /proc/self/limits diff --git a/tests/run.bats b/tests/run.bats index 43b84edc045..5e6f917407e 100644 --- a/tests/run.bats +++ b/tests/run.bats @@ -510,7 +510,6 @@ function configure_and_check_user() { } @test "Check if containers run with correct open files/processes limits" { - skip_if_rootless_environment skip_if_no_runtime # we need to not use the list of limits that are set in our default @@ -521,21 +520,25 @@ function configure_and_check_user() { export CONTAINERS_CONF=${TEST_SCRATCH_DIR}/containers.conf _prefetch alpine - maxpids=$(cat /proc/sys/kernel/pid_max) + + run podman run --rm alpine sh -c "awk '/open files/{print \$4 \"/\" \$5}' /proc/self/limits" + podman_files=$output + run_buildah from --quiet --pull=false $WITH_POLICY_JSON alpine cid=$output - run_buildah run $cid awk '/open files/{print $4}' /proc/self/limits - expect_output 1024 "limits: open files (unlimited)" - run_buildah run $cid awk '/processes/{print $3}' /proc/self/limits - expect_output ${maxpids} "limits: processes (unlimited)" + run_buildah run $cid awk '/open files/{print $4 "/" $5}' /proc/self/limits + expect_output "${podman_files}" "limits: podman and buildah should agree on open files" + + run podman run --rm alpine sh -c "awk '/processes/{print \$3 \"/\" \$4}' /proc/self/limits" + podman_processes=$output + run_buildah run $cid awk '/processes/{print $3 "/" $4}' /proc/self/limits + expect_output ${podman_processes} "processes should match podman" run_buildah rm $cid run_buildah from --quiet --ulimit nofile=300:400 --pull=false $WITH_POLICY_JSON alpine cid=$output run_buildah run $cid awk '/open files/{print $4}' /proc/self/limits expect_output "300" "limits: open files (w/file limit)" - run_buildah run $cid awk '/processes/{print $3}' /proc/self/limits - expect_output ${maxpids} "limits: processes (w/file limit)" run_buildah rm $cid run_buildah from --quiet --ulimit nproc=100:200 --ulimit nofile=300:400 --pull=false $WITH_POLICY_JSON alpine From be6b9c3f07558dd20b9d6a69f1b6644db1894445 Mon Sep 17 00:00:00 2001 From: James Fraser Date: Fri, 26 Jan 2024 07:50:33 +1100 Subject: [PATCH 20/24] docs: correct default authfile path Signed-off-by: James Fraser Signed-off-by: Daniel J Walsh Signed-off-by: tomsweeneyredhat --- docs/buildah-build.1.md | 4 ++-- docs/buildah-commit.1.md | 4 ++-- docs/buildah-from.1.md | 6 +++--- docs/buildah-login.1.md | 6 +++--- docs/buildah-logout.1.md | 6 +++--- docs/buildah-manifest-add.1.md | 4 ++-- docs/buildah-pull.1.md | 4 ++-- docs/buildah-push.1.md | 4 ++-- 8 files changed, 19 insertions(+), 19 deletions(-) diff --git a/docs/buildah-build.1.md b/docs/buildah-build.1.md index 0fd01b009ba..e557c7113cf 100644 --- a/docs/buildah-build.1.md +++ b/docs/buildah-build.1.md @@ -51,7 +51,7 @@ Set the ARCH of the image to be built, and that of the base image to be pulled, **--authfile** *path* -Path of the authentication file. Default is ${XDG_RUNTIME_DIR}/containers/auth.json. If XDG_RUNTIME_DIR is not set, the default is /run/containers/$UID/auth.json. This file is created using `buildah login`. +Path of the authentication file. 
Default is ${XDG_RUNTIME_DIR}/containers/auth.json. See containers-auth.json(5) for more information. This file is created using `buildah login`. If the authorization state is not found there, $HOME/.docker/config.json is checked, which is set using `docker login`. @@ -1318,7 +1318,7 @@ registries.conf is the configuration file which specifies which container regist Signature policy file. This defines the trust policy for container images. Controls which container registries can be used for image, and whether or not the tool should trust the images. ## SEE ALSO -buildah(1), cpp(1), buildah-login(1), docker-login(1), namespaces(7), pid\_namespaces(7), containers-policy.json(5), containers-registries.conf(5), user\_namespaces(7), crun(1), runc(8), containers.conf(5), oci-hooks(5), containers-transports(5) +buildah(1), cpp(1), buildah-login(1), docker-login(1), namespaces(7), pid\_namespaces(7), containers-policy.json(5), containers-registries.conf(5), user\_namespaces(7), crun(1), runc(8), containers.conf(5), oci-hooks(5), containers-transports(5), containers-auth.json(5) ## FOOTNOTES 1: The Buildah project is committed to inclusivity, a core value of open source. The `master` and `slave` mount propagation terminology used here is problematic and divisive, and should be changed. However, these terms are currently used within the Linux kernel and must be used as-is at this time. When the kernel maintainers rectify this usage, Buildah will follow suit immediately. diff --git a/docs/buildah-commit.1.md b/docs/buildah-commit.1.md index fd87e5867ba..0d78982ee1a 100644 --- a/docs/buildah-commit.1.md +++ b/docs/buildah-commit.1.md @@ -31,7 +31,7 @@ is also specified. This option can be specified multiple times. **--authfile** *path* -Path of the authentication file. Default is ${XDG_RUNTIME_DIR}/containers/auth.json. If XDG_RUNTIME_DIR is not set, the default is /run/containers/$UID/auth.json. This file is created using `buildah login`. +Path of the authentication file. Default is ${XDG_RUNTIME_DIR}/containers/auth.json. See containers-auth.json(5) for more information. This file is created using `buildah login`. If the authorization state is not found there, $HOME/.docker/config.json is checked, which is set using `docker login`. @@ -273,4 +273,4 @@ registries.conf is the configuration file which specifies which container regist Signature policy file. This defines the trust policy for container images. Controls which container registries can be used for image, and whether or not the tool should trust the images. ## SEE ALSO -buildah(1), buildah-images(1), containers-policy.json(5), containers-registries.conf(5), containers-transports(5) +buildah(1), buildah-images(1), containers-policy.json(5), containers-registries.conf(5), containers-transports(5), containers-auth.json(5) diff --git a/docs/buildah-from.1.md b/docs/buildah-from.1.md index 8ba56e7c172..fd9ba297d77 100644 --- a/docs/buildah-from.1.md +++ b/docs/buildah-from.1.md @@ -17,7 +17,7 @@ Multiple transports are supported: An existing local directory _path_ containing the manifest, layer tarballs, and signatures in individual files. This is a non-standardized format, primarily useful for debugging or noninvasive image inspection. **docker://**_docker-reference_ (Default) - An image in a registry implementing the "Docker Registry HTTP API V2". By default, uses the authorization state in `$XDG_RUNTIME_DIR/containers/auth.json`, which is set using `(buildah login)`. If XDG_RUNTIME_DIR is not set, the default is /run/containers/$UID/auth.json. 
If the authorization state is not found there, `$HOME/.docker/config.json` is checked, which is set using `(docker login)`. + An image in a registry implementing the "Docker Registry HTTP API V2". By default, uses the authorization state in `$XDG_RUNTIME_DIR/containers/auth.json`, which is set using `(buildah login)`. See containers-auth.json(5) for more information. If the authorization state is not found there, `$HOME/.docker/config.json` is checked, which is set using `(docker login)`. If _docker-reference_ does not include a registry name, *localhost* will be consulted first, followed by any registries named in the registries configuration. **docker-archive:**_path_ @@ -55,7 +55,7 @@ Set the ARCH of the image to be pulled to the provided value instead of using th **--authfile** *path* -Path of the authentication file. Default is ${XDG_RUNTIME_DIR}/containers/auth.json. If XDG_RUNTIME_DIR is not set, the default is /run/containers/$UID/auth.json. This file is created using `buildah login`. +Path of the authentication file. Default is ${XDG_RUNTIME_DIR}/containers/auth.json. See containers-auth.json(5) for more information. This file is created using `buildah login`. If the authorization state is not found there, $HOME/.docker/config.json is checked, which is set using `docker login`. @@ -737,7 +737,7 @@ registries.conf is the configuration file which specifies which container regist Signature policy file. This defines the trust policy for container images. Controls which container registries can be used for image, and whether or not the tool should trust the images. ## SEE ALSO -buildah(1), buildah-pull(1), buildah-login(1), docker-login(1), namespaces(7), pid\_namespaces(7), containers-policy.json(5), containers-registries.conf(5), user\_namespaces(7), containers.conf(5) +buildah(1), buildah-pull(1), buildah-login(1), docker-login(1), namespaces(7), pid\_namespaces(7), containers-policy.json(5), containers-registries.conf(5), user\_namespaces(7), containers.conf(5), containers-auth.json(5) ## FOOTNOTES 1: The Buildah project is committed to inclusivity, a core value of open source. The `master` and `slave` mount propagation terminology used here is problematic and divisive, and should be changed. However, these terms are currently used within the Linux kernel and must be used as-is at this time. When the kernel maintainers rectify this usage, Buildah will follow suit immediately. diff --git a/docs/buildah-login.1.md b/docs/buildah-login.1.md index 5eefe11bed3..bd6a81fc9c5 100644 --- a/docs/buildah-login.1.md +++ b/docs/buildah-login.1.md @@ -12,7 +12,7 @@ and password. **buildah login** reads in the username and password from STDIN. The username and password can also be set using the **username** and **password** flags. The path of the authentication file can be specified by the user by setting the **authfile** flag. The default path used is **${XDG\_RUNTIME_DIR}/containers/auth.json**. If XDG_RUNTIME_DIR -is not set, the default is /run/containers/$UID/auth.json. +is not set, the default is /run/user/$UID/containers/auth.json. **buildah [GLOBAL OPTIONS]** @@ -24,7 +24,7 @@ is not set, the default is /run/containers/$UID/auth.json. **--authfile** -Path of the authentication file. Default is ${XDG_RUNTIME_DIR}/containers/auth.json. If XDG_RUNTIME_DIR is not set, the default is /run/user/$UID/containers/auth.json. This file is created using `buildah login`. +Path of the authentication file. Default is ${XDG_RUNTIME_DIR}/containers/auth.json. 
See containers-auth.json(5) for more information. This file is created using `buildah login`. Note: You can also override the default path of the authentication file by setting the REGISTRY\_AUTH\_FILE environment variable. `export REGISTRY_AUTH_FILE=path` @@ -111,4 +111,4 @@ Login Succeeded! ``` ## SEE ALSO -buildah(1), buildah-logout(1) +buildah(1), buildah-logout(1), containers-auth.json(5) diff --git a/docs/buildah-logout.1.md b/docs/buildah-logout.1.md index a3a80717b88..4a1d2729238 100644 --- a/docs/buildah-logout.1.md +++ b/docs/buildah-logout.1.md @@ -9,7 +9,7 @@ buildah\-logout - Logout of a container registry ## DESCRIPTION **buildah logout** logs out of a specified registry server by deleting the cached credentials stored in the **auth.json** file. The path of the authentication file can be overridden by the user by setting the **authfile** flag. -The default path used is **${XDG\_RUNTIME_DIR}/containers/auth.json**. If XDG_RUNTIME_DIR is not set, the default is /run/containers/$UID/auth.json. +The default path used is **${XDG\_RUNTIME_DIR}/containers/auth.json**. See containers-auth.json(5) for more information. All the cached credentials can be removed by setting the **all** flag. **buildah [GLOBAL OPTIONS]** @@ -26,7 +26,7 @@ Remove the cached credentials for all registries in the auth file **--authfile** -Path of the authentication file. Default is ${XDG_RUNTIME_DIR}/containers/auth.json. If XDG_RUNTIME_DIR is not set, the default is /run/user/$UID/containers/auth.json. +Path of the authentication file. Default is ${XDG_RUNTIME_DIR}/containers/auth.json. See containers-auth.json(5) for more information. Note: You can also override the default path of the authentication file by setting the REGISTRY\_AUTH\_FILE environment variable. `export REGISTRY_AUTH_FILE=path` @@ -57,4 +57,4 @@ Remove login credentials for all registries ``` ## SEE ALSO -buildah(1), buildah-login(1) +buildah(1), buildah-login(1), containers-auth.json(5) diff --git a/docs/buildah-manifest-add.1.md b/docs/buildah-manifest-add.1.md index 0f13d115a80..b63df248c56 100644 --- a/docs/buildah-manifest-add.1.md +++ b/docs/buildah-manifest-add.1.md @@ -38,7 +38,7 @@ retrieved from the image's configuration information. **--authfile** *path* -Path of the authentication file. Default is ${XDG_RUNTIME_DIR}/containers/auth.json. If XDG_RUNTIME_DIR is not set, the default is /run/containers/$UID/auth.json. This file is created using `buildah login`. +Path of the authentication file. Default is ${XDG_RUNTIME_DIR}/containers/auth.json. See containers-auth.json(5) for more information. This file is created using `buildah login`. If the authorization state is not found there, $HOME/.docker/config.json is checked, which is set using `docker login`. 
@@ -106,4 +106,4 @@ buildah manifest add --arch arm64 --variant v8 mylist:v1.11 docker://fedora@sha2 ``` ## SEE ALSO -buildah(1), buildah-login(1), buildah-manifest(1), buildah-manifest-create(1), buildah-manifest-remove(1), buildah-manifest-annotate(1), buildah-manifest-inspect(1), buildah-manifest-push(1), buildah-rmi(1), docker-login(1) +buildah(1), buildah-login(1), buildah-manifest(1), buildah-manifest-create(1), buildah-manifest-remove(1), buildah-manifest-annotate(1), buildah-manifest-inspect(1), buildah-manifest-push(1), buildah-rmi(1), docker-login(1), containers-auth.json(5) diff --git a/docs/buildah-pull.1.md b/docs/buildah-pull.1.md index 8eb10adc69d..775224dfda5 100644 --- a/docs/buildah-pull.1.md +++ b/docs/buildah-pull.1.md @@ -30,7 +30,7 @@ Set the ARCH of the image to be pulled to the provided value instead of using th **--authfile** *path* -Path of the authentication file. Default is ${XDG_RUNTIME_DIR}/containers/auth.json. If XDG_RUNTIME_DIR is not set, the default is /run/containers/$UID/auth.json. This file is created using `buildah login`. +Path of the authentication file. Default is ${XDG_RUNTIME_DIR}/containers/auth.json. See containers-auth.json(5) for more information. This file is created using `buildah login`. If the authorization state is not found there, $HOME/.docker/config.json is checked, which is set using `docker login`. @@ -159,4 +159,4 @@ registries.conf is the configuration file which specifies which container regist Signature policy file. This defines the trust policy for container images. Controls which container registries can be used for image, and whether or not the tool should trust the images. ## SEE ALSO -buildah(1), buildah-from(1), buildah-login(1), docker-login(1), containers-policy.json(5), containers-registries.conf(5), containers-transports(5) +buildah(1), buildah-from(1), buildah-login(1), docker-login(1), containers-policy.json(5), containers-registries.conf(5), containers-transports(5), containers-auth.json(5) diff --git a/docs/buildah-push.1.md b/docs/buildah-push.1.md index f83c90b4fda..9281b172be2 100644 --- a/docs/buildah-push.1.md +++ b/docs/buildah-push.1.md @@ -26,7 +26,7 @@ the list or index itself. **--authfile** *path* -Path of the authentication file. Default is ${XDG_RUNTIME_DIR}/containers/auth.json. If XDG_RUNTIME_DIR is not set, the default is /run/containers/$UID/auth.json. This file is created using `buildah login`. +Path of the authentication file. Default is ${XDG_RUNTIME_DIR}/containers/auth.json. See containers-auth.json(5) for more information. This file is created using `buildah login`. If the authorization state is not found there, $HOME/.docker/config.json is checked, which is set using `docker login`. @@ -181,4 +181,4 @@ registries.conf is the configuration file which specifies which container regist Signature policy file. This defines the trust policy for container images. Controls which container registries can be used for image, and whether or not the tool should trust the images. 
 ## SEE ALSO
-buildah(1), buildah-login(1), containers-policy.json(5), docker-login(1), containers-registries.conf(5), buildah-manifest(1), containers-transports(5)
+buildah(1), buildah-login(1), containers-policy.json(5), docker-login(1), containers-registries.conf(5), buildah-manifest(1), containers-transports(5), containers-auth.json(5)

From 2a399d67b3f9ed8a4cb94af796436ba821765146 Mon Sep 17 00:00:00 2001
From: flouthoc
Date: Thu, 1 Feb 2024 12:11:45 -0800
Subject: [PATCH 21/24] build, heredoc: show heredoc summary in build output

Buildah must show a summary of heredoc content in the build output so it's
easy for developers to understand which heredoc got executed; this is similar
to what buildkit does for heredoc content.

See: https://github.com/moby/buildkit/blob/master/frontend/dockerfile/dockerfile2llb/convert.go#L1853

Sample output of buildah
```console
STEP 1/5: FROM docker.io/library/python:latest
STEP 2/5: RUN <> /hello...)
STEP 3/5: RUN python3 <
```

Signed-off-by: flouthoc
Signed-off-by: tomsweeneyredhat
---
 imagebuildah/stage_executor.go | 19 ++++++++++++++++++-
 tests/bud.bats                 |  7 +++++++
 2 files changed, 25 insertions(+), 1 deletion(-)

diff --git a/imagebuildah/stage_executor.go b/imagebuildah/stage_executor.go
index f130e45f34d..7f5f8ceb7c5 100644
--- a/imagebuildah/stage_executor.go
+++ b/imagebuildah/stage_executor.go
@@ -1217,7 +1217,24 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string,
 		}
 		logrus.Debugf("Parsed Step: %+v", *step)
 		if !s.executor.quiet {
-			s.log("%s", step.Original)
+			logMsg := step.Original
+			if len(step.Heredocs) > 0 {
+				summarizeHeredoc := func(doc string) string {
+					doc = strings.TrimSpace(doc)
+					lines := strings.Split(strings.ReplaceAll(doc, "\r\n", "\n"), "\n")
+					summary := lines[0]
+					if len(lines) > 1 {
+						summary += "..."
+					}
+					return summary
+				}
+
+				for _, doc := range node.Heredocs {
+					heredocContent := summarizeHeredoc(doc.Content)
+					logMsg = logMsg + " (" + heredocContent + ")"
+				}
+			}
+			s.log("%s", logMsg)
 		}

 		// Check if there's a --from if the step command is COPY.
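For reference, the summarization logic introduced above is easy to exercise in isolation. The sketch below is illustrative only: the `summarizeHeredoc` helper mirrors the code added in this hunk, while the step text and heredoc body are made-up examples rather than anything taken from the patch or its tests.

```go
package main

import (
	"fmt"
	"strings"
)

// summarizeHeredoc keeps only the first line of a heredoc body and appends
// "..." when more lines follow, so the step log stays a single line.
// This mirrors the helper added in stage_executor.go above.
func summarizeHeredoc(doc string) string {
	doc = strings.TrimSpace(doc)
	lines := strings.Split(strings.ReplaceAll(doc, "\r\n", "\n"), "\n")
	summary := lines[0]
	if len(lines) > 1 {
		summary += "..."
	}
	return summary
}

func main() {
	// Hypothetical step text and heredoc body; in buildah these come from the
	// parsed Dockerfile step and its attached heredocs.
	stepOriginal := "RUN <<EOF"
	body := "echo hello >> /hello\necho world >> /hello"

	// Prints: RUN <<EOF (echo hello >> /hello...)
	fmt.Printf("%s (%s)\n", stepOriginal, summarizeHeredoc(body))
}
```

Running this prints `RUN <<EOF (echo hello >> /hello...)`, which is the single-line form the new build output uses for each heredoc attached to a step.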
diff --git a/tests/bud.bats b/tests/bud.bats index 097537dbcd2..606ea62875f 100644 --- a/tests/bud.bats +++ b/tests/bud.bats @@ -344,6 +344,13 @@ _EOF expect_output --substring "this is the output of test7 part3" expect_output --substring "this is the output of test8 part1" expect_output --substring "this is the output of test8 part2" + + # verify that build output contains summary of heredoc content + expect_output --substring 'RUN <> /file1...)' + expect_output --substring 'RUN python3 < Date: Fri, 2 Feb 2024 10:59:01 -0800 Subject: [PATCH 22/24] tests: retrofit test for heredoc summary Signed-off-by: flouthoc Signed-off-by: tomsweeneyredhat --- tests/bud.bats | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/tests/bud.bats b/tests/bud.bats index 606ea62875f..0b9aed7b623 100644 --- a/tests/bud.bats +++ b/tests/bud.bats @@ -301,18 +301,19 @@ _EOF FROM alpine RUN <> /hello +echo "Cache burst second line" >> /hello EOF RUN cat hello _EOF # on first run since there is no cache so `Cache burst` must be printed run_buildah build $WITH_POLICY_JSON --layers -t source -f $contextdir/Dockerfile - expect_output --substring "Cache burst" + expect_output --substring "Cache burst second line" # on second run since there is cache so `Cache burst` should not be printed run_buildah build $WITH_POLICY_JSON --layers -t source -f $contextdir/Dockerfile # output should not contain cache burst - assert "$output" !~ "Cache burst" + assert "$output" !~ "Cache burst second line" cat > $contextdir/Dockerfile << _EOF FROM alpine @@ -324,7 +325,7 @@ _EOF # on third run since we have changed heredoc so `Cache burst` must be printed. run_buildah build $WITH_POLICY_JSON --layers -t source -f $contextdir/Dockerfile - expect_output --substring "Cache burst" + expect_output --substring "Cache burst add diff" } @test "bud build with heredoc content" { From ae557ac9ca1b7835730c74ba5bc05759b0c80a3e Mon Sep 17 00:00:00 2001 From: Doug Rabson Date: Thu, 15 Feb 2024 08:26:42 +0000 Subject: [PATCH 23/24] Build with CNI support on FreeBSD This is needed until there is netavark support on FreeBSD [NO NEW TESTS NEEDED] Signed-off-by: Doug Rabson Signed-off-by: tomsweeneyredhat --- Makefile | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/Makefile b/Makefile index a100101fc45..f44ce956531 100644 --- a/Makefile +++ b/Makefile @@ -4,6 +4,10 @@ APPARMORTAG := $(shell hack/apparmor_tag.sh) STORAGETAGS := exclude_graphdriver_devicemapper $(shell ./btrfs_tag.sh) $(shell ./btrfs_installed_tag.sh) $(shell ./hack/libsubid_tag.sh) SECURITYTAGS ?= seccomp $(APPARMORTAG) TAGS ?= $(SECURITYTAGS) $(STORAGETAGS) $(shell ./hack/systemd_tag.sh) +ifeq ($(shell uname -s),FreeBSD) +# FreeBSD needs CNI until netavark is supported +TAGS += cni +endif BUILDTAGS += $(TAGS) PREFIX := /usr/local BINDIR := $(PREFIX)/bin From 83a13686656be3d8ab1da563ab508a6975aef555 Mon Sep 17 00:00:00 2001 From: Aditya R Date: Wed, 30 Aug 2023 14:08:23 +0530 Subject: [PATCH 24/24] manifest: addCompression use default from containers.conf Replaces: https://github.com/containers/buildah/pull/5014 Signed-off-by: Aditya R Signed-off-by: Daniel J Walsh Signed-off-by: tomsweeneyredhat --- internal/mkcw/embed/entrypoint_amd64.gz | Bin 405 -> 393 bytes 1 file changed, 0 insertions(+), 0 deletions(-) diff --git a/internal/mkcw/embed/entrypoint_amd64.gz b/internal/mkcw/embed/entrypoint_amd64.gz index 980a5a65806dc6281745ffb689858c4e8dbc88c4..947bed9b688479e757c0a11db7076b8774310d7e 100755 GIT binary patch literal 393 
zcmV;40e1c$iwFP!000021MQm6O2a@9#wRtcMGy7~WDg!?DV{utsy0(vY3IijB?bf^-&ndNGr}esWJU1T_G&Ho~uvO>VV`b;D z@`8pAZMxIG)th$}{ig0(*Y#j?+&1`jX~o`LT;1vQdTZaV+q|EqM-T)-5ClOG1VNBZ z80F=c`6$mjrMwj%(xd$KQoFsbzI$TsdZowxLJ$N&5ClOG1VNDh$n8XAL_D&X6MYf! zOvDL~h>~Q?NxDb~%LpU)GIx@F<&7V<^7}2n52lA+_2{er4~)=O ne!Jg(U}HxvQ2mEsB>bknu3KNME41~8KaBqYCdfACxFG-lYni%Z literal 405 zcmV;G0c!pqiwFp%I$vb~17&V>a(QrXX>N1??V7(%!!QuWFQF|J4D<<*2S#KeCI*DE z0F{W45byvlF=^CdSBWdi#4GfXDhBc-ya$f|gf<{im4W4clJn)e+{KQ!==^#fUxYyb zo)FH!xL#y@wEpZ!J>-!?F$y